From 10be193321cf8419652116536f34f133a8f0d3d3 Mon Sep 17 00:00:00 2001
From: Gennaro Oliva <oliva.g@na.icar.cnr.it>
Date: Thu, 24 Sep 2015 15:46:57 +0200
Subject: [PATCH] Imported Upstream version 15.08.0

---
 BUILD.NOTES                                   |    2 +
 META                                          |   10 +-
 Makefile.am                                   |    2 +
 Makefile.in                                   |   17 +-
 NEWS                                          |  424 +-
 RELEASE_NOTES                                 |  602 ++-
 aclocal.m4                                    |    4 +-
 auxdir/Makefile.am                            |    6 +-
 auxdir/Makefile.in                            |   22 +-
 auxdir/ltmain.sh                              |    4 +-
 auxdir/slurm.m4                               |   16 +-
 auxdir/x_ac_cray.m4                           |   72 +-
 auxdir/x_ac_curl.m4                           |  274 ++
 auxdir/x_ac_json.m4                           |   59 +
 auxdir/x_ac_netloc.m4                         |   79 +
 auxdir/x_ac_xcpu.m4                           |   46 -
 config.h.in                                   |   94 +-
 configure                                     |  918 +++-
 configure.ac                                  |   70 +-
 contribs/Makefile.am                          |    2 +-
 contribs/Makefile.in                          |   18 +-
 contribs/README                               |   12 +
 contribs/cray/Makefile.am                     |    4 +-
 contribs/cray/Makefile.in                     |  267 +-
 contribs/cray/csm/Makefile.am                 |    8 +
 contribs/cray/csm/Makefile.in                 |  604 +++
 contribs/cray/csm/gres.conf.j2                |    9 +
 contribs/cray/csm/slurm.conf.j2               |   59 +
 contribs/cray/csm/slurmconfgen_smw.py         |  271 ++
 contribs/cray/slurm.conf.template             |    2 +-
 contribs/lua/Makefile.in                      |   16 +-
 contribs/mic/Makefile.in                      |   16 +-
 contribs/pam/Makefile.in                      |   16 +-
 contribs/pam_slurm_adopt/Makefile.am          |   42 +
 contribs/pam_slurm_adopt/Makefile.in          |  835 ++++
 contribs/pam_slurm_adopt/README               |  117 +
 contribs/pam_slurm_adopt/helper.c             |  199 +
 contribs/pam_slurm_adopt/helper.h             |   11 +
 contribs/pam_slurm_adopt/pam_slurm_adopt.c    |  543 +++
 contribs/perlapi/Makefile.am                  |    1 +
 contribs/perlapi/Makefile.in                  |   17 +-
 contribs/perlapi/common/msg.h                 |   14 +
 contribs/perlapi/libslurm/Makefile.am         |    3 +
 contribs/perlapi/libslurm/Makefile.in         |   17 +-
 contribs/perlapi/libslurm/perl/Slurm.xs       |  209 +-
 contribs/perlapi/libslurm/perl/alloc.c        |    3 +
 contribs/perlapi/libslurm/perl/bitstr.h       |    5 +-
 contribs/perlapi/libslurm/perl/conf.c         |  212 +-
 contribs/perlapi/libslurm/perl/job.c          |  174 +-
 .../libslurm/perl/lib/Slurm/Constant.pm       |    2 +-
 contribs/perlapi/libslurm/perl/partition.c    |   27 +-
 contribs/perlapi/libslurm/perl/reservation.c  |    2 +-
 contribs/perlapi/libslurm/perl/slurm-perl.h   |    8 +-
 contribs/perlapi/libslurm/perl/step.c         |   12 +-
 contribs/perlapi/libslurm/perl/step_ctx.c     |    8 +-
 contribs/perlapi/libslurm/perl/topo.c         |    8 +-
 contribs/perlapi/libslurm/perl/trigger.c      |    4 +-
 contribs/perlapi/libslurm/perl/typemap        |   10 +
 contribs/perlapi/libslurmdb/Makefile.am       |   17 +-
 contribs/perlapi/libslurmdb/Makefile.in       |   33 +-
 contribs/perlapi/libslurmdb/perl/Slurmdb.xs   |   94 +-
 contribs/perlapi/libslurmdb/perl/cluster.c    |  406 +-
 .../perlapi/libslurmdb/perl/slurmdb-perl.h    |    5 +-
 .../perlapi/libslurmdb/perl/t/06-jobs_get.t   |   37 +
 .../perlapi/libslurmdb/perl/t/07-qos_get.t    |   30 +
 contribs/phpext/Makefile.in                   |   16 +-
 contribs/pmi2/Makefile.am                     |    2 +
 contribs/pmi2/Makefile.in                     |   23 +-
 contribs/pmi2/README                          |   10 +
 contribs/pmi2/pmi2_api.c                      |   54 +
 contribs/pmi2/slurm/pmi2.h                    |   32 +
 contribs/pmi2/testpmi2.c                      |  206 +-
 contribs/pmi2/testpmi2_put.c                  |   75 +-
 contribs/pmi2/testpmixring.c                  |   63 +
 contribs/sgather/Makefile.in                  |   16 +-
 contribs/sgi/Makefile.am                      |   21 +
 contribs/sgi/Makefile.in                      |  817 ++++
 contribs/sgi/README.txt                       |   56 +
 contribs/sgi/netloc_to_topology.c             |  922 ++++
 contribs/sjobexit/Makefile.in                 |   16 +-
 .../slurm_completion_help/slurm_completion.sh | 2097 ++++----
 contribs/slurmdb-direct/Makefile.am           |    1 +
 contribs/slurmdb-direct/Makefile.in           |   17 +-
 contribs/torque/Makefile.in                   |   16 +-
 doc/Makefile.in                               |   16 +-
 doc/html/Makefile.am                          |    6 +-
 doc/html/Makefile.in                          |   22 +-
 doc/html/accounting.shtml                     |  128 +-
 doc/html/accounting_storageplugins.shtml      |   45 +-
 doc/html/acct_gather_energy_plugins.shtml     |   18 +-
 doc/html/acct_gather_profile_plugins.shtml    |   59 +-
 doc/html/authplugins.shtml                    |   28 +-
 doc/html/bb_plugins.shtml                     |  234 +
 doc/html/big_sys.shtml                        |    8 +-
 doc/html/burst_buffer.shtml                   |  273 ++
 doc/html/checkpoint_plugins.shtml             |   27 +-
 doc/html/configurator.html.in                 |    2 +-
 doc/html/core_spec_plugins.shtml              |   20 +-
 doc/html/cpu_management.shtml                 |  404 +-
 doc/html/cray.shtml                           |   63 +-
 doc/html/crypto_plugins.shtml                 |   28 +-
 doc/html/dist_plane.shtml                     |   19 +-
 doc/html/documentation.shtml                  |   10 +-
 doc/html/download.shtml                       |   50 +-
 doc/html/ext_sensorsplugins.shtml             |   22 +-
 doc/html/faq.shtml                            |   76 +-
 doc/html/gres_plugins.shtml                   |   27 +-
 doc/html/high_throughput.shtml                |    2 +-
 doc/html/job_container_plugins.shtml          |   28 +-
 doc/html/job_launch.shtml                     |    4 +-
 doc/html/job_submit_plugins.shtml             |   25 +-
 doc/html/jobacct_gatherplugins.shtml          |   22 +-
 doc/html/jobcompplugins.shtml                 |   29 +-
 doc/html/launch_plugins.shtml                 |   12 +-
 doc/html/man_index.shtml                      |   10 +
 doc/html/mpiplugins.shtml                     |   33 +-
 doc/html/plugins.shtml                        |   56 +-
 doc/html/power_mgmt.shtml                     |  234 +
 doc/html/power_plugins.shtml                  |   96 +
 doc/html/power_save.shtml                     |    5 +-
 doc/html/preempt.shtml                        |   41 +-
 doc/html/preemption_plugins.shtml             |   25 +-
 doc/html/priority_multifactor.shtml           |   71 +-
 doc/html/priority_multifactor2.shtml          |  201 -
 doc/html/priority_plugins.shtml               |   30 +-
 doc/html/proctrack_plugins.shtml              |   49 +-
 doc/html/prolog_epilog.shtml                  |    4 +-
 doc/html/qos.shtml                            |  133 +-
 doc/html/quickstart_admin.shtml               |   17 +-
 doc/html/reservations.shtml                   |   94 +-
 doc/html/resource_limits.shtml                |  150 +-
 doc/html/route_plugin.shtml                   |   23 +-
 doc/html/schedplugins.shtml                   |   33 +-
 doc/html/selectplugins.shtml                  |   34 +-
 doc/html/slurm_ug_agenda.shtml                |   15 +-
 doc/html/slurmctld_plugstack.shtml            |   15 +-
 doc/html/switchplugins.shtml                  |   33 +-
 doc/html/taskplugins.shtml                    |   23 +-
 doc/html/team.shtml                           |   19 +-
 doc/html/topology.shtml                       |   14 +-
 doc/html/topology_plugin.shtml                |   28 +-
 doc/man/Makefile.in                           |   16 +-
 doc/man/man1/Makefile.in                      |   16 +-
 doc/man/man1/sacct.1                          |   61 +-
 doc/man/man1/sacctmgr.1                       |  310 +-
 doc/man/man1/salloc.1                         |  161 +-
 doc/man/man1/sbatch.1                         |  197 +-
 doc/man/man1/sbcast.1                         |    6 +-
 doc/man/man1/scontrol.1                       |  125 +-
 doc/man/man1/sinfo.1                          |  165 +
 doc/man/man1/smap.1                           |   12 +-
 doc/man/man1/sprio.1                          |   10 +-
 doc/man/man1/squeue.1                         |   35 +-
 doc/man/man1/sreport.1                        |   27 +-
 doc/man/man1/srun.1                           |  234 +-
 doc/man/man1/sshare.1                         |    9 +-
 doc/man/man1/sstat.1                          |    5 +
 doc/man/man3/Makefile.am                      |    1 +
 doc/man/man3/Makefile.in                      |   17 +-
 doc/man/man3/slurm_allocate_resources.3       |   20 +-
 doc/man/man3/slurm_free_job_info_msg.3        |   45 +-
 .../slurm_job_cpus_allocated_str_on_node.3    |    1 +
 .../slurm_job_cpus_allocated_str_on_node_id.3 |    1 +
 doc/man/man3/slurm_job_will_run2.3            |    1 +
 doc/man/man5/Makefile.am                      |    2 +
 doc/man/man5/Makefile.in                      |   18 +-
 doc/man/man5/acct_gather.conf.5               |   26 +-
 doc/man/man5/burst_buffer.conf.5              |  216 +
 doc/man/man5/gres.conf.5                      |    7 +-
 doc/man/man5/slurm.conf.5                     |  484 +-
 doc/man/man5/slurmdbd.conf.5                  |    3 +
 doc/man/man8/Makefile.in                      |   16 +-
 etc/init.d.slurm.in                           |    4 +-
 etc/layouts.d.power.conf.example              |    7 +
 etc/layouts.d.power_cpufreq.conf.example      |   15 +
 etc/slurmctld.service.in                      |    5 +-
 etc/slurmd.service.in                         |    5 +-
 etc/slurmdbd.service.in                       |    5 +-
 slurm.spec                                    |   80 +-
 slurm/slurm.h.in                              |  934 +++-
 slurm/slurm_errno.h                           |   14 +-
 slurm/slurmdb.h                               |  503 +-
 src/Makefile.am                               |    1 +
 src/Makefile.in                               |   30 +-
 src/api/Makefile.am                           |   23 +-
 src/api/Makefile.in                           |   60 +-
 src/api/allocate.c                            |   84 +-
 src/api/allocate_msg.c                        |    7 +-
 .../dynalloc/msg.h => api/assoc_mgr_info.c}   |   92 +-
 src/api/block_info.c                          |    5 +-
 src/api/burst_buffer_info.c                   |  410 ++
 src/api/cancel.c                              |    6 +-
 src/api/config_info.c                         |  191 +-
 src/api/init_msg.c                            |    6 +-
 src/api/job_info.c                            |  298 +-
 src/api/job_step_info.c                       |   50 +-
 src/api/layout_info.c                         |  125 +
 src/api/node_info.c                           |   69 +-
 src/api/partition_info.c                      |   37 +-
 src/api/pmi_server.c                          |    2 +-
 src/api/powercap_info.c                       |  149 +
 src/api/reservation_info.c                    |   20 +-
 src/api/slurm_pmi.c                           |   10 +-
 src/api/step_ctx.c                            |    8 +-
 src/api/step_io.c                             |   23 +-
 src/api/step_launch.c                         |   52 +-
 src/api/topo_info.c                           |   44 +-
 src/api/update_config.c                       |   22 +
 src/common/Makefile.am                        |    9 +-
 src/common/Makefile.in                        |  105 +-
 src/common/assoc_mgr.c                        | 2414 ++++++---
 src/common/assoc_mgr.h                        |  276 +-
 src/common/callerid.c                         |  417 ++
 src/common/callerid.h                         |   55 +
 src/common/cpu_frequency.c                    | 1526 ++++--
 src/common/cpu_frequency.h                    |  109 +-
 src/common/eio.c                              |   34 +-
 src/common/eio.h                              |    2 +-
 src/common/entity.c                           |  181 +-
 src/common/entity.h                           |   92 +-
 src/common/env.c                              |  130 +-
 src/common/env.h                              |   14 +-
 src/common/forward.c                          |  368 +-
 src/common/gres.c                             | 1020 +++-
 src/common/gres.h                             |   95 +-
 src/common/hostlist.h                         |    2 +-
 src/common/job_options.c                      |    3 +-
 src/common/job_resources.c                    |  279 +-
 src/common/job_resources.h                    |    8 +
 src/common/layouts_mgr.c                      | 1852 ++++++-
 src/common/layouts_mgr.h                      |  377 +-
 src/common/log.c                              |    9 +-
 src/common/mapping.c                          |  397 ++
 .../dynalloc/allocator.h => common/mapping.h} |   62 +-
 src/common/msg_aggr.c                         |  493 ++
 src/common/msg_aggr.h                         |   62 +
 src/common/net.c                              |    5 +-
 src/common/node_conf.c                        |   87 +-
 src/common/node_conf.h                        |   33 +-
 src/common/node_select.c                      |    8 +-
 src/common/pack.c                             |   92 +-
 src/common/pack.h                             |   39 +-
 src/common/parse_config.c                     |  291 +-
 src/common/parse_config.h                     |  119 +-
 src/common/parse_time.c                       |   19 +-
 src/common/parse_value.c                      |    9 +-
 src/common/parse_value.h                      |    3 +-
 src/common/plugin.c                           |   60 +-
 src/common/plugin.h                           |    1 +
 src/common/plugrack.c                         |    2 +-
 src/common/plugstack.c                        |    6 +-
 src/common/power.c                            |  224 +
 .../slurmctld_dynalloc.c => common/power.h}   |   71 +-
 src/common/print_fields.c                     |   89 +-
 src/common/print_fields.h                     |    3 +-
 src/common/proc_args.c                        |  419 +-
 src/common/proc_args.h                        |   39 +-
 src/common/read_config.c                      |  234 +-
 src/common/read_config.h                      |   11 +-
 src/common/slurm_accounting_storage.c         |  107 +-
 src/common/slurm_accounting_storage.h         |   67 +-
 src/common/slurm_acct_gather_energy.c         |   72 +-
 src/common/slurm_acct_gather_energy.h         |    5 +-
 src/common/slurm_acct_gather_profile.c        |   84 +-
 src/common/slurm_acct_gather_profile.h        |   57 +-
 src/common/slurm_cred.c                       |  156 +-
 src/common/slurm_cred.h                       |    1 +
 src/common/slurm_errno.c                      |   25 +-
 src/common/slurm_ext_sensors.c                |   54 +-
 src/common/slurm_jobacct_gather.c             |  239 +-
 src/common/slurm_jobacct_gather.h             |   14 +-
 src/common/slurm_priority.c                   |    4 +-
 src/common/slurm_priority.h                   |    2 +-
 src/common/slurm_protocol_api.c               |  684 ++-
 src/common/slurm_protocol_api.h               |  210 +-
 src/common/slurm_protocol_common.h            |   13 +-
 src/common/slurm_protocol_defs.c              | 1020 +++-
 src/common/slurm_protocol_defs.h              |  202 +-
 src/common/slurm_protocol_interface.h         |  230 +-
 src/common/slurm_protocol_pack.c              | 2763 +++++++++--
 src/common/slurm_protocol_pack.h              |    5 +
 .../slurm_protocol_socket_implementation.c    |  310 +-
 src/common/slurm_protocol_util.c              |   33 +-
 src/common/slurm_resource_info.c              |    6 +-
 src/common/slurm_route.c                      |   22 +-
 src/common/slurm_selecttype_info.c            |   10 +-
 src/common/slurm_step_layout.c                |  257 +-
 src/common/slurm_step_layout.h                |    2 +-
 src/common/slurm_time.c                       |  178 +
 src/common/slurm_time.h                       |   50 +
 src/common/slurm_topology.c                   |    7 +
 src/common/slurm_topology.h                   |   30 +-
 src/common/slurm_xlator.h                     |   14 +
 src/common/slurmdb_defs.c                     | 1825 +++++--
 src/common/slurmdb_defs.h                     |  152 +-
 src/common/slurmdb_pack.c                     | 2984 +++++++++---
 src/common/slurmdb_pack.h                     |   57 +-
 src/common/slurmdbd_defs.c                    |  492 +-
 src/common/slurmdbd_defs.h                    |   79 +-
 src/common/stepd_api.c                        |   20 +-
 src/common/stepd_api.h                        |    6 +-
 src/common/timers.c                           |    5 +-
 src/common/timers.h                           |    2 +-
 src/common/working_cluster.c                  |   13 -
 src/common/xstring.c                          |   15 +-
 src/common/xstring.h                          |    4 +-
 src/database/Makefile.in                      |   16 +-
 src/database/mysql_common.c                   |   10 +-
 src/db_api/Makefile.am                        |    1 +
 src/db_api/Makefile.in                        |   22 +-
 src/db_api/assoc_functions.c                  |   30 +-
 src/db_api/cluster_report_functions.c         |  104 +-
 src/db_api/extra_get_functions.c              |    6 +-
 src/db_api/job_report_functions.c             |   88 +-
 .../deallocate.h => db_api/tres_functions.c}  |   74 +-
 src/db_api/usage_functions.c                  |    2 +-
 src/db_api/user_report_functions.c            |   34 +-
 src/layouts/Makefile.am                       |    4 +
 src/layouts/Makefile.in                       |  779 +++
 src/layouts/power/Makefile.am                 |   13 +
 src/layouts/power/Makefile.in                 |  825 ++++
 src/layouts/power/cpufreq.c                   |  167 +
 src/layouts/power/default.c                   |  119 +
 src/layouts/unit/Makefile.am                  |   10 +
 src/layouts/unit/Makefile.in                  |  809 ++++
 src/layouts/unit/default.c                    |  225 +
 src/plugins/Makefile.am                       |    4 +-
 src/plugins/Makefile.in                       |   20 +-
 src/plugins/accounting_storage/Makefile.in    |   16 +-
 .../accounting_storage/common/Makefile.in     |   16 +-
 .../accounting_storage/common/common_as.c     |   98 +-
 .../accounting_storage/common/common_as.h     |    3 +-
 .../accounting_storage/filetxt/Makefile.in    |   16 +-
 .../filetxt/accounting_storage_filetxt.c      |   51 +-
 .../filetxt/filetxt_jobacct_process.c         |   23 +-
 .../accounting_storage/mysql/Makefile.am      |    2 +
 .../accounting_storage/mysql/Makefile.in      |   54 +-
 .../mysql/accounting_storage_mysql.c          |  935 ++--
 .../mysql/accounting_storage_mysql.h          |   18 +-
 .../accounting_storage/mysql/as_mysql_acct.c  |   50 +-
 .../mysql/as_mysql_archive.c                  | 1544 +++---
 .../accounting_storage/mysql/as_mysql_assoc.c |  970 ++--
 .../accounting_storage/mysql/as_mysql_assoc.h |   10 +-
 .../mysql/as_mysql_cluster.c                  |  200 +-
 .../mysql/as_mysql_cluster.h                  |    5 +-
 .../mysql/as_mysql_convert.c                  |  953 ++++
 .../mysql/as_mysql_convert.h}                 |   42 +-
 .../accounting_storage/mysql/as_mysql_job.c   |  147 +-
 .../mysql/as_mysql_jobacct_process.c          |   89 +-
 .../mysql/as_mysql_problems.c                 |   26 +-
 .../mysql/as_mysql_problems.h                 |    6 +-
 .../accounting_storage/mysql/as_mysql_qos.c   |  528 +-
 .../mysql/as_mysql_resource.c                 |    8 +
 .../accounting_storage/mysql/as_mysql_resv.c  |   73 +-
 .../mysql/as_mysql_rollup.c                   | 1321 +++--
 .../mysql/as_mysql_rollup.h                   |   14 +-
 .../accounting_storage/mysql/as_mysql_tres.c  |  304 ++
 .../accounting_storage/mysql/as_mysql_tres.h} |   21 +-
 .../accounting_storage/mysql/as_mysql_usage.c |  436 +-
 .../accounting_storage/mysql/as_mysql_usage.h |    1 +
 .../accounting_storage/mysql/as_mysql_user.c  |  118 +-
 .../accounting_storage/mysql/as_mysql_wckey.c |   26 +-
 .../accounting_storage/none/Makefile.in       |   16 +-
 .../none/accounting_storage_none.c            |   50 +-
 .../accounting_storage/slurmdbd/Makefile.in   |   16 +-
 .../slurmdbd/accounting_storage_slurmdbd.c    |  221 +-
 src/plugins/acct_gather_energy/Makefile.am    |    2 +-
 src/plugins/acct_gather_energy/Makefile.in    |   18 +-
 .../acct_gather_energy/cray/Makefile.am       |   15 +
 .../acct_gather_energy/cray/Makefile.in       |  814 ++++
 .../cray/acct_gather_energy_cray.c            |  352 ++
 .../acct_gather_energy/ipmi/Makefile.in       |   16 +-
 .../ipmi/acct_gather_energy_ipmi.c            |  695 ++-
 .../ipmi/acct_gather_energy_ipmi_config.c     |    2 +
 .../ipmi/acct_gather_energy_ipmi_config.h     |    1 +
 .../acct_gather_energy/none/Makefile.in       |   16 +-
 .../none/acct_gather_energy_none.c            |   10 +-
 .../acct_gather_energy/rapl/Makefile.in       |   16 +-
 .../rapl/acct_gather_energy_rapl.c            |   73 +-
 .../acct_gather_filesystem/Makefile.in        |   16 +-
 .../acct_gather_filesystem/lustre/Makefile.in |   16 +-
 .../lustre/acct_gather_filesystem_lustre.c    |  103 +-
 .../acct_gather_filesystem/none/Makefile.in   |   16 +-
 .../none/acct_gather_filesystem_none.c        |   10 +-
 .../acct_gather_infiniband/Makefile.in        |   16 +-
 .../acct_gather_infiniband/none/Makefile.in   |   16 +-
 .../none/acct_gather_infiniband_none.c        |   11 +-
 .../acct_gather_infiniband/ofed/Makefile.in   |   16 +-
 .../ofed/acct_gather_infiniband_ofed.c        |   72 +-
 src/plugins/acct_gather_profile/Makefile.in   |   16 +-
 .../acct_gather_profile/hdf5/Makefile.in      |   16 +-
 .../hdf5/acct_gather_profile_hdf5.c           |  391 +-
 .../acct_gather_profile/hdf5/hdf5_api.c       | 1851 +------
 .../acct_gather_profile/hdf5/hdf5_api.h       |  293 +-
 .../hdf5/sh5util/Makefile.am                  |    6 +-
 .../hdf5/sh5util/Makefile.in                  |  224 +-
 .../hdf5/sh5util/libsh5util_old/Makefile.am   |   13 +
 .../hdf5/sh5util/libsh5util_old/Makefile.in   |  756 +++
 .../hdf5/sh5util/libsh5util_old/hdf5_api.c    | 1868 +++++++
 .../hdf5/sh5util/libsh5util_old/hdf5_api.h    |  361 ++
 .../hdf5/sh5util/libsh5util_old/sh5util.c     | 1571 ++++++
 .../sh5util/libsh5util_old/sh5util_old.h}     |   32 +-
 .../hdf5/sh5util/sh5util.c                    | 2254 ++++-----
 .../hdf5/sh5util/sh5util.h                    |   70 +
 .../acct_gather_profile/none/Makefile.in      |   16 +-
 .../none/acct_gather_profile_none.c           |   31 +-
 src/plugins/auth/Makefile.in                  |   16 +-
 src/plugins/auth/authd/Makefile.in            |   16 +-
 src/plugins/auth/authd/auth_authd.c           |   17 +-
 src/plugins/auth/munge/Makefile.in            |   16 +-
 src/plugins/auth/munge/auth_munge.c           |   42 +-
 src/plugins/auth/none/Makefile.in             |   16 +-
 src/plugins/auth/none/auth_none.c             |   12 +-
 src/plugins/burst_buffer/Makefile.am          |    3 +
 src/plugins/burst_buffer/Makefile.in          |  778 +++
 src/plugins/burst_buffer/common/Makefile.am   |   13 +
 src/plugins/burst_buffer/common/Makefile.in   |  756 +++
 .../burst_buffer/common/burst_buffer_common.c | 1527 ++++++
 .../burst_buffer/common/burst_buffer_common.h |  363 ++
 src/plugins/burst_buffer/cray/Makefile.am     |   22 +
 src/plugins/burst_buffer/cray/Makefile.in     |  824 ++++
 .../burst_buffer/cray/burst_buffer_cray.c     | 4313 +++++++++++++++++
 src/plugins/burst_buffer/cray/dw_wlm_cli      |   49 +
 src/plugins/burst_buffer/generic/Makefile.am  |   22 +
 src/plugins/burst_buffer/generic/Makefile.in  |  823 ++++
 .../burst_buffer/generic/bb_get_state.example |   70 +
 .../generic/bb_start_stage_in.example         |   76 +
 .../generic/bb_start_stage_out.example        |   77 +
 .../generic/bb_stop_stage_out.example         |   48 +
 .../generic/burst_buffer_generic.c            | 1372 ++++++
 src/plugins/checkpoint/Makefile.in            |   16 +-
 src/plugins/checkpoint/aix/Makefile.in        |   16 +-
 src/plugins/checkpoint/aix/checkpoint_aix.c   |   26 +-
 src/plugins/checkpoint/blcr/Makefile.in       |   16 +-
 src/plugins/checkpoint/blcr/checkpoint_blcr.c |   21 +-
 src/plugins/checkpoint/none/Makefile.in       |   16 +-
 src/plugins/checkpoint/none/checkpoint_none.c |    9 +-
 src/plugins/checkpoint/ompi/Makefile.in       |   16 +-
 src/plugins/checkpoint/ompi/checkpoint_ompi.c |   24 +-
 src/plugins/checkpoint/poe/Makefile.in        |   16 +-
 src/plugins/checkpoint/poe/checkpoint_poe.c   |   26 +-
 src/plugins/core_spec/Makefile.in             |   16 +-
 src/plugins/core_spec/cray/Makefile.in        |   16 +-
 src/plugins/core_spec/cray/core_spec_cray.c   |   59 +-
 src/plugins/core_spec/none/Makefile.in        |   16 +-
 src/plugins/core_spec/none/core_spec_none.c   |   55 +-
 src/plugins/crypto/Makefile.in                |   16 +-
 src/plugins/crypto/munge/Makefile.in          |   16 +-
 src/plugins/crypto/munge/crypto_munge.c       |   36 +-
 src/plugins/crypto/openssl/Makefile.in        |   16 +-
 src/plugins/crypto/openssl/crypto_openssl.c   |    9 +-
 src/plugins/ext_sensors/Makefile.in           |   16 +-
 src/plugins/ext_sensors/none/Makefile.in      |   16 +-
 .../ext_sensors/none/ext_sensors_none.c       |   10 +-
 src/plugins/ext_sensors/rrd/Makefile.in       |   16 +-
 src/plugins/ext_sensors/rrd/ext_sensors_rrd.c |   53 +-
 src/plugins/ext_sensors/rrd/ext_sensors_rrd.h |    2 +-
 src/plugins/gres/Makefile.in                  |   16 +-
 src/plugins/gres/gpu/Makefile.in              |   16 +-
 src/plugins/gres/gpu/gres_gpu.c               |   80 +-
 src/plugins/gres/mic/Makefile.in              |   16 +-
 src/plugins/gres/mic/gres_mic.c               |   56 +-
 src/plugins/gres/nic/Makefile.in              |   16 +-
 src/plugins/gres/nic/gres_nic.c               |  289 +-
 src/plugins/job_container/Makefile.in         |   16 +-
 src/plugins/job_container/cncu/Makefile.in    |   16 +-
 .../job_container/cncu/job_container_cncu.c   |    9 +-
 src/plugins/job_container/none/Makefile.in    |   16 +-
 .../job_container/none/job_container_none.c   |    9 +-
 src/plugins/job_submit/Makefile.in            |   16 +-
 .../job_submit/all_partitions/Makefile.in     |   16 +-
 .../job_submit_all_partitions.c               |    8 +-
 src/plugins/job_submit/cnode/Makefile.in      |   16 +-
 .../job_submit/cnode/job_submit_cnode.c       |    8 +-
 src/plugins/job_submit/cray/Makefile.in       |   16 +-
 src/plugins/job_submit/cray/job_submit_cray.c |    8 +-
 src/plugins/job_submit/defaults/Makefile.in   |   16 +-
 .../job_submit/defaults/job_submit_defaults.c |    8 +-
 src/plugins/job_submit/logging/Makefile.in    |   16 +-
 .../job_submit/logging/job_submit_logging.c   |    8 +-
 src/plugins/job_submit/lua/Makefile.in        |   16 +-
 src/plugins/job_submit/lua/job_submit_lua.c   |  306 +-
 src/plugins/job_submit/partition/Makefile.in  |   16 +-
 .../partition/job_submit_partition.c          |    8 +-
 src/plugins/job_submit/pbs/Makefile.in        |   16 +-
 src/plugins/job_submit/pbs/job_submit_pbs.c   |    8 +-
 .../job_submit/require_timelimit/Makefile.in  |   16 +-
 .../job_submit_require_timelimit.c            |   33 +-
 src/plugins/job_submit/throttle/Makefile.in   |   16 +-
 .../job_submit/throttle/job_submit_throttle.c |    8 +-
 src/plugins/jobacct_gather/Makefile.am        |    2 +-
 src/plugins/jobacct_gather/Makefile.in        |   18 +-
 src/plugins/jobacct_gather/aix/Makefile.in    |   16 +-
 .../jobacct_gather/aix/jobacct_gather_aix.c   |   15 +-
 src/plugins/jobacct_gather/cgroup/Makefile.in |   16 +-
 .../cgroup/jobacct_gather_cgroup.c            |   20 +-
 .../cgroup/jobacct_gather_cgroup_cpuacct.c    |    8 +-
 src/plugins/jobacct_gather/common/Makefile.in |   16 +-
 .../jobacct_gather/common/common_jag.c        |  501 +-
 .../jobacct_gather/common/common_jag.h        |    2 +-
 src/plugins/jobacct_gather/linux/Makefile.in  |   16 +-
 .../linux/jobacct_gather_linux.c              |   15 +-
 src/plugins/jobacct_gather/none/Makefile.in   |   16 +-
 .../jobacct_gather/none/jobacct_gather_none.c |   10 +-
 src/plugins/jobcomp/Makefile.am               |    2 +-
 src/plugins/jobcomp/Makefile.in               |   18 +-
 src/plugins/jobcomp/elasticsearch/Makefile.am |   22 +
 src/plugins/jobcomp/elasticsearch/Makefile.in |  818 ++++
 .../elasticsearch/jobcomp_elasticsearch.c     |  983 ++++
 src/plugins/jobcomp/filetxt/Makefile.in       |   16 +-
 .../jobcomp/filetxt/filetxt_jobcomp_process.c |    6 +-
 src/plugins/jobcomp/filetxt/jobcomp_filetxt.c |   15 +-
 src/plugins/jobcomp/mysql/Makefile.in         |   16 +-
 src/plugins/jobcomp/mysql/jobcomp_mysql.c     |   20 +-
 .../jobcomp/mysql/mysql_jobcomp_process.c     |    2 +-
 src/plugins/jobcomp/none/Makefile.in          |   16 +-
 src/plugins/jobcomp/none/jobcomp_none.c       |   10 +-
 src/plugins/jobcomp/script/Makefile.in        |   16 +-
 src/plugins/jobcomp/script/jobcomp_script.c   |   19 +-
 src/plugins/launch/Makefile.in                |   16 +-
 src/plugins/launch/aprun/Makefile.in          |   16 +-
 src/plugins/launch/aprun/launch_aprun.c       |   31 +-
 src/plugins/launch/poe/Makefile.in            |   16 +-
 src/plugins/launch/poe/launch_poe.c           |   14 +-
 src/plugins/launch/runjob/Makefile.in         |   16 +-
 src/plugins/launch/runjob/launch_runjob.c     |   16 +-
 src/plugins/launch/slurm/Makefile.am          |    2 +-
 src/plugins/launch/slurm/Makefile.in          |   18 +-
 src/plugins/launch/slurm/launch_slurm.c       |   14 +-
 src/plugins/mpi/Makefile.in                   |   16 +-
 src/plugins/mpi/lam/Makefile.in               |   16 +-
 src/plugins/mpi/lam/mpi_lam.c                 |    9 +-
 src/plugins/mpi/mpich1_p4/Makefile.in         |   16 +-
 src/plugins/mpi/mpich1_p4/mpich1_p4.c         |    9 +-
 src/plugins/mpi/mpich1_shmem/Makefile.in      |   16 +-
 src/plugins/mpi/mpich1_shmem/mpich1_shmem.c   |    9 +-
 src/plugins/mpi/mpichgm/Makefile.in           |   16 +-
 src/plugins/mpi/mpichgm/mpi_mpichgm.c         |    9 +-
 src/plugins/mpi/mpichmx/Makefile.in           |   16 +-
 src/plugins/mpi/mpichmx/mpi_mpichmx.c         |    9 +-
 src/plugins/mpi/mvapich/Makefile.am           |   12 +-
 src/plugins/mpi/mvapich/Makefile.in           |   28 +-
 src/plugins/mpi/mvapich/mpi_mvapich.c         |    9 +-
 src/plugins/mpi/mvapich/mvapich.c             |  370 +-
 src/plugins/mpi/none/Makefile.in              |   16 +-
 src/plugins/mpi/none/mpi_none.c               |    9 +-
 src/plugins/mpi/openmpi/Makefile.in           |   16 +-
 src/plugins/mpi/openmpi/mpi_openmpi.c         |    9 +-
 src/plugins/mpi/pmi2/Makefile.am              |    3 +-
 src/plugins/mpi/pmi2/Makefile.in              |   22 +-
 src/plugins/mpi/pmi2/agent.c                  |    2 +-
 src/plugins/mpi/pmi2/kvs.c                    |   19 +-
 src/plugins/mpi/pmi2/mpi_pmi2.c               |    9 +-
 src/plugins/mpi/pmi2/pmi.h                    |    6 +
 src/plugins/mpi/pmi2/pmi2.c                   |   33 +-
 src/plugins/mpi/pmi2/ring.c                   |  586 +++
 src/plugins/mpi/pmi2/ring.h                   |   34 +
 src/plugins/mpi/pmi2/setup.c                  |   20 +-
 src/plugins/mpi/pmi2/spawn.c                  |   15 +-
 src/plugins/mpi/pmi2/tree.c                   |  164 +-
 src/plugins/mpi/pmi2/tree.h                   |    4 +-
 src/plugins/power/Makefile.am                 |    3 +
 src/plugins/power/Makefile.in                 |  778 +++
 src/plugins/power/common/Makefile.am          |   13 +
 src/plugins/power/common/Makefile.in          |  755 +++
 src/plugins/power/common/power_common.c       |  414 ++
 src/plugins/power/common/power_common.h       |   94 +
 src/plugins/power/cray/Makefile.am            |   22 +
 src/plugins/power/cray/Makefile.in            |  820 ++++
 src/plugins/power/cray/power_cray.c           | 1742 +++++++
 src/plugins/power/none/Makefile.am            |   16 +
 src/plugins/power/none/Makefile.in            |  814 ++++
 src/plugins/power/none/power_none.c           |  120 +
 src/plugins/preempt/Makefile.in               |   16 +-
 src/plugins/preempt/job_prio/Makefile.in      |   16 +-
 .../preempt/job_prio/preempt_job_prio.c       |   90 +-
 src/plugins/preempt/none/Makefile.in          |   16 +-
 src/plugins/preempt/none/preempt_none.c       |    2 +-
 .../preempt/partition_prio/Makefile.in        |   16 +-
 .../partition_prio/preempt_partition_prio.c   |   12 +-
 src/plugins/preempt/qos/Makefile.in           |   16 +-
 src/plugins/preempt/qos/preempt_qos.c         |    2 +-
 src/plugins/priority/Makefile.in              |   16 +-
 src/plugins/priority/basic/Makefile.am        |    1 +
 src/plugins/priority/basic/Makefile.in        |   19 +-
 src/plugins/priority/basic/priority_basic.c   |  101 +-
 src/plugins/priority/multifactor/Makefile.in  |   16 +-
 src/plugins/priority/multifactor/fair_tree.c  |   65 +-
 .../multifactor/priority_multifactor.c        |  832 ++--
 .../multifactor/priority_multifactor.h        |    4 +-
 src/plugins/proctrack/Makefile.in             |   16 +-
 src/plugins/proctrack/aix/Makefile.in         |   16 +-
 src/plugins/proctrack/aix/proctrack_aix.c     |   10 +-
 src/plugins/proctrack/cgroup/Makefile.in      |   16 +-
 .../proctrack/cgroup/proctrack_cgroup.c       |   80 +-
 src/plugins/proctrack/cray/Makefile.in        |   16 +-
 src/plugins/proctrack/cray/proctrack_cray.c   |    2 +-
 src/plugins/proctrack/linuxproc/Makefile.in   |   16 +-
 src/plugins/proctrack/linuxproc/kill_tree.c   |    7 +-
 .../proctrack/linuxproc/proctrack_linuxproc.c |   10 +-
 src/plugins/proctrack/lua/Makefile.in         |   16 +-
 src/plugins/proctrack/lua/proctrack_lua.c     |    2 +-
 src/plugins/proctrack/pgid/Makefile.in        |   16 +-
 src/plugins/proctrack/pgid/proctrack_pgid.c   |   10 +-
 src/plugins/proctrack/sgi_job/Makefile.in     |   16 +-
 .../proctrack/sgi_job/proctrack_sgi_job.c     |    2 +-
 src/plugins/route/Makefile.in                 |   16 +-
 src/plugins/route/default/Makefile.in         |   16 +-
 src/plugins/route/default/route_default.c     |   16 +-
 src/plugins/route/topology/Makefile.in        |   16 +-
 src/plugins/route/topology/route_topology.c   |   16 +-
 src/plugins/sched/Makefile.in                 |   16 +-
 src/plugins/sched/backfill/Makefile.in        |   16 +-
 src/plugins/sched/backfill/backfill.c         |  149 +-
 src/plugins/sched/backfill/backfill_wrapper.c |    7 +-
 src/plugins/sched/builtin/Makefile.in         |   16 +-
 src/plugins/sched/builtin/builtin.c           |    4 +-
 src/plugins/sched/builtin/builtin_wrapper.c   |    2 +-
 src/plugins/sched/hold/Makefile.in            |   16 +-
 src/plugins/sched/hold/hold_wrapper.c         |    2 +-
 src/plugins/sched/wiki/Makefile.in            |   16 +-
 src/plugins/sched/wiki/job_modify.c           |    6 +-
 src/plugins/sched/wiki/msg.c                  |    6 +-
 src/plugins/sched/wiki/msg.h                  |    2 +-
 src/plugins/sched/wiki/sched_wiki.c           |    2 +-
 src/plugins/sched/wiki2/Makefile.in           |   16 +-
 src/plugins/sched/wiki2/job_modify.c          |   23 +-
 src/plugins/sched/wiki2/job_will_run.c        |    8 +-
 src/plugins/sched/wiki2/msg.c                 |    6 +-
 src/plugins/sched/wiki2/msg.h                 |    2 +-
 src/plugins/sched/wiki2/sched_wiki2.c         |    2 +-
 src/plugins/select/Makefile.in                |   16 +-
 src/plugins/select/alps/Makefile.in           |   16 +-
 src/plugins/select/alps/basil_interface.c     |   18 +-
 src/plugins/select/alps/libalps/Makefile.in   |   16 +-
 .../select/alps/libemulate/Makefile.in        |   16 +-
 src/plugins/select/alps/select_alps.c         |   17 +-
 src/plugins/select/bluegene/Makefile.in       |   16 +-
 src/plugins/select/bluegene/ba/Makefile.in    |   16 +-
 .../select/bluegene/ba/block_allocator.c      |   17 +-
 src/plugins/select/bluegene/ba/wire_test.c    |    8 +-
 .../select/bluegene/ba_bgq/Makefile.in        |   16 +-
 .../select/bluegene/ba_bgq/block_allocator.c  |    3 +-
 .../select/bluegene/ba_bgq/wire_test.c        |    8 +-
 src/plugins/select/bluegene/bg_core.c         |    7 +-
 src/plugins/select/bluegene/bg_core.h         |    2 +-
 .../select/bluegene/bg_defined_block.c        |   18 +-
 .../select/bluegene/bg_dynamic_block.c        |    8 +-
 src/plugins/select/bluegene/bg_job_info.c     |  106 -
 src/plugins/select/bluegene/bg_job_place.c    |   39 +-
 src/plugins/select/bluegene/bg_job_run.c      |    8 +-
 src/plugins/select/bluegene/bg_node_info.c    |    3 +-
 src/plugins/select/bluegene/bg_read_config.c  |    9 +-
 .../select/bluegene/bg_record_functions.c     |   51 +-
 src/plugins/select/bluegene/bg_status.c       |    2 +-
 src/plugins/select/bluegene/bg_status.h       |    2 +-
 src/plugins/select/bluegene/bl/Makefile.in    |   16 +-
 .../select/bluegene/bl/bridge_linker.c        |   10 +-
 .../bluegene/bl/bridge_switch_connections.c   |    8 +-
 .../select/bluegene/bl_bgq/Makefile.in        |   16 +-
 src/plugins/select/bluegene/select_bluegene.c |  121 +-
 src/plugins/select/bluegene/sfree/Makefile.in |   16 +-
 src/plugins/select/bluegene/sfree/sfree.c     |    2 +-
 src/plugins/select/cons_res/Makefile.in       |   16 +-
 src/plugins/select/cons_res/dist_tasks.c      |   74 +-
 src/plugins/select/cons_res/dist_tasks.h      |    8 +-
 src/plugins/select/cons_res/job_test.c        |  750 ++-
 src/plugins/select/cons_res/job_test.h        |    3 +-
 src/plugins/select/cons_res/select_cons_res.c |  157 +-
 src/plugins/select/cons_res/select_cons_res.h |   12 +-
 src/plugins/select/cray/Makefile.in           |   16 +-
 src/plugins/select/cray/select_cray.c         |   59 +-
 src/plugins/select/linear/Makefile.in         |   16 +-
 src/plugins/select/linear/select_linear.c     | 1206 ++++-
 src/plugins/select/other/Makefile.in          |   16 +-
 src/plugins/select/serial/Makefile.in         |   16 +-
 src/plugins/select/serial/job_test.c          |    4 +-
 src/plugins/select/serial/select_serial.c     |   23 +-
 src/plugins/select/serial/select_serial.h     |    1 +
 src/plugins/slurmctld/Makefile.am             |    2 +-
 src/plugins/slurmctld/Makefile.in             |   18 +-
 src/plugins/slurmctld/dynalloc/Makefile.am    |   39 -
 src/plugins/slurmctld/dynalloc/allocate.c     |  730 ---
 src/plugins/slurmctld/dynalloc/allocate.h     |  135 -
 src/plugins/slurmctld/dynalloc/allocator.c    |  293 --
 src/plugins/slurmctld/dynalloc/argv.c         |  553 ---
 src/plugins/slurmctld/dynalloc/argv.h         |  317 --
 src/plugins/slurmctld/dynalloc/deallocate.c   |  167 -
 src/plugins/slurmctld/dynalloc/info.c         |  263 -
 src/plugins/slurmctld/dynalloc/info.h         |  128 -
 .../slurmctld/dynalloc/job_ports_list.c       |  112 -
 src/plugins/slurmctld/dynalloc/msg.c          |    2 +-
 src/plugins/slurmctld/nonstop/Makefile.in     |   16 +-
 src/plugins/slurmctld/nonstop/do_work.c       |    9 +-
 src/plugins/slurmctld/nonstop/msg.c           |    4 +-
 src/plugins/slurmctld/nonstop/nonstop.c       |    6 +-
 src/plugins/slurmd/Makefile.in                |   16 +-
 src/plugins/switch/Makefile.in                |   16 +-
 src/plugins/switch/cray/Makefile.in           |   16 +-
 src/plugins/switch/cray/switch_cray.c         |    9 +-
 src/plugins/switch/generic/Makefile.in        |   16 +-
 src/plugins/switch/generic/switch_generic.c   |    9 +-
 src/plugins/switch/none/Makefile.in           |   16 +-
 src/plugins/switch/none/switch_none.c         |    9 +-
 src/plugins/switch/nrt/Makefile.in            |   16 +-
 src/plugins/switch/nrt/libpermapi/Makefile.in |   16 +-
 src/plugins/switch/nrt/libpermapi/shr_64.c    |    5 +-
 src/plugins/switch/nrt/nrt.c                  |    9 +-
 src/plugins/switch/nrt/switch_nrt.c           |    9 +-
 src/plugins/task/Makefile.in                  |   16 +-
 src/plugins/task/affinity/Makefile.in         |   16 +-
 src/plugins/task/affinity/affinity.h          |    1 +
 src/plugins/task/affinity/cpuset.c            |   40 +
 src/plugins/task/affinity/dist_tasks.c        |   91 +-
 src/plugins/task/affinity/schedutils.c        |   29 +
 src/plugins/task/affinity/task_affinity.c     |    9 +-
 src/plugins/task/cgroup/Makefile.in           |   16 +-
 src/plugins/task/cgroup/task_cgroup.c         |    9 +-
 src/plugins/task/cgroup/task_cgroup_cpuset.c  |  429 +-
 src/plugins/task/cgroup/task_cgroup_devices.c |   19 +-
 src/plugins/task/cgroup/task_cgroup_memory.c  |   13 +
 src/plugins/task/cray/Makefile.in             |   16 +-
 src/plugins/task/cray/task_cray.c             |   18 +-
 src/plugins/task/none/Makefile.in             |   16 +-
 src/plugins/task/none/task_none.c             |    9 +-
 src/plugins/topology/3d_torus/Makefile.in     |   16 +-
 .../topology/3d_torus/topology_3d_torus.c     |    9 +-
 src/plugins/topology/Makefile.am              |    2 +-
 src/plugins/topology/Makefile.in              |   18 +-
 src/plugins/topology/hypercube/Makefile.am    |   12 +
 .../hypercube}/Makefile.in                    |   91 +-
 .../topology/hypercube/topology_hypercube.c   | 1440 ++++++
 src/plugins/topology/node_rank/Makefile.in    |   16 +-
 .../topology/node_rank/topology_node_rank.c   |    9 +-
 src/plugins/topology/none/Makefile.in         |   16 +-
 src/plugins/topology/none/topology_none.c     |    9 +-
 src/plugins/topology/tree/Makefile.in         |   16 +-
 src/plugins/topology/tree/topology_tree.c     |    9 +-
 src/sacct/Makefile.in                         |   16 +-
 src/sacct/options.c                           |  228 +-
 src/sacct/print.c                             |  242 +-
 src/sacct/sacct.c                             |    7 +-
 src/sacct/sacct.h                             |   13 +-
 src/sacctmgr/Makefile.am                      |    4 +-
 src/sacctmgr/Makefile.in                      |   31 +-
 src/sacctmgr/account_functions.c              |  173 +-
 src/sacctmgr/archive_functions.c              |    2 +-
 src/sacctmgr/association_functions.c          |  487 +-
 src/sacctmgr/cluster_functions.c              |  244 +-
 src/sacctmgr/common.c                         |  442 +-
 src/sacctmgr/config_functions.c               |    3 +-
 src/sacctmgr/event_functions.c                |  169 +-
 src/sacctmgr/file_functions.c                 |  861 ++--
 src/sacctmgr/job_functions.c                  |    3 +-
 src/sacctmgr/problem_functions.c              |   29 +-
 src/sacctmgr/qos_functions.c                  |  589 ++-
 src/sacctmgr/reservation_functions.c          |  310 ++
 src/sacctmgr/sacctmgr.c                       |   84 +-
 src/sacctmgr/sacctmgr.h                       |   56 +-
 src/sacctmgr/tres_function.c                  |  215 +
 src/sacctmgr/txn_functions.c                  |   12 +-
 src/sacctmgr/user_functions.c                 |  281 +-
 src/sacctmgr/wckey_functions.c                |   12 +-
 src/salloc/Makefile.in                        |   16 +-
 src/salloc/opt.c                              |  241 +-
 src/salloc/opt.h                              |    6 +
 src/salloc/salloc.c                           |   37 +-
 src/sattach/Makefile.in                       |   16 +-
 src/sattach/sattach.c                         |    7 +-
 src/sbatch/Makefile.am                        |    2 +-
 src/sbatch/Makefile.in                        |   22 +-
 src/sbatch/mult_cluster.c                     |  211 -
 src/sbatch/opt.c                              |  311 +-
 src/sbatch/opt.h                              |   14 +-
 src/sbatch/sbatch.c                           |   44 +-
 src/sbcast/Makefile.in                        |   16 +-
 src/sbcast/agent.c                            |    3 +-
 src/sbcast/opts.c                             |   23 +-
 src/sbcast/sbcast.c                           |   32 +-
 src/sbcast/sbcast.h                           |    3 +-
 src/scancel/Makefile.in                       |   16 +-
 src/scancel/opt.c                             |   64 +-
 src/scancel/scancel.c                         |  768 +--
 src/scancel/scancel.h                         |   26 +-
 src/scontrol/Makefile.am                      |    7 +-
 src/scontrol/Makefile.in                      |   39 +-
 src/scontrol/create_res.c                     |   44 +-
 src/scontrol/info_assoc_mgr.c                 |  408 ++
 .../info_burst_buffer.c}                      |   70 +-
 src/scontrol/info_job.c                       |  111 +-
 src/scontrol/info_layout.c                    |   93 +
 src/scontrol/info_node.c                      |   23 +
 src/scontrol/scontrol.c                       |  100 +-
 src/scontrol/scontrol.h                       |   11 +-
 src/scontrol/update_job.c                     |   23 +-
 src/scontrol/update_layout.c                  |  112 +
 src/scontrol/update_node.c                    |    7 +-
 src/scontrol/update_part.c                    |   18 +
 src/scontrol/update_powercap.c                |  111 +
 src/sdiag/Makefile.in                         |   16 +-
 src/sdiag/sdiag.c                             |    9 +-
 src/sinfo/Makefile.in                         |   16 +-
 src/sinfo/opts.c                              |  365 +-
 src/sinfo/print.c                             |  119 +-
 src/sinfo/print.h                             |    8 +
 src/sinfo/sinfo.c                             |   56 +-
 src/sinfo/sinfo.h                             |    6 +
 src/sinfo/sort.c                              |   18 +
 src/slurmctld/Makefile.am                     |    6 +
 src/slurmctld/Makefile.in                     |   41 +-
 src/slurmctld/acct_policy.c                   | 3942 +++++++++------
 src/slurmctld/acct_policy.h                   |   17 +-
 src/slurmctld/agent.c                         |   65 +-
 src/slurmctld/agent.h                         |    6 +-
 src/slurmctld/backup.c                        |    7 +-
 src/slurmctld/burst_buffer.c                  |  656 +++
 src/slurmctld/burst_buffer.h                  |  188 +
 src/slurmctld/controller.c                    |  504 +-
 src/slurmctld/front_end.c                     |   84 +-
 src/slurmctld/gang.c                          |   93 +-
 src/slurmctld/groups.c                        |   28 +-
 src/slurmctld/job_mgr.c                       | 2498 +++++++---
 src/slurmctld/job_scheduler.c                 |  489 +-
 src/slurmctld/job_scheduler.h                 |    7 +-
 src/slurmctld/licenses.c                      |  154 +-
 src/slurmctld/licenses.h                      |   32 +-
 src/slurmctld/locks.h                         |    4 +
 src/slurmctld/node_mgr.c                      |  226 +-
 src/slurmctld/node_scheduler.c                |  448 +-
 src/slurmctld/partition_mgr.c                 |  302 +-
 src/slurmctld/ping_nodes.c                    |    6 +-
 src/slurmctld/powercapping.c                  |  597 +++
 src/slurmctld/powercapping.h                  |  178 +
 src/slurmctld/proc_req.c                      | 1171 ++++-
 src/slurmctld/proc_req.h                      |    2 +-
 src/slurmctld/read_config.c                   |  329 +-
 src/slurmctld/reservation.c                   | 1204 ++++-
 src/slurmctld/reservation.h                   |   36 +
 src/slurmctld/sicp.c                          |  480 ++
 src/slurmctld/sicp.h                          |   62 +
 src/slurmctld/slurmctld.h                     |  247 +-
 src/slurmctld/srun_comm.c                     |   15 +
 src/slurmctld/statistics.c                    |   59 +-
 src/slurmctld/step_mgr.c                      |  486 +-
 src/slurmctld/trigger_mgr.c                   |    5 +-
 src/slurmd/Makefile.in                        |   16 +-
 src/slurmd/common/Makefile.in                 |   16 +-
 src/slurmd/common/reverse_tree.h              |   24 +-
 src/slurmd/common/reverse_tree_math.c         |  108 +-
 src/slurmd/common/reverse_tree_math.h         |    6 +
 src/slurmd/common/run_script.c                |   10 +-
 src/slurmd/common/slurmd_cgroup.c             |    7 +
 src/slurmd/common/slurmd_cgroup.h             |    3 +
 src/slurmd/common/slurmstepd_init.c           |    9 +-
 src/slurmd/common/xcgroup.c                   |   12 +-
 src/slurmd/slurmd/Makefile.am                 |    1 -
 src/slurmd/slurmd/Makefile.in                 |   20 +-
 src/slurmd/slurmd/get_mach_stat.c             |   30 +-
 src/slurmd/slurmd/get_mach_stat.h             |    1 +
 src/slurmd/slurmd/req.c                       |  989 +++-
 src/slurmd/slurmd/slurmd.c                    |  146 +-
 src/slurmd/slurmd/slurmd.h                    |   15 +-
 src/slurmd/slurmd/xcpu.c                      |  148 -
 src/slurmd/slurmstepd/Makefile.in             |   16 +-
 src/slurmd/slurmstepd/io.c                    |    1 +
 src/slurmd/slurmstepd/mgr.c                   |  329 +-
 src/slurmd/slurmstepd/mgr.h                   |    6 +-
 src/slurmd/slurmstepd/pam_ses.c               |   40 +-
 src/slurmd/slurmstepd/req.c                   |   13 +-
 src/slurmd/slurmstepd/slurmstepd.c            |   32 +-
 src/slurmd/slurmstepd/slurmstepd_job.c        |   42 +-
 src/slurmd/slurmstepd/slurmstepd_job.h        |   11 +-
 src/slurmd/slurmstepd/task.c                  |   24 +-
 src/slurmd/slurmstepd/ulimits.c               |    3 +-
 src/slurmdbd/Makefile.in                      |   16 +-
 src/slurmdbd/proc_req.c                       |  581 +--
 src/slurmdbd/proc_req.h                       |    2 +-
 src/slurmdbd/read_config.c                    |   37 +-
 src/slurmdbd/rpc_mgr.c                        |    5 +-
 src/slurmdbd/slurmdbd.c                       |   23 +-
 src/smap/Makefile.in                          |   16 +-
 src/smap/configure_functions.c                |   11 +-
 src/smap/job_functions.c                      |   58 +-
 src/smap/opts.c                               |   10 +-
 src/smap/partition_functions.c                |   13 +-
 src/smd/Makefile.in                           |   16 +-
 src/sprio/Makefile.in                         |   16 +-
 src/sprio/opts.c                              |   13 +-
 src/sprio/print.c                             |   63 +
 src/sprio/print.h                             |    8 +
 src/sprio/sprio.c                             |   16 +-
 src/sprio/sprio.h                             |    1 +
 src/squeue/Makefile.in                        |   16 +-
 src/squeue/opts.c                             |   73 +-
 src/squeue/print.c                            |  182 +-
 src/squeue/print.h                            |    8 +
 src/squeue/squeue.c                           |    7 +-
 src/squeue/squeue.h                           |    2 +
 src/sreport/Makefile.in                       |   16 +-
 src/sreport/cluster_reports.c                 | 1323 ++---
 src/sreport/common.c                          |   34 +-
 src/sreport/job_reports.c                     |  662 +--
 src/sreport/resv_reports.c                    |  499 +-
 src/sreport/sreport.c                         |   86 +-
 src/sreport/sreport.h                         |    4 +
 src/sreport/user_reports.c                    |  292 +-
 src/srun/Makefile.in                          |   16 +-
 src/srun/libsrun/Makefile.am                  |    6 +-
 src/srun/libsrun/Makefile.in                  |   24 +-
 src/srun/libsrun/allocate.c                   |   64 +-
 src/srun/libsrun/launch.c                     |   23 +-
 src/srun/libsrun/opt.c                        |  334 +-
 src/srun/libsrun/opt.h                        |    8 +-
 src/srun/libsrun/srun_job.c                   |   34 +-
 src/srun/libsrun/srun_job.h                   |    3 +
 src/srun/srun.c                               |   11 +-
 src/srun_cr/Makefile.in                       |   16 +-
 src/sshare/Makefile.in                        |   16 +-
 src/sshare/process.c                          |  271 +-
 src/sshare/sshare.c                           |   72 +-
 src/sshare/sshare.h                           |   26 +
 src/sstat/Makefile.in                         |   16 +-
 src/sstat/options.c                           |   10 +
 src/sstat/print.c                             |   40 +-
 src/sstat/sstat.c                             |   37 +-
 src/sstat/sstat.h                             |    7 +-
 src/strigger/Makefile.in                      |   16 +-
 src/strigger/opts.c                           |    3 +-
 src/sview/Makefile.am                         |    4 +-
 src/sview/Makefile.in                         |   42 +-
 src/sview/bb_info.c                           | 1037 ++++
 src/sview/block_info.c                        |   20 +-
 src/sview/common.c                            |   39 +-
 src/sview/defaults.c                          |   14 +-
 src/sview/front_end_info.c                    |    4 +-
 src/sview/grid.c                              |    3 +-
 src/sview/job_info.c                          |  235 +-
 src/sview/node_info.c                         |  154 +-
 src/sview/part_info.c                         |  141 +-
 src/sview/popups.c                            |    8 +-
 src/sview/resv_info.c                         |  118 +-
 src/sview/sview.c                             |   56 +-
 src/sview/sview.h                             |   19 +
 testsuite/Makefile.in                         |   16 +-
 testsuite/expect/Makefile.am                  |   58 +-
 testsuite/expect/Makefile.in                  |   74 +-
 testsuite/expect/README                       |   26 +-
 testsuite/expect/globals                      |  209 +-
 testsuite/expect/globals_accounting           |   20 +-
 testsuite/expect/inc12.3.1                    |    4 +-
 testsuite/expect/inc12.3.2                    |    4 +-
 testsuite/expect/inc21.21.1                   |   66 -
 testsuite/expect/inc21.21.2                   |   68 -
 testsuite/expect/inc21.21.3                   |   67 -
 testsuite/expect/inc21.21.4                   |   69 -
 testsuite/expect/inc21.21_tests               |  519 ++
 testsuite/expect/inc21.30.15                  |    4 +-
 testsuite/expect/inc21.30.9                   |    3 +-
 testsuite/expect/inc21.34.1                   |   96 +
 testsuite/expect/inc21.34.2                   |   91 +
 testsuite/expect/inc21.34_tests               |  610 +++
 testsuite/expect/inc3.11.4                    |    2 +-
 testsuite/expect/inc3.11.5                    |    2 +-
 testsuite/expect/regression                   |    2 +-
 testsuite/expect/test1.100                    |  171 +
 testsuite/expect/test1.14                     |   11 +-
 testsuite/expect/test1.27                     |   14 +
 testsuite/expect/test1.28                     |    2 +-
 testsuite/expect/test1.58                     |    7 +-
 testsuite/expect/test1.59                     |    4 +-
 testsuite/expect/test1.75                     |   95 +-
 testsuite/expect/test1.76                     |  368 ++
 testsuite/expect/test1.76.bash                |   27 +
 testsuite/expect/test1.76.batch               |   17 +
 testsuite/expect/test1.77                     |  145 +
 testsuite/expect/test1.91                     |    2 +-
 testsuite/expect/test1.92                     |    4 +-
 testsuite/expect/test1.97                     |    2 +-
 testsuite/expect/test1.99                     |    2 +-
 testsuite/expect/test11.5                     |    2 +
 testsuite/expect/test12.1                     |    4 +-
 testsuite/expect/test12.2                     |    4 +-
 testsuite/expect/test12.2.prog.c              |    7 +-
 testsuite/expect/test12.3                     |    2 +-
 testsuite/expect/test12.4                     |  828 ++--
 testsuite/expect/test12.6                     |  150 +-
 testsuite/expect/test12.7                     |   25 +-
 testsuite/expect/test14.10                    |  153 +
 testsuite/expect/test14.4                     |    2 +-
 testsuite/expect/test14.6                     |   10 +-
 testsuite/expect/test14.8                     |    2 +-
 testsuite/expect/test15.27                    |    2 +-
 testsuite/expect/test15.4                     |    2 +-
 testsuite/expect/test15.7                     |    5 +-
 testsuite/expect/test16.4                     |    2 +-
 testsuite/expect/test17.10                    |    5 +-
 testsuite/expect/test17.11                    |   40 +-
 testsuite/expect/test17.19                    |   12 +-
 testsuite/expect/test17.27                    |    4 +-
 testsuite/expect/test17.34                    |   13 +-
 testsuite/expect/test17.35                    |   12 +-
 testsuite/expect/test17.36                    |   69 +-
 testsuite/expect/test17.37                    |   18 +-
 testsuite/expect/test17.38                    |   42 +
 testsuite/expect/test17.39                    |  192 +
 testsuite/expect/test17.40                    |  304 ++
 testsuite/expect/test2.24                     |    3 +-
 testsuite/expect/test2.25                     |  337 ++
 testsuite/expect/test20.3                     |    2 +-
 testsuite/expect/test20.6                     |    8 +-
 testsuite/expect/test20.7                     |    5 +-
 testsuite/expect/test20.8                     |    7 +-
 testsuite/expect/test21.21                    |  389 +-
 testsuite/expect/test21.24                    |   96 +-
 testsuite/expect/test21.30                    |   21 +-
 testsuite/expect/test21.31                    |   26 +-
 testsuite/expect/test21.32                    |    2 +-
 testsuite/expect/test21.34                    |  441 ++
 testsuite/expect/test22.1                     |   68 +-
 testsuite/expect/test22.2                     |   10 +-
 testsuite/expect/test24.1.prog.c              |   90 +-
 testsuite/expect/test24.2                     |   14 +-
 testsuite/expect/test24.3                     |   44 +-
 testsuite/expect/test24.3.prog.c              |  125 +-
 testsuite/expect/test24.4                     |   70 +-
 testsuite/expect/test24.4.prog.c              |  175 +-
 testsuite/expect/test28.2                     |   22 +-
 testsuite/expect/test28.3                     |    4 +-
 testsuite/expect/test28.4                     |    6 +-
 testsuite/expect/test28.5                     |    4 +-
 testsuite/expect/test28.8                     |  199 +
 testsuite/expect/test28.9                     |  116 +
 testsuite/expect/test3.10                     |    5 +-
 testsuite/expect/test3.11                     |   11 +-
 testsuite/expect/test3.14                     |  172 +
 testsuite/expect/test3.15                     |  355 ++
 testsuite/expect/test31.1                     |    9 +-
 testsuite/expect/test33.1                     |   12 +
 testsuite/expect/test34.1                     |    2 +-
 testsuite/expect/test34.2                     |    2 +-
 testsuite/expect/test35.1                     |  210 +
 testsuite/expect/test35.2                     |  175 +
 testsuite/expect/test4.12                     |   12 +-
 testsuite/expect/test4.13                     |  258 +
 testsuite/expect/test4.5                      |    1 +
 testsuite/expect/test5.10                     |    2 +-
 testsuite/expect/test5.4                      |    8 +-
 testsuite/expect/test5.5                      |    8 +-
 testsuite/expect/test5.6                      |    8 +-
 testsuite/expect/test5.9                      |   12 +-
 testsuite/expect/test6.11                     |    7 +-
 testsuite/expect/test6.12                     |    8 +-
 testsuite/expect/test6.13                     |    6 +-
 testsuite/expect/test6.5                      |    3 +-
 testsuite/expect/test6.7                      |    2 +-
 testsuite/expect/test7.1                      |   23 +-
 testsuite/expect/test7.13                     |   35 +-
 testsuite/expect/test7.17                     |  144 +-
 testsuite/expect/test7.17.prog.c              |   43 +-
 .../test7.17_configs/test7.17.1/gres.conf     |    5 +
 .../test7.17_configs/test7.17.1/slurm.conf    |   12 +
 .../test7.17_configs/test7.17.2/gres.conf     |    7 +
 .../test7.17_configs/test7.17.2/slurm.conf    |   12 +
 .../test7.17_configs/test7.17.3/gres.conf     |   11 +
 .../test7.17_configs/test7.17.3/slurm.conf    |   12 +
 .../test7.17_configs/test7.17.4/gres.conf     |    5 +
 .../test7.17_configs/test7.17.4/slurm.conf    |   12 +
 .../test7.17_configs/test7.17.5/slurm.conf    |   12 +
 .../test7.17_configs/test7.17.6/gres.conf     |    5 +
 .../test7.17_configs/test7.17.6/slurm.conf    |   12 +
 .../test7.17_configs/test7.17.7/gres.conf     |    5 +
 .../test7.17_configs/test7.17.7/slurm.conf    |   12 +
 testsuite/expect/test8.1                      |    7 +-
 testsuite/expect/test8.2                      |    3 +
 testsuite/expect/test8.21                     |    2 +-
 testsuite/expect/test8.21.bash                |   25 +-
 testsuite/expect/test8.4                      |    1 +
 testsuite/slurm_unit/Makefile.in              |   16 +-
 testsuite/slurm_unit/api/Makefile.in          |   16 +-
 testsuite/slurm_unit/api/manual/Makefile.in   |   16 +-
 testsuite/slurm_unit/common/Makefile.in       |   16 +-
 1081 files changed, 109784 insertions(+), 33346 deletions(-)
 create mode 100644 auxdir/x_ac_curl.m4
 create mode 100644 auxdir/x_ac_json.m4
 create mode 100644 auxdir/x_ac_netloc.m4
 delete mode 100644 auxdir/x_ac_xcpu.m4
 create mode 100644 contribs/cray/csm/Makefile.am
 create mode 100644 contribs/cray/csm/Makefile.in
 create mode 100644 contribs/cray/csm/gres.conf.j2
 create mode 100644 contribs/cray/csm/slurm.conf.j2
 create mode 100644 contribs/cray/csm/slurmconfgen_smw.py
 create mode 100644 contribs/pam_slurm_adopt/Makefile.am
 create mode 100644 contribs/pam_slurm_adopt/Makefile.in
 create mode 100644 contribs/pam_slurm_adopt/README
 create mode 100644 contribs/pam_slurm_adopt/helper.c
 create mode 100644 contribs/pam_slurm_adopt/helper.h
 create mode 100644 contribs/pam_slurm_adopt/pam_slurm_adopt.c
 create mode 100755 contribs/perlapi/libslurmdb/perl/t/06-jobs_get.t
 create mode 100755 contribs/perlapi/libslurmdb/perl/t/07-qos_get.t
 create mode 100644 contribs/pmi2/README
 create mode 100644 contribs/pmi2/testpmixring.c
 create mode 100644 contribs/sgi/Makefile.am
 create mode 100644 contribs/sgi/Makefile.in
 create mode 100644 contribs/sgi/README.txt
 create mode 100644 contribs/sgi/netloc_to_topology.c
 create mode 100644 doc/html/bb_plugins.shtml
 create mode 100644 doc/html/burst_buffer.shtml
 create mode 100644 doc/html/power_mgmt.shtml
 create mode 100644 doc/html/power_plugins.shtml
 delete mode 100644 doc/html/priority_multifactor2.shtml
 create mode 100644 doc/man/man3/slurm_job_cpus_allocated_str_on_node.3
 create mode 100644 doc/man/man3/slurm_job_cpus_allocated_str_on_node_id.3
 create mode 100644 doc/man/man3/slurm_job_will_run2.3
 create mode 100644 doc/man/man5/burst_buffer.conf.5
 create mode 100644 etc/layouts.d.power.conf.example
 create mode 100644 etc/layouts.d.power_cpufreq.conf.example
 rename src/{plugins/slurmctld/dynalloc/msg.h => api/assoc_mgr_info.c} (60%)
 create mode 100644 src/api/burst_buffer_info.c
 create mode 100644 src/api/layout_info.c
 create mode 100644 src/api/powercap_info.c
 create mode 100644 src/common/callerid.c
 create mode 100644 src/common/callerid.h
 create mode 100644 src/common/mapping.c
 rename src/{plugins/slurmctld/dynalloc/allocator.h => common/mapping.h} (67%)
 create mode 100644 src/common/msg_aggr.c
 create mode 100644 src/common/msg_aggr.h
 create mode 100644 src/common/power.c
 rename src/{plugins/slurmctld/dynalloc/slurmctld_dynalloc.c => common/power.h} (57%)
 create mode 100644 src/common/slurm_time.c
 create mode 100644 src/common/slurm_time.h
 rename src/{plugins/slurmctld/dynalloc/deallocate.h => db_api/tres_functions.c} (63%)
 create mode 100644 src/layouts/Makefile.am
 create mode 100644 src/layouts/Makefile.in
 create mode 100644 src/layouts/power/Makefile.am
 create mode 100644 src/layouts/power/Makefile.in
 create mode 100644 src/layouts/power/cpufreq.c
 create mode 100644 src/layouts/power/default.c
 create mode 100644 src/layouts/unit/Makefile.am
 create mode 100644 src/layouts/unit/Makefile.in
 create mode 100644 src/layouts/unit/default.c
 create mode 100644 src/plugins/accounting_storage/mysql/as_mysql_convert.c
 rename src/{slurmd/slurmd/xcpu.h => plugins/accounting_storage/mysql/as_mysql_convert.h} (71%)
 create mode 100644 src/plugins/accounting_storage/mysql/as_mysql_tres.c
 rename src/{sbatch/mult_cluster.h => plugins/accounting_storage/mysql/as_mysql_tres.h} (81%)
 create mode 100644 src/plugins/acct_gather_energy/cray/Makefile.am
 create mode 100644 src/plugins/acct_gather_energy/cray/Makefile.in
 create mode 100644 src/plugins/acct_gather_energy/cray/acct_gather_energy_cray.c
 create mode 100644 src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/Makefile.am
 create mode 100644 src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/Makefile.in
 create mode 100644 src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/hdf5_api.c
 create mode 100644 src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/hdf5_api.h
 create mode 100644 src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/sh5util.c
 rename src/plugins/{slurmctld/dynalloc/constants.h => acct_gather_profile/hdf5/sh5util/libsh5util_old/sh5util_old.h} (78%)
 create mode 100644 src/plugins/acct_gather_profile/hdf5/sh5util/sh5util.h
 create mode 100644 src/plugins/burst_buffer/Makefile.am
 create mode 100644 src/plugins/burst_buffer/Makefile.in
 create mode 100644 src/plugins/burst_buffer/common/Makefile.am
 create mode 100644 src/plugins/burst_buffer/common/Makefile.in
 create mode 100644 src/plugins/burst_buffer/common/burst_buffer_common.c
 create mode 100644 src/plugins/burst_buffer/common/burst_buffer_common.h
 create mode 100644 src/plugins/burst_buffer/cray/Makefile.am
 create mode 100644 src/plugins/burst_buffer/cray/Makefile.in
 create mode 100644 src/plugins/burst_buffer/cray/burst_buffer_cray.c
 create mode 100755 src/plugins/burst_buffer/cray/dw_wlm_cli
 create mode 100644 src/plugins/burst_buffer/generic/Makefile.am
 create mode 100644 src/plugins/burst_buffer/generic/Makefile.in
 create mode 100755 src/plugins/burst_buffer/generic/bb_get_state.example
 create mode 100644 src/plugins/burst_buffer/generic/bb_start_stage_in.example
 create mode 100755 src/plugins/burst_buffer/generic/bb_start_stage_out.example
 create mode 100755 src/plugins/burst_buffer/generic/bb_stop_stage_out.example
 create mode 100644 src/plugins/burst_buffer/generic/burst_buffer_generic.c
 create mode 100644 src/plugins/jobcomp/elasticsearch/Makefile.am
 create mode 100644 src/plugins/jobcomp/elasticsearch/Makefile.in
 create mode 100644 src/plugins/jobcomp/elasticsearch/jobcomp_elasticsearch.c
 create mode 100644 src/plugins/mpi/pmi2/ring.c
 create mode 100644 src/plugins/mpi/pmi2/ring.h
 create mode 100644 src/plugins/power/Makefile.am
 create mode 100644 src/plugins/power/Makefile.in
 create mode 100644 src/plugins/power/common/Makefile.am
 create mode 100644 src/plugins/power/common/Makefile.in
 create mode 100644 src/plugins/power/common/power_common.c
 create mode 100644 src/plugins/power/common/power_common.h
 create mode 100644 src/plugins/power/cray/Makefile.am
 create mode 100644 src/plugins/power/cray/Makefile.in
 create mode 100644 src/plugins/power/cray/power_cray.c
 create mode 100644 src/plugins/power/none/Makefile.am
 create mode 100644 src/plugins/power/none/Makefile.in
 create mode 100644 src/plugins/power/none/power_none.c
 delete mode 100644 src/plugins/slurmctld/dynalloc/Makefile.am
 delete mode 100644 src/plugins/slurmctld/dynalloc/allocate.c
 delete mode 100644 src/plugins/slurmctld/dynalloc/allocate.h
 delete mode 100644 src/plugins/slurmctld/dynalloc/allocator.c
 delete mode 100644 src/plugins/slurmctld/dynalloc/argv.c
 delete mode 100644 src/plugins/slurmctld/dynalloc/argv.h
 delete mode 100644 src/plugins/slurmctld/dynalloc/deallocate.c
 delete mode 100644 src/plugins/slurmctld/dynalloc/info.c
 delete mode 100644 src/plugins/slurmctld/dynalloc/info.h
 delete mode 100644 src/plugins/slurmctld/dynalloc/job_ports_list.c
 create mode 100644 src/plugins/topology/hypercube/Makefile.am
 rename src/plugins/{slurmctld/dynalloc => topology/hypercube}/Makefile.in (88%)
 create mode 100644 src/plugins/topology/hypercube/topology_hypercube.c
 create mode 100644 src/sacctmgr/reservation_functions.c
 create mode 100644 src/sacctmgr/tres_function.c
 delete mode 100644 src/sbatch/mult_cluster.c
 create mode 100644 src/scontrol/info_assoc_mgr.c
 rename src/{plugins/slurmctld/dynalloc/job_ports_list.h => scontrol/info_burst_buffer.c} (63%)
 create mode 100644 src/scontrol/info_layout.c
 create mode 100644 src/scontrol/update_layout.c
 create mode 100644 src/scontrol/update_powercap.c
 create mode 100644 src/slurmctld/burst_buffer.c
 create mode 100644 src/slurmctld/burst_buffer.h
 create mode 100644 src/slurmctld/powercapping.c
 create mode 100644 src/slurmctld/powercapping.h
 create mode 100644 src/slurmctld/sicp.c
 create mode 100644 src/slurmctld/sicp.h
 delete mode 100644 src/slurmd/slurmd/xcpu.c
 create mode 100644 src/sview/bb_info.c
 delete mode 100644 testsuite/expect/inc21.21.1
 delete mode 100644 testsuite/expect/inc21.21.2
 delete mode 100644 testsuite/expect/inc21.21.3
 delete mode 100644 testsuite/expect/inc21.21.4
 create mode 100644 testsuite/expect/inc21.21_tests
 create mode 100644 testsuite/expect/inc21.34.1
 create mode 100644 testsuite/expect/inc21.34.2
 create mode 100644 testsuite/expect/inc21.34_tests
 create mode 100755 testsuite/expect/test1.100
 create mode 100755 testsuite/expect/test1.76
 create mode 100755 testsuite/expect/test1.76.bash
 create mode 100755 testsuite/expect/test1.76.batch
 create mode 100755 testsuite/expect/test1.77
 mode change 100644 => 100755 testsuite/expect/test12.2.prog.c
 create mode 100755 testsuite/expect/test14.10
 create mode 100755 testsuite/expect/test17.39
 create mode 100755 testsuite/expect/test17.40
 create mode 100755 testsuite/expect/test2.25
 create mode 100755 testsuite/expect/test21.34
 create mode 100755 testsuite/expect/test28.8
 create mode 100755 testsuite/expect/test28.9
 create mode 100755 testsuite/expect/test3.14
 create mode 100755 testsuite/expect/test3.15
 create mode 100755 testsuite/expect/test35.1
 create mode 100755 testsuite/expect/test35.2
 create mode 100755 testsuite/expect/test4.13
 create mode 100644 testsuite/expect/test7.17_configs/test7.17.1/gres.conf
 create mode 100644 testsuite/expect/test7.17_configs/test7.17.1/slurm.conf
 create mode 100644 testsuite/expect/test7.17_configs/test7.17.2/gres.conf
 create mode 100644 testsuite/expect/test7.17_configs/test7.17.2/slurm.conf
 create mode 100644 testsuite/expect/test7.17_configs/test7.17.3/gres.conf
 create mode 100644 testsuite/expect/test7.17_configs/test7.17.3/slurm.conf
 create mode 100644 testsuite/expect/test7.17_configs/test7.17.4/gres.conf
 create mode 100644 testsuite/expect/test7.17_configs/test7.17.4/slurm.conf
 create mode 100644 testsuite/expect/test7.17_configs/test7.17.5/slurm.conf
 create mode 100644 testsuite/expect/test7.17_configs/test7.17.6/gres.conf
 create mode 100644 testsuite/expect/test7.17_configs/test7.17.6/slurm.conf
 create mode 100644 testsuite/expect/test7.17_configs/test7.17.7/gres.conf
 create mode 100644 testsuite/expect/test7.17_configs/test7.17.7/slurm.conf

diff --git a/BUILD.NOTES b/BUILD.NOTES
index 74f29a8dd..8601681e6 100644
--- a/BUILD.NOTES
+++ b/BUILD.NOTES
@@ -260,5 +260,7 @@ Before new major release:
  - Test that the prolog and epilog run
  - Run the test suite with SlurmUser NOT being self
  - Test for errors reported by CLANG tool:
+   NOTE: Run "configure" with "--enable-developer" option so assert functions
+   take effect.
    scan-build -k -v make >m.sb.out 2>&1 
    # and look for output in /tmp/scan-build-<DATE>
diff --git a/META b/META
index 9b64fd2b9..1e9eb2ff3 100644
--- a/META
+++ b/META
@@ -7,10 +7,10 @@
 ##
   Meta:		1
   Name:		slurm
-  Major:	14
-  Minor:	11
-  Micro:	8
-  Version:	14.11.8
+  Major:	15
+  Minor:	08
+  Micro:	0
+  Version:	15.08.0
   Release:	1
 
 ##
@@ -31,6 +31,6 @@
 #        the config.h file that may be used throughout Slurm, so don't remove
 #	 them.
 ##
-  API_CURRENT:	28
+  API_CURRENT:	29
   API_AGE:	0
   API_REVISION:	0
diff --git a/Makefile.am b/Makefile.am
index 18f418895..46161d912 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -10,6 +10,7 @@ EXTRA_DIST =			\
 	etc/cgroup_allowed_devices_file.conf.example \
 	etc/init.d.slurm.in	\
 	etc/init.d.slurmdbd.in	\
+	etc/layouts.d.power.conf.example \
 	etc/slurm.conf.example	\
 	etc/slurm.epilog.clean	\
 	etc/slurmctld.service.in \
@@ -62,6 +63,7 @@ contrib:
 	$(MAKE) && \
 	cd ..;
 
+
 install-contrib:
 	@cd contribs && \
 	$(MAKE) DESTDIR=$(DESTDIR) install && \
diff --git a/Makefile.in b/Makefile.in
index 90edefddf..136c83b86 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -114,6 +114,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -122,10 +123,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -138,7 +141,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
@@ -322,6 +325,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -371,8 +376,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -391,6 +400,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -434,6 +446,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -457,6 +470,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -528,6 +542,7 @@ EXTRA_DIST = \
 	etc/cgroup_allowed_devices_file.conf.example \
 	etc/init.d.slurm.in	\
 	etc/init.d.slurmdbd.in	\
+	etc/layouts.d.power.conf.example \
 	etc/slurm.conf.example	\
 	etc/slurm.epilog.clean	\
 	etc/slurmctld.service.in \
diff --git a/NEWS b/NEWS
index 2d8c991bb..72093cb80 100644
--- a/NEWS
+++ b/NEWS
@@ -1,6 +1,426 @@
 This file describes changes in recent versions of Slurm. It primarily
 documents those changes that are of interest to users and administrators.
 
+* Changes in Slurm 15.08.0
+==========================
+ -- Fix issue with frontend systems (outside ALPs or BlueGene) where srun
+    wouldn't get the correct protocol version to launch a step.
+ -- Fix for message aggregation return rpcs where none of the messages are
+    intended for the head of the tree.
+ -- Fix segfault in sreport when there was no response from the dbd.
+ -- ALPS - Fix compile to not link against -ljob and -lexpat with every lib
+    or binary.
+ -- Fix testing for CR_Memory when CR_Memory and CR_ONE_TASK_PER_CORE are used
+    with select/linear.
+ -- When restarting or reconfiging the slurmctld, if job is completing handle
+    accounting correctly to avoid meaningless errors about overflow.
+ -- Add AccountingStorageTRES to scontrol show config
+ -- MySQL - Fix minor memory leak if a connection ever goes away whist using it.
+ -- ALPS - Make it so srun --hint=nomultithread works correctly.
+ -- Make MaxTRESPerUser work in sacctmgr.
+ -- Fix handling of requeued jobs with steps that are still finishing.
+ -- Cleaner copy for PriorityWeightTRES, it also fixes a core dump when trying
+    to free it otherwise.
+ -- Add environment variables SLURM_ARRAY_TASK_MAX, SLURM_ARRAY_TASK_MIN,
+    SLURM_ARRAY_TASK_STEP for job arrays.
+ -- Fix srun to use the NoInAddrAny TopologyParam option.
+ -- Change QOS flag name from PartitionQOS to OverPartQOS to be a better
+    description.
+ -- Fix rpmbuild issue on Centos7.
+
+* Changes in Slurm 15.08.0rc1
+==============================
+ -- Added power_cpufreq layout.
+ -- Make complete_batch_script RPC work with message aggregation.
+ -- Do not count slurmctld threads waiting in a "throttle" lock against the
+    daemon's thread limit as they are not contending for resources.
+ -- Modify slurmctld outgoing RPC logic to support more parallel tasks (up to
+    85 RPCs and 256 pthreads; the old logic supported up to 21 RPCs and 256
+    threads). This change can dramatically improve performance for RPCs
+    operating on small node counts.
+ -- Increase total backfill scheduler run time in stats_info_response_msg data
+    structure from 32 to 64 bits in order to prevent overflow.
+ -- Add NoInAddrAny option to TopologyParam in the slurm.conf which allows to
+    bind to the interface of return of gethostname instead of any address on
+    the node which avoid RSIP issues in Cray systems.  This is most likely
+    useful in other systems as well.
+ -- Fix memory leak in Slurm::load_jobs perl api call.
+ -- Added --noconvert option to sacct, sstat, squeue and sinfo which allows
+    values to be displayed in their original unit types (e.g. 2048M won't be
+    converted to 2G).
+ -- Fix spelling of node_rescrs to node_resrcs in Perl API.
+ -- Fix node state race condition, UNKNOWN->IDLE without configuration info.
+ -- Cray: Disable LDAP references from slurmstepd on job launch due for
+    improved scalability.
+ -- Remove srun "read header error" due to application termination race
+    condition.
+ -- Optimize sacct queries with additional db indexes.
+ -- Add SLURM_TOPO_LEN env variable for scontrol show topology.
+ -- Add free_mem to node information.
+ -- Fix abort of batch launch if prolog is running, wait for prolog instead.
+ -- Fix case where job would get the wrong cpu count when using
+    --ntasks-per-core and --cpus-per-task together.
+ -- Add TRESBillingWeights to partitions in slurm.conf which allows taking into
+    consideration any TRES Type when calculating the usage of a job.
+ -- Add PriorityWeightTRES slurm.conf option to be able to configure priority
+    factors for TRES types.
+
+* Changes in Slurm 15.08.0pre6
+==============================
+ -- Add scontrol options to view and modify layouts tables.
+ -- Add MsgAggregationParams which controls a reverse tree to the slurmctld
+    which can be used to aggregate messages to the slurmctld into a single
+    message to reduce communication to the slurmctld.  Currently only epilog
+    complete messages and node registration messages use this logic.
+ -- Add sacct and squeue options to print trackable resources.
+ -- Add sacctmgr option to display trackable resources.
+ -- If an salloc or srun command is executed on a "front-end" configuration,
+    that job will be assigned a slurmd shepherd daemon on the same host as used
+    to execute the command when possible rather than an slurmd daemon on an
+    arbitrary front-end node.
+ -- Add srun --accel-bind option to control how tasks are bound to GPUs and NIC
+    Generic RESources (GRES).
+ -- gres/nic plugin modified to set OMPI_MCA_btl_openib_if_include environment
+    variable based upon allocated devices (usable with OpenMPI and Melanox).
+ -- Make it so info options for srun/salloc/sbatch print with just 1 -v instead
+    of 4.
+ -- Add "no_backup_scheduling" SchedulerParameter to prevent jobs from being
+    scheduled when the backup takes over. Jobs can be submitted, modified and
+    cancelled while the backup is in control.
+ -- Enable native Slurm backup controller to reside on an external Cray node
+    when the "no_backup_scheduling" SchedulerParameter is used.
+ -- Removed TICKET_BASED fairshare. Consider using the FAIR_TREE algorithm.
+ -- Disable advanced reservation "REPLACE" option on IBM Bluegene systems.
+ -- Add support for control distribution of tasks across cores (in addition
+    to existing support for nodes and sockets, (e.g. "block", "cyclic" or
+    "fcyclic" task distribution at 3 levels in the hardware rather than 2).
+ -- Create db index on <cluster>_assoc_table.acct. Deleting accounts that didn't
+    have jobs in the job table could take a long time.
+ -- The performance of Profiling with HDF5 is improved. In addition, internal
+    structures are changed to make it easier to add new profile types,
+    particularly energy sensors. sh5util will continue to work with either
+    format.
+ -- Add partition information to sshare output if the --partition option
+    is specified on the sshare command line.
+ -- Add sreport -T/--tres option to identify Trackable RESources (TRES) to
+    report.
+ -- Display job in sacct when single step's cpus are different from the job
+    allocation.
+ -- Add association usage information to "scontrol show cache" command output.
+ -- MPI/MVAPICH plugin now requires Munge for authentication.
+ -- job_submit/lua: Add default_qos fields. Add job record qos.  Add partition
+    record allow_qos and qos_char fields.
+
+* Changes in Slurm 15.08.0pre5
+==============================
+ -- Add jobcomp/elasticsearch plugin. Libcurl is required for build. Configure
+    the server as follows: "JobCompLoc=http://YOUR_ELASTICSEARCH_SERVER:9200".
+ -- Scancel logic large re-written to better support job arrays.
+ -- Added a slurm.conf parameter PrologEpilogTimeout to control how long
+    prolog/epilog can run.
+ -- Added TRES (Trackable resources) to track Mem, GRES, license, etc
+    utilization.
+ -- Add re-entrant versions of glibc time functions (e.g. localtime) to Slurm
+    in order to eliminate rare deadlock of slurmstepd fork and exec calls.
+ -- Constrain kernel memory (if available) in cgroups.
+ -- Add PrologFlags option of "Contain" to create a proctrack container at
+    job resource allocation time.
+ -- Disable the OOM Killer in slurmd and slurmstepd's memory cgroup when using
+    MemSpecLimit.
+
+* Changes in Slurm 15.08.0pre4
+==============================
+ -- Burst_buffer/cray - Convert logic to use new commands/API names (e.g.
+    "dws_setup" rather than "bbs_setup").
+ -- Remove the MinJobAge size limitation. It can now exceed 65533 as it
+    is represented using an unsigned integer.
+ -- Verify that all plugin version numbers are identical to the component
+    attempting to load them. Without this verification, the plugin can reference
+    Slurm functions in the caller which differ (e.g. the underlying function's
+    arguments could have changed between Slurm versions).
+    NOTE: All plugins (except SPANK) must be built against the identical
+    version of Slurm in order to be used by any Slurm command or daemon. This
+    should eliminate some very difficult to diagnose problems due to use of old
+    plugins.
+ -- Increase the MAX_PACK_MEM_LEN define to avoid PMI2 failure when fencing
+    with large amount of ranks (to 1GB).
+ -- Requests by normal user to reset a job priority (even to lower it) will
+    result in an error saying to change the job's nice value instead.
+ -- SPANK naming changes: For environment variables set using the
+    spank_job_control_setenv() function, the values were available in the
+    slurm_spank_job_prolog() and slurm_spank_job_epilog() functions using
+    getenv where the name was given a prefix of "SPANK_". That prefix has
+    been removed for consistency with the environment variables available in
+    the Prolog and Epilog scripts.
+ -- Major additions to the layouts framework code.
+ -- Add "TopologyParam" configuration parameter. Optional value of "dragonfly"
+    is supported.
+ -- Optimize resource allocation for systems with dragonfly networks.
+ -- Add "--thread-spec" option to salloc, sbatch and srun commands. This is
+    the count of threads reserved for system use per node.
+ -- job_submit/lua: Enable reading and writing job environment variables.
+    For example: if (job_desc.environment.LANGUAGE == "en_US") then ...
+ -- Added two new APIs slurm_job_cpus_allocated_str_on_node_id()
+    and slurm_job_cpus_allocated_str_on_node() to print the CPUs id
+    allocated to a job.
+ -- Specialized memory (a node's MemSpecLimit configuration parameter) is not
+    available for allocation to jobs.
+ -- Modify scontrol update job to allow jobid specification without
+    the = sign. 'scontrol update job=123 ...' and 'scontrol update job 123 ...'
+    are both valid syntax.
+ -- Archive a month at a time when there are lots of records to archive.
+ -- Introduce new sbatch option '--kill-on-invalid-dep=yes|no' which allows
+    users to specify which behavior they want if a job dependency is not
+    satisfied.
+ -- Add Slurmdb::qos_get() interface to perl api.
+ -- If a job fails to start set the requeue reason to be:
+    job requeued in held state.
+ -- Implemented a new MPI key,value PMIX_RING() exchange algorithm as
+    an alternative to PMI2.
+ -- Remove possible deadlocks in the slurmctld when the slurmdbd is busy
+    archiving/purging.
+ -- Add DB_ARCHIVE debug flag for filtering out debug messages in the slurmdbd
+    when the slurmdbd is archiving/purging.
+ -- Fix some power_save mode issues: Parsing of SuspendTime in slurm.conf was
+    bad, powered down nodes would get set non-responding if there was an
+    in-flight message, and permit nodes to be powered down from any state.
+ -- Initialize variables in consumable resource plugin to prevent core dump.
+
+* Changes in Slurm 15.08.0pre3
+==============================
+ -- CRAY - addition of acct_gather_energy/cray plugin.
+ -- Add job credential to "Run Prolog" RPC used with a configuration of
+    PrologFlags=alloc. This allows the Prolog to be passed identification of
+    GPUs allocated to the job.
+ -- Add SLURM_JOB_CONSTAINTS to environment variables available to the Prolog.
+ -- Added "--mail=stage_out" option to job submission commands to notify user
+    when burst buffer state out is complete.
+ -- Require a "Reason" when using scontrol to set a node state to DOWN.
+ -- Mail notifications on job BEGIN, END and FAIL now apply to a job array as a
+    whole rather than generating individual email messages for each task in the
+    job array.
+ -- task/affinity - Fix memory binding to NUMA with cpusets.
+ -- Display job's estimated NodeCount based off of partition's configured
+    resources rather than the whole system's.
+ -- Add AuthInfo option of "cred_expire=#" to specify the lifetime of a job
+    step credential. The default value was changed from 1200 to 120 seconds.
+ -- Set the delay time for job requeue to the job credential lifetime (120
+    seconds by default). This insures that prolog runs on every node when a
+    job is requeued. (This change will slow down launch of re-queued jobs).
+ -- Add AuthInfo option of "cred_expire=#" to specify the lifetime of a job
+    step credential.
+ -- Remove srun --max-launch-time option. The option has not been functional
+    since Slurm version 2.0.
+ -- Add sockets and cores to TaskPluginParams' autobind option.
+ -- Added LaunchParameters configuration parameter. Have srun command test
+    locally for the executable file if LaunchParameters=test_exec or the
+    environment variable SLURM_TEST_EXEC is set. Without this an invalid
+    command will generate one error message per task launched.
+ -- Fix the slurm /etc/init.d script to return 0 upon stopping the
+    daemons and return 1 in case of failure.
+ -- Add the ability for a compute node to be allocated to multiple jobs, but
+    restricted to a single user. Added "--exclusive=user" option to salloc,
+    sbatch and srun commands. Added "owner" field to node record, visible using
+    the scontrol and sview commands. Added new partition configuration parameter
+    "ExclusiveUser=yes|no".
+
+* Changes in Slurm 15.08.0pre2
+==============================
+ -- Add the environment variables SLURM_JOB_ACCOUNT, SLURM_JOB_QOS
+    and SLURM_JOB_RESERVATION in the batch/srun jobs.
+ -- Add sview burst buffer display.
+ -- Properly enforce partition Shared=YES option. Previously oversubscribing
+    resources required gang scheduling to be configured.
+ -- Enable per-partition gang scheduling resource resolution (e.g. the partition
+    can have SelectTypeParameters=CR_CORE, while the global value is CR_SOCKET).
+ -- Make it so a newer version of a slurmstepd can talk to an older srun.
+    allocation. Nodes could have been added while waiting for an allocation.
+ -- Expanded --cpu-freq parameters to include min-max:governor specifications.
+    --cpu-freq now supported on salloc and sbatch.
+ -- Add support for optimized job allocations with respect to SGI Hypercube
+    topology.
+    NOTE: Only supported with select/linear plugin.
+    NOTE: The program contribs/sgi/netloc_to_topology can be used to build
+    Slurm's topology.conf file.
+ -- Remove 64k validation of incoming RPC nodelist size. Validated at 64MB
+    when unpacking.
+ -- In slurmstepd() add the user primary group if it is not part of the
+    groups sent from the client.
+ -- Added BurstBuffer field to advanced reservations.
+ -- For advanced reservation, replace flag "License_only" with flag "Any_Nodes".
+    It can be used to indicate the an advanced reservation resources (licenses
+    and/or burst buffers) can be used with any compute nodes.
+ -- Allow users to specify the srun --resv-ports as 0 in which case no ports
+    will be reserved. The default behaviour is to allocate one port per task.
+ -- Interpret a partition configuration of "Nodes=ALL" in slurm.conf as
+    including all nodes defined in the cluster.
+ -- Added new configuration parameters PowerParameters and PowerPlugin.
+ -- Added power management plugin infrastructure.
+ -- If job already exceeded one of its QOS/Accounting limits do not
+    return error if user modifies QOS unrelated job settings.
+ -- Added DebugFlags value of "Power".
+ -- When caching user ids of AllowGroups use both getgrnam_r() and getgrent_r()
+    then remove eventual duplicate entries.
+ -- Remove rpm dependency between slurm-pam and slurm-devel.
+ -- Remove support for the XCPU (cluster management) package.
+ -- Add Slurmdb::jobs_get() interface to perl api.
+ -- Performance improvement when sending data from srun to stepds when
+    processing fencing.
+ -- Add the feature to specify arbitrary field separator when running
+    sacct -p or sacct -P. The command line option is --separator.
+ -- Introduce slurm.conf parameter to use Proportional Set Size (PSS) instead
+    of RSS to determinate the memory footprint of a job.
+    Add an slurm.conf option not to kill jobs that is over memory limit.
+ -- Add job submission command options: --sicp (available for inter-cluster
+    dependencies) and --power (specify power management options) to salloc,
+    sbatch, and srun commands.
+ -- Add DebugFlags option of SICP (inter-cluster option logging).
+ -- In order to support inter-cluster job dependencies, the MaxJobID
+    configuration parameter default value has been reduced from 4,294,901,760
+    to 2,147,418,112 and it's maximum value is now 2,147,463,647.
+    ANY JOBS WITH A JOB ID ABOVE 2,147,463,647 WILL BE PURGED WHEN SLURM IS
+    UPGRADED FROM AN OLDER VERSION!
+ -- Add QOS name to the output of a partition in squeue/scontrol/sview/smap.
+
+* Changes in Slurm 15.08.0pre1
+==============================
+ -- Add sbcast support for file transfer to resources allocated to a job step
+    rather than a job allocation.
+ -- Change structures with association in them to assoc to save space.
+ -- Add support for job dependencies jointed with OR operator (e.g.
+    "--depend=afterok:123?afternotok:124").
+ -- Add "--bb" (burst buffer specification) option to salloc, sbatch, and srun.
+ -- Added configuration parameters BurstBufferParameters and BurstBufferType.
+ -- Added burst_buffer plugin infrastructure (needs many more functions).
+ -- Make it so when the fanout logic comes across a node that is down we abandon
+    the tree to avoid worst case scenarios when the entire branch is down and
+    we have to try each serially.
+ -- Add better error reporting of invalid partitions at submission time.
+ -- Move will-run test for multiple clusters from the sbatch code into the API
+    so that it can be used with DRMAA.
+ -- If a non-exclusive allocation requests --hint=nomultithread on a
+    CR_CORE/SOCKET system lay out tasks correctly.
+ -- Avoid including unused CPUs in a job's allocation when cores or sockets are
+    allocated.
+ -- Added new job state of STOPPED indicating processes have been stopped with a
+    SIGSTOP (using scancel or sview), but retain its allocated CPUs. Job state
+    returns to RUNNING when SIGCONT is sent (also using scancel or sview).
+ -- Added EioTimeout parameter to slurm.conf. It is the number of seconds srun
+    waits for slurmstepd to close the TCP/IP connection used to relay data
+    between the user application and srun when the user application terminates.
+ -- Remove slurmctld/dynalloc plugin as the work was never completed, so it is
+    not worth the effort of continued support at this time.
+ -- Remove DynAllocPort configuration parameter.
+ -- Add advance reservation flag of "replace" that causes allocated resources
+    to be replaced with idle resources. This maintains a pool of available
+    resources that maintains a constant size (to the extent possible).
+ -- Added SchedulerParameters option of "bf_busy_nodes". When selecting
+    resources for pending jobs to reserve for future execution (i.e. the job
+    can not be started immediately), then preferentially select nodes that are
+    in use. This will tend to leave currently idle resources available for
+    backfilling longer running jobs, but may result in allocations having less
+    than optimal network topology. This option is currently only supported by
+    the select/cons_res plugin.
+ -- Permit "SuspendTime=NONE" as slurm.conf value rather than only a numeric
+    value to match "scontrol show config" output.
+ -- Add the 'scontrol show cache' command which displays the associations
+    in slurmctld.
+ -- Test more frequently for node boot completion before starting a job.
+    Provides better responsiveness.
+ -- Fix PMI2 singleton initialization.
+ -- Permit PreemptType=qos and PreemptMode=suspend,gang to be used together.
+    A high-priority QOS job will now oversubscribe resources and gang schedule,
+    but only if there are insufficient resources for the job to be started
+    without preemption. NOTE: That with PreemptType=qos, the partition's
+    Shared=FORCE:# configuration option will permit one job more per resource
+    to be run than specified, but only if started by preemption.
+ -- Remove the CR_ALLOCATE_FULL_SOCKET configuration option.  It is now the
+    default.
+ -- Fix a race condition in PMI2 when fencing counters can be out of sync.
+ -- Increase the MAX_PACK_MEM_LEN define to avoid PMI2 failure when fencing
+    with large amount of ranks.
+ -- Add QOS option to a partition.  This will allow a partition to have
+    all the limits a QOS has.  If a limit is set in both QOS the partition
+    QOS will override the job's QOS unless the job's QOS has the
+    OverPartQOS flag set.
+ -- The task_dist_states variable has been split into "flags" and "base"
+    components. Added SLURM_DIST_PACK_NODES and SLURM_DIST_NO_PACK_NODES values
+    to give user greater control over task distribution. The srun --dist options
+    has been modified to accept a "Pack" and "NoPack" option. These options can
+    be used to override the CR_PACK_NODE configuration option.
+
+* Changes in Slurm 14.11.9
+==========================
+ -- Correct "sdiag" backfill cycle time calculation if it yields locks. A
+    microsecond value was being treated as a second value resulting in an
+    overflow in the calculation.
+ -- Fix segfault when updating timelimit on jobarray task.
+ -- Fix to job array update logic that can result in a task ID of 4294967294.
+ -- Fix of job array update, previous logic could fail to update some tasks
+    of a job array for some fields.
+ -- CRAY - Fix seg fault if a blade is replaced and slurmctld is restarted.
+ -- Fix plane distribution to allocate in blocks rather than cyclically.
+ -- squeue - Remove newline from job array ID value printed.
+ -- squeue - Enable filtering for job state SPECIAL_EXIT.
+ -- Prevent job array task ID being inappropriately set to NO_VAL.
+ -- MYSQL - Make it so you don't have to restart the slurmctld
+    to gain the correct limit when a parent account is root and you
+    remove a subaccount's limit which exists on the parent account.
+ -- MYSQL - Close chance of setting the wrong limit on an association
+    when removing a limit from an association on multiple clusters
+    at the same time.
+ -- MYSQL - Fix minor memory leak when modifying an association but no
+    change was made.
+ -- srun command line of either --mem or --mem-per-cpu will override both the
+    SLURM_MEM_PER_CPU and SLURM_MEM_PER_NODE environment variables.
+ -- Prevent slurmctld abort on update of advanced reservation that contains no
+    nodes.
+ -- ALPS - Revert commit 2c95e2d22 which also removes commit 2e2de6a4 allowing
+    cray with the SubAllocate option to work as it did with 2.5.
+ -- Properly parse CPU frequency data on POWER systems.
+ -- Correct sacct man pages describing -i option.
+ -- Capture salloc/srun information in sdiag statistics.
+ -- Fix bug in node selection with topology optimization.
+ -- Don't set distribution when srun requests 0 memory.
+ -- Read in correct number of nodes from SLURM_HOSTFILE when specifying nodes
+    and --distribution=arbitrary.
+ -- Fix segfault in Bluegene setups where RebootQOSList is defined in
+    bluegene.conf and accounting is not setup.
+ -- MYSQL - Update mod_time when updating a start job record or adding one.
+ -- MYSQL - Fix issue where if an association id ever changes while at least
+    a portion of a job array is pending after its initial start in the
+    database, it could create another row for the remaining array instead
+    of using the already existing row.
+ -- Fix scheduling anomaly with job arrays submitted to multiple partitions,
+    jobs could be started out of priority order.
+ -- If a host has suspended jobs do not reboot it. Reboot only hosts
+    with no jobs in any state.
+ -- ALPS - Fix issue when using --exclusive flag on srun to do the correct
+    thing (-F exclusive) instead of -F share.
+ -- Fix various memory leaks in the Perl API.
+ -- Fix a bug in the controller which displayed jobs in CF state as RUNNING.
+ -- Preserve advanced _core_ reservation when nodes added/removed/resized on
+    slurmctld restart. Rebuild core_bitmap as needed.
+ -- Fix for non-standard Munge port location for srun/pmi use.
+ -- Fix gang scheduling/preemption issue that could cancel job at startup.
+ -- Fix a bug in squeue which prevented squeue -tPD to print array jobs.
+ -- Sort job arrays in job queue according to array_task_id when priorities are
+    equal.
+ -- Fix segfault in sreport when there was no response from the dbd.
+ -- ALPS - Fix compile to not link against -ljob and -lexpat with every lib
+    or binary.
+ -- Fix testing for CR_Memory when CR_Memory and CR_ONE_TASK_PER_CORE are used
+    with select/linear.
+ -- MySQL - Fix minor memory leak if a connection ever goes away whist using it.
+ -- ALPS - Make it so srun --hint=nomultithread works correctly.
+ -- Prevent job array task ID from being reported as NO_VAL if last task in the
+    array gets requeued.
+ -- Fix some potential deadlock issues when state files don't exist in the
+    association manager.
+ -- Correct RebootProgram logic when executed outside of a maintenance
+    reservation.
+ -- Requeue job if possible when slurmstepd aborts.
+
 * Changes in Slurm 14.11.8
 ==========================
  -- Eliminate need for user to set user_id on job_update calls.
@@ -38,11 +458,11 @@ documents those changes that are of interest to users and administrators.
     using the FAIR_TREE algorithm instead.
  -- Set job's reason to BadConstaints when job can't run on any node.
  -- Prevent abort on update of reservation with no nodes (licenses only).
- -- Prevent slurmctld from dumping core ifjob_resrcs is missing in the
+ -- Prevent slurmctld from dumping core if job_resrcs is missing in the
     job data structure.
  -- Fix squeue to print array task ids according to man page when
     SLURM_BITSTR_LEN is defined in the environment.
- -- In squeue sort jobs based on array job ID if available.
+ -- In squeue, sort jobs based on array job ID if available.
  -- Fix the calculation of job energy by not including the NO_VAL values.
  -- Advanced reservation fixes: enable update of bluegene reservation, avoid
     abort on multi-core reservations.
diff --git a/RELEASE_NOTES b/RELEASE_NOTES
index d7ed26713..2e2af8bf0 100644
--- a/RELEASE_NOTES
+++ b/RELEASE_NOTES
@@ -1,10 +1,13 @@
-RELEASE NOTES FOR SLURM VERSION 14.11
-18 November 2014
+RELEASE NOTES FOR SLURM VERSION 15.08
+12 May 2015
 
+IMPORTANT NOTES:
+ANY JOBS WITH A JOB ID ABOVE 2,147,463,647 WILL BE PURGED WHEN SLURM IS
+UPGRADED FROM AN OLDER VERSION! Reduce your configured MaxJobID value as needed
+prior to upgrading in order to eliminate these jobs.
 
-IMPORTANT NOTE:
 If using the slurmdbd (Slurm DataBase Daemon) you must update this first.
-The 14.11 slurmdbd will work with Slurm daemons of version 2.6 and above.
+The 15.08 slurmdbd will work with Slurm daemons of version 14.03 and above.
 You will not need to update all clusters at the same time, but it is very
 important to update slurmdbd first and having it running before updating
 any other clusters making use of it.  No real harm will come from updating
@@ -18,9 +21,9 @@ innodb_buffer_pool_size=64M
 under the [mysqld] reference in the my.cnf file and restarting the mysqld.
 This is needed when converting large tables over to the new database schema.
 
-Slurm can be upgraded from version 2.6 or 14.03 to version 14.11 without loss of
-jobs or other state information. Upgrading directly from an earlier version of
-Slurm will result in loss of state information.
+Slurm can be upgraded from version 14.03 or 14.11 to version 15.08 without loss
+of jobs or other state information. Upgrading directly from an earlier version
+of Slurm will result in loss of state information.
 
 If using SPANK plugins that use the Slurm APIs, they should be recompiled when
 upgrading Slurm to a new major release.
@@ -28,28 +31,75 @@ upgrading Slurm to a new major release.
 
 HIGHLIGHTS
 ==========
- -- Added job array data structure and removed 64k array size restriction.
- -- Added support for reserving CPUs and/or memory on a compute node for system
-    use.
- -- Added support for allocation of generic resources by model type for
-    heterogeneous systems (e.g. request a Kepler GPU, a Tesla GPU, or a GPU of
-    any type).
- -- Added support for non-consumable generic resources that are limited, but
-    can be shared between jobs.
- -- Added support for automatic job requeue policy based on exit value.
- -- Refactor job_submit/lua interface. LUA FUNCTIONS NEED TO CHANGE! The
-    lua script no longer needs to explicitly load meta-tables, but information
-    is available directly using names slurm.reservations, slurm.jobs,
-    slurm.log_info, etc. Also, the job_submit.lua script is reloaded when
-    updated without restarting the slurmctld daemon.
- -- Eliminate native Cray specific port management. Native Cray systems must
-    now use the MpiParams configuration parameter to specify ports to be used
-    for communications. When upgrading Native Cray systems from version 14.03,
-    all running jobs should be killed and the switch_cray_state file (in
-    SaveStateLocation of the nodes where the slurmctld daemon runs) must be
-    explicitly deleted.
- -- Added support for "floating reservations", an advanced reservation with a
-    start time that remains constant relative to the current time.
+ -- Added TRES (Trackable resources) to track utilization of memory, GRES,
+    burst buffer, license, and any other configurable resources in the
+    accounting database.
+ -- Add configurable billing weight that takes into consideration any TRES when
+    calculating a job's resource utilization.
+ -- Add configurable prioritization factor that takes into consideration any
+    TRES when calculating a job's resource utilization.
+ -- Add burst buffer support infrastructure. Currently available plugins
+    include
+    burst_buffer/generic (uses administrator supplied programs to manage file
+    staging) and burst_buffer/cray (uses Cray APIs to manage buffers).
+ -- Add power capping support for Cray systems with automatic rebalancing of
+    power allocation between nodes.
+ -- Modify slurmctld outgoing RPC logic to support more parallel tasks (up to
+    85 RPCs and 256 pthreads; the old logic supported up to 21 RPCs and 256
+    threads).
+ -- Add support for job dependencies joined with OR operator (e.g.
+    "--depend=afterok:123?afternotok:124").
+ -- Add advance reservation flag of "replace" that causes allocated resources
+    to be replaced with idle resources. This maintains a pool of available
+    resources of constant size (to the extent possible).
+ -- Permit PreemptType=qos and PreemptMode=suspend,gang to be used together.
+    A high-priority QOS job will now oversubscribe resources and gang schedule,
+    but only if there are insufficient resources for the job to be started
+    without preemption. NOTE: That with PreemptType=qos, the partition's
+    Shared=FORCE:# configuration option will permit one job more per resource
+    to be run than specified, but only if started by preemption.
+ -- A partition can now have an associated QOS.  This will allow a partition
+    to have all the limits a QOS has.  If a limit is set in both QOS
+    the partition QOS will override the job's QOS unless the job's QOS has the
+    'OverPartQOS' flag set.
+ -- Expanded --cpu-freq parameters to include min-max:governor specifications.
+    --cpu-freq now supported on salloc and sbatch.
+ -- Add support for optimized job allocations with respect to SGI Hypercube
+    topology.
+    NOTE: Only supported with select/linear plugin.
+    NOTE: The program contribs/sgi/netloc_to_topology can be used to build
+    Slurm's topology.conf file.
+ -- Add the ability for a compute node to be allocated to multiple jobs, but
+    restricted to a single user. Added "--exclusive=user" option to salloc,
+    the scontrol and sview commands. Added new partition configuration parameter
+    "ExclusiveUser=yes|no".
+ -- Verify that all plugin version numbers are identical to the component
+    attempting to load them. Without this verification, the plugin can reference
+    Slurm functions in the caller which differ (e.g. the underlying function's
+    arguments could have changed between Slurm versions).
+    NOTE: All plugins (except SPANK) must be built against the identical
+    version of Slurm in order to be used by any Slurm command or daemon. This
+    should eliminate some very difficult to diagnose problems due to use of old
+    plugins.
+ -- Optimize resource allocation for systems with dragonfly networks.
+ -- Added plugin to record job completion information using Elasticsearch.
+    Libcurl is required for build. Configure slurm.conf as follows
+    JobCompType=jobcomp/elasticsearch
+    JobCompLoc=http://YOUR_ELASTICSEARCH_SERVER:9200
+ -- DATABASE SCHEME HAS CHANGED.  WHEN UPDATING THE MIGRATION PROCESS MAY TAKE
+    SOME AMOUNT OF TIME DEPENDING ON HOW LARGE YOUR DATABASE IS.  WHILE UPDATING
+    NO RECORDS WILL BE LOST, BUT THE SLURMDBD MAY NOT BE RESPONSIVE DURING THE
+    UPDATE. IT WILL ALSO NOT BE POSSIBLE TO AUTOMATICALLY REVERT THE DATABASE
+    TO THE FORMAT FOR AN EARLIER VERSION OF SLURM. PLAN ACCORDINGLY.
+ -- The performance of Profiling with HDF5 is improved. In addition, internal
+    structures are changed to make it easier to add new profile types,
+    particularly energy sensors. This has introduced an operational issue. See
+    OTHER CHANGES.
+ -- MPI/MVAPICH plugin now requires Munge for authentication.
+ -- In order to support inter-cluster job dependencies, the MaxJobID
+    configuration parameter default value has been reduced from 4,294,901,760
+    to 2,147,418,112 and its maximum value is now 2,147,463,647.
+    ANY JOBS WITH A JOB ID ABOVE 2,147,463,647 WILL BE PURGED WHEN SLURM IS
+    UPGRADED FROM AN OLDER VERSION!
 
 RPMBUILD CHANGES
 ================
@@ -57,227 +107,369 @@ RPMBUILD CHANGES
 
 CONFIGURATION FILE CHANGES (see man appropriate man page for details)
 =====================================================================
- -- Modify etc/cgroup.release_common.example to set specify full path to the
-    scontrol command. Also find cgroup mount point by reading cgroup.conf file.
- -- Added SchedulerParameters options of bf_yield_interval and bf_yield_sleep
-    to control how frequently and for how long the backfill scheduler will
-    relinquish its locks.
- -- To support larger numbers of jobs when the StateSaveDirectory is on a
-    file system that supports a limited number of files in a directory, add a
-    subdirectory called "hash.#" based upon the last digit of the job ID.
- -- Added GRES type (e.g. model name) and "non_consume" fields for resources
-    that are limited, but can be shared between jobs.
- -- Modify AuthInfo configuration parameter to accept credential lifetime
-    option.
- -- Added ChosLoc configuration parameter in slurm.conf (Chroot OS tool
-    location).
- -- Added MemLimitEnforce configuration parameter in slurm.conf (Used to disable
-    enforcement of memory limits)
- -- Added PriorityParameters configuration parameter in slurm.conf (String used
-    to hold configuration information for the PriorityType plugin).
- -- Added RequeueExit and RequeueExitHold configuration parameter in slurm.conf
-    (Defines job exit codes which trigger a job being requeued and/or held).
- -- Add SelectTypeParameters option of CR_PACK_NODES to pack a job's tasks
-    tightly on its allocated nodes rather than distributing them evenly across
-    the allocated nodes.
- -- Added PriorityFlags option of Calculate_Running to continue recalculating
-    the priority of running jobs.
- -- Add new node configuration parameters CoreSpecCount, CPUSpecList and
-    MemSpecLimit which support the reservation of resources for system use
-    with Linux cgroup.
- -- Added AllowSpecResourcesUsage configuration parameter in slurm.conf. This
-    allows jobs to use specialized resources on nodes allocated to them if the
-    job designates --core-spec=0.
- -- Add new SchedulerParameters option of build_queue_timeout to throttle how
-    much time can be consumed building the job queue for scheduling.
- -- Added HealthCheckNodeState option of "cycle" to cycle through the compute
-    nodes over the course of HealthCheckInterval rather than running all at
-    the same time.
- -- Added CpuFreqDef configuration parameter in slurm.conf to specify the
-    default CPU frequency and governor to be set at job end.
- -- Add RoutePlugin with route/default and route/topology implementations to
-    allow messages to be forwarded through the switch network defined in
-    the topology.conf file for TopologyPlugin=topology/tree.
- -- Add DebugFlags=Route to allow debugging of RoutePlugin.
- -- Added SchedulerParameters options of bf_max_job_array_resv to control how
-    many tasks of a job array should have resources reserved for them.
- -- Add ability to include other files in slurm.conf based upon the ClusterName.
- -- Add SchedulerParameters option of pack_serial_at_end to put serial jobs at
-    the end of the available nodes rather than using a best fit algorithm.
- -- Add PrivateData value of "cloud". If set, powered down nodes in the cloud
-    will be visible.
+ -- Remove DynAllocPort configuration parameter.
+ -- Added new configuration parameters to support burst buffers:
+    BurstBufferParameters, and BurstBufferType.
+ -- Added SchedulerParameters option of "bf_busy_nodes". When selecting
+    resources for pending jobs to reserve for future execution (i.e. the job
+    can not be started immediately), then preferentially select nodes that are
+    in use. This will tend to leave currently idle resources available for
+    backfilling longer running jobs, but may result in allocations having less
+    than optimal network topology. This option is currently only supported by
+    the select/cons_res plugin.
+ -- Added "EioTimeout" parameter to slurm.conf. It is the number of seconds srun
+    waits for slurmstepd to close the TCP/IP connection used to relay data
+    between the user application and srun when the user application terminates.
+ -- Remove the CR_ALLOCATE_FULL_SOCKET configuration option.  It is now the
+    default.
+ -- Added DebugFlags values of "CpuFrequency", "Power" and "SICP".
+ -- Added CpuFreqGovernors which lists governors allowed to be set with
+    --cpu-freq on salloc, sbatch, and srun.
+ -- Interpret a partition configuration of "Nodes=ALL" in slurm.conf as
+    including all nodes defined in the cluster.
+ -- Added new configuration parameters PowerParameters and PowerPlugin.
+ -- Add AuthInfo option of "cred_expire=#" to specify the lifetime of a job
+    step credential. The default value was changed from 1200 to 120 seconds.
+    This value also controls how long a requeued job must wait before it can
+    be started again.
+ -- Added LaunchParameters configuration parameter.
+ -- Added new partition configuration parameter "ExclusiveUser=yes|no".
+ -- Add TopologyParam configuration parameter. Optional value of "dragonfly"
+    is supported.
+ -- Added a slurm.conf parameter "PrologEpilogTimeout" to control how long
+    prolog/epilog can run.
+ -- Add PrologFlags option of "Contain" to create a proctrack container at
+    job resource allocation time.
 
 DBD CONFIGURATION FILE CHANGES (see "man slurmdbd.conf" for details)
 ====================================================================
- -- Added DebugFlags
 
 
 COMMAND CHANGES (see man pages for details)
 ===========================================
- -- Improve qsub wrapper support for passing environment variables.
- -- Modify sdiag to report Slurm RPC traffic by type, count and time consumed.
- -- Enable display of nodes anticipated to be used for pending jobs by squeue,
-    sview or scontrol.
- -- Modify squeue --start option to print the nodes expected to be used for
-    pending job (in addition to expected start time, etc.).
- -- Add srun --cpu-freq options to set the CPU governor (OnDemand, Performance,
-    PowerSave or UserSpace).
- -- Added squeue -O/--Format option that makes all job and step fields available
-    for printing.
- -- Add "CPUs" count to output of "scontrol show step".
- -- Add job "reboot" option for Linux clusters. This invokes the configured
-    RebootProgram to reboot nodes allocated to a job before it begins execution.
- -- Added squeue -O/--Format option that makes all job and step fields available
-    for printing.
- -- Add "CPUs" count to output of "scontrol show step".
- -- Added support for job email triggers: TIME_LIMIT, TIME_LIMIT_90 (reached
-    90% of time limit), TIME_LIMIT_80 (reached 80% of time limit), and
-    TIME_LIMIT_50 (reached 50% of time limit). Applies to salloc, sbatch and
-    srun commands.
- -- Added srun --export option to set/export specific environment variables.
- -- Scontrol modified to print separate error messages for job arrays with
-    different exit codes on the different tasks of the job array. Applies to
-    job suspend and resume operations.
- -- Add node state string suffix of "$" to identify nodes in maintenance
-    reservation or scheduled for reboot. This applies to scontrol, sinfo,
-    and sview commands.
- -- Enable scontrol to clear a nodes's scheduled reboot by setting its state
-    to "RESUME".
- -- Added squeue -P/--priority option that can be used to display pending jobs
-    in the same order as used by the Slurm scheduler even if jobs are submitted
-    to multiple partitions (job is reported once per usable partition).
- -- Add sbatch job array option to limit the number of simultaneously running
-    tasks from a job array (e.g. "--array=0-15%4").
- -- Removed --cpu_bind from sbatch and salloc.  It just seemed to cause
-    confusion and wasn't ever handled in the allocation.  A user can now only
-    specify the option with srun.
- -- Modify scontrol job operations to accept comma delimited list of job IDs.
-    Applies to job update, hold, release, suspend, resume, requeue, and
-    requeuehold operations.
- -- Added ability for "scontrol update" to references jobs by JobName (and
-    filtered optionally by UserID).
- -- Add support for an advanced reservation start time that remains constant
-    relative to the current time. This can be used to prevent the starting of
-    longer running jobs on select nodes for maintenance purpose. See the
-    reservation flag "TIME_FLOAT" for more information.
- -- Added "scontrol write config" option to save a copy of the current
-    configuration in a file containing a time stamp.
- -- Added "sacctmgr reconfigure" option to cause slurmdbd to read current
-    configuration.
-
+ -- Added "--cpu_freq" option to salloc and sbatch.
+ -- Add sbcast support for file transfer to resources allocated to a job step
+    rather than a job allocation (e.g. "sbcast -j 123.4 ...").
+ -- Added new job state of STOPPED indicating processes have been stopped with a
+    SIGSTOP (using scancel or sview), but retain its allocated CPUs. Job state
+    returns to RUNNING when SIGCONT is sent (also using scancel or sview).
+ -- The task_dist_states variable has been split into "flags" and "base"
+    components. Added SLURM_DIST_PACK_NODES and SLURM_DIST_NO_PACK_NODES values
+    to give user greater control over task distribution. The srun --dist options
+    has been modified to accept a "Pack" and "NoPack" option. These options can
+    be used to override the CR_PACK_NODE configuration option.
+ -- Added BurstBuffer specification to advanced reservation.
+ -- For advanced reservation, replace flag "License_only" with flag "Any_Nodes".
+    It can be used to indicate the advanced reservation resources (licenses
+    and/or burst buffers) can be used with any compute nodes.
+ -- Add "--sicp" (available for inter-cluster dependencies) and "--power"
+    (specify power management options) to salloc, sbatch and srun commands.
+ -- Added "--mail=stage_out" option to job submission commands to notify user
+    when burst buffer state out is complete.
+ -- Require a "Reason" when using scontrol to set a node state to DOWN.
+ -- Mail notifications on job BEGIN, END and FAIL now apply to a job array as a
+    whole rather than generating individual email messages for each task in the
+    job array.
+ -- Remove srun --max-launch-time option. The option has not been functional
+    or documented since Slurm version 2.0.
+ -- Add "--thread-spec" option to salloc, sbatch and srun commands. This is
+    the count of threads reserved for system use per node.
+ -- Introduce new sbatch option '--kill-on-invalid-dep=yes|no' which allows
+    users to specify which behavior they want if a job dependency is not
+    satisfied.
+ -- Add scontrol options to view and modify layouts tables.
+ -- Add srun --accel-bind option to control how tasks are bound to GPUs and NIC
+    Generic RESources (GRES).
+ -- Add sreport -T/--tres option to identify Trackable RESources (TRES) to
+    report.
 
 OTHER CHANGES
 =============
- -- Add job "reboot" option for Linux clusters. This invokes the configured
-    RebootProgram to reboot nodes allocated to a job before it begins execution.
- -- In the job_submit plugin: Remove all slurmctld locks prior to job_submit()
-    being called for improved performance. If any slurmctld data structures are
-    read or modified, add locks directly in the plugin.
- -- Cray MPMD (Multiple-Program Multiple-Data) support completed.
-
+ -- SPANK naming changes: For environment variables set using the
+    spank_job_control_setenv() function, the values were available in the
+    slurm_spank_job_prolog() and slurm_spank_job_epilog() functions using
+    getenv where the name was given a prefix of "SPANK_". That prefix has
+    been removed for consistency with the environment variables available in
+    the Prolog and Epilog scripts.
+ -- job_submit/lua: Enable reading and writing job environment variables.
+    For example: if (job_desc.environment.LANGUAGE == "en_US") then ...
+ -- The format of HDF5 node-step files has changed, so the sh5util program that
+    merges them into job files has changed. The command line options to sh5util
+    have not changed and will continue to service both versions for the next
+    few releases of Slurm.
+ -- Add environment variables SLURM_ARRAY_TASK_MAX, SLURM_ARRAY_TASK_MIN,
+    SLURM_ARRAY_TASK_STEP for job arrays.
 
 API CHANGES
 ===========
 
 Changed members of the following structs
 ========================================
- -- Changed the following fields in struct front_end_info:
-    node_state change from 16-bits to 32-bits
- -- Changed the following fields in struct node_info:
-    node_state change from 16-bits to 32-bits
- -- Changed the following fields in struct slurm_update_front_end_msg:
-    node_state change from 16-bits to 32-bits
- -- Changed the following fields in struct slurm_update_node_msg:
-    node_state change from 16-bits to 32-bits
-
-
-Added the following struct definitions
-======================================
- -- Added the following fields to struct stats_info_response_msg:
-    rpc_type_size, rpc_type_id, rpc_type_cnt, rpc_type_time,
-    rpc_user_size, rpc_user_id, rpc_user_cnt, rpc_user_time.
+ -- Changed the following fields in struct job_descriptor:
+    cpu_freq renamed cpu_freq_max.
+    task_dist changed from 16 to 32 bit.
+ -- Changed the following fields in struct job_info:
+    cpu_freq renamed cpu_freq_max.
+    job_state changed from 16 to 32 bit.
+ -- Changed the following fields in struct slurm_ctl_conf:
+    min_job_age, task_plugin_param changed from 16 to 32 bit.
+    bf_cycle_sum changed from 32 to 64 bit.
+ -- Changed the following fields in struct slurm_step_ctx_params_t:
+    cpu_freq renamed cpu_freq_max.
+    task_dist changed from 16 to 32 bit.
+ -- Changed the following fields in struct slurm_step_launch_params_t:
+    cpu_freq renamed cpu_freq_max.
+    task_dist changed from 16 to 32 bit.
+ -- Changed the following fields in struct slurm_step_layout_t:
+    task_dist changed from 16 to 32 bit.
+ -- Changed the following fields in struct job_step_info_t:
+    cpu_freq renamed cpu_freq_max.
+    state changed from 16 to 32 bit.
+ -- Changed the following fields in struct resource_allocation_response_msg_t:
+    cpu_freq renamed cpu_freq_max.
+ -- Changed the following fields in struct stats_info_response_msg:
+    bf_cycle_sum changed from 32 to 64 bits.
+ -- Changed the following fields in struct acct_gather_energy:
+    base_consumed_energy, consumed_energy, previous_consumed_energy
+    changed from 32 to 64 bits.
+ -- Changed the following fields in struct ext_sensors_data:
+    consumed_energy changed from 32 to 64 bits.
+
+Added members to the following struct definitions
+=================================================
+ -- Added the following fields to struct acct_gather_node_resp_msg:
+    sensor_cnt
+ -- Added the following fields to struct slurm_ctl_conf:
+    accounting_storage_tres, bb_type, cpu_freq_govs,
+    eio_timeout, launch_params, msg_aggr_params, power_parameters,
+    power_plugin, priority_weight_tres, prolog_epilog_timeout
+    topology_param.
  -- Added the following fields to struct job_descriptor:
-    job_id_str
+    bit_flags, burst_buffer, clusters, cpu_freq_min,
+    cpu_freq_gov, power_flags, sicp_mode,
+    tres_req_cnt.
  -- Added the following fields to struct job_info:
-    array_bitmap, array_max_tasks, array_task_str, reboot, sched_nodes
- -- Added the following fields to struct node_info:
-    gres_drain and gres_used
-    core_spec_cnt, cpu_spec_list, mem_spec_limit
- -- Added the following fields to struct slurm_ctl_conf:
-    chos_loc, cpu_freq_def, layouts, mem_limit_enforce, priority_params,
-    requeue_exit, requeue_exit_hold, route_plugin, srun_port_range,
-    use_spec_resources
- -- Added the following fields to struct suspend_msg:
-    job_id_str
+    bitflags, burst_buffer, cpu_freq_min, cpu_freq_gov,
+    billable_tres, power_flags, sicp_mode, tres_req_str, tres_alloc_str.
  -- Added the following fields to struct slurm_step_ctx_params_t:
-    profile
-
+    cpu_freq_min, cpu_freq_gov.
+ -- Added the following fields to struct slurm_step_launch_params_t:
+    accel_bind_type, cpu_freq_min, cpu_freq_gov.
+ -- Added the following fields to struct job_step_info_t:
+    cpu_freq_min, cpu_freq_gov, task_dist, tres_alloc_str.
+ -- Added the following fields to struct resource_allocation_response_msg_t:
+    account, cpu_freq_min, cpu_freq_gov, env_size, environment, qos
+    resv_name.
+ -- Added the following fields to struct reserve_info_t:
+    burst_buffer, core_cnt, resv_watts, tres_str.
+ -- Added the following fields to struct resv_desc_msg_t:
+    burst_buffer, core_cnt, resv_watts, tres_str.
+ -- Added the following fields to struct node_info_t:
+    free_mem, power, owner, tres_fmt_str.
+ -- Added the following fields to struct partition_info:
+    billing_weights_str, qos_char, tres_fmt_str
 
 Added the following struct definitions
 ======================================
-job_array_resp_msg_t - Job array response data structure
-
+ -- Added power_mgmt_data_t: Power managment data stucture
+ -- Added sicp_info_t: sicp data structure
+ -- Added sicp_info_msg_t: sicp data structure message
+ -- Added layout_info_msg_t: layout message data structure
+ -- Added update_layout_msg_t: layout update message data structure
+ -- Added step_alloc_info_msg_t: Step allocation message data structure.
+ -- Added powercap_info_msg_t: Powercap information data structure.
+ -- Added update_powercap_msg_t: Update message for powercap info
+    data structure.
+ -- Added will_run_response_msg_t: Data structure to test if a job can run.
+ -- Added assoc_mgr_info_msg_t: Association manager information data structure.
+ -- Added assoc_mgr_info_request_msg_t: Association manager request message.
+ -- Added network_callerid_msg_t: Network callerid data structure.
+ -- Added burst_buffer_gres_t: Burst buffer gres data structure.
+ -- Added burst_buffer_resv_t: Burst buffer reservation data structure.
+ -- Added burst_buffer_use_t: Burst buffer user information.
+ -- Added burst_buffer_info_t: Burst buffer information data structure.
+ -- Added burst_buffer_info_msg_t: Burst buffer message data structure.
 
 Changed the following enums and #defines
 ========================================
-CPU_FREQ_* - Identification of CPU governors to use
-DEBUG_FLAG_* - Many new DebugFlag values defined
-HEALTH_CHECK_CYCLE - Cycle through nodes for health check rather than trying
-		to run health check in parallel on large numbers of nodes
-KILL_STEPS_ONLY - Do not signal batch script
-MAIL_JOB_TIME* - Email event triggers based upon job's run time relative to
-		its time limit
-PRIORITY_FLAGS_* - New job priority calculation options
-RESERVE_FLAG_TIME_FLOAT - Identify a reservation with a start time that is
-		relative to the current time
-WAIT_ASSOC_*, WAIT_QOS_* - Many new job "reason" flags added to better identify
-		why a job is pending rather than running. This includes a
-		detailed identify of specific association and QOS limits.
-
+ -- Added INFINITE64: 64 bit infinite value.
+ -- Added NO_VAL: 64 bit no val value.
+ -- Added SLURM_EXTERN_CONT: Job step id of external process container.
+ -- Added DEFAULT_EIO_SHUTDOWN_WAIT: Time to wait after eio shutdown signal.
+ -- Added MAIL_JOB_STAGE_OUT: Mail job stage out flag.
+ -- Added CPU_FREQ_GOV_MASK: Mask for all defined cpu-frequency governors.
+ -- Added JOB_LAUNCH_FAILED: Job launch failed state.
+ -- Added JOB_STOPPED: Job stopped state.
+ -- Added SLURM_DIST*: Slurm distribution flags.
+ -- Added CPU_FREQ_GOV_MASK: Cpu frequency gov mask
+ -- Added CPU_FREQ_*_OLD: Vestigial values for transition from v14.11 systems.
+ -- Added PRIORITY_FLAGS_MAX_TRES: Flag for max tres limit.
+ -- Added KILL_INV_DEP and NO_KILL_INV_DEP: Invalid dependency flags.
+ -- Added CORE_SPEC_THREAD: Flag for thread count
+ -- Changed WAIT_QOS/ASSOC_*: changed to tres values.
+ -- Added WAIT_ASSOC/QOS_GRP/MAX_*: Association tres states.
+ -- Added ENERGY_DATA_*: sensor count and node energy added to
+    jobacct_data_type enum
+ -- Added accel_bind_type: enum for accel bind type
+ -- Added SLURM_POWER_FLAGS_LEVEL: Slurm power cap flag.
+ -- Added PART_FLAG_EXCLUSIVE_USER: mask for exclusive allocation of nodes.
+ -- Added PART_FLAG_EXC_USER_CLR: mask to clear exclusive allocation of nodes.
+ -- Added RESERVE_FLAG_ANY_NODES: mask to allow usage for any compute node.
+ -- Added RESERVE_FLAG_NO_ANY_NODES: mask to clear any compute node flag.
+ -- Added RESERVE_FLAG_REPLACE: Replace job resources flag.
+ -- Added DEBUG_FLAG_*: Debug flags for burst buffer, cpu freq, power
+    managment, sicp, DB archive, and tres.
+ -- Added PROLOG_FLAG_CONTAIN: Proctrack plugin container flag
+ -- Added ASSOC_MGR_INFO_FLAG_*: Association manager info flags for
+    association, user, and qos.
+ -- Added BB_FLAG_DISABLE_PERSISTENT: Disable peristant burst buffers.
+ -- Added BB_FLAG_ENABLE_PERSISTENT: Enable peristant burst buffers.
+ -- Added BB_FLAG_EMULATE_CRAY: Using dw_wlm_cli emulator flag
+ -- Added BB_FLAG_PRIVATE_DATA: Flag to allow buffer to be seen by owner.
+ -- Added BB_STATE_*: Burst buffer state masks
 
 Added the following API's
 =========================
-slurm_free_job_array_resp() - Free job array RPC responses
-slurm_kill_job2() - Similar to slurm_kill_job(), but supports job arrays
-slurm_kill_job_step2()- Similar to slurm_kill_job_step(), but supports job arrays
-slurm_requeue2()  - Similar to slurm_requeue(), but supports job arrays
-slurm_resume2()   - Similar to slurm_resume(), but supports job arrays
-slurm_suspend2()  - Similar to slurm_suspend(), but supports job arrays
-slurm_update_job2() - Similar to slurm_update_job(), but supports job arrays
-slurm_write_ctl_conf() - write the contents of slurm control configuration
-		message as loaded using slurm_load_ctl_conf() to a file
-
+ -- Added slurm_job_will_run2 to determine of a job could execute immediately.
+ -- Added APIs to load, print, and update layouts:
+    slurm_print_layout_info, slurm_load_layout, slurm_update_layout.
+ -- Added APIs to free and get association manager information:
+    slurm_load_assoc_mgr_info, slurm_free_assoc_mgr_info_msg,
+    slurm_free_assoc_mgr_info_request_msg
+ -- Added APIs to get cpu allocation from node name or id:
+    slurm_job_cpus_allocated_str_on_node_id,
+    slurm_job_cpus_allocated_str_on_node
+ -- Added APIs to load, free, print, and update powercap information
+    slurm_load_powercap, slurm_free_powercap_info_msg,
+    slurm_print_powercap_info_msg, slurm_update_powercap
+ -- Added slurm_burst_buffer_state_string to translate state number to string
+    equivalent
+ -- Added APIs to load, free, and print burst buffer information
+    slurm_load_burst_buffer_info, slurm_free_burst_buffer_info_msg,
+    slurm_print_burst_buffer_info_msg, slurm_print_burst_buffer_record
+ -- Added slurm_network_callerid to get the job id of a job based upon network
+    socket information.
 
 Changed the following API's
 ============================
-slurm_set_debugflags() - Debug flags argument changed from 32-bit to 64-bit
-slurm_signal_job_step() - Signal value changed from 16-bit to 32-bit
-
+ -- slurm_get_node_energy - Changed argument
+    acct_gather_energy_t **acct_gather_energy to uint16_t *sensors_cnt
+    and acct_gather_energy_t **energy
+ -- slurm_sbcast_lookup - Added step_id argument
 
 DBD API Changes
 ===============
 
 Changed members of the following structs
 ========================================
-
+ -- Changed slurmdb_association_cond_t to slurmdb_assoc_cond_t:
+ -- Changed the following fields in struct slurmdb_account_cond_t:
+    slurmdb_association_cond_t *assoc_cond changed to
+    slurmdb_assoc_cond_t *assoc_cond
+ -- Changed the following fields in struct slurmdb_assoc_rec:
+    slurmdb_association_rec *assoc_next to
+    slurmdb_assoc_rec *assoc_next
+    slurmdb_association_rec *assoc_next_id to
+    slurmdb_assoc_rec *assoc_next_id
+    assoc_mgr_association_usage_t *usage to
+    slurmdb_assoc_usage_t *usage
+ -- Changed the following fields in struct slurmdb_cluster_rec_t:
+    slurmdb_association_rec_t *root_assoc to
+    slurmdb_assoc_rec_t *root_assoc
+ -- Changed the following fields in struct slurmdb_job_rec_t:
+    state changed from 16 to 32 bit
+ -- Changed the following fields in struct slurmdb_qos_rec_t:
+    assoc_mgr_qos_usage_t *usage to slurmdb_qos_usage_t *usage
+ -- Changed the following fields in struct slurmdb_step_rec_t:
+    task_dist was changed from 16 to 32 bit
+ -- Changed the following fields in struct slurmdb_wckey_cond_t:
+    slurmdb_association_cond_t *assoc_cond to slurmdb_assoc_cond_t *assoc_cond
+ -- Changed the following fields in struct slurmdb_hierarchical_rec_t:
+    slurmdb_association_cond_t *assoc_cond to slurmdb_assoc_cond_t *assoc_cond
 
 Added the following struct definitions
 ======================================
- -- Added the following fields to struct slurmdb_association_rec:
-    assoc_next, assoc_next_id (for hash table)
+ -- Added slurmdb_tres_rec_t: Tres data structure for the slurmdbd.
+ -- Added slurmdb_assoc_cond_t: Slurmdbd association condition.
+ -- Added slurmdb_tres_cond_t: Tres condition data structure.
+ -- Added slurmdb_assoc_usage: Slurmdbd association usage limits.
+ -- Added slurmdbd_qos_usage_t: slurmdbd qos usage data structure.
+
+Added members to the following struct definitions
+=================================================
+ -- Added the following fields to struct slurmdb_accounting_rec_t:
+    tres_rec
+ -- Added the following fields to struct slurmdb_assoc_rec_t:
+    accounting_list, grp_tres, grp_tre_ctld, grp_tres_mins, grp_tres_mins_ctld,
+    grp_tres_run_mins, grp_tres_run_mins_ctld, max_tres_mins_pj,
+    max_tres_mins_ctld, max_tres_run_mins, max_tres_run_mins_ctld,
+    max_tres_pj, max_tres_ctld, max_tres_pn, max_tres_pn_ctld
+ -- Added the following fields to struct slurmdb_cluser_rec_t:
+    tres_str
+ -- Added the following fields to struct slurmdb_cluster_accounting_rec_t:
+    tres_rec
+ -- Added the following fields to struct slurmdb_event_rec_t:
+    tres_str
  -- Added the following fields to struct slurmdb_job_rec_t:
-    array_max_tasks, array_task_str, alloc_gres, req_gres, used_gres, resv_name
- -- Added the following fields to struct slurmdb_reservation_rec_t:
-    array_job_id, array_task_id
- -- Added the following fields to struct slurmdb_select_step_t:
-    array_task_id
+    tres_alloc_str, tres_req_str
  -- Added the following fields to struct slurmdb_qos_rec_t:
-    min_cpus_pj
+    grp_tres, grp_tres_ctld, grp_tres_mins, grp_tres_mins_ctld,
+    grp_tres_run_mins, grp_tres_run_mins_ctld, max_tres_mins_pj,
+    max_tres_mins_pj_ctld, max_tres_pj, max_tres_pj_ctld, max_tres_pn,
+    max_tres_pn_ctld, max_tres_pn, max_tres_pn_ctld, max_tres_pu,
+    max_tres_pu_ctld, max_tres_run_mins_pu, max_tres_run_mins_pu_ctld,
+    min_tres_pj, min_tres_pj_ctld
+ -- Added the following fields to struct slurmdb_reservation_rec_t:
+    tres_str, tres_list
+ -- Added the following fields to struct slurmdb_step_rec_t:
+    req_cpufreq_min, req_cpufreq_max, req_cpufreq_gov, tres_alloc_str
+ -- Added the following fields to struct slurmdb_used_limits_t:
+    tres, tres_run_mins
+ -- Added the following fields to struct slurmdb_report_assoc_rec_t:
+    tres_list
+ -- Added the following fields to struct slurmdb_report_user_rec_t:
+    tres_list
+ -- Added the following fields to struct slurmdb_report_cluster_rec_t:
+    accounting_list, tres_list
+ -- Added the following fields to struct slurmdb_report_job_grouping_t:
+    tres_list
+ -- Added the following fields to struct slurmdb_report_acct_grouping_t:
+    tres_list
+ -- Added the following fields to struct slurmdb_report_cluster_grouping_t:
+    tres_list
 
 Added the following enums and #defines
 ========================================
-
+-- Added QOS_FLAG_PART_QOS: partition qos flag
 
 Added the following API's
 =========================
-slurmdb_reconfig() - Reconfigure the slurmdbd (re-read the configuration file)
+ -- slurmdb_get_first_avail_cluster() - Get the first cluster that will run
+    a job
+ -- slurmdb_destroy_assoc_usage() - Helper function
+ -- slurmdb_destroy_qos_usage() - Helper function
+ -- slurmdb_free_assoc_mgr_state_msg() - Helper function
+ -- slurmdb_free_assoc_rec_members() - Helper function
+ -- slurmdb_destroy_assoc_rec() - Helper function
+ -- slurmdb_free_qos_rec_members() - Helper function
+ -- slurmdb_destroy_tres_rec_noalloc() - Helper function
+ -- slurmdb_destroy_tres_rec() - Helper function
+ -- slurmdb_destroy_tres_cond() - Helper function
+ -- slurmdb_destroy_assoc_cond() - Helper function
+ -- slurmdb_init_assoc_rec() - Helper function
+ -- slurmdb_init_tres_cond() - Helper function
+ -- slurmdb_tres_add() - Add tres to accounting
+ -- slurmdb_tres_get() - Get tres info from accounting
+
+Changed the following API's
+============================
+ -- slurmdb_associations_get() - Changed assoc_cond arg type to
+    slurmdb_assoc_cond_t
+ -- slurmdb_associations_modify() - Changed assoc_cond arg type to
+    slurmdb_assoc_cond_t and assoc arg type to slurmdb_assoc_rec_t
+ -- slurmdb_associations_remove() - Changed assoc_cond arg type to
+    slurmdb_assoc_cond_t
+ -- slurmdb_report_cluster_account_by_user(),
+    slurmdb_report_cluster_user_by_account(), slurmdb_problems_get() - Changed
+    assoc_cond arg type to slurmdb_assoc_cond_t
+ -- slurmdb_init_qos_rec() - added init_val argument
diff --git a/aclocal.m4 b/aclocal.m4
index 530c3acef..9a5cc6d27 100644
--- a/aclocal.m4
+++ b/aclocal.m4
@@ -1823,6 +1823,7 @@ m4_include([auxdir/x_ac_blcr.m4])
 m4_include([auxdir/x_ac_bluegene.m4])
 m4_include([auxdir/x_ac_cflags.m4])
 m4_include([auxdir/x_ac_cray.m4])
+m4_include([auxdir/x_ac_curl.m4])
 m4_include([auxdir/x_ac_databases.m4])
 m4_include([auxdir/x_ac_debug.m4])
 m4_include([auxdir/x_ac_dlfcn.m4])
@@ -1831,10 +1832,12 @@ m4_include([auxdir/x_ac_freeipmi.m4])
 m4_include([auxdir/x_ac_gpl_licensed.m4])
 m4_include([auxdir/x_ac_hwloc.m4])
 m4_include([auxdir/x_ac_iso.m4])
+m4_include([auxdir/x_ac_json.m4])
 m4_include([auxdir/x_ac_lua.m4])
 m4_include([auxdir/x_ac_man2html.m4])
 m4_include([auxdir/x_ac_munge.m4])
 m4_include([auxdir/x_ac_ncurses.m4])
+m4_include([auxdir/x_ac_netloc.m4])
 m4_include([auxdir/x_ac_nrt.m4])
 m4_include([auxdir/x_ac_ofed.m4])
 m4_include([auxdir/x_ac_pam.m4])
@@ -1847,4 +1850,3 @@ m4_include([auxdir/x_ac_setproctitle.m4])
 m4_include([auxdir/x_ac_sgi_job.m4])
 m4_include([auxdir/x_ac_slurm_ssl.m4])
 m4_include([auxdir/x_ac_sun_const.m4])
-m4_include([auxdir/x_ac_xcpu.m4])
diff --git a/auxdir/Makefile.am b/auxdir/Makefile.am
index 54a67d286..494bd06e5 100644
--- a/auxdir/Makefile.am
+++ b/auxdir/Makefile.am
@@ -16,6 +16,7 @@ EXTRA_DIST = \
     x_ac_bluegene.m4 \
     x_ac_cflags.m4 \
     x_ac_cray.m4 \
+    x_ac_curl.m4 \
     x_ac_databases.m4 \
     x_ac_debug.m4 \
     x_ac_dlfcn.m4 \
@@ -25,10 +26,12 @@ EXTRA_DIST = \
     x_ac_gpl_licensed.m4 \
     x_ac_hwloc.m4 \
     x_ac_iso.m4 \
+    x_ac_json.m4 \
     x_ac_lua.m4 \
     x_ac_man2html.m4 \
     x_ac_munge.m4 \
     x_ac_ncurses.m4 \
+    x_ac_netloc.m4 \
     x_ac_nrt.m4 \
     x_ac_pam.m4 \
     x_ac_printf_null.m4 \
@@ -37,5 +40,4 @@ EXTRA_DIST = \
     x_ac_setproctitle.m4 \
     x_ac_sgi_job.m4 \
     x_ac_slurm_ssl.m4 \
-    x_ac_sun_const.m4 \
-    x_ac_xcpu.m4
+    x_ac_sun_const.m4
diff --git a/auxdir/Makefile.in b/auxdir/Makefile.in
index 91c30a4d5..8b334427e 100644
--- a/auxdir/Makefile.in
+++ b/auxdir/Makefile.in
@@ -97,6 +97,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -105,10 +106,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -121,7 +124,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -190,6 +193,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -239,8 +244,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -259,6 +268,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -302,6 +314,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -325,6 +338,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -398,6 +412,7 @@ EXTRA_DIST = \
     x_ac_bluegene.m4 \
     x_ac_cflags.m4 \
     x_ac_cray.m4 \
+    x_ac_curl.m4 \
     x_ac_databases.m4 \
     x_ac_debug.m4 \
     x_ac_dlfcn.m4 \
@@ -407,10 +422,12 @@ EXTRA_DIST = \
     x_ac_gpl_licensed.m4 \
     x_ac_hwloc.m4 \
     x_ac_iso.m4 \
+    x_ac_json.m4 \
     x_ac_lua.m4 \
     x_ac_man2html.m4 \
     x_ac_munge.m4 \
     x_ac_ncurses.m4 \
+    x_ac_netloc.m4 \
     x_ac_nrt.m4 \
     x_ac_pam.m4 \
     x_ac_printf_null.m4 \
@@ -419,8 +436,7 @@ EXTRA_DIST = \
     x_ac_setproctitle.m4 \
     x_ac_sgi_job.m4 \
     x_ac_slurm_ssl.m4 \
-    x_ac_sun_const.m4 \
-    x_ac_xcpu.m4
+    x_ac_sun_const.m4
 
 all: all-am
 
diff --git a/auxdir/ltmain.sh b/auxdir/ltmain.sh
index c29db3631..bffda5418 100644
--- a/auxdir/ltmain.sh
+++ b/auxdir/ltmain.sh
@@ -70,7 +70,7 @@
 #         compiler:		$LTCC
 #         compiler flags:		$LTCFLAGS
 #         linker:		$LD (gnu? $with_gnu_ld)
-#         $progname:	(GNU libtool) 2.4.2 Debian-2.4.2-1.10ubuntu1
+#         $progname:	(GNU libtool) 2.4.2 Debian-2.4.2-1.11
 #         automake:	$automake_version
 #         autoconf:	$autoconf_version
 #
@@ -80,7 +80,7 @@
 
 PROGRAM=libtool
 PACKAGE=libtool
-VERSION="2.4.2 Debian-2.4.2-1.10ubuntu1"
+VERSION="2.4.2 Debian-2.4.2-1.11"
 TIMESTAMP=""
 package_revision=1.3337
 
diff --git a/auxdir/slurm.m4 b/auxdir/slurm.m4
index 62fbce28d..8aafeb080 100644
--- a/auxdir/slurm.m4
+++ b/auxdir/slurm.m4
@@ -65,18 +65,6 @@ AC_DEFUN([X_AC_SLURM_PORTS],
   AC_DEFINE_UNQUOTED(SLURMCTLD_PORT_COUNT, [$slurmctldportcount],
                      [Define the default port count for slurmctld])
   AC_SUBST(SLURMCTLD_PORT_COUNT)
-
-  AC_MSG_CHECKING([for dynamic allocation port to be enabled for Hadoop])
-  AC_ARG_ENABLE([dynamic-allocation],
-    AS_HELP_STRING([--enable-dynamic-allocation, enable dynamic allocation requests from user programs for Hadoop ([disabled])]))
-  if test "$enable_dynamic_allocation" = "yes"; then
-    AC_MSG_RESULT([yes])
-    slurm_enable_dynamic_allocation="yes"
-  else
-    AC_MSG_RESULT([no])
-    slurm_enable_dynamic_allocation="no"
-  fi
-  AM_CONDITIONAL(SLURM_ENABLE_DYNAMIC_ALLOCATION, test "$slurm_enable_dynamic_allocation" = "yes")
 ])
 
 dnl
@@ -180,7 +168,7 @@ for name in CURRENT REVISION AGE; do
    eval SLURM_API_$name=$API
 done
 SLURM_API_MAJOR=`expr $SLURM_API_CURRENT - $SLURM_API_AGE`
-SLURM_API_VERSION=`printf "0x%02x%02x%02x" $SLURM_API_MAJOR $SLURM_API_AGE $SLURM_API_REVISION`
+SLURM_API_VERSION=`printf "0x%02x%02x%02x" $((10#$SLURM_API_MAJOR)) $((10#$SLURM_API_AGE)) $((10#$SLURM_API_REVISION))`
 
 AC_DEFINE_UNQUOTED(SLURM_API_VERSION,  $SLURM_API_VERSION,  [Define the API's version])
 AC_DEFINE_UNQUOTED(SLURM_API_CURRENT,  $SLURM_API_CURRENT,  [API current version])
@@ -208,7 +196,7 @@ RELEASE="`perl -ne 'print,exit if s/^\s*RELEASE:\s*(\S*).*/\1/i' $srcdir/META`"
 # NOTE: SLURM_VERSION_NUMBER excludes any non-numeric component 
 # (e.g. "pre1" in the MICRO), but may be suitable for the user determining 
 # how to use the APIs or other differences. 
-SLURM_VERSION_NUMBER="`printf "0x%02x%02x%02x" $SLURM_MAJOR $SLURM_MINOR $SLURM_MICRO`"
+SLURM_VERSION_NUMBER="`printf "0x%02x%02x%02x" $((10#$SLURM_MAJOR)) $((10#$SLURM_MINOR)) $((10#$SLURM_MICRO))`"
 AC_DEFINE_UNQUOTED(SLURM_VERSION_NUMBER, $SLURM_VERSION_NUMBER, [SLURM Version Number])
 AC_SUBST(SLURM_VERSION_NUMBER)
 
diff --git a/auxdir/x_ac_cray.m4 b/auxdir/x_ac_cray.m4
index 3bc90132d..2e608eed2 100644
--- a/auxdir/x_ac_cray.m4
+++ b/auxdir/x_ac_cray.m4
@@ -16,6 +16,7 @@
 #    Tests for required libraries (non-Cray systems with a Cray network):
 #    * libalpscomm_sn
 #    * libalpscomm_cn
+#    Tests for DataWarp files
 #*****************************************************************************
 #
 # Copyright 2013 Cray Inc. All Rights Reserved.
@@ -29,6 +30,7 @@ AC_DEFUN([X_AC_CRAY],
   ac_have_alps_emulation="no"
   ac_have_alps_cray_emulation="no"
   ac_have_cray_network="no"
+  ac_really_no_cray="no"
 
   AC_ARG_WITH(
     [alps-emulation],
@@ -65,6 +67,15 @@ AC_DEFUN([X_AC_CRAY],
 	  *) AC_MSG_ERROR([bad value "$enableval" for --enable-cray-network]) ;:
       esac ]
   )
+  AC_ARG_ENABLE(
+    [really-no-cray],
+    AS_HELP_STRING(--enable-really-no-cray,Disable cray support for eslogin machines),
+      [ case "$enableval" in
+   yes) ac_really_no_cray="yes" ;;
+    no) ac_really_no_cray="no"  ;;
+     *) AC_MSG_ERROR([bad value "$enableval" for --enable-really-no-cray])  ;;
+      esac ]
+  )
 
   if test "$ac_have_alps_emulation" = "yes"; then
     ac_have_alps_cray="yes"
@@ -139,7 +150,7 @@ AC_DEFUN([X_AC_CRAY],
 	    ]]
 	  )],
 	  [have_cray_files="yes"],
-	  [AC_MSG_ERROR(There is a problem linking to the Cray api.)])
+	  [AC_MSG_ERROR(There is a problem linking to the Cray API)])
 
         # See if we have 5.2UP01 alpscomm functions
         AC_SEARCH_LIBS([alpsc_pre_suspend],
@@ -159,7 +170,7 @@ AC_DEFUN([X_AC_CRAY],
             ]]
           )],
           [have_cray_files="yes"],
-          [AC_MSG_ERROR(There is a problem linking to the Cray API.)])
+          [AC_MSG_ERROR(There is a problem linking to the Cray API)])
       fi
 
       LIBS="$saved_LIBS"
@@ -169,7 +180,7 @@ AC_DEFUN([X_AC_CRAY],
     done
 
     if test -z "$have_cray_files"; then
-      AC_MSG_ERROR([Unable to locate Cray API dir install. (usually in /opt/cray)])
+      AC_MSG_ERROR([Unable to locate Cray APIs (usually in /opt/cray/alpscomm and /opt/cray/job)])
     else
       if test "$ac_have_native_cray" = "yes"; then
         AC_MSG_NOTICE([Running on a Cray system in native mode without ALPS])
@@ -204,15 +215,23 @@ AC_DEFUN([X_AC_CRAY],
     AC_MSG_RESULT([$ac_have_alps_cray])
   fi
 
+  if test "$ac_really_no_cray" = "yes"; then
+    ac_have_alps_cray="no"
+    ac_have_real_cray="no"
+  fi
   if test "$ac_have_alps_cray" = "yes"; then
-    # libexpat is always required for the XML-RPC interface
+    # libexpat is always required for the XML-RPC interface, but it is only
+    # needed in the select plugin, so set it up here instead of everywhere.
     AC_CHECK_HEADER(expat.h, [],
 		    AC_MSG_ERROR([Cray BASIL requires expat headers/rpm]))
-    AC_CHECK_LIB(expat, XML_ParserCreate, [],
+    AC_CHECK_LIB(expat, XML_ParserCreate, [CRAY_SELECT_LDFLAGS="$CRAY_SELECT_LDFLAGS -lexpat"],
 		 AC_MSG_ERROR([Cray BASIL requires libexpat.so (i.e. libexpat1-dev)]))
 
     if test "$ac_have_real_cray" = "yes"; then
-      AC_CHECK_LIB([job], [job_getjid], [],
+      # libjob is needed, but we don't want to put it on the LIBS line here.
+      # If we are on a native system it is handled elsewhere, and on a hybrid
+      # we only need this in libsrun.
+      AC_CHECK_LIB([job], [job_getjid], [CRAY_JOB_LDFLAGS="$CRAY_JOB_LDFLAGS -ljob"],
 	      AC_MSG_ERROR([Need cray-job (usually in /opt/cray/job/default)]))
       AC_DEFINE(HAVE_REAL_CRAY, 1, [Define to 1 for running on a real Cray system])
     fi
@@ -247,4 +266,45 @@ AC_DEFUN([X_AC_CRAY],
   AC_SUBST(CRAY_TASK_CPPFLAGS)
   AC_SUBST(CRAY_TASK_LDFLAGS)
 
+
+  _x_ac_datawarp_dirs="/opt/cray/dws/default"
+  _x_ac_datawarp_libs="lib64 lib"
+
+  AC_ARG_WITH(
+    [datawarp],
+    AS_HELP_STRING(--with-datawarp=PATH,Specify path to DataWarp installation),
+    [_x_ac_datawarp_dirs="$withval $_x_ac_datawarp_dirs"])
+
+  AC_CACHE_CHECK(
+    [for datawarp installation],
+    [x_ac_cv_datawarp_dir],
+    [
+      for d in $_x_ac_datawarp_dirs; do
+        test -d "$d" || continue
+        test -d "$d/include" || continue
+        test -f "$d/include/dws_thin.h" || continue
+	for bit in $_x_ac_datawarp_libs; do
+          test -d "$d/$bit" || continue
+          test -f "$d/$bit/libdws_thin.so" || continue
+          AS_VAR_SET(x_ac_cv_datawarp_dir, $d)
+          break
+        done
+        test -n "$x_ac_cv_datawarp_dir" && break
+      done
+    ])
+
+  if test -z "$x_ac_cv_datawarp_dir"; then
+    AC_MSG_WARN([unable to locate DataWarp installation])
+  else
+    DATAWARP_CPPFLAGS="-I$x_ac_cv_datawarp_dir/include"
+    if test "$ac_with_rpath" = "yes"; then
+      DATAWARP_LDFLAGS="-Wl,-rpath -Wl,$x_ac_cv_datawarp_dir/$bit -L$x_ac_cv_datawarp_dir/$bit -ldws_thin"
+    else
+      DATAWARP_LDFLAGS="-L$x_ac_cv_datawarp_dir/$bit -ldws_thin"
+    fi
+    AC_DEFINE(HAVE_DATAWARP, 1, [Define to 1 if DataWarp library found])
+  fi
+
+  AC_SUBST(DATAWARP_CPPFLAGS)
+  AC_SUBST(DATAWARP_LDFLAGS)
 ])
diff --git a/auxdir/x_ac_curl.m4 b/auxdir/x_ac_curl.m4
new file mode 100644
index 000000000..47bf40ad1
--- /dev/null
+++ b/auxdir/x_ac_curl.m4
@@ -0,0 +1,274 @@
+#***************************************************************************
+#                                  _   _ ____  _
+#  Project                     ___| | | |  _ \| |
+#                             / __| | | | |_) | |
+#                            | (__| |_| |  _ <| |___
+#                             \___|\___/|_| \_\_____|
+#
+# Copyright (C) 2006, David Shaw <dshaw@jabberwocky.com>
+#
+# This software is licensed as described in the file COPYING, which
+# you should have received as part of this distribution. The terms
+# are also available at http://curl.haxx.se/docs/copyright.html.
+#
+# You may opt to use, copy, modify, merge, publish, distribute and/or sell
+# copies of the Software, and permit persons to whom the Software is
+# furnished to do so, under the terms of the COPYING file.
+#
+# This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
+# KIND, either express or implied.
+#
+###########################################################################
+# LIBCURL_CHECK_CONFIG ([DEFAULT-ACTION], [MINIMUM-VERSION],
+#                       [ACTION-IF-YES], [ACTION-IF-NO])
+# ----------------------------------------------------------
+#      David Shaw <dshaw@jabberwocky.com>   May-09-2006
+#
+# Checks for libcurl.  DEFAULT-ACTION is the string yes or no to
+# specify whether to default to --with-libcurl or --without-libcurl.
+# If not supplied, DEFAULT-ACTION is yes.  MINIMUM-VERSION is the
+# minimum version of libcurl to accept.  Pass the version as a regular
+# version number like 7.10.1. If not supplied, any version is
+# accepted.  ACTION-IF-YES is a list of shell commands to run if
+# libcurl was successfully found and passed the various tests.
+# ACTION-IF-NO is a list of shell commands that are run otherwise.
+# Note that using --without-libcurl does run ACTION-IF-NO.
+#
+# This macro #defines HAVE_LIBCURL if a working libcurl setup is
+# found, and sets @LIBCURL@ and @LIBCURL_CPPFLAGS@ to the necessary
+# values.  Other useful defines are LIBCURL_FEATURE_xxx where xxx are
+# the various features supported by libcurl, and LIBCURL_PROTOCOL_yyy
+# where yyy are the various protocols supported by libcurl.  Both xxx
+# and yyy are capitalized.  See the list of AH_TEMPLATEs at the top of
+# the macro for the complete list of possible defines.  Shell
+# variables $libcurl_feature_xxx and $libcurl_protocol_yyy are also
+# defined to 'yes' for those features and protocols that were found.
+# Note that xxx and yyy keep the same capitalization as in the
+# curl-config list (e.g. it's "HTTP" and not "http").
+#
+# Users may override the detected values by doing something like:
+# LIBCURL="-lcurl" LIBCURL_CPPFLAGS="-I/usr/myinclude" ./configure
+#
+# For the sake of sanity, this macro assumes that any libcurl that is
+# found is after version 7.7.2, the first version that included the
+# curl-config script.  Note that it is very important for people
+# packaging binary versions of libcurl to include this script!
+# Without curl-config, we can only guess what protocols are available,
+# or use curl_version_info to figure it out at runtime.
+
+AC_DEFUN([LIBCURL_CHECK_CONFIG],
+[
+  AH_TEMPLATE([LIBCURL_FEATURE_SSL],[Defined if libcurl supports SSL])
+  AH_TEMPLATE([LIBCURL_FEATURE_KRB4],[Defined if libcurl supports KRB4])
+  AH_TEMPLATE([LIBCURL_FEATURE_IPV6],[Defined if libcurl supports IPv6])
+  AH_TEMPLATE([LIBCURL_FEATURE_LIBZ],[Defined if libcurl supports libz])
+  AH_TEMPLATE([LIBCURL_FEATURE_ASYNCHDNS],[Defined if libcurl supports AsynchDNS])
+  AH_TEMPLATE([LIBCURL_FEATURE_IDN],[Defined if libcurl supports IDN])
+  AH_TEMPLATE([LIBCURL_FEATURE_SSPI],[Defined if libcurl supports SSPI])
+  AH_TEMPLATE([LIBCURL_FEATURE_NTLM],[Defined if libcurl supports NTLM])
+
+  AH_TEMPLATE([LIBCURL_PROTOCOL_HTTP],[Defined if libcurl supports HTTP])
+  AH_TEMPLATE([LIBCURL_PROTOCOL_HTTPS],[Defined if libcurl supports HTTPS])
+  AH_TEMPLATE([LIBCURL_PROTOCOL_FTP],[Defined if libcurl supports FTP])
+  AH_TEMPLATE([LIBCURL_PROTOCOL_FTPS],[Defined if libcurl supports FTPS])
+  AH_TEMPLATE([LIBCURL_PROTOCOL_FILE],[Defined if libcurl supports FILE])
+  AH_TEMPLATE([LIBCURL_PROTOCOL_TELNET],[Defined if libcurl supports TELNET])
+  AH_TEMPLATE([LIBCURL_PROTOCOL_LDAP],[Defined if libcurl supports LDAP])
+  AH_TEMPLATE([LIBCURL_PROTOCOL_DICT],[Defined if libcurl supports DICT])
+  AH_TEMPLATE([LIBCURL_PROTOCOL_TFTP],[Defined if libcurl supports TFTP])
+  AH_TEMPLATE([LIBCURL_PROTOCOL_RTSP],[Defined if libcurl supports RTSP])
+  AH_TEMPLATE([LIBCURL_PROTOCOL_POP3],[Defined if libcurl supports POP3])
+  AH_TEMPLATE([LIBCURL_PROTOCOL_IMAP],[Defined if libcurl supports IMAP])
+  AH_TEMPLATE([LIBCURL_PROTOCOL_SMTP],[Defined if libcurl supports SMTP])
+
+  AC_ARG_WITH(libcurl,
+     AC_HELP_STRING([--with-libcurl=PREFIX],[look for the curl library in PREFIX/lib and headers in PREFIX/include]),
+     [_libcurl_with=$withval],[_libcurl_with=ifelse([$1],,[yes],[$1])])
+
+  if test "$_libcurl_with" != "no" ; then
+
+     AC_PROG_AWK
+
+     _libcurl_version_parse="eval $AWK '{split(\$NF,A,\".\"); X=256*256*A[[1]]+256*A[[2]]+A[[3]]; print X;}'"
+
+     _libcurl_try_link=yes
+
+     if test -d "$_libcurl_with" ; then
+        LIBCURL_CPPFLAGS="-I$withval/include"
+        _libcurl_ldflags="-L$withval/lib"
+        AC_PATH_PROG([_libcurl_config],[curl-config],[],
+                     ["$withval/bin"])
+     else
+        AC_PATH_PROG([_libcurl_config],[curl-config],[],[$PATH])
+     fi
+
+     if test x$_libcurl_config != "x" ; then
+        AC_CACHE_CHECK([for the version of libcurl],
+           [libcurl_cv_lib_curl_version],
+           [libcurl_cv_lib_curl_version=`$_libcurl_config --version | $AWK '{print $[]2}'`])
+
+        _libcurl_version=`echo $libcurl_cv_lib_curl_version | $_libcurl_version_parse`
+        _libcurl_wanted=`echo ifelse([$2],,[0],[$2]) | $_libcurl_version_parse`
+
+        if test $_libcurl_wanted -gt 0 ; then
+           AC_CACHE_CHECK([for libcurl >= version $2],
+              [libcurl_cv_lib_version_ok],
+              [
+              if test $_libcurl_version -ge $_libcurl_wanted ; then
+                 libcurl_cv_lib_version_ok=yes
+              else
+                 libcurl_cv_lib_version_ok=no
+              fi
+              ])
+        fi
+
+        if test $_libcurl_wanted -eq 0 || test x$libcurl_cv_lib_version_ok = xyes ; then
+           if test x"$LIBCURL_CPPFLAGS" = "x" ; then
+              LIBCURL_CPPFLAGS=`$_libcurl_config --cflags`
+           fi
+           if test x"$LIBCURL" = "x" ; then
+              LIBCURL=`$_libcurl_config --libs`
+
+              # This is so silly, but Apple actually has a bug in their
+              # curl-config script.  Fixed in Tiger, but there are still
+              # lots of Panther installs around.
+              case "${host}" in
+                 powerpc-apple-darwin7*)
+                    LIBCURL=`echo $LIBCURL | sed -e 's|-arch i386||g'`
+                 ;;
+              esac
+           fi
+
+           # All curl-config scripts support --feature
+           _libcurl_features=`$_libcurl_config --feature`
+
+           # Is it modern enough to have --protocols? (7.12.4)
+           if test $_libcurl_version -ge 461828 ; then
+              _libcurl_protocols=`$_libcurl_config --protocols`
+           fi
+        else
+           _libcurl_try_link=no
+        fi
+
+        unset _libcurl_wanted
+     fi
+
+     if test $_libcurl_try_link = yes ; then
+
+        # we didn't find curl-config, so let's see if the user-supplied
+        # link line (or failing that, "-lcurl") is enough.
+        LIBCURL=${LIBCURL-"$_libcurl_ldflags -lcurl"}
+
+        AC_CACHE_CHECK([whether libcurl is usable],
+           [libcurl_cv_lib_curl_usable],
+           [
+           _libcurl_save_cppflags=$CPPFLAGS
+           CPPFLAGS="$LIBCURL_CPPFLAGS $CPPFLAGS"
+           _libcurl_save_libs=$LIBS
+           LIBS="$LIBCURL $LIBS"
+
+           AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <curl/curl.h>]],[[
+/* Try and use a few common options to force a failure if we are
+   missing symbols or can't link. */
+int x;
+curl_easy_setopt(NULL,CURLOPT_URL,NULL);
+x=CURL_ERROR_SIZE;
+x=CURLOPT_WRITEFUNCTION;
+x=CURLOPT_WRITEDATA;
+x=CURLOPT_ERRORBUFFER;
+x=CURLOPT_STDERR;
+x=CURLOPT_VERBOSE;
+if (x) ;
+]])],libcurl_cv_lib_curl_usable=yes,libcurl_cv_lib_curl_usable=no)
+
+           CPPFLAGS=$_libcurl_save_cppflags
+           LIBS=$_libcurl_save_libs
+           unset _libcurl_save_cppflags
+           unset _libcurl_save_libs
+           ])
+
+        if test $libcurl_cv_lib_curl_usable = yes ; then
+
+           # Does curl_free() exist in this version of libcurl?
+           # If not, fake it with free()
+
+           _libcurl_save_cppflags=$CPPFLAGS
+           CPPFLAGS="$CPPFLAGS $LIBCURL_CPPFLAGS"
+           _libcurl_save_libs=$LIBS
+           LIBS="$LIBS $LIBCURL"
+
+           AC_CHECK_FUNC(curl_free,,
+              AC_DEFINE(curl_free,free,
+                [Define curl_free() as free() if our version of curl lacks curl_free.]))
+
+           CPPFLAGS=$_libcurl_save_cppflags
+           LIBS=$_libcurl_save_libs
+           unset _libcurl_save_cppflags
+           unset _libcurl_save_libs
+
+           AC_DEFINE(HAVE_LIBCURL,1,
+             [Define to 1 if you have a functional curl library.])
+           AC_SUBST(LIBCURL_CPPFLAGS)
+           AC_SUBST(LIBCURL)
+
+           for _libcurl_feature in $_libcurl_features ; do
+              AC_DEFINE_UNQUOTED(AS_TR_CPP(libcurl_feature_$_libcurl_feature),[1])
+              eval AS_TR_SH(libcurl_feature_$_libcurl_feature)=yes
+           done
+
+           if test "x$_libcurl_protocols" = "x" ; then
+
+              # We don't have --protocols, so just assume that all
+              # protocols are available
+              _libcurl_protocols="HTTP FTP FILE TELNET LDAP DICT TFTP"
+
+              if test x$libcurl_feature_SSL = xyes ; then
+                 _libcurl_protocols="$_libcurl_protocols HTTPS"
+
+                 # FTPS wasn't standards-compliant until version
+                 # 7.11.0 (0x070b00 == 461568)
+                 if test $_libcurl_version -ge 461568; then
+                    _libcurl_protocols="$_libcurl_protocols FTPS"
+                 fi
+              fi
+
+              # RTSP, IMAP, POP3 and SMTP were added in
+              # 7.20.0 (0x071400 == 463872)
+              if test $_libcurl_version -ge 463872; then
+                 _libcurl_protocols="$_libcurl_protocols RTSP IMAP POP3 SMTP"
+              fi
+           fi
+
+           for _libcurl_protocol in $_libcurl_protocols ; do
+              AC_DEFINE_UNQUOTED(AS_TR_CPP(libcurl_protocol_$_libcurl_protocol),[1])
+              eval AS_TR_SH(libcurl_protocol_$_libcurl_protocol)=yes
+           done
+        else
+           unset LIBCURL
+           unset LIBCURL_CPPFLAGS
+        fi
+     fi
+
+     unset _libcurl_try_link
+     unset _libcurl_version_parse
+     unset _libcurl_config
+     unset _libcurl_feature
+     unset _libcurl_features
+     unset _libcurl_protocol
+     unset _libcurl_protocols
+     unset _libcurl_version
+     unset _libcurl_ldflags
+  fi
+
+  if test x$_libcurl_with = xno || test x$libcurl_cv_lib_curl_usable != xyes ; then
+     # This is the IF-NO path
+     ifelse([$4],,:,[$4])
+  else
+     # This is the IF-YES path
+     ifelse([$3],,:,[$3])
+  fi
+
+  AM_CONDITIONAL(WITH_CURL, test x$_libcurl_with = xyes && test x$libcurl_cv_lib_curl_usable = xyes)
+
+  unset _libcurl_with
+])dnl
diff --git a/auxdir/x_ac_json.m4 b/auxdir/x_ac_json.m4
new file mode 100644
index 000000000..be865011f
--- /dev/null
+++ b/auxdir/x_ac_json.m4
@@ -0,0 +1,59 @@
+##*****************************************************************************
+#  AUTHOR:
+#    Derived from x_ac_munge.
+#
+#  SYNOPSIS:
+#    X_AC_JSON()
+#
+#  DESCRIPTION:
+#    Check for JSON parser libraries.
+#    Right now, just check for json-c header and library.
+#
+#  WARNINGS:
+#    This macro must be placed after AC_PROG_CC and before AC_PROG_LIBTOOL.
+##*****************************************************************************
+
+AC_DEFUN([X_AC_JSON], [
+
+  x_ac_json_dirs="/usr /usr/local"
+  x_ac_json_libs="lib64 lib"
+
+  AC_ARG_WITH(
+    [json],
+    AS_HELP_STRING(--with-json=PATH,Specify path to json-c installation),
+    [x_ac_json_dirs="$withval $x_ac_json_dirs"])
+
+  AC_CACHE_CHECK(
+    [for json installation],
+    [x_ac_cv_json_dir],
+    [
+     for d in $x_ac_json_dirs; do
+       test -d "$d" || continue
+       test -d "$d/include" || continue
+       test -f "$d/include/json-c/json_object.h" || continue
+       for bit in $x_ac_json_libs; do
+         test -d "$d/$bit" || continue
+         _x_ac_json_libs_save="$LIBS"
+         LIBS="-L$d/$bit -ljson-c $LIBS"
+         AC_LINK_IFELSE(
+           [AC_LANG_CALL([], json_tokener_parse)],
+           AS_VAR_SET(x_ac_cv_json_dir, $d))
+        LIBS="$_x_ac_json_libs_save"
+        test -n "$x_ac_cv_json_dir" && break
+     done
+     test -n "$x_ac_cv_json_dir" && break
+  done
+  ])
+
+  if test -z "$x_ac_cv_json_dir"; then
+    AC_MSG_WARN([unable to locate json parser library])
+  else
+    AC_DEFINE([HAVE_JSON], [1], [Define if you are compiling with json.])
+    JSON_CPPFLAGS="-I$x_ac_cv_json_dir/include"
+    JSON_LDFLAGS="-L$x_ac_cv_json_dir/$bit -ljson-c"
+  fi
+
+  AC_SUBST(JSON_CPPFLAGS)
+  AC_SUBST(JSON_LDFLAGS)
+  AM_CONDITIONAL(WITH_JSON_PARSER, test -n "$x_ac_cv_json_dir")
+])
diff --git a/auxdir/x_ac_netloc.m4 b/auxdir/x_ac_netloc.m4
new file mode 100644
index 000000000..93bbfac54
--- /dev/null
+++ b/auxdir/x_ac_netloc.m4
@@ -0,0 +1,79 @@
+##*****************************************************************************
+#  AUTHOR:
+#    Daniel Pou <danielp@sgi.com>
+#
+#  SYNOPSIS:
+#    X_AC_NETLOC
+#
+#  DESCRIPTION:
+#    Determine if the NETLOC libraries exists
+##*****************************************************************************
+
+AC_DEFUN([X_AC_NETLOC],
+[
+  _x_ac_netloc_dirs="/usr /usr/local"
+  _x_ac_netloc_libs="lib64 lib"
+  x_ac_cv_netloc_nosub="no"
+
+  AC_ARG_WITH(
+    [netloc],
+    AS_HELP_STRING(--with-netloc=PATH,Specify path to netloc installation),
+    [_x_ac_netloc_dirs="$withval $_x_ac_netloc_dirs"])
+
+  AC_CACHE_CHECK(
+    [for netloc installation],
+    [x_ac_cv_netloc_dir],
+    [
+      for d in $_x_ac_netloc_dirs; do
+        test -d "$d" || continue
+        test -d "$d/include" || continue
+        test -f "$d/include/netloc.h" || continue
+        for bit in $_x_ac_netloc_libs; do
+          test -d "$d/$bit" || continue
+          _x_ac_netloc_cppflags_save="$CPPFLAGS"
+          CPPFLAGS="-I$d/include $CPPFLAGS"
+          _x_ac_netloc_libs_save="$LIBS"
+          LIBS="-L$d/$bit -lnetloc $LIBS"
+          AC_LINK_IFELSE(
+            [AC_LANG_PROGRAM([#include <netloc.h>
+                              #include <netloc/map.h>],
+                             [netloc_map_t map;
+                              netloc_map_create(&map);]) ],
+            AS_VAR_SET(x_ac_cv_netloc_dir, $d))
+          AC_LINK_IFELSE(
+            [AC_LANG_PROGRAM([#include <netloc.h>
+                              #include <netloc_map.h>],
+                             [netloc_map_t map;
+                              netloc_map_create(&map)]) ],
+            AS_VAR_SET(x_ac_cv_netloc_dir, $d)
+            x_ac_cv_netloc_nosub="yes"
+            )
+          CPPFLAGS="$_x_ac_netloc_cppflags_save"
+          LIBS="$_x_ac_netloc_libs_save"
+          test -n "$x_ac_cv_netloc_dir" && break
+        done
+        test -n "$x_ac_cv_netloc_dir" && break
+      done
+    ])
+
+  if test -z "$x_ac_cv_netloc_dir"; then
+    AC_MSG_WARN([unable to locate netloc installation])
+  else
+    NETLOC_CPPFLAGS="-I$x_ac_cv_netloc_dir/include"
+    if test "$ac_with_rpath" = "yes"; then
+      NETLOC_LDFLAGS="-Wl,-rpath -Wl,$x_ac_cv_netloc_dir/$bit -L$x_ac_cv_netloc_dir/$bit"
+    else
+      NETLOC_LDFLAGS="-L$x_ac_cv_netloc_dir/$bit"
+    fi
+    NETLOC_LIBS="-lnetloc"
+    AC_DEFINE(HAVE_NETLOC, 1, [Define to 1 if netloc library found])
+    if test "$x_ac_cv_netloc_nosub" = "yes"; then
+      AC_DEFINE(HAVE_NETLOC_NOSUB, 1, [Define to 1 if netloc includes use underscore not subdirectory])
+    fi
+  fi
+
+  AM_CONDITIONAL(HAVE_NETLOC, test -n "$x_ac_cv_netloc_dir")
+  AC_SUBST(NETLOC_LIBS)
+  AC_SUBST(NETLOC_CPPFLAGS)
+  AC_SUBST(NETLOC_LDFLAGS)
+])
diff --git a/auxdir/x_ac_xcpu.m4 b/auxdir/x_ac_xcpu.m4
deleted file mode 100644
index 5a2efe7c1..000000000
--- a/auxdir/x_ac_xcpu.m4
+++ /dev/null
@@ -1,46 +0,0 @@
-##*****************************************************************************
-## $Id: x_ac_xcpu.m4 7443 2006-03-08 20:23:25Z da $
-##*****************************************************************************
-#  AUTHOR:
-#    Morris Jette <jette1@llnl.gov>
-#
-#  SYNOPSIS:
-#    X_AC_XCPU
-#
-#  DESCRIPTION:
-#    Test for XCPU job launch support. 
-#    If found define HAVE_XCPU, XCPU_DIR and HAVE_FRONT_END.
-#    Explicitly set path with --with-xcpu=PATH, defaults to "/mnt".
-#
-#  NOTES:
-#    SLURM still has no way to signal XCPU spawned processes.
-#    SLURM is not confirming that all processes have completed prior
-#    to marking a job/node as COMPLETED. For that it needs to check 
-#    for subdirectories (not files) under /mnt/xcpu/<host>/xcpu.
-##*****************************************************************************
-
-
-AC_DEFUN([X_AC_XCPU],
-[
-   AC_MSG_CHECKING([whether XCPU is enabled])
-
-   xcpu_default_dirs="/mnt"
-
-   AC_ARG_WITH([xcpu],
-    AS_HELP_STRING(--with-xcpu=PATH,specify path to XCPU directory),
-    [ try_path=$withval ]
-   )
-
-   ac_xcpu=no
-   for xcpu_dir in $try_path "" $xcpu_default_dirs; do
-      if test -d "$xcpu_dir/xcpu" ; then
-         ac_xcpu=yes
-         AC_DEFINE(HAVE_XCPU, 1, [Define to 1 if using XCPU for job launch])
-         AC_DEFINE_UNQUOTED(XCPU_DIR, "$xcpu_dir/xcpu", [Define location of XCPU directory])
-         AC_DEFINE(HAVE_FRONT_END, 1, [Define to 1 if running slurmd on front-end only])
-         break
-      fi
-   done
-
-   AC_MSG_RESULT($ac_xcpu)
-])
diff --git a/config.h.in b/config.h.in
index 3625a3de5..875c95931 100644
--- a/config.h.in
+++ b/config.h.in
@@ -90,6 +90,9 @@
 /* Define to 1 if you have the <curses.h> header file. */
 #undef HAVE_CURSES_H
 
+/* Define to 1 if DataWarp library found */
+#undef HAVE_DATAWARP
+
 /* Define to 1 if you have the declaration of `hstrerror', and to 0 if you
    don't. */
 #undef HAVE_DECL_HSTRERROR
@@ -163,14 +166,14 @@
 /* Define to 1 if you have the <inttypes.h> header file. */
 #undef HAVE_INTTYPES_H
 
+/* Define if you are compiling with json. */
+#undef HAVE_JSON
+
 /* Define to 1 if you have the <kstat.h> header file. */
 #undef HAVE_KSTAT_H
 
-/* Define to 1 if you have the `expat' library (-lexpat). */
-#undef HAVE_LIBEXPAT
-
-/* Define to 1 if you have the `job' library (-ljob). */
-#undef HAVE_LIBJOB
+/* Define to 1 if you have a functional curl library. */
+#undef HAVE_LIBCURL
 
 /* Define to 1 if you have the <limits.h> header file. */
 #undef HAVE_LIMITS_H
@@ -206,6 +209,12 @@
 /* Define to 1 if you have the <netdb.h> header file. */
 #undef HAVE_NETDB_H
 
+/* Define to 1 if netloc library found */
+#undef HAVE_NETLOC
+
+/* Define to 1 if netloc includes use underscore not subdirectory */
+#undef HAVE_NETLOC_NOSUB
+
 /* define if you have nrt.h */
 #undef HAVE_NRT_H
 
@@ -389,9 +398,6 @@
 /* Define to 1 if you have the <values.h> header file. */
 #undef HAVE_VALUES_H
 
-/* Define to 1 if using XCPU for job launch */
-#undef HAVE_XCPU
-
 /* Define if you have __progname. */
 #undef HAVE__PROGNAME
 
@@ -399,6 +405,69 @@
    member named physmem. */
 #undef HAVE__SYSTEM_CONFIGURATION
 
+/* Defined if libcurl supports AsynchDNS */
+#undef LIBCURL_FEATURE_ASYNCHDNS
+
+/* Defined if libcurl supports IDN */
+#undef LIBCURL_FEATURE_IDN
+
+/* Defined if libcurl supports IPv6 */
+#undef LIBCURL_FEATURE_IPV6
+
+/* Defined if libcurl supports KRB4 */
+#undef LIBCURL_FEATURE_KRB4
+
+/* Defined if libcurl supports libz */
+#undef LIBCURL_FEATURE_LIBZ
+
+/* Defined if libcurl supports NTLM */
+#undef LIBCURL_FEATURE_NTLM
+
+/* Defined if libcurl supports SSL */
+#undef LIBCURL_FEATURE_SSL
+
+/* Defined if libcurl supports SSPI */
+#undef LIBCURL_FEATURE_SSPI
+
+/* Defined if libcurl supports DICT */
+#undef LIBCURL_PROTOCOL_DICT
+
+/* Defined if libcurl supports FILE */
+#undef LIBCURL_PROTOCOL_FILE
+
+/* Defined if libcurl supports FTP */
+#undef LIBCURL_PROTOCOL_FTP
+
+/* Defined if libcurl supports FTPS */
+#undef LIBCURL_PROTOCOL_FTPS
+
+/* Defined if libcurl supports HTTP */
+#undef LIBCURL_PROTOCOL_HTTP
+
+/* Defined if libcurl supports HTTPS */
+#undef LIBCURL_PROTOCOL_HTTPS
+
+/* Defined if libcurl supports IMAP */
+#undef LIBCURL_PROTOCOL_IMAP
+
+/* Defined if libcurl supports LDAP */
+#undef LIBCURL_PROTOCOL_LDAP
+
+/* Defined if libcurl supports POP3 */
+#undef LIBCURL_PROTOCOL_POP3
+
+/* Defined if libcurl supports RTSP */
+#undef LIBCURL_PROTOCOL_RTSP
+
+/* Defined if libcurl supports SMTP */
+#undef LIBCURL_PROTOCOL_SMTP
+
+/* Defined if libcurl supports TELNET */
+#undef LIBCURL_PROTOCOL_TELNET
+
+/* Defined if libcurl supports TFTP */
+#undef LIBCURL_PROTOCOL_TFTP
+
 /* Define the libnrt.so location */
 #undef LIBNRT_SO
 
@@ -476,6 +545,9 @@
 /* Define to the setproctitle() emulation type */
 #undef SETPROCTITLE_STRATEGY
 
+/* Define path to sleep command */
+#undef SLEEP_CMD
+
 /* Define the default port number for slurmctld */
 #undef SLURMCTLD_PORT
 
@@ -566,9 +638,6 @@
 # endif
 #endif
 
-/* Define location of XCPU directory */
-#undef XCPU_DIR
-
 /* Enable large inode numbers on Mac OS X 10.5.  */
 #ifndef _DARWIN_USE_64_BIT_INODE
 # define _DARWIN_USE_64_BIT_INODE 1
@@ -580,5 +649,8 @@
 /* Define for large files, on AIX-style hosts. */
 #undef _LARGE_FILES
 
+/* Define curl_free() as free() if our version of curl lacks curl_free. */
+#undef curl_free
+
 /* Define to rpl_malloc if the replacement function should be used. */
 #undef malloc
diff --git a/configure b/configure
index 3cac00041..bc86885bc 100755
--- a/configure
+++ b/configure
@@ -1,6 +1,6 @@
 #! /bin/sh
 # Guess values for system-dependent variables and create Makefiles.
-# Generated by GNU Autoconf 2.69 for slurm 14.11.
+# Generated by GNU Autoconf 2.69 for slurm 15.08.
 #
 # Report bugs to <slurm-dev@schedmd.com>.
 #
@@ -590,8 +590,8 @@ MAKEFLAGS=
 # Identity of this package.
 PACKAGE_NAME='slurm'
 PACKAGE_TARNAME='slurm'
-PACKAGE_VERSION='14.11'
-PACKAGE_STRING='slurm 14.11'
+PACKAGE_VERSION='15.08'
+PACKAGE_STRING='slurm 15.08'
 PACKAGE_BUGREPORT='slurm-dev@schedmd.com'
 PACKAGE_URL='http://slurm.schedmd.com'
 
@@ -637,6 +637,11 @@ am__EXEEXT_TRUE
 LTLIBOBJS
 BUILD_SMAP_FALSE
 BUILD_SMAP_TRUE
+WITH_CURL_FALSE
+WITH_CURL_TRUE
+LIBCURL
+LIBCURL_CPPFLAGS
+_libcurl_config
 WITH_BLCR_FALSE
 WITH_BLCR_TRUE
 BLCR_LDFLAGS
@@ -669,6 +674,11 @@ HAVE_LUA_FALSE
 HAVE_LUA_TRUE
 lua_LIBS
 lua_CFLAGS
+NETLOC_LDFLAGS
+NETLOC_CPPFLAGS
+NETLOC_LIBS
+HAVE_NETLOC_FALSE
+HAVE_NETLOC_TRUE
 HAVE_SGI_JOB_FALSE
 HAVE_SGI_JOB_TRUE
 HAVE_NRT
@@ -676,14 +686,14 @@ HAVE_NRT_FALSE
 HAVE_NRT_TRUE
 NRT_CPPFLAGS
 SLURM_PREFIX
-SLURM_ENABLE_DYNAMIC_ALLOCATION_FALSE
-SLURM_ENABLE_DYNAMIC_ALLOCATION_TRUE
 SLURMCTLD_PORT_COUNT
 SLURMDBD_PORT
 SLURMD_PORT
 SLURMCTLD_PORT
 DEBUG_MODULES_FALSE
 DEBUG_MODULES_TRUE
+DATAWARP_LDFLAGS
+DATAWARP_CPPFLAGS
 CRAY_TASK_LDFLAGS
 CRAY_TASK_CPPFLAGS
 CRAY_SWITCH_LDFLAGS
@@ -767,6 +777,10 @@ ax_pthread_config
 HAVE_UNSETENV_FALSE
 HAVE_UNSETENV_TRUE
 LIBOBJS
+WITH_JSON_PARSER_FALSE
+WITH_JSON_PARSER_TRUE
+JSON_LDFLAGS
+JSON_CPPFLAGS
 PAM_DIR
 HAVE_PAM_FALSE
 HAVE_PAM_TRUE
@@ -778,6 +792,7 @@ HAVE_NUMA_TRUE
 NUMA_LIBS
 DL_LIBS
 SUCMD
+SLEEP_CMD
 WITH_GNU_LD_FALSE
 WITH_GNU_LD_TRUE
 WITH_CXX_FALSE
@@ -984,13 +999,13 @@ enable_pam
 with_pam_dir
 enable_iso8601
 enable_load_env_no_login
+with_json
 enable_sun_const
 with_dimensions
 with_ofed
 with_hdf5
 with_hwloc
 with_freeipmi
-with_xcpu
 with_rrdtool
 enable_glibtest
 enable_gtktest
@@ -999,6 +1014,8 @@ with_alps_emulation
 enable_cray_emulation
 enable_native_cray
 enable_cray_network
+enable_really_no_cray
+with_datawarp
 enable_developer
 enable_debug
 enable_memory_leak_debug
@@ -1011,14 +1028,15 @@ with_slurmctld_port
 with_slurmd_port
 with_slurmdbd_port
 with_slurmctld_port_count
-enable_dynamic_allocation
 with_nrth
 with_libnrt
+with_netloc
 with_readline
 with_ssl
 with_munge
 enable_multiple_slurmd
 with_blcr
+with_libcurl
 '
       ac_precious_vars='build_alias
 host_alias
@@ -1580,7 +1598,7 @@ if test "$ac_init_help" = "long"; then
   # Omit some internal or obsolete options to make the list less imposing.
   # This message is too long to be a string in the A/UX 3.1 sh.
   cat <<_ACEOF
-\`configure' configures slurm 14.11 to adapt to many kinds of systems.
+\`configure' configures slurm 15.08 to adapt to many kinds of systems.
 
 Usage: $0 [OPTION]... [VAR=VALUE]...
 
@@ -1651,7 +1669,7 @@ fi
 
 if test -n "$ac_init_help"; then
   case $ac_init_help in
-     short | recursive ) echo "Configuration of slurm 14.11:";;
+     short | recursive ) echo "Configuration of slurm 15.08:";;
    esac
   cat <<\_ACEOF
 
@@ -1692,6 +1710,7 @@ Optional Features:
                           Run SLURM in an emulated Cray mode
   --enable-native-cray    Run SLURM natively on a Cray without ALPS
   --enable-cray-network   Run SLURM on a non-Cray system with a Cray network
+  --enable-really-no-cray Disable cray support for eslogin machines
   --enable-developer      enable developer options (asserts, -Werror - also
                           sets --enable-debug as well)
   --disable-debug         disable debugging symbols and compile with
@@ -1707,8 +1726,6 @@ Optional Features:
   --disable-salloc-background
                           disable salloc execution in the background
   --enable-simulator      enable slurm simulator
-  --enable-dynamic-allocation, enable dynamic allocation requests from user programs for Hadoop (disabled)
-
   --enable-multiple-slurmd
                           enable multiple-slurmd support
 
@@ -1728,18 +1745,19 @@ Optional Packages:
   --with-cpusetdir=PATH   specify path to cpuset directory default is
                           /dev/cpuset
   --with-pam_dir=PATH     Specify path to PAM module installation
+  --with-json=PATH        Specify path to json-c installation
   --with-dimensions=N     set system dimension count for generic computer
                           system
   --with-ofed=PATH        Specify path to ofed installation
   --with-hdf5=yes/no/PATH location of h5cc or h5pcc for HDF5 configuration
   --with-hwloc=PATH       Specify path to hwloc installation
   --with-freeipmi=PATH    Specify path to freeipmi installation
-  --with-xcpu=PATH        specify path to XCPU directory
   --with-rrdtool=PATH     Specify path to rrdtool-devel installation
   --with-mysql_config=PATH
                           Specify path to mysql_config binary
   --with-alps-emulation   Run SLURM against an emulated ALPS system - requires
                           option cray.conf [default=no]
+  --with-datawarp=PATH    Specify path to DataWarp installation
   --with-slurmctld-port=N set slurmctld default port [6817]
   --with-slurmd-port=N    set slurmd default port [6818]
   --with-slurmdbd-port=N  set slurmdbd default port [6819]
@@ -1747,10 +1765,13 @@ Optional Packages:
                           set slurmctld default port count [1]
   --with-nrth=PATH        Parent directory of nrt.h and permapi.h
   --with-libnrt=PATH      Parent directory of libnrt.so
+  --with-netloc=PATH      Specify path to netloc installation
   --without-readline      compile without readline support
   --with-ssl=PATH         Specify path to OpenSSL installation
   --with-munge=PATH       Specify path to munge installation
   --with-blcr=PATH        Specify path to BLCR installation
+  --with-libcurl=PREFIX   look for the curl library in PREFIX/lib and headers
+                          in PREFIX/include
 
 Some influential environment variables:
   CC          C compiler command
@@ -1842,7 +1863,7 @@ fi
 test -n "$ac_init_help" && exit $ac_status
 if $ac_init_version; then
   cat <<\_ACEOF
-slurm configure 14.11
+slurm configure 15.08
 generated by GNU Autoconf 2.69
 
 Copyright (C) 2012 Free Software Foundation, Inc.
@@ -2378,7 +2399,7 @@ cat >config.log <<_ACEOF
 This file contains any messages produced by compilers while
 running configure, to aid debugging if configure makes a mistake.
 
-It was created by slurm $as_me 14.11, which was
+It was created by slurm $as_me 15.08, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   $ $0 $@
@@ -2910,7 +2931,7 @@ for name in CURRENT REVISION AGE; do
    eval SLURM_API_$name=$API
 done
 SLURM_API_MAJOR=`expr $SLURM_API_CURRENT - $SLURM_API_AGE`
-SLURM_API_VERSION=`printf "0x%02x%02x%02x" $SLURM_API_MAJOR $SLURM_API_AGE $SLURM_API_REVISION`
+SLURM_API_VERSION=`printf "0x%02x%02x%02x" $((10#$SLURM_API_MAJOR)) $((10#$SLURM_API_AGE)) $((10#$SLURM_API_REVISION))`
 
 
 cat >>confdefs.h <<_ACEOF
@@ -2962,7 +2983,7 @@ RELEASE="`perl -ne 'print,exit if s/^\s*RELEASE:\s*(\S*).*/\1/i' $srcdir/META`"
 # NOTE: SLURM_VERSION_NUMBER excludes any non-numeric component
 # (e.g. "pre1" in the MICRO), but may be suitable for the user determining
 # how to use the APIs or other differences.
-SLURM_VERSION_NUMBER="`printf "0x%02x%02x%02x" $SLURM_MAJOR $SLURM_MINOR $SLURM_MICRO`"
+SLURM_VERSION_NUMBER="`printf "0x%02x%02x%02x" $((10#$SLURM_MAJOR)) $((10#$SLURM_MINOR)) $((10#$SLURM_MICRO))`"
 
 cat >>confdefs.h <<_ACEOF
 #define SLURM_VERSION_NUMBER $SLURM_VERSION_NUMBER
@@ -3504,7 +3525,7 @@ fi
 
 # Define the identity of the package.
  PACKAGE='slurm'
- VERSION='14.11'
+ VERSION='15.08'
 
 
 # Some tools Automake needs.
@@ -18018,6 +18039,53 @@ else
 fi
 
 
+# Extract the first word of "sleep", so it can be a program name with args.
+set dummy sleep; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path_SLEEP_CMD+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  case $SLEEP_CMD in
+  [\\/]* | ?:[\\/]*)
+  ac_cv_path_SLEEP_CMD="$SLEEP_CMD" # Let the user override the test with a path.
+  ;;
+  *)
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_path_SLEEP_CMD="$as_dir/$ac_word$ac_exec_ext"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+  test -z "$ac_cv_path_SLEEP_CMD" && ac_cv_path_SLEEP_CMD="/bin/sleep"
+  ;;
+esac
+fi
+SLEEP_CMD=$ac_cv_path_SLEEP_CMD
+if test -n "$SLEEP_CMD"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $SLEEP_CMD" >&5
+$as_echo "$SLEEP_CMD" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+
+cat >>confdefs.h <<_ACEOF
+#define SLEEP_CMD "$SLEEP_CMD"
+_ACEOF
+
+
 # Extract the first word of "su", so it can be a program name with args.
 set dummy su; ac_word=$2
 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
@@ -19341,6 +19409,88 @@ $as_echo "#define SLURM_BIGENDIAN 1" >>confdefs.h
   fi
 
 
+
+
+  x_ac_json_dirs="/usr /usr/local"
+  x_ac_json_libs="lib64 lib"
+
+
+# Check whether --with-json was given.
+if test "${with_json+set}" = set; then :
+  withval=$with_json; x_ac_json_dirs="$withval $x_ac_json_dirs"
+fi
+
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for json installation" >&5
+$as_echo_n "checking for json installation... " >&6; }
+if ${x_ac_cv_json_dir+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+
+     for d in $x_ac_json_dirs; do
+       test -d "$d" || continue
+       test -d "$d/include" || continue
+       test -f "$d/include/json-c/json_object.h" || continue
+       for bit in $x_ac_json_libs; do
+         test -d "$d/$bit" || continue
+         _x_ac_json_libs_save="$LIBS"
+         LIBS="-L$d/$bit -ljson-c $LIBS"
+         cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char json_tokener_parse ();
+int
+main ()
+{
+return json_tokener_parse ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  x_ac_cv_json_dir=$d
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+        LIBS="$_x_ac_json_libs_save"
+        test -n "$x_ac_cv_json_dir" && break
+     done
+     test -n "$x_ac_cv_json_dir" && break
+  done
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $x_ac_cv_json_dir" >&5
+$as_echo "$x_ac_cv_json_dir" >&6; }
+
+  if test -z "$x_ac_cv_json_dir"; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unable to locate json parser library" >&5
+$as_echo "$as_me: WARNING: unable to locate json parser library" >&2;}
+  else
+
+$as_echo "#define HAVE_JSON 1" >>confdefs.h
+
+    JSON_CPPFLAGS="-I$x_ac_cv_json_dir/include"
+    JSON_LDFLAGS="-L$x_ac_cv_json_dir/$bit -ljson-c"
+  fi
+
+
+
+   if test -n "$x_ac_cv_json_dir"; then
+  WITH_JSON_PARSER_TRUE=
+  WITH_JSON_PARSER_FALSE='#'
+else
+  WITH_JSON_PARSER_TRUE='#'
+  WITH_JSON_PARSER_FALSE=
+fi
+
+
+
 if test $ac_cv_c_compiler_gnu = yes; then
     { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC needs -traditional" >&5
 $as_echo_n "checking whether $CC needs -traditional... " >&6; }
@@ -20762,42 +20912,6 @@ fi
 
 
 
-   { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether XCPU is enabled" >&5
-$as_echo_n "checking whether XCPU is enabled... " >&6; }
-
-   xcpu_default_dirs="/mnt"
-
-
-# Check whether --with-xcpu was given.
-if test "${with_xcpu+set}" = set; then :
-  withval=$with_xcpu;  try_path=$withval
-
-fi
-
-
-   ac_xcpu=no
-   for xcpu_dir in $try_path "" $xcpu_default_dirs; do
-      if test -d "$xcpu_dir/xcpu" ; then
-         ac_xcpu=yes
-
-$as_echo "#define HAVE_XCPU 1" >>confdefs.h
-
-
-cat >>confdefs.h <<_ACEOF
-#define XCPU_DIR "$xcpu_dir/xcpu"
-_ACEOF
-
-
-$as_echo "#define HAVE_FRONT_END 1" >>confdefs.h
-
-         break
-      fi
-   done
-
-   { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_xcpu" >&5
-$as_echo "$ac_xcpu" >&6; }
-
-
   SEMAPHORE_SOURCES=""
   SEMAPHORE_LIBS=""
   { $as_echo "$as_me:${as_lineno-$LINENO}: checking for sem_open in -lposix4" >&5
@@ -22090,6 +22204,7 @@ fi
   ac_have_alps_emulation="no"
   ac_have_alps_cray_emulation="no"
   ac_have_cray_network="no"
+  ac_really_no_cray="no"
 
 
 # Check whether --with-alps-emulation was given.
@@ -22130,6 +22245,16 @@ if test "${enable_cray_network+set}" = set; then :
 	  *) as_fn_error $? "bad value \"$enableval\" for --enable-cray-network" "$LINENO" 5 ;;
       esac
 
+fi
+
+  # Check whether --enable-really-no-cray was given.
+if test "${enable_really_no_cray+set}" = set; then :
+  enableval=$enable_really_no_cray;  case "$enableval" in
+   yes) ac_really_no_cray="yes" ;;
+    no) ac_really_no_cray="no"  ;;
+     *) as_fn_error $? "bad value \"$enableval\" for --enable-really-no-cray" "$LINENO" 5  ;;
+      esac
+
 fi
 
 
@@ -22221,7 +22346,7 @@ _ACEOF
 if ac_fn_c_try_link "$LINENO"; then :
   have_cray_files="yes"
 else
-  as_fn_error $? "There is a problem linking to the Cray api." "$LINENO" 5
+  as_fn_error $? "There is a problem linking to the Cray API" "$LINENO" 5
 fi
 rm -f core conftest.err conftest.$ac_objext \
     conftest$ac_exeext conftest.$ac_ext
@@ -22307,7 +22432,7 @@ _ACEOF
 if ac_fn_c_try_link "$LINENO"; then :
   have_cray_files="yes"
 else
-  as_fn_error $? "There is a problem linking to the Cray API." "$LINENO" 5
+  as_fn_error $? "There is a problem linking to the Cray API" "$LINENO" 5
 fi
 rm -f core conftest.err conftest.$ac_objext \
     conftest$ac_exeext conftest.$ac_ext
@@ -22320,7 +22445,7 @@ rm -f core conftest.err conftest.$ac_objext \
     done
 
     if test -z "$have_cray_files"; then
-      as_fn_error $? "Unable to locate Cray API dir install. (usually in /opt/cray)" "$LINENO" 5
+      as_fn_error $? "Unable to locate Cray APIs (usually in /opt/cray/alpscomm and /opt/cray/job)" "$LINENO" 5
     else
       if test "$ac_have_native_cray" = "yes"; then
         { $as_echo "$as_me:${as_lineno-$LINENO}: Running on a Cray system in native mode without ALPS" >&5
@@ -22369,8 +22494,13 @@ $as_echo_n "checking whether this is a Cray XT or XE system running on ALPS or A
 $as_echo "$ac_have_alps_cray" >&6; }
   fi
 
+  if test "$ac_really_no_cray" = "yes"; then
+    ac_have_alps_cray="no"
+    ac_have_real_cray="no"
+  fi
   if test "$ac_have_alps_cray" = "yes"; then
-    # libexpat is always required for the XML-RPC interface
+    # libexpat is always required for the XML-RPC interface, but it is only
+    # needed in the select plugin, so set it up here instead of everywhere.
     ac_fn_c_check_header_mongrel "$LINENO" "expat.h" "ac_cv_header_expat_h" "$ac_includes_default"
 if test "x$ac_cv_header_expat_h" = xyes; then :
 
@@ -22416,18 +22546,16 @@ fi
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_expat_XML_ParserCreate" >&5
 $as_echo "$ac_cv_lib_expat_XML_ParserCreate" >&6; }
 if test "x$ac_cv_lib_expat_XML_ParserCreate" = xyes; then :
-  cat >>confdefs.h <<_ACEOF
-#define HAVE_LIBEXPAT 1
-_ACEOF
-
-  LIBS="-lexpat $LIBS"
-
+  CRAY_SELECT_LDFLAGS="$CRAY_SELECT_LDFLAGS -lexpat"
 else
   as_fn_error $? "Cray BASIL requires libexpat.so (i.e. libexpat1-dev)" "$LINENO" 5
 fi
 
 
     if test "$ac_have_real_cray" = "yes"; then
+      # libjob is needed, but we don't want to put it on the LIBS line here.
+      # If we are on a native system it is handled elsewhere, and on a hybrid
+      # we only need this in libsrun.
       { $as_echo "$as_me:${as_lineno-$LINENO}: checking for job_getjid in -ljob" >&5
 $as_echo_n "checking for job_getjid in -ljob... " >&6; }
 if ${ac_cv_lib_job_job_getjid+:} false; then :
@@ -22465,12 +22593,7 @@ fi
 { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_job_job_getjid" >&5
 $as_echo "$ac_cv_lib_job_job_getjid" >&6; }
 if test "x$ac_cv_lib_job_job_getjid" = xyes; then :
-  cat >>confdefs.h <<_ACEOF
-#define HAVE_LIBJOB 1
-_ACEOF
-
-  LIBS="-ljob $LIBS"
-
+  CRAY_JOB_LDFLAGS="$CRAY_JOB_LDFLAGS -ljob"
 else
   as_fn_error $? "Need cray-job (usually in /opt/cray/job/default)" "$LINENO" 5
 fi
@@ -22563,6 +22686,57 @@ fi
 
 
 
+  _x_ac_datawarp_dirs="/opt/cray/dws/default"
+  _x_ac_datawarp_libs="lib64 lib"
+
+
+# Check whether --with-datawarp was given.
+if test "${with_datawarp+set}" = set; then :
+  withval=$with_datawarp; _x_ac_datawarp_dirs="$withval $_x_ac_datawarp_dirs"
+fi
+
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for datawarp installation" >&5
+$as_echo_n "checking for datawarp installation... " >&6; }
+if ${x_ac_cv_datawarp_dir+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+
+      for d in $_x_ac_datawarp_dirs; do
+        test -d "$d" || continue
+        test -d "$d/include" || continue
+        test -f "$d/include/dws_thin.h" || continue
+	for bit in $_x_ac_datawarp_libs; do
+          test -d "$d/$bit" || continue
+          test -f "$d/$bit/libdws_thin.so" || continue
+          x_ac_cv_datawarp_dir=$d
+          break
+        done
+        test -n "$x_ac_cv_datawarp_dir" && break
+      done
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $x_ac_cv_datawarp_dir" >&5
+$as_echo "$x_ac_cv_datawarp_dir" >&6; }
+
+  if test -z "$x_ac_cv_datawarp_dir"; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unable to locate DataWarp installation" >&5
+$as_echo "$as_me: WARNING: unable to locate DataWarp installation" >&2;}
+  else
+    DATAWARP_CPPFLAGS="-I$x_ac_cv_datawarp_dir/include"
+    if test "$ac_with_rpath" = "yes"; then
+      DATAWARP_LDFLAGS="-Wl,-rpath -Wl,$x_ac_cv_datawarp_dir/$bit -L$x_ac_cv_datawarp_dir/$bit -ldws_thin"
+    else
+      DATAWARP_LDFLAGS="-L$x_ac_cv_datawarp_dir/$bit -ldws_thin"
+    fi
+
+$as_echo "#define HAVE_DATAWARP 1" >>confdefs.h
+
+  fi
+
+
+
+
 
 
 
@@ -22925,31 +23099,6 @@ _ACEOF
 
 
 
-  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dynamic allocation port to be enabled for Hadoop" >&5
-$as_echo_n "checking for dynamic allocation port to be enabled for Hadoop... " >&6; }
-  # Check whether --enable-dynamic-allocation was given.
-if test "${enable_dynamic_allocation+set}" = set; then :
-  enableval=$enable_dynamic_allocation;
-fi
-
-  if test "$enable_dynamic_allocation" = "yes"; then
-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
-$as_echo "yes" >&6; }
-    slurm_enable_dynamic_allocation="yes"
-  else
-    { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
-$as_echo "no" >&6; }
-    slurm_enable_dynamic_allocation="no"
-  fi
-   if test "$slurm_enable_dynamic_allocation" = "yes"; then
-  SLURM_ENABLE_DYNAMIC_ALLOCATION_TRUE=
-  SLURM_ENABLE_DYNAMIC_ALLOCATION_FALSE='#'
-else
-  SLURM_ENABLE_DYNAMIC_ALLOCATION_TRUE='#'
-  SLURM_ENABLE_DYNAMIC_ALLOCATION_FALSE=
-fi
-
-
 
 
 if test "x$prefix" = "xNONE" ; then
@@ -23111,6 +23260,118 @@ fi
 
 
 
+
+  _x_ac_netloc_dirs="/usr /usr/local"
+  _x_ac_netloc_libs="lib64 lib"
+  x_ac_cv_netloc_nosub="no"
+
+
+# Check whether --with-netloc was given.
+if test "${with_netloc+set}" = set; then :
+  withval=$with_netloc; _x_ac_netloc_dirs="$withval $_x_ac_netloc_dirs"
+fi
+
+
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for netloc installation" >&5
+$as_echo_n "checking for netloc installation... " >&6; }
+if ${x_ac_cv_netloc_dir+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+
+      for d in $_x_ac_netloc_dirs; do
+        test -d "$d" || continue
+        test -d "$d/include" || continue
+        test -f "$d/include/netloc.h" || continue
+        for bit in $_x_ac_netloc_libs; do
+          test -d "$d/$bit" || continue
+          _x_ac_netloc_cppflags_save="$CPPFLAGS"
+          CPPFLAGS="-I$d/include $CPPFLAGS"
+          _x_ac_netloc_libs_save="$LIBS"
+          LIBS="-L$d/$bit -lnetloc $LIBS"
+          cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <netloc.h>
+                              #include <netloc/map.h>
+int
+main ()
+{
+netloc_map_t map;
+                              netloc_map_create(&map);
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  x_ac_cv_netloc_dir=$d
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+          cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <netloc.h>
+                              #include <netloc_map.h>
+int
+main ()
+{
+netloc_map_t map;
+                              netloc_map_create(&map)
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  x_ac_cv_netloc_dir=$d
+            x_ac_cv_netloc_nosub="yes"
+
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+          CPPFLAGS="$_x_ac_netloc_cppflags_save"
+          LIBS="$_x_ac_netloc_libs_save"
+          test -n "$x_ac_cv_netloc_dir" && break
+        done
+        test -n "$x_ac_cv_netloc_dir" && break
+      done
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $x_ac_cv_netloc_dir" >&5
+$as_echo "$x_ac_cv_netloc_dir" >&6; }
+
+  if test -z "$x_ac_cv_netloc_dir"; then
+    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unable to locate netloc installation" >&5
+$as_echo "$as_me: WARNING: unable to locate netloc installation" >&2;}
+  else
+    NETLOC_CPPFLAGS="-I$x_ac_cv_netloc_dir/include"
+    if test "$ac_with_rpath" = "yes"; then
+      NETLOC_LDFLAGS="-Wl,-rpath -Wl,$x_ac_cv_netloc_dir/$bit -L$x_ac_cv_netloc_dir/$bit"
+    else
+      NETLOC_LDFLAGS="-L$x_ac_cv_netloc_dir/$bit"
+    fi
+    NETLOC_LIBS="-lnetloc"
+
+$as_echo "#define HAVE_NETLOC 1" >>confdefs.h
+
+    if test "$x_ac_cv_netloc_nosub" = "yes"; then
+
+$as_echo "#define HAVE_NETLOC_NOSUB 1" >>confdefs.h
+
+    fi
+  fi
+
+   if test -n "$x_ac_cv_netloc_dir"; then
+  HAVE_NETLOC_TRUE=
+  HAVE_NETLOC_FALSE='#'
+else
+  HAVE_NETLOC_TRUE='#'
+  HAVE_NETLOC_FALSE=
+fi
+
+
+
+
+
+
+
 	x_ac_lua_pkg_name="lua"
 	#check for 5.2 if that fails check for 5.1
 	if test -n "$PKG_CONFIG" && \
@@ -23935,6 +24196,400 @@ fi
 
 
 
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+# Check whether --with-libcurl was given.
+if test "${with_libcurl+set}" = set; then :
+  withval=$with_libcurl; _libcurl_with=$withval
+else
+  _libcurl_with=yes
+fi
+
+
+  if test "$_libcurl_with" != "no" ; then
+
+     for ac_prog in gawk mawk nawk awk
+do
+  # Extract the first word of "$ac_prog", so it can be a program name with args.
+set dummy $ac_prog; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_prog_AWK+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -n "$AWK"; then
+  ac_cv_prog_AWK="$AWK" # Let the user override the test.
+else
+as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_prog_AWK="$ac_prog"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+fi
+fi
+AWK=$ac_cv_prog_AWK
+if test -n "$AWK"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5
+$as_echo "$AWK" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+  test -n "$AWK" && break
+done
+
+
+     _libcurl_version_parse="eval $AWK '{split(\$NF,A,\".\"); X=256*256*A[1]+256*A[2]+A[3]; print X;}'"
+
+     _libcurl_try_link=yes
+
+     if test -d "$_libcurl_with" ; then
+        LIBCURL_CPPFLAGS="-I$withval/include"
+        _libcurl_ldflags="-L$withval/lib"
+        # Extract the first word of "curl-config", so it can be a program name with args.
+set dummy curl-config; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path__libcurl_config+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  case $_libcurl_config in
+  [\\/]* | ?:[\\/]*)
+  ac_cv_path__libcurl_config="$_libcurl_config" # Let the user override the test with a path.
+  ;;
+  *)
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in "$withval/bin"
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_path__libcurl_config="$as_dir/$ac_word$ac_exec_ext"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+  ;;
+esac
+fi
+_libcurl_config=$ac_cv_path__libcurl_config
+if test -n "$_libcurl_config"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $_libcurl_config" >&5
+$as_echo "$_libcurl_config" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+     else
+        # Extract the first word of "curl-config", so it can be a program name with args.
+set dummy curl-config; ac_word=$2
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+$as_echo_n "checking for $ac_word... " >&6; }
+if ${ac_cv_path__libcurl_config+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  case $_libcurl_config in
+  [\\/]* | ?:[\\/]*)
+  ac_cv_path__libcurl_config="$_libcurl_config" # Let the user override the test with a path.
+  ;;
+  *)
+  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+for as_dir in $PATH
+do
+  IFS=$as_save_IFS
+  test -z "$as_dir" && as_dir=.
+    for ac_exec_ext in '' $ac_executable_extensions; do
+  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+    ac_cv_path__libcurl_config="$as_dir/$ac_word$ac_exec_ext"
+    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+    break 2
+  fi
+done
+  done
+IFS=$as_save_IFS
+
+  ;;
+esac
+fi
+_libcurl_config=$ac_cv_path__libcurl_config
+if test -n "$_libcurl_config"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $_libcurl_config" >&5
+$as_echo "$_libcurl_config" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+
+
+     fi
+
+     if test x$_libcurl_config != "x" ; then
+        { $as_echo "$as_me:${as_lineno-$LINENO}: checking for the version of libcurl" >&5
+$as_echo_n "checking for the version of libcurl... " >&6; }
+if ${libcurl_cv_lib_curl_version+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  libcurl_cv_lib_curl_version=`$_libcurl_config --version | $AWK '{print $2}'`
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libcurl_cv_lib_curl_version" >&5
+$as_echo "$libcurl_cv_lib_curl_version" >&6; }
+
+        _libcurl_version=`echo $libcurl_cv_lib_curl_version | $_libcurl_version_parse`
+        _libcurl_wanted=`echo 0 | $_libcurl_version_parse`
+
+        if test $_libcurl_wanted -gt 0 ; then
+           { $as_echo "$as_me:${as_lineno-$LINENO}: checking for libcurl >= version " >&5
+$as_echo_n "checking for libcurl >= version ... " >&6; }
+if ${libcurl_cv_lib_version_ok+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+
+              if test $_libcurl_version -ge $_libcurl_wanted ; then
+                 libcurl_cv_lib_version_ok=yes
+              else
+                 libcurl_cv_lib_version_ok=no
+              fi
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libcurl_cv_lib_version_ok" >&5
+$as_echo "$libcurl_cv_lib_version_ok" >&6; }
+        fi
+
+        if test $_libcurl_wanted -eq 0 || test x$libcurl_cv_lib_version_ok = xyes ; then
+           if test x"$LIBCURL_CPPFLAGS" = "x" ; then
+              LIBCURL_CPPFLAGS=`$_libcurl_config --cflags`
+           fi
+           if test x"$LIBCURL" = "x" ; then
+              LIBCURL=`$_libcurl_config --libs`
+
+              # This is so silly, but Apple actually has a bug in their
+              # curl-config script.  Fixed in Tiger, but there are still
+              # lots of Panther installs around.
+              case "${host}" in
+                 powerpc-apple-darwin7*)
+                    LIBCURL=`echo $LIBCURL | sed -e 's|-arch i386||g'`
+                 ;;
+              esac
+           fi
+
+           # All curl-config scripts support --feature
+           _libcurl_features=`$_libcurl_config --feature`
+
+           # Is it modern enough to have --protocols? (7.12.4)
+           if test $_libcurl_version -ge 461828 ; then
+              _libcurl_protocols=`$_libcurl_config --protocols`
+           fi
+        else
+           _libcurl_try_link=no
+        fi
+
+        unset _libcurl_wanted
+     fi
+
+     if test $_libcurl_try_link = yes ; then
+
+        # we didn't find curl-config, so let's see if the user-supplied
+        # link line (or failing that, "-lcurl") is enough.
+        LIBCURL=${LIBCURL-"$_libcurl_ldflags -lcurl"}
+
+        { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether libcurl is usable" >&5
+$as_echo_n "checking whether libcurl is usable... " >&6; }
+if ${libcurl_cv_lib_curl_usable+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+
+           _libcurl_save_cppflags=$CPPFLAGS
+           CPPFLAGS="$LIBCURL_CPPFLAGS $CPPFLAGS"
+           _libcurl_save_libs=$LIBS
+           LIBS="$LIBCURL $LIBS"
+
+           cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+#include <curl/curl.h>
+int
+main ()
+{
+
+/* Try and use a few common options to force a failure if we are
+   missing symbols or can't link. */
+int x;
+curl_easy_setopt(NULL,CURLOPT_URL,NULL);
+x=CURL_ERROR_SIZE;
+x=CURLOPT_WRITEFUNCTION;
+x=CURLOPT_WRITEDATA;
+x=CURLOPT_ERRORBUFFER;
+x=CURLOPT_STDERR;
+x=CURLOPT_VERBOSE;
+if (x) ;
+
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  libcurl_cv_lib_curl_usable=yes
+else
+  libcurl_cv_lib_curl_usable=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+
+           CPPFLAGS=$_libcurl_save_cppflags
+           LIBS=$_libcurl_save_libs
+           unset _libcurl_save_cppflags
+           unset _libcurl_save_libs
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $libcurl_cv_lib_curl_usable" >&5
+$as_echo "$libcurl_cv_lib_curl_usable" >&6; }
+
+        if test $libcurl_cv_lib_curl_usable = yes ; then
+
+           # Does curl_free() exist in this version of libcurl?
+           # If not, fake it with free()
+
+           _libcurl_save_cppflags=$CPPFLAGS
+           CPPFLAGS="$CPPFLAGS $LIBCURL_CPPFLAGS"
+           _libcurl_save_libs=$LIBS
+           LIBS="$LIBS $LIBCURL"
+
+           ac_fn_c_check_func "$LINENO" "curl_free" "ac_cv_func_curl_free"
+if test "x$ac_cv_func_curl_free" = xyes; then :
+
+else
+
+$as_echo "#define curl_free free" >>confdefs.h
+
+fi
+
+
+           CPPFLAGS=$_libcurl_save_cppflags
+           LIBS=$_libcurl_save_libs
+           unset _libcurl_save_cppflags
+           unset _libcurl_save_libs
+
+
+$as_echo "#define HAVE_LIBCURL 1" >>confdefs.h
+
+
+
+
+           for _libcurl_feature in $_libcurl_features ; do
+              cat >>confdefs.h <<_ACEOF
+#define `$as_echo "libcurl_feature_$_libcurl_feature" | $as_tr_cpp` 1
+_ACEOF
+
+              eval `$as_echo "libcurl_feature_$_libcurl_feature" | $as_tr_sh`=yes
+           done
+
+           if test "x$_libcurl_protocols" = "x" ; then
+
+              # We don't have --protocols, so just assume that all
+              # protocols are available
+              _libcurl_protocols="HTTP FTP FILE TELNET LDAP DICT TFTP"
+
+              if test x$libcurl_feature_SSL = xyes ; then
+                 _libcurl_protocols="$_libcurl_protocols HTTPS"
+
+                 # FTPS wasn't standards-compliant until version
+                 # 7.11.0 (0x070b00 == 461568)
+                 if test $_libcurl_version -ge 461568; then
+                    _libcurl_protocols="$_libcurl_protocols FTPS"
+                 fi
+              fi
+
+              # RTSP, IMAP, POP3 and SMTP were added in
+              # 7.20.0 (0x071400 == 463872)
+              if test $_libcurl_version -ge 463872; then
+                 _libcurl_protocols="$_libcurl_protocols RTSP IMAP POP3 SMTP"
+              fi
+           fi
+
+           for _libcurl_protocol in $_libcurl_protocols ; do
+              cat >>confdefs.h <<_ACEOF
+#define `$as_echo "libcurl_protocol_$_libcurl_protocol" | $as_tr_cpp` 1
+_ACEOF
+
+              eval `$as_echo "libcurl_protocol_$_libcurl_protocol" | $as_tr_sh`=yes
+           done
+        else
+           unset LIBCURL
+           unset LIBCURL_CPPFLAGS
+        fi
+     fi
+
+     unset _libcurl_try_link
+     unset _libcurl_version_parse
+     unset _libcurl_config
+     unset _libcurl_feature
+     unset _libcurl_features
+     unset _libcurl_protocol
+     unset _libcurl_protocols
+     unset _libcurl_version
+     unset _libcurl_ldflags
+  fi
+
+  if test x$_libcurl_with = xno || test x$libcurl_cv_lib_curl_usable != xyes ; then
+     # This is the IF-NO path
+     :
+  else
+     # This is the IF-YES path
+     :
+  fi
+
+   if test x$_libcurl_with = xyes && test x$libcurl_cv_lib_curl_usable = xyes; then
+  WITH_CURL_TRUE=
+  WITH_CURL_FALSE='#'
+else
+  WITH_CURL_TRUE='#'
+  WITH_CURL_FALSE=
+fi
+
+
+  unset _libcurl_with
+
+
 ac_build_smap="no"
 if test "x$ac_have_some_curses" = "xyes" ; then
    ac_build_smap="yes"
@@ -23949,7 +24604,7 @@ fi
 
 
 
-ac_config_files="$ac_config_files Makefile config.xml auxdir/Makefile contribs/Makefile contribs/cray/Makefile contribs/lua/Makefile contribs/mic/Makefile contribs/pam/Makefile contribs/perlapi/Makefile contribs/perlapi/libslurm/Makefile contribs/perlapi/libslurm/perl/Makefile.PL contribs/perlapi/libslurmdb/Makefile contribs/perlapi/libslurmdb/perl/Makefile.PL contribs/torque/Makefile contribs/phpext/Makefile contribs/phpext/slurm_php/config.m4 contribs/sgather/Makefile contribs/sjobexit/Makefile contribs/slurmdb-direct/Makefile contribs/pmi2/Makefile doc/Makefile doc/man/Makefile doc/man/man1/Makefile doc/man/man3/Makefile doc/man/man5/Makefile doc/man/man8/Makefile doc/html/Makefile doc/html/configurator.html doc/html/configurator.easy.html etc/cgroup.release_common.example etc/init.d.slurm etc/init.d.slurmdbd etc/slurmctld.service etc/slurmd.service etc/slurmdbd.service src/Makefile src/api/Makefile src/common/Makefile src/db_api/Makefile src/database/Makefile src/sacct/Makefile src/sacctmgr/Makefile src/sreport/Makefile src/salloc/Makefile src/sbatch/Makefile src/sbcast/Makefile src/sattach/Makefile src/scancel/Makefile src/scontrol/Makefile src/sdiag/Makefile src/sinfo/Makefile src/slurmctld/Makefile src/slurmd/Makefile src/slurmd/common/Makefile src/slurmd/slurmd/Makefile src/slurmd/slurmstepd/Makefile src/slurmdbd/Makefile src/smap/Makefile src/smd/Makefile src/sprio/Makefile src/squeue/Makefile src/srun/Makefile src/srun/libsrun/Makefile src/srun_cr/Makefile src/sshare/Makefile src/sstat/Makefile src/strigger/Makefile src/sview/Makefile src/plugins/Makefile src/plugins/accounting_storage/Makefile src/plugins/accounting_storage/common/Makefile src/plugins/accounting_storage/filetxt/Makefile src/plugins/accounting_storage/mysql/Makefile src/plugins/accounting_storage/none/Makefile src/plugins/accounting_storage/slurmdbd/Makefile src/plugins/auth/Makefile src/plugins/auth/authd/Makefile src/plugins/auth/munge/Makefile src/plugins/auth/none/Makefile 
src/plugins/checkpoint/Makefile src/plugins/checkpoint/aix/Makefile src/plugins/checkpoint/blcr/Makefile src/plugins/checkpoint/blcr/cr_checkpoint.sh src/plugins/checkpoint/blcr/cr_restart.sh src/plugins/checkpoint/none/Makefile src/plugins/checkpoint/ompi/Makefile src/plugins/checkpoint/poe/Makefile src/plugins/core_spec/Makefile src/plugins/core_spec/cray/Makefile src/plugins/core_spec/none/Makefile src/plugins/crypto/Makefile src/plugins/crypto/munge/Makefile src/plugins/crypto/openssl/Makefile src/plugins/ext_sensors/Makefile src/plugins/ext_sensors/rrd/Makefile src/plugins/ext_sensors/none/Makefile src/plugins/route/Makefile src/plugins/route/default/Makefile src/plugins/route/topology/Makefile src/plugins/gres/Makefile src/plugins/gres/gpu/Makefile src/plugins/gres/nic/Makefile src/plugins/gres/mic/Makefile src/plugins/jobacct_gather/Makefile src/plugins/jobacct_gather/common/Makefile src/plugins/jobacct_gather/linux/Makefile src/plugins/jobacct_gather/aix/Makefile src/plugins/jobacct_gather/cgroup/Makefile src/plugins/jobacct_gather/none/Makefile src/plugins/acct_gather_energy/Makefile src/plugins/acct_gather_energy/rapl/Makefile src/plugins/acct_gather_energy/ipmi/Makefile src/plugins/acct_gather_energy/none/Makefile src/plugins/acct_gather_infiniband/Makefile src/plugins/acct_gather_infiniband/ofed/Makefile src/plugins/acct_gather_infiniband/none/Makefile src/plugins/acct_gather_filesystem/Makefile src/plugins/acct_gather_filesystem/lustre/Makefile src/plugins/acct_gather_filesystem/none/Makefile src/plugins/acct_gather_profile/Makefile src/plugins/acct_gather_profile/hdf5/Makefile src/plugins/acct_gather_profile/hdf5/sh5util/Makefile src/plugins/acct_gather_profile/none/Makefile src/plugins/jobcomp/Makefile src/plugins/jobcomp/filetxt/Makefile src/plugins/jobcomp/none/Makefile src/plugins/jobcomp/script/Makefile src/plugins/jobcomp/mysql/Makefile src/plugins/job_container/Makefile src/plugins/job_container/cncu/Makefile 
src/plugins/job_container/none/Makefile src/plugins/job_submit/Makefile src/plugins/job_submit/all_partitions/Makefile src/plugins/job_submit/cnode/Makefile src/plugins/job_submit/cray/Makefile src/plugins/job_submit/defaults/Makefile src/plugins/job_submit/logging/Makefile src/plugins/job_submit/lua/Makefile src/plugins/job_submit/partition/Makefile src/plugins/job_submit/pbs/Makefile src/plugins/job_submit/require_timelimit/Makefile src/plugins/job_submit/throttle/Makefile src/plugins/launch/Makefile src/plugins/launch/aprun/Makefile src/plugins/launch/poe/Makefile src/plugins/launch/runjob/Makefile src/plugins/launch/slurm/Makefile src/plugins/preempt/Makefile src/plugins/preempt/job_prio/Makefile src/plugins/preempt/none/Makefile src/plugins/preempt/partition_prio/Makefile src/plugins/preempt/qos/Makefile src/plugins/priority/Makefile src/plugins/priority/basic/Makefile src/plugins/priority/multifactor/Makefile src/plugins/proctrack/Makefile src/plugins/proctrack/aix/Makefile src/plugins/proctrack/cray/Makefile src/plugins/proctrack/cgroup/Makefile src/plugins/proctrack/pgid/Makefile src/plugins/proctrack/linuxproc/Makefile src/plugins/proctrack/sgi_job/Makefile src/plugins/proctrack/lua/Makefile src/plugins/sched/Makefile src/plugins/sched/backfill/Makefile src/plugins/sched/builtin/Makefile src/plugins/sched/hold/Makefile src/plugins/sched/wiki/Makefile src/plugins/sched/wiki2/Makefile src/plugins/select/Makefile src/plugins/select/alps/Makefile src/plugins/select/alps/libalps/Makefile src/plugins/select/alps/libemulate/Makefile src/plugins/select/bluegene/Makefile src/plugins/select/bluegene/ba/Makefile src/plugins/select/bluegene/ba_bgq/Makefile src/plugins/select/bluegene/bl/Makefile src/plugins/select/bluegene/bl_bgq/Makefile src/plugins/select/bluegene/sfree/Makefile src/plugins/select/cons_res/Makefile src/plugins/select/cray/Makefile src/plugins/select/linear/Makefile src/plugins/select/other/Makefile src/plugins/select/serial/Makefile 
src/plugins/slurmctld/Makefile src/plugins/slurmctld/dynalloc/Makefile src/plugins/slurmctld/nonstop/Makefile src/plugins/slurmd/Makefile src/plugins/switch/Makefile src/plugins/switch/cray/Makefile src/plugins/switch/generic/Makefile src/plugins/switch/none/Makefile src/plugins/switch/nrt/Makefile src/plugins/switch/nrt/libpermapi/Makefile src/plugins/mpi/Makefile src/plugins/mpi/mpich1_p4/Makefile src/plugins/mpi/mpich1_shmem/Makefile src/plugins/mpi/mpichgm/Makefile src/plugins/mpi/mpichmx/Makefile src/plugins/mpi/mvapich/Makefile src/plugins/mpi/lam/Makefile src/plugins/mpi/none/Makefile src/plugins/mpi/openmpi/Makefile src/plugins/mpi/pmi2/Makefile src/plugins/task/Makefile src/plugins/task/affinity/Makefile src/plugins/task/cgroup/Makefile src/plugins/task/cray/Makefile src/plugins/task/none/Makefile src/plugins/topology/Makefile src/plugins/topology/3d_torus/Makefile src/plugins/topology/node_rank/Makefile src/plugins/topology/none/Makefile src/plugins/topology/tree/Makefile testsuite/Makefile testsuite/expect/Makefile testsuite/slurm_unit/Makefile testsuite/slurm_unit/api/Makefile testsuite/slurm_unit/api/manual/Makefile testsuite/slurm_unit/common/Makefile"
+ac_config_files="$ac_config_files Makefile config.xml auxdir/Makefile contribs/Makefile contribs/cray/Makefile contribs/cray/csm/Makefile contribs/lua/Makefile contribs/mic/Makefile contribs/pam/Makefile contribs/pam_slurm_adopt/Makefile contribs/perlapi/Makefile contribs/perlapi/libslurm/Makefile contribs/perlapi/libslurm/perl/Makefile.PL contribs/perlapi/libslurmdb/Makefile contribs/perlapi/libslurmdb/perl/Makefile.PL contribs/torque/Makefile contribs/phpext/Makefile contribs/phpext/slurm_php/config.m4 contribs/sgather/Makefile contribs/sgi/Makefile contribs/sjobexit/Makefile contribs/slurmdb-direct/Makefile contribs/pmi2/Makefile doc/Makefile doc/man/Makefile doc/man/man1/Makefile doc/man/man3/Makefile doc/man/man5/Makefile doc/man/man8/Makefile doc/html/Makefile doc/html/configurator.html doc/html/configurator.easy.html etc/cgroup.release_common.example etc/init.d.slurm etc/init.d.slurmdbd etc/slurmctld.service etc/slurmd.service etc/slurmdbd.service src/Makefile src/api/Makefile src/common/Makefile src/db_api/Makefile src/layouts/Makefile src/layouts/power/Makefile src/layouts/unit/Makefile src/database/Makefile src/sacct/Makefile src/sacctmgr/Makefile src/sreport/Makefile src/salloc/Makefile src/sbatch/Makefile src/sbcast/Makefile src/sattach/Makefile src/scancel/Makefile src/scontrol/Makefile src/sdiag/Makefile src/sinfo/Makefile src/slurmctld/Makefile src/slurmd/Makefile src/slurmd/common/Makefile src/slurmd/slurmd/Makefile src/slurmd/slurmstepd/Makefile src/slurmdbd/Makefile src/smap/Makefile src/smd/Makefile src/sprio/Makefile src/squeue/Makefile src/srun/Makefile src/srun/libsrun/Makefile src/srun_cr/Makefile src/sshare/Makefile src/sstat/Makefile src/strigger/Makefile src/sview/Makefile src/plugins/Makefile src/plugins/accounting_storage/Makefile src/plugins/accounting_storage/common/Makefile src/plugins/accounting_storage/filetxt/Makefile src/plugins/accounting_storage/mysql/Makefile src/plugins/accounting_storage/none/Makefile 
src/plugins/accounting_storage/slurmdbd/Makefile src/plugins/acct_gather_energy/Makefile src/plugins/acct_gather_energy/cray/Makefile src/plugins/acct_gather_energy/rapl/Makefile src/plugins/acct_gather_energy/ipmi/Makefile src/plugins/acct_gather_energy/none/Makefile src/plugins/acct_gather_infiniband/Makefile src/plugins/acct_gather_infiniband/ofed/Makefile src/plugins/acct_gather_infiniband/none/Makefile src/plugins/acct_gather_filesystem/Makefile src/plugins/acct_gather_filesystem/lustre/Makefile src/plugins/acct_gather_filesystem/none/Makefile src/plugins/acct_gather_profile/Makefile src/plugins/acct_gather_profile/hdf5/Makefile src/plugins/acct_gather_profile/hdf5/sh5util/Makefile src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/Makefile src/plugins/acct_gather_profile/none/Makefile src/plugins/auth/Makefile src/plugins/auth/authd/Makefile src/plugins/auth/munge/Makefile src/plugins/auth/none/Makefile src/plugins/burst_buffer/Makefile src/plugins/burst_buffer/common/Makefile src/plugins/burst_buffer/cray/Makefile src/plugins/burst_buffer/generic/Makefile src/plugins/checkpoint/Makefile src/plugins/checkpoint/aix/Makefile src/plugins/checkpoint/blcr/Makefile src/plugins/checkpoint/blcr/cr_checkpoint.sh src/plugins/checkpoint/blcr/cr_restart.sh src/plugins/checkpoint/none/Makefile src/plugins/checkpoint/ompi/Makefile src/plugins/checkpoint/poe/Makefile src/plugins/core_spec/Makefile src/plugins/core_spec/cray/Makefile src/plugins/core_spec/none/Makefile src/plugins/crypto/Makefile src/plugins/crypto/munge/Makefile src/plugins/crypto/openssl/Makefile src/plugins/ext_sensors/Makefile src/plugins/ext_sensors/rrd/Makefile src/plugins/ext_sensors/none/Makefile src/plugins/gres/Makefile src/plugins/gres/gpu/Makefile src/plugins/gres/nic/Makefile src/plugins/gres/mic/Makefile src/plugins/jobacct_gather/Makefile src/plugins/jobacct_gather/common/Makefile src/plugins/jobacct_gather/linux/Makefile src/plugins/jobacct_gather/aix/Makefile 
src/plugins/jobacct_gather/cgroup/Makefile src/plugins/jobacct_gather/none/Makefile src/plugins/jobcomp/Makefile src/plugins/jobcomp/elasticsearch/Makefile src/plugins/jobcomp/filetxt/Makefile src/plugins/jobcomp/none/Makefile src/plugins/jobcomp/script/Makefile src/plugins/jobcomp/mysql/Makefile src/plugins/job_container/Makefile src/plugins/job_container/cncu/Makefile src/plugins/job_container/none/Makefile src/plugins/job_submit/Makefile src/plugins/job_submit/all_partitions/Makefile src/plugins/job_submit/cnode/Makefile src/plugins/job_submit/cray/Makefile src/plugins/job_submit/defaults/Makefile src/plugins/job_submit/logging/Makefile src/plugins/job_submit/lua/Makefile src/plugins/job_submit/partition/Makefile src/plugins/job_submit/pbs/Makefile src/plugins/job_submit/require_timelimit/Makefile src/plugins/job_submit/throttle/Makefile src/plugins/launch/Makefile src/plugins/launch/aprun/Makefile src/plugins/launch/poe/Makefile src/plugins/launch/runjob/Makefile src/plugins/launch/slurm/Makefile src/plugins/power/Makefile src/plugins/power/common/Makefile src/plugins/power/cray/Makefile src/plugins/power/none/Makefile src/plugins/preempt/Makefile src/plugins/preempt/job_prio/Makefile src/plugins/preempt/none/Makefile src/plugins/preempt/partition_prio/Makefile src/plugins/preempt/qos/Makefile src/plugins/priority/Makefile src/plugins/priority/basic/Makefile src/plugins/priority/multifactor/Makefile src/plugins/proctrack/Makefile src/plugins/proctrack/aix/Makefile src/plugins/proctrack/cray/Makefile src/plugins/proctrack/cgroup/Makefile src/plugins/proctrack/pgid/Makefile src/plugins/proctrack/linuxproc/Makefile src/plugins/proctrack/sgi_job/Makefile src/plugins/proctrack/lua/Makefile src/plugins/route/Makefile src/plugins/route/default/Makefile src/plugins/route/topology/Makefile src/plugins/sched/Makefile src/plugins/sched/backfill/Makefile src/plugins/sched/builtin/Makefile src/plugins/sched/hold/Makefile src/plugins/sched/wiki/Makefile 
src/plugins/sched/wiki2/Makefile src/plugins/select/Makefile src/plugins/select/alps/Makefile src/plugins/select/alps/libalps/Makefile src/plugins/select/alps/libemulate/Makefile src/plugins/select/bluegene/Makefile src/plugins/select/bluegene/ba/Makefile src/plugins/select/bluegene/ba_bgq/Makefile src/plugins/select/bluegene/bl/Makefile src/plugins/select/bluegene/bl_bgq/Makefile src/plugins/select/bluegene/sfree/Makefile src/plugins/select/cons_res/Makefile src/plugins/select/cray/Makefile src/plugins/select/linear/Makefile src/plugins/select/other/Makefile src/plugins/select/serial/Makefile src/plugins/slurmctld/Makefile src/plugins/slurmctld/nonstop/Makefile src/plugins/slurmd/Makefile src/plugins/switch/Makefile src/plugins/switch/cray/Makefile src/plugins/switch/generic/Makefile src/plugins/switch/none/Makefile src/plugins/switch/nrt/Makefile src/plugins/switch/nrt/libpermapi/Makefile src/plugins/mpi/Makefile src/plugins/mpi/mpich1_p4/Makefile src/plugins/mpi/mpich1_shmem/Makefile src/plugins/mpi/mpichgm/Makefile src/plugins/mpi/mpichmx/Makefile src/plugins/mpi/mvapich/Makefile src/plugins/mpi/lam/Makefile src/plugins/mpi/none/Makefile src/plugins/mpi/openmpi/Makefile src/plugins/mpi/pmi2/Makefile src/plugins/task/Makefile src/plugins/task/affinity/Makefile src/plugins/task/cgroup/Makefile src/plugins/task/cray/Makefile src/plugins/task/none/Makefile src/plugins/topology/Makefile src/plugins/topology/3d_torus/Makefile src/plugins/topology/hypercube/Makefile src/plugins/topology/node_rank/Makefile src/plugins/topology/none/Makefile src/plugins/topology/tree/Makefile testsuite/Makefile testsuite/expect/Makefile testsuite/slurm_unit/Makefile testsuite/slurm_unit/api/Makefile testsuite/slurm_unit/api/manual/Makefile testsuite/slurm_unit/common/Makefile"
 
 
 cat >confcache <<\_ACEOF
@@ -24162,6 +24817,10 @@ if test -z "${HAVE_PAM_TRUE}" && test -z "${HAVE_PAM_FALSE}"; then
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
 fi
 
+if test -z "${WITH_JSON_PARSER_TRUE}" && test -z "${WITH_JSON_PARSER_FALSE}"; then
+  as_fn_error $? "conditional \"WITH_JSON_PARSER\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
 if test -z "${HAVE_UNSETENV_TRUE}" && test -z "${HAVE_UNSETENV_FALSE}"; then
   as_fn_error $? "conditional \"HAVE_UNSETENV\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
@@ -24226,10 +24885,6 @@ if test -z "${DEBUG_MODULES_TRUE}" && test -z "${DEBUG_MODULES_FALSE}"; then
   as_fn_error $? "conditional \"DEBUG_MODULES\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
 fi
-if test -z "${SLURM_ENABLE_DYNAMIC_ALLOCATION_TRUE}" && test -z "${SLURM_ENABLE_DYNAMIC_ALLOCATION_FALSE}"; then
-  as_fn_error $? "conditional \"SLURM_ENABLE_DYNAMIC_ALLOCATION\" was never defined.
-Usually this means the macro was only invoked conditionally." "$LINENO" 5
-fi
 if test -z "${HAVE_NRT_TRUE}" && test -z "${HAVE_NRT_FALSE}"; then
   as_fn_error $? "conditional \"HAVE_NRT\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
@@ -24238,6 +24893,10 @@ if test -z "${HAVE_SGI_JOB_TRUE}" && test -z "${HAVE_SGI_JOB_FALSE}"; then
   as_fn_error $? "conditional \"HAVE_SGI_JOB\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
 fi
+if test -z "${HAVE_NETLOC_TRUE}" && test -z "${HAVE_NETLOC_FALSE}"; then
+  as_fn_error $? "conditional \"HAVE_NETLOC\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
 if test -z "${HAVE_LUA_TRUE}" && test -z "${HAVE_LUA_FALSE}"; then
   as_fn_error $? "conditional \"HAVE_LUA\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
@@ -24266,6 +24925,10 @@ if test -z "${WITH_BLCR_TRUE}" && test -z "${WITH_BLCR_FALSE}"; then
   as_fn_error $? "conditional \"WITH_BLCR\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
 fi
+if test -z "${WITH_CURL_TRUE}" && test -z "${WITH_CURL_FALSE}"; then
+  as_fn_error $? "conditional \"WITH_CURL\" was never defined.
+Usually this means the macro was only invoked conditionally." "$LINENO" 5
+fi
 if test -z "${BUILD_SMAP_TRUE}" && test -z "${BUILD_SMAP_FALSE}"; then
   as_fn_error $? "conditional \"BUILD_SMAP\" was never defined.
 Usually this means the macro was only invoked conditionally." "$LINENO" 5
@@ -24667,7 +25330,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
 # report actual input values of CONFIG_FILES etc. instead of their
 # values after options handling.
 ac_log="
-This file was extended by slurm $as_me 14.11, which was
+This file was extended by slurm $as_me 15.08, which was
 generated by GNU Autoconf 2.69.  Invocation command line was
 
   CONFIG_FILES    = $CONFIG_FILES
@@ -24734,7 +25397,7 @@ _ACEOF
 cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
 ac_cs_version="\\
-slurm config.status 14.11
+slurm config.status 15.08
 configured by $0, generated by GNU Autoconf 2.69,
   with options \\"\$ac_cs_config\\"
 
@@ -25247,9 +25910,11 @@ do
     "auxdir/Makefile") CONFIG_FILES="$CONFIG_FILES auxdir/Makefile" ;;
     "contribs/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/Makefile" ;;
     "contribs/cray/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/cray/Makefile" ;;
+    "contribs/cray/csm/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/cray/csm/Makefile" ;;
     "contribs/lua/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/lua/Makefile" ;;
     "contribs/mic/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/mic/Makefile" ;;
     "contribs/pam/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/pam/Makefile" ;;
+    "contribs/pam_slurm_adopt/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/pam_slurm_adopt/Makefile" ;;
     "contribs/perlapi/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/perlapi/Makefile" ;;
     "contribs/perlapi/libslurm/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/perlapi/libslurm/Makefile" ;;
     "contribs/perlapi/libslurm/perl/Makefile.PL") CONFIG_FILES="$CONFIG_FILES contribs/perlapi/libslurm/perl/Makefile.PL" ;;
@@ -25259,6 +25924,7 @@ do
     "contribs/phpext/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/phpext/Makefile" ;;
     "contribs/phpext/slurm_php/config.m4") CONFIG_FILES="$CONFIG_FILES contribs/phpext/slurm_php/config.m4" ;;
     "contribs/sgather/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/sgather/Makefile" ;;
+    "contribs/sgi/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/sgi/Makefile" ;;
     "contribs/sjobexit/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/sjobexit/Makefile" ;;
     "contribs/slurmdb-direct/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/slurmdb-direct/Makefile" ;;
     "contribs/pmi2/Makefile") CONFIG_FILES="$CONFIG_FILES contribs/pmi2/Makefile" ;;
@@ -25281,6 +25947,9 @@ do
     "src/api/Makefile") CONFIG_FILES="$CONFIG_FILES src/api/Makefile" ;;
     "src/common/Makefile") CONFIG_FILES="$CONFIG_FILES src/common/Makefile" ;;
     "src/db_api/Makefile") CONFIG_FILES="$CONFIG_FILES src/db_api/Makefile" ;;
+    "src/layouts/Makefile") CONFIG_FILES="$CONFIG_FILES src/layouts/Makefile" ;;
+    "src/layouts/power/Makefile") CONFIG_FILES="$CONFIG_FILES src/layouts/power/Makefile" ;;
+    "src/layouts/unit/Makefile") CONFIG_FILES="$CONFIG_FILES src/layouts/unit/Makefile" ;;
     "src/database/Makefile") CONFIG_FILES="$CONFIG_FILES src/database/Makefile" ;;
     "src/sacct/Makefile") CONFIG_FILES="$CONFIG_FILES src/sacct/Makefile" ;;
     "src/sacctmgr/Makefile") CONFIG_FILES="$CONFIG_FILES src/sacctmgr/Makefile" ;;
@@ -25317,10 +25986,30 @@ do
     "src/plugins/accounting_storage/mysql/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/accounting_storage/mysql/Makefile" ;;
     "src/plugins/accounting_storage/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/accounting_storage/none/Makefile" ;;
     "src/plugins/accounting_storage/slurmdbd/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/accounting_storage/slurmdbd/Makefile" ;;
+    "src/plugins/acct_gather_energy/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_energy/Makefile" ;;
+    "src/plugins/acct_gather_energy/cray/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_energy/cray/Makefile" ;;
+    "src/plugins/acct_gather_energy/rapl/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_energy/rapl/Makefile" ;;
+    "src/plugins/acct_gather_energy/ipmi/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_energy/ipmi/Makefile" ;;
+    "src/plugins/acct_gather_energy/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_energy/none/Makefile" ;;
+    "src/plugins/acct_gather_infiniband/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_infiniband/Makefile" ;;
+    "src/plugins/acct_gather_infiniband/ofed/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_infiniband/ofed/Makefile" ;;
+    "src/plugins/acct_gather_infiniband/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_infiniband/none/Makefile" ;;
+    "src/plugins/acct_gather_filesystem/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_filesystem/Makefile" ;;
+    "src/plugins/acct_gather_filesystem/lustre/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_filesystem/lustre/Makefile" ;;
+    "src/plugins/acct_gather_filesystem/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_filesystem/none/Makefile" ;;
+    "src/plugins/acct_gather_profile/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_profile/Makefile" ;;
+    "src/plugins/acct_gather_profile/hdf5/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_profile/hdf5/Makefile" ;;
+    "src/plugins/acct_gather_profile/hdf5/sh5util/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_profile/hdf5/sh5util/Makefile" ;;
+    "src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/Makefile" ;;
+    "src/plugins/acct_gather_profile/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_profile/none/Makefile" ;;
     "src/plugins/auth/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/auth/Makefile" ;;
     "src/plugins/auth/authd/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/auth/authd/Makefile" ;;
     "src/plugins/auth/munge/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/auth/munge/Makefile" ;;
     "src/plugins/auth/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/auth/none/Makefile" ;;
+    "src/plugins/burst_buffer/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/burst_buffer/Makefile" ;;
+    "src/plugins/burst_buffer/common/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/burst_buffer/common/Makefile" ;;
+    "src/plugins/burst_buffer/cray/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/burst_buffer/cray/Makefile" ;;
+    "src/plugins/burst_buffer/generic/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/burst_buffer/generic/Makefile" ;;
     "src/plugins/checkpoint/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/Makefile" ;;
     "src/plugins/checkpoint/aix/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/aix/Makefile" ;;
     "src/plugins/checkpoint/blcr/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/checkpoint/blcr/Makefile" ;;
@@ -25338,9 +26027,6 @@ do
     "src/plugins/ext_sensors/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/ext_sensors/Makefile" ;;
     "src/plugins/ext_sensors/rrd/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/ext_sensors/rrd/Makefile" ;;
     "src/plugins/ext_sensors/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/ext_sensors/none/Makefile" ;;
-    "src/plugins/route/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/route/Makefile" ;;
-    "src/plugins/route/default/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/route/default/Makefile" ;;
-    "src/plugins/route/topology/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/route/topology/Makefile" ;;
     "src/plugins/gres/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/gres/Makefile" ;;
     "src/plugins/gres/gpu/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/gres/gpu/Makefile" ;;
     "src/plugins/gres/nic/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/gres/nic/Makefile" ;;
@@ -25351,21 +26037,8 @@ do
     "src/plugins/jobacct_gather/aix/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobacct_gather/aix/Makefile" ;;
     "src/plugins/jobacct_gather/cgroup/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobacct_gather/cgroup/Makefile" ;;
     "src/plugins/jobacct_gather/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobacct_gather/none/Makefile" ;;
-    "src/plugins/acct_gather_energy/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_energy/Makefile" ;;
-    "src/plugins/acct_gather_energy/rapl/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_energy/rapl/Makefile" ;;
-    "src/plugins/acct_gather_energy/ipmi/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_energy/ipmi/Makefile" ;;
-    "src/plugins/acct_gather_energy/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_energy/none/Makefile" ;;
-    "src/plugins/acct_gather_infiniband/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_infiniband/Makefile" ;;
-    "src/plugins/acct_gather_infiniband/ofed/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_infiniband/ofed/Makefile" ;;
-    "src/plugins/acct_gather_infiniband/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_infiniband/none/Makefile" ;;
-    "src/plugins/acct_gather_filesystem/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_filesystem/Makefile" ;;
-    "src/plugins/acct_gather_filesystem/lustre/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_filesystem/lustre/Makefile" ;;
-    "src/plugins/acct_gather_filesystem/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_filesystem/none/Makefile" ;;
-    "src/plugins/acct_gather_profile/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_profile/Makefile" ;;
-    "src/plugins/acct_gather_profile/hdf5/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_profile/hdf5/Makefile" ;;
-    "src/plugins/acct_gather_profile/hdf5/sh5util/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_profile/hdf5/sh5util/Makefile" ;;
-    "src/plugins/acct_gather_profile/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/acct_gather_profile/none/Makefile" ;;
     "src/plugins/jobcomp/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobcomp/Makefile" ;;
+    "src/plugins/jobcomp/elasticsearch/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobcomp/elasticsearch/Makefile" ;;
     "src/plugins/jobcomp/filetxt/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobcomp/filetxt/Makefile" ;;
     "src/plugins/jobcomp/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobcomp/none/Makefile" ;;
     "src/plugins/jobcomp/script/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/jobcomp/script/Makefile" ;;
@@ -25389,6 +26062,10 @@ do
     "src/plugins/launch/poe/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/launch/poe/Makefile" ;;
     "src/plugins/launch/runjob/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/launch/runjob/Makefile" ;;
     "src/plugins/launch/slurm/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/launch/slurm/Makefile" ;;
+    "src/plugins/power/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/power/Makefile" ;;
+    "src/plugins/power/common/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/power/common/Makefile" ;;
+    "src/plugins/power/cray/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/power/cray/Makefile" ;;
+    "src/plugins/power/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/power/none/Makefile" ;;
     "src/plugins/preempt/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/preempt/Makefile" ;;
     "src/plugins/preempt/job_prio/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/preempt/job_prio/Makefile" ;;
     "src/plugins/preempt/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/preempt/none/Makefile" ;;
@@ -25405,6 +26082,9 @@ do
     "src/plugins/proctrack/linuxproc/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/linuxproc/Makefile" ;;
     "src/plugins/proctrack/sgi_job/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/sgi_job/Makefile" ;;
     "src/plugins/proctrack/lua/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/proctrack/lua/Makefile" ;;
+    "src/plugins/route/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/route/Makefile" ;;
+    "src/plugins/route/default/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/route/default/Makefile" ;;
+    "src/plugins/route/topology/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/route/topology/Makefile" ;;
     "src/plugins/sched/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/sched/Makefile" ;;
     "src/plugins/sched/backfill/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/sched/backfill/Makefile" ;;
     "src/plugins/sched/builtin/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/sched/builtin/Makefile" ;;
@@ -25427,7 +26107,6 @@ do
     "src/plugins/select/other/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/other/Makefile" ;;
     "src/plugins/select/serial/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/select/serial/Makefile" ;;
     "src/plugins/slurmctld/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/slurmctld/Makefile" ;;
-    "src/plugins/slurmctld/dynalloc/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/slurmctld/dynalloc/Makefile" ;;
     "src/plugins/slurmctld/nonstop/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/slurmctld/nonstop/Makefile" ;;
     "src/plugins/slurmd/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/slurmd/Makefile" ;;
     "src/plugins/switch/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/switch/Makefile" ;;
@@ -25453,6 +26132,7 @@ do
     "src/plugins/task/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/task/none/Makefile" ;;
     "src/plugins/topology/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/topology/Makefile" ;;
     "src/plugins/topology/3d_torus/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/topology/3d_torus/Makefile" ;;
+    "src/plugins/topology/hypercube/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/topology/hypercube/Makefile" ;;
     "src/plugins/topology/node_rank/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/topology/node_rank/Makefile" ;;
     "src/plugins/topology/none/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/topology/none/Makefile" ;;
     "src/plugins/topology/tree/Makefile") CONFIG_FILES="$CONFIG_FILES src/plugins/topology/tree/Makefile" ;;
diff --git a/configure.ac b/configure.ac
index bf46efbfd..4c26de899 100644
--- a/configure.ac
+++ b/configure.ac
@@ -110,6 +110,9 @@ PKG_PROG_PKG_CONFIG([0.9.0])
 AM_CONDITIONAL(WITH_CXX, test -n "$ac_ct_CXX")
 AM_CONDITIONAL(WITH_GNU_LD, test "$with_gnu_ld" = "yes")
 
+AC_PATH_PROG([SLEEP_CMD], [sleep], [/bin/sleep])
+AC_DEFINE_UNQUOTED([SLEEP_CMD], ["$SLEEP_CMD"], [Define path to sleep command])
+
 AC_PATH_PROG([SUCMD], [su], [/bin/su])
 AC_DEFINE_UNQUOTED([SUCMD], ["$SUCMD"], [Define path to su command])
 
@@ -178,6 +181,9 @@ dnl Checks for types.
 dnl
 X_AC_SLURM_BIGENDIAN
 
+dnl Check for JSON parser
+X_AC_JSON
+
 dnl Checks for compiler characteristics.
 dnl
 AC_PROG_GCC_TRADITIONAL([])
@@ -238,7 +244,6 @@ AC_DEFINE([H5_NO_DEPRECATED_SYMBOLS], [1], [Make sure we get the 1.8 HDF5 API])
 
 X_AC_HWLOC
 X_AC_FREEIPMI
-X_AC_XCPU
 X_AC_SLURM_SEMAPHORE
 X_AC_RRDTOOL
 
@@ -336,6 +341,11 @@ dnl check for SGI job container support
 dnl
 X_AC_SGI_JOB
 
+
+dnl check for netloc library
+dnl
+X_AC_NETLOC
+
 dnl check for lua library
 dnl
 X_AC_LUA
@@ -409,6 +419,11 @@ dnl Check for compilation of SLURM with BLCR support:
 dnl
 X_AC_BLCR
 
+dnl
+dnl Check for compilation of SLURM with CURL support:
+dnl
+LIBCURL_CHECK_CONFIG
+
 dnl
 dnl Set some configuration based upon multiple configuration parameters
 dnl
@@ -425,9 +440,11 @@ AC_CONFIG_FILES([Makefile
 		 auxdir/Makefile
 		 contribs/Makefile
 		 contribs/cray/Makefile
+		 contribs/cray/csm/Makefile
 		 contribs/lua/Makefile
 		 contribs/mic/Makefile
 		 contribs/pam/Makefile
+		 contribs/pam_slurm_adopt/Makefile
 		 contribs/perlapi/Makefile
 		 contribs/perlapi/libslurm/Makefile
 		 contribs/perlapi/libslurm/perl/Makefile.PL
@@ -437,6 +454,7 @@ AC_CONFIG_FILES([Makefile
 		 contribs/phpext/Makefile
 		 contribs/phpext/slurm_php/config.m4
 		 contribs/sgather/Makefile
+		 contribs/sgi/Makefile
 		 contribs/sjobexit/Makefile
 		 contribs/slurmdb-direct/Makefile
 		 contribs/pmi2/Makefile
@@ -459,6 +477,9 @@ AC_CONFIG_FILES([Makefile
 		 src/api/Makefile
 		 src/common/Makefile
 		 src/db_api/Makefile
+		 src/layouts/Makefile
+		 src/layouts/power/Makefile
+		 src/layouts/unit/Makefile
 		 src/database/Makefile
 		 src/sacct/Makefile
 		 src/sacctmgr/Makefile
@@ -495,10 +516,30 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/accounting_storage/mysql/Makefile
 		 src/plugins/accounting_storage/none/Makefile
 		 src/plugins/accounting_storage/slurmdbd/Makefile
+		 src/plugins/acct_gather_energy/Makefile
+		 src/plugins/acct_gather_energy/cray/Makefile
+		 src/plugins/acct_gather_energy/rapl/Makefile
+		 src/plugins/acct_gather_energy/ipmi/Makefile
+		 src/plugins/acct_gather_energy/none/Makefile
+		 src/plugins/acct_gather_infiniband/Makefile
+		 src/plugins/acct_gather_infiniband/ofed/Makefile
+		 src/plugins/acct_gather_infiniband/none/Makefile
+		 src/plugins/acct_gather_filesystem/Makefile
+		 src/plugins/acct_gather_filesystem/lustre/Makefile
+		 src/plugins/acct_gather_filesystem/none/Makefile
+		 src/plugins/acct_gather_profile/Makefile
+		 src/plugins/acct_gather_profile/hdf5/Makefile
+		 src/plugins/acct_gather_profile/hdf5/sh5util/Makefile
+		 src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/Makefile
+		 src/plugins/acct_gather_profile/none/Makefile
 		 src/plugins/auth/Makefile
 		 src/plugins/auth/authd/Makefile
 		 src/plugins/auth/munge/Makefile
 		 src/plugins/auth/none/Makefile
+		 src/plugins/burst_buffer/Makefile
+		 src/plugins/burst_buffer/common/Makefile
+		 src/plugins/burst_buffer/cray/Makefile
+		 src/plugins/burst_buffer/generic/Makefile
 		 src/plugins/checkpoint/Makefile
 		 src/plugins/checkpoint/aix/Makefile
 		 src/plugins/checkpoint/blcr/Makefile
@@ -516,9 +557,6 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/ext_sensors/Makefile
 		 src/plugins/ext_sensors/rrd/Makefile
 		 src/plugins/ext_sensors/none/Makefile
-		 src/plugins/route/Makefile
-		 src/plugins/route/default/Makefile
-		 src/plugins/route/topology/Makefile
 		 src/plugins/gres/Makefile
 		 src/plugins/gres/gpu/Makefile
 		 src/plugins/gres/nic/Makefile
@@ -529,21 +567,8 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/jobacct_gather/aix/Makefile
 		 src/plugins/jobacct_gather/cgroup/Makefile
 		 src/plugins/jobacct_gather/none/Makefile
-		 src/plugins/acct_gather_energy/Makefile
-		 src/plugins/acct_gather_energy/rapl/Makefile
-		 src/plugins/acct_gather_energy/ipmi/Makefile
-		 src/plugins/acct_gather_energy/none/Makefile
-		 src/plugins/acct_gather_infiniband/Makefile
-		 src/plugins/acct_gather_infiniband/ofed/Makefile
-		 src/plugins/acct_gather_infiniband/none/Makefile
-		 src/plugins/acct_gather_filesystem/Makefile
-		 src/plugins/acct_gather_filesystem/lustre/Makefile
-		 src/plugins/acct_gather_filesystem/none/Makefile
-		 src/plugins/acct_gather_profile/Makefile
-		 src/plugins/acct_gather_profile/hdf5/Makefile
-		 src/plugins/acct_gather_profile/hdf5/sh5util/Makefile
-		 src/plugins/acct_gather_profile/none/Makefile
 		 src/plugins/jobcomp/Makefile
+		 src/plugins/jobcomp/elasticsearch/Makefile
 		 src/plugins/jobcomp/filetxt/Makefile
 		 src/plugins/jobcomp/none/Makefile
 		 src/plugins/jobcomp/script/Makefile
@@ -567,6 +592,10 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/launch/poe/Makefile
 		 src/plugins/launch/runjob/Makefile
 		 src/plugins/launch/slurm/Makefile
+		 src/plugins/power/Makefile
+		 src/plugins/power/common/Makefile
+		 src/plugins/power/cray/Makefile
+		 src/plugins/power/none/Makefile
 		 src/plugins/preempt/Makefile
 		 src/plugins/preempt/job_prio/Makefile
 		 src/plugins/preempt/none/Makefile
@@ -583,6 +612,9 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/proctrack/linuxproc/Makefile
 		 src/plugins/proctrack/sgi_job/Makefile
 		 src/plugins/proctrack/lua/Makefile
+		 src/plugins/route/Makefile
+		 src/plugins/route/default/Makefile
+		 src/plugins/route/topology/Makefile
 		 src/plugins/sched/Makefile
 		 src/plugins/sched/backfill/Makefile
 		 src/plugins/sched/builtin/Makefile
@@ -605,7 +637,6 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/select/other/Makefile
 		 src/plugins/select/serial/Makefile
 		 src/plugins/slurmctld/Makefile
-		 src/plugins/slurmctld/dynalloc/Makefile
 		 src/plugins/slurmctld/nonstop/Makefile
 		 src/plugins/slurmd/Makefile
 		 src/plugins/switch/Makefile
@@ -631,6 +662,7 @@ AC_CONFIG_FILES([Makefile
 		 src/plugins/task/none/Makefile
 		 src/plugins/topology/Makefile
 		 src/plugins/topology/3d_torus/Makefile
+		 src/plugins/topology/hypercube/Makefile
 		 src/plugins/topology/node_rank/Makefile
 		 src/plugins/topology/none/Makefile
 		 src/plugins/topology/tree/Makefile
diff --git a/contribs/Makefile.am b/contribs/Makefile.am
index 89feb67ce..6eb15a8c7 100644
--- a/contribs/Makefile.am
+++ b/contribs/Makefile.am
@@ -1,4 +1,4 @@
-SUBDIRS = cray lua pam perlapi torque sgather sjobexit slurmdb-direct pmi2 mic
+SUBDIRS = cray lua pam pam_slurm_adopt perlapi torque sgather sgi sjobexit slurmdb-direct pmi2 mic
 
 EXTRA_DIST = \
 	env_cache_builder.c	\
diff --git a/contribs/Makefile.in b/contribs/Makefile.in
index 3e46eed61..1a4435d25 100644
--- a/contribs/Makefile.in
+++ b/contribs/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -249,6 +252,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -298,8 +303,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -318,6 +327,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -361,6 +373,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -384,6 +397,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -445,7 +459,7 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = cray lua pam perlapi torque sgather sjobexit slurmdb-direct pmi2 mic
+SUBDIRS = cray lua pam pam_slurm_adopt perlapi torque sgather sgi sjobexit slurmdb-direct pmi2 mic
 EXTRA_DIST = \
 	env_cache_builder.c	\
 	make-3.81.slurm.patch	\
diff --git a/contribs/README b/contribs/README
index 29ebbdd19..b854cc84a 100644
--- a/contribs/README
+++ b/contribs/README
@@ -69,6 +69,13 @@ of the Slurm contribs distribution follows:
      This PAM module will restrict who can login to a node to users who have
      been allocated resources on the node and user root.
 
+  pam_slurm_adopt    [ Plugin for PAM to place incoming connections into
+                       existing Slurm job container ]
+     This Slurm plugin provides a mechanism for new incoming connections to
+     be placed into existing Slurm job containers so that they can be accounted
+     for and killed at job termination. See the README file in the subdirectory
+     for more details.
+
   perlapi/           [ Perl API to Slurm source ]
      API to Slurm using perl.  Making available all Slurm command that exist
      in the Slurm proper API.
@@ -93,6 +100,11 @@ of the Slurm contribs distribution follows:
      Gather remote files from a job into a central location. Reverse of of
      sbcast command.
 
+  sgi/               [Tools for use on SGI systems]
+     netloc_to_topology.c   [ C program ]
+     Used to construct a Slurm topology.conf file based upon SGI network APIs.
+     README.txt      [Documentation]
+
   sjobexit/          [ Perl programs ]
      Tools for managing job exit code records
 
diff --git a/contribs/cray/Makefile.am b/contribs/cray/Makefile.am
index 446b30779..fa90b5225 100644
--- a/contribs/cray/Makefile.am
+++ b/contribs/cray/Makefile.am
@@ -1,7 +1,9 @@
-
+#
 # Makefile for cray scripts
 #
 
+SUBDIRS = csm
+
 AUTOMAKE_OPTIONS = foreign
 
 EXTRA_DIST = \
diff --git a/contribs/cray/Makefile.in b/contribs/cray/Makefile.in
index 4466dfce0..ef137882a 100644
--- a/contribs/cray/Makefile.in
+++ b/contribs/cray/Makefile.in
@@ -14,6 +14,7 @@
 
 @SET_MAKE@
 
+#
 # Makefile for cray scripts
 #
 
@@ -101,6 +102,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -109,10 +111,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -125,7 +129,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -175,14 +179,74 @@ am__v_at_0 = @
 am__v_at_1 = 
 SOURCES =
 DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+	ctags-recursive dvi-recursive html-recursive info-recursive \
+	install-data-recursive install-dvi-recursive \
+	install-exec-recursive install-html-recursive \
+	install-info-recursive install-pdf-recursive \
+	install-ps-recursive install-recursive installcheck-recursive \
+	installdirs-recursive pdf-recursive ps-recursive \
+	tags-recursive uninstall-recursive
 am__can_run_installinfo = \
   case $$AM_UPDATE_INFO_DIR in \
     n|no|NO) false;; \
     *) (install-info --version) >/dev/null 2>&1;; \
   esac
 DATA = $(noinst_DATA)
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive	\
+  distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+  $(RECURSIVE_TARGETS) \
+  $(RECURSIVE_CLEAN_TARGETS) \
+  $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+	distdir
 am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+  dir0=`pwd`; \
+  sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+  sed_rest='s,^[^/]*/*,,'; \
+  sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+  sed_butlast='s,/*[^/]*$$,,'; \
+  while test -n "$$dir1"; do \
+    first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+    if test "$$first" != "."; then \
+      if test "$$first" = ".."; then \
+        dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+        dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+      else \
+        first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+        if test "$$first2" = "$$first"; then \
+          dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+        else \
+          dir2="../$$dir2"; \
+        fi; \
+        dir0="$$dir0"/"$$first"; \
+      fi; \
+    fi; \
+    dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+  done; \
+  reldir="$$dir2"
 ACLOCAL = @ACLOCAL@
 AMTAR = @AMTAR@
 AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
@@ -224,6 +288,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -273,8 +339,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -293,6 +363,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -336,6 +409,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -359,6 +433,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -420,6 +495,7 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
+SUBDIRS = csm
 AUTOMAKE_OPTIONS = foreign
 EXTRA_DIST = \
 	etc_sysconfig_slurm		\
@@ -432,7 +508,7 @@ EXTRA_DIST = \
 
 @HAVE_NATIVE_CRAY_TRUE@sbin_SCRIPTS = slurmconfgen.py
 @HAVE_REAL_CRAY_TRUE@noinst_DATA = opt_modulefiles_slurm
-all: all-am
+all: all-recursive
 
 .SUFFIXES:
 $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
@@ -506,12 +582,105 @@ mostlyclean-libtool:
 
 clean-libtool:
 	-rm -rf .libs _libs
-tags TAGS:
-
-ctags CTAGS:
 
-cscope cscopelist:
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+#     (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+	@fail=; \
+	if $(am__make_keepgoing); then \
+	  failcom='fail=yes'; \
+	else \
+	  failcom='exit 1'; \
+	fi; \
+	dot_seen=no; \
+	target=`echo $@ | sed s/-recursive//`; \
+	case "$@" in \
+	  distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+	  *) list='$(SUBDIRS)' ;; \
+	esac; \
+	for subdir in $$list; do \
+	  echo "Making $$target in $$subdir"; \
+	  if test "$$subdir" = "."; then \
+	    dot_seen=yes; \
+	    local_target="$$target-am"; \
+	  else \
+	    local_target="$$target"; \
+	  fi; \
+	  ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+	  || eval $$failcom; \
+	done; \
+	if test "$$dot_seen" = "no"; then \
+	  $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+	fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+	  include_option=--etags-include; \
+	  empty_fix=.; \
+	else \
+	  include_option=--include; \
+	  empty_fix=; \
+	fi; \
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    test ! -f $$subdir/TAGS || \
+	      set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+	  fi; \
+	done; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
 
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
 
 distdir: $(DISTFILES)
 	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
@@ -543,22 +712,48 @@ distdir: $(DISTFILES)
 	    || exit 1; \
 	  fi; \
 	done
+	@list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    $(am__make_dryrun) \
+	      || test -d "$(distdir)/$$subdir" \
+	      || $(MKDIR_P) "$(distdir)/$$subdir" \
+	      || exit 1; \
+	    dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+	    $(am__relativize); \
+	    new_distdir=$$reldir; \
+	    dir1=$$subdir; dir2="$(top_distdir)"; \
+	    $(am__relativize); \
+	    new_top_distdir=$$reldir; \
+	    echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+	    echo "     am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+	    ($(am__cd) $$subdir && \
+	      $(MAKE) $(AM_MAKEFLAGS) \
+	        top_distdir="$$new_top_distdir" \
+	        distdir="$$new_distdir" \
+		am__remove_distdir=: \
+		am__skip_length_check=: \
+		am__skip_mode_fix=: \
+	        distdir) \
+	      || exit 1; \
+	  fi; \
+	done
 check-am: all-am
-check: check-am
+check: check-recursive
 all-am: Makefile $(SCRIPTS) $(DATA)
-installdirs:
+installdirs: installdirs-recursive
+installdirs-am:
 	for dir in "$(DESTDIR)$(sbindir)"; do \
 	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
 	done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
 
 install-am: all-am
 	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
 
-installcheck: installcheck-am
+installcheck: installcheck-recursive
 install-strip:
 	if test -z '$(STRIP)'; then \
 	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
@@ -578,85 +773,87 @@ distclean-generic:
 maintainer-clean-generic:
 	@echo "This command is intended for maintainers to use"
 	@echo "it deletes files that may require special tools to rebuild."
-clean: clean-am
+clean: clean-recursive
 
 clean-am: clean-generic clean-libtool mostlyclean-am
 
-distclean: distclean-am
+distclean: distclean-recursive
 	-rm -f Makefile
-distclean-am: clean-am distclean-generic
+distclean-am: clean-am distclean-generic distclean-tags
 
-dvi: dvi-am
+dvi: dvi-recursive
 
 dvi-am:
 
-html: html-am
+html: html-recursive
 
 html-am:
 
-info: info-am
+info: info-recursive
 
 info-am:
 
 install-data-am:
 
-install-dvi: install-dvi-am
+install-dvi: install-dvi-recursive
 
 install-dvi-am:
 
 install-exec-am: install-sbinSCRIPTS
 
-install-html: install-html-am
+install-html: install-html-recursive
 
 install-html-am:
 
-install-info: install-info-am
+install-info: install-info-recursive
 
 install-info-am:
 
 install-man:
 
-install-pdf: install-pdf-am
+install-pdf: install-pdf-recursive
 
 install-pdf-am:
 
-install-ps: install-ps-am
+install-ps: install-ps-recursive
 
 install-ps-am:
 
 installcheck-am:
 
-maintainer-clean: maintainer-clean-am
+maintainer-clean: maintainer-clean-recursive
 	-rm -f Makefile
 maintainer-clean-am: distclean-am maintainer-clean-generic
 
-mostlyclean: mostlyclean-am
+mostlyclean: mostlyclean-recursive
 
 mostlyclean-am: mostlyclean-generic mostlyclean-libtool
 
-pdf: pdf-am
+pdf: pdf-recursive
 
 pdf-am:
 
-ps: ps-am
+ps: ps-recursive
 
 ps-am:
 
 uninstall-am: uninstall-sbinSCRIPTS
 
-.MAKE: install-am install-strip
+.MAKE: $(am__recursive_targets) install-am install-strip
 
-.PHONY: all all-am check check-am clean clean-generic clean-libtool \
-	cscopelist-am ctags-am distclean distclean-generic \
-	distclean-libtool distdir dvi dvi-am html html-am info info-am \
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+	check-am clean clean-generic clean-libtool cscopelist-am ctags \
+	ctags-am distclean distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
 	install install-am install-data install-data-am install-dvi \
 	install-dvi-am install-exec install-exec-am install-html \
 	install-html-am install-info install-info-am install-man \
 	install-pdf install-pdf-am install-ps install-ps-am \
 	install-sbinSCRIPTS install-strip installcheck installcheck-am \
-	installdirs maintainer-clean maintainer-clean-generic \
-	mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \
-	ps ps-am tags-am uninstall uninstall-am uninstall-sbinSCRIPTS
+	installdirs installdirs-am maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-generic \
+	mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \
+	uninstall-am uninstall-sbinSCRIPTS
 
 
 # Don't rely on autoconf to replace variables outside of makefiles
diff --git a/contribs/cray/csm/Makefile.am b/contribs/cray/csm/Makefile.am
new file mode 100644
index 000000000..88f1a59d6
--- /dev/null
+++ b/contribs/cray/csm/Makefile.am
@@ -0,0 +1,8 @@
+#
+# Makefile for cray/csm scripts
+#
+
+EXTRA_DIST = \
+	gres.conf.j2			\
+	slurm.conf.j2			\
+	slurmconfgen_smw.py
diff --git a/contribs/cray/csm/Makefile.in b/contribs/cray/csm/Makefile.in
new file mode 100644
index 000000000..331ce327c
--- /dev/null
+++ b/contribs/cray/csm/Makefile.in
@@ -0,0 +1,604 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+#
+# Makefile for cray/csm scripts
+#
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = contribs/cray/csm
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+SOURCES =
+DIST_SOURCES =
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+EXTRA_DIST = \
+	gres.conf.j2			\
+	slurm.conf.j2			\
+	slurmconfgen_smw.py
+
+all: all-am
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu contribs/cray/csm/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu contribs/cray/csm/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+tags TAGS:
+
+ctags CTAGS:
+
+cscope cscopelist:
+
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: all all-am check check-am clean clean-generic clean-libtool \
+	cscopelist-am ctags-am distclean distclean-generic \
+	distclean-libtool distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/contribs/cray/csm/gres.conf.j2 b/contribs/cray/csm/gres.conf.j2
new file mode 100644
index 000000000..938747715
--- /dev/null
+++ b/contribs/cray/csm/gres.conf.j2
@@ -0,0 +1,9 @@
+#
+# (c) Copyright 2015 Cray Inc.  All Rights Reserved.
+#
+# This file was generated by {{ script }} on {{ date }}.
+#
+# See the gres.conf man page for more information.
+#
+{% for node in nodes.values() %}{% for gres in node.Gres %}NodeName={{ node.NodeName }} Name={{ gres.Name }} {% if gres.File %}File={{ gres.File }}{% else %}Count={{ gres.Count }}{% endif %}
+{% endfor %}{% endfor %}
diff --git a/contribs/cray/csm/slurm.conf.j2 b/contribs/cray/csm/slurm.conf.j2
new file mode 100644
index 000000000..527137d99
--- /dev/null
+++ b/contribs/cray/csm/slurm.conf.j2
@@ -0,0 +1,59 @@
+#
+# (c) Copyright 2015 Cray Inc.  All Rights Reserved.
+#
+# This file was generated by {{ script }} on {{ date }}.
+#
+# See the slurm.conf man page for more information.
+#
+ControlMachine={{ controlmachine }}
+AuthType=auth/munge
+CoreSpecPlugin=cray
+CryptoType=crypto/munge
+GresTypes={{ grestypes|join(',') }}
+JobContainerType=job_container/cncu
+JobSubmitPlugins=cray
+KillOnBadExit=1
+MpiParams=ports=20000-32767
+ProctrackType=proctrack/cray
+# Some programming models require unlimited virtual memory
+PropagateResourceLimitsExcept=AS
+# ReturnToService 2 will let rebooted nodes come back up immediately
+ReturnToService=2
+SlurmctldPidFile=/var/spool/slurm/slurmctld.pid
+SlurmdPidFile=/var/spool/slurmd/slurmd.pid
+SlurmdSpoolDir=/var/spool/slurmd
+SlurmUser=root
+StateSaveLocation=/var/spool/slurm
+SwitchType=switch/cray
+TaskPlugin=task/affinity,task/cgroup,task/cray
+#
+#
+# SCHEDULING
+DefMemPerCPU={{ defmem }}
+FastSchedule=0
+MaxMemPerCPU={{ maxmem }}
+SchedulerType=sched/backfill
+SelectType=select/cray
+SelectTypeParameters=CR_CORE_Memory,other_cons_res
+#
+#
+# LOGGING AND ACCOUNTING
+JobCompType=jobcomp/none
+JobAcctGatherFrequency=30
+JobAcctGatherType=jobacct_gather/linux
+SlurmctldDebug=info
+SlurmctldLogFile=/var/spool/slurm/slurmctld.log
+SlurmdDebug=info
+SlurmdLogFile=/var/spool/slurmd/%h.log
+#
+#
+# POWER SAVE SUPPORT FOR IDLE NODES (optional)
+CpuFreqDef=performance
+#
+#
+# COMPUTE NODES
+{% for node in nodes.values() %}NodeName={{ node.NodeName }} Sockets={{ node.Sockets }} CoresPerSocket={{ node.CoresPerSocket }} ThreadsPerCore={{ node.ThreadsPerCore }} Gres={{ node.Gres|join(',') }} # RealMemory={{ node.RealMemory }}
+{% endfor %}#
+#
+# PARTITIONS
+PartitionName=workq Nodes={{ nodelist }} Shared=EXCLUSIVE Priority=1 Default=YES DefaultTime=60 MaxTime=24:00:00 State=UP
diff --git a/contribs/cray/csm/slurmconfgen_smw.py b/contribs/cray/csm/slurmconfgen_smw.py
new file mode 100644
index 000000000..7e5e8c23f
--- /dev/null
+++ b/contribs/cray/csm/slurmconfgen_smw.py
@@ -0,0 +1,271 @@
+#!/usr/bin/env python
+#
+# Copyright 2015 Cray Inc. All Rights Reserved
+""" A script to generate slurm.conf and gres.conf for a
+    Cray system on the smw """
+
+import argparse
+import os
+import subprocess
+import sys
+import time
+import xml.etree.ElementTree
+from jinja2 import Environment, FileSystemLoader
+
+NAME = 'slurmconfgen_smw.py'
+
+class Gres(object):
+    """ A class for generic resources """
+    def __init__(self, name, count):
+        """ Initialize a gres with the given name and count """
+        self.Name = name
+        self.Count = count
+        if name == 'gpu':
+            if count == 1:
+                self.File = '/dev/nvidia0'
+            else:
+                self.File = '/dev/nvidia[0-{0}]'.format(count - 1)
+        elif name == 'mic':
+            if count == 1:
+                self.File = '/dev/mic0'
+            else:
+                self.File = '/dev/mic[0-{0}]'.format(count - 1)
+        else:
+            self.File = None
+
+    def __eq__(self, other):
+        """ Check if two gres are equal """
+        return (self.Name == other.Name and self.Count == other.Count and
+                self.File == other.File)
+
+    def __str__(self):
+        """ Return a gres string suitable for slurm.conf """
+        if self.Count == 1:
+            return self.Name
+        else:
+            return '{0}:{1}'.format(self.Name, self.Count)
+
+
+
+def parse_args():
+    """ Parse arguments """
+    parser = argparse.ArgumentParser(
+        description='Generate slurm.conf and gres.conf on a Cray smw')
+    parser.add_argument('controlmachine',
+                        help='Hostname of the node to run slurmctld')
+    parser.add_argument('partition',
+                        help='Partition to generate slurm.conf for')
+    parser.add_argument('-t', '--templatedir',
+                        help='Directory containing j2 templates',
+                        default='.')
+    parser.add_argument('-o', '--output',
+                        help='Output directory for slurm.conf and gres.conf',
+                        default='.')
+    return parser.parse_args()
+
+
+def get_inventory(partition):
+    """ Gets a hardware inventory for the given partition.
+        Returns the node dictionary """
+    print 'Gathering hardware inventory...'
+    nodes = {}
+
+    # Get an inventory and parse the XML
+    xthwinv = subprocess.Popen(['/opt/cray/hss/default/bin/xthwinv',
+                                '-X', partition], stdout=subprocess.PIPE)
+    inventory, _ = xthwinv.communicate()
+    inventoryxml = xml.etree.ElementTree.fromstring(inventory)
+
+    # Loop through all modules
+    for modulexml in inventoryxml.findall('module_list/module'):
+        # Skip service nodes
+        board_type = modulexml.find('board_type').text
+        if board_type == '10':
+            continue
+        elif board_type != '13':
+            print 'WARNING: board type {0} unknown'.format(board_type)
+
+        # Loop through nodes in this module
+        for nodexml in modulexml.findall('node_list/node'):
+            nid = int(nodexml.find('nic').text)
+            cores = int(nodexml.find('cores').text)
+            sockets = int(nodexml.find('sockets').text)
+            memory = int(nodexml.find('memory/sizeGB').text) * 1024
+
+            node = {'CoresPerSocket': cores / sockets,
+                    'RealMemory': memory,
+                    'Sockets': sockets,
+                    'ThreadsPerCore': int(nodexml.find('hyper_threads').text)}
+
+            # Determine the generic resources
+            craynetwork = 4
+            gpu = 0
+            mic = 0
+            for accelxml in nodexml.findall(
+                    'accelerator_list/accelerator/type'):
+                if accelxml.text == 'GPU':
+                    gpu += 1
+                elif accelxml.text == 'MIC':
+                    mic += 1
+                    craynetwork = 2
+                else:
+                    print ('WARNING: accelerator type {0} unknown'
+                           .format(accelxml.text))
+
+            node['Gres'] = [Gres('craynetwork', craynetwork)]
+            if gpu > 0:
+                node['Gres'].append(Gres('gpu', gpu))
+            if mic > 0:
+                node['Gres'].append(Gres('mic', mic))
+
+            # Add to output data structures
+            nodes[nid] = node
+
+    return nodes
+
+
+def compact_nodes(nodes):
+    """ Compacts nodes when possible into single entries """
+    basenode = None
+    toremove = []
+
+    print 'Compacting node configuration...'
+    for curnid in sorted(nodes):
+        if basenode is None:
+            basenode = nodes[curnid]
+            nidlist = [int(curnid)]
+            continue
+
+        curnode = nodes[curnid]
+        if (curnode['CoresPerSocket'] == basenode['CoresPerSocket'] and
+                curnode['Gres'] == basenode['Gres'] and
+                curnode['RealMemory'] == basenode['RealMemory'] and
+                curnode['Sockets'] == basenode['Sockets'] and
+                curnode['ThreadsPerCore'] == basenode['ThreadsPerCore']):
+            # Append this nid to the nidlist
+            nidlist.append(int(curnid))
+            toremove.append(curnid)
+        else:
+            # We can't consolidate, move on
+            basenode['NodeName'] = rli_compress(nidlist)
+            basenode = curnode
+            nidlist = [int(curnid)]
+
+    basenode['NodeName'] = rli_compress(nidlist)
+
+    # Remove nodes we've consolidated
+    for nid in toremove:
+        del nodes[nid]
+
+
+def scale_mem(mem):
+    """ Scale memory values back since available memory is
+        lower than total memory """
+    return mem * 98 / 100
+
+
+def get_mem_per_cpu(nodes):
+    """ Given the node configuration, determine the
+        default memory per cpu (mem)/(cores)
+        and max memory per cpu, returned as a tuple """
+    defmem = 0
+    maxmem = 0
+    for node in nodes.values():
+        if node['RealMemory'] > maxmem:
+            maxmem = node['RealMemory']
+
+        mem_per_thread = (node['RealMemory'] / node['Sockets'] /
+                          node['CoresPerSocket'] / node['ThreadsPerCore'])
+        if defmem == 0 or mem_per_thread < defmem:
+            defmem = mem_per_thread
+
+    return (scale_mem(defmem), scale_mem(maxmem))
+
+
+def range_str(range_start, range_end, field_width):
+    """ Returns a string representation of the given range
+            using the given field width """
+    if range_end < range_start:
+        raise Exception('Range end before range start')
+    elif range_start == range_end:
+        return '{0:0{1}d}'.format(range_end, field_width)
+    elif range_start + 1 == range_end:
+        return '{0:0{2}d},{1:0{2}d}'.format(range_start, range_end,
+                                            field_width)
+
+    return '{0:0{2}d}-{1:0{2}d}'.format(range_start, range_end,
+                                        field_width)
+
+
+def rli_compress(nidlist):
+    """ Given a list of node ids, rli compress them into a slurm hostlist
+       (ex. list [1,2,3,5] becomes string nid0000[1-3,5]) """
+
+    # Determine number of digits in the highest nid number
+    numdigits = len(str(max(nidlist)))
+    if numdigits > 5:
+        raise Exception('Nid number too high')
+
+    range_start = nidlist[0]
+    range_end = nidlist[0]
+    ranges = []
+    for nid in nidlist:
+        # If nid too large, append to rli and start fresh
+        if nid > range_end + 1 or nid < range_end:
+            ranges.append(range_str(range_start, range_end, numdigits))
+            range_start = nid
+
+        range_end = nid
+
+    # Append the last range
+    ranges.append(range_str(range_start, range_end, numdigits))
+
+    return 'nid{0}[{1}]'.format('0' * (5 - numdigits), ','.join(ranges))
+
+
+def get_gres_types(nodes):
+    """ Get a set of gres types """
+    grestypes = set()
+    for node in nodes.values():
+        grestypes.update([gres.Name for gres in node['Gres']])
+    return grestypes
+
+
+def main():
+    """ Get hardware info, format it, and write to slurm.conf and gres.conf """
+    args = parse_args()
+
+    # Get info from xthwinv and xtcli
+    nodes = get_inventory(args.partition)
+    nodelist = rli_compress([int(nid) for nid in nodes])
+    compact_nodes(nodes)
+    defmem, maxmem = get_mem_per_cpu(nodes)
+
+    # Write files from templates
+    jinjaenv = Environment(loader=FileSystemLoader(args.templatedir))
+    conffile = os.path.join(args.output, 'slurm.conf')
+    print 'Writing Slurm configuration to {0}...'.format(conffile)
+    with open(conffile, 'w') as outfile:
+        outfile.write(jinjaenv.get_template('slurm.conf.j2').render(
+                script=sys.argv[0],
+                date=time.asctime(),
+                controlmachine=args.controlmachine,
+                grestypes=get_gres_types(nodes),
+                defmem=defmem,
+                maxmem=maxmem,
+                nodes=nodes,
+                nodelist=nodelist))
+
+    gresfilename = os.path.join(args.output, 'gres.conf')
+    print 'Writing gres configuration to {0}...'.format(gresfilename)
+    with open(gresfilename, 'w') as gresfile:
+        gresfile.write(jinjaenv.get_template('gres.conf.j2').render(
+                script=sys.argv[0],
+                date=time.asctime(),
+                nodes=nodes))
+
+    print 'Done.'
+
+
+if __name__ == "__main__":
+    main()
diff --git a/contribs/cray/slurm.conf.template b/contribs/cray/slurm.conf.template
index 519a2d4a5..fad2379c8 100644
--- a/contribs/cray/slurm.conf.template
+++ b/contribs/cray/slurm.conf.template
@@ -169,4 +169,4 @@ CpuFreqDef=performance
 # COMPUTE NODES
 {computenodes}
 
-PartitionName=workq Nodes={nodelist} Shared=NO Priority=1 Default=YES DefaultTime=60 MaxTime=24:00:00 State=UP
+PartitionName=workq Nodes={nodelist} Shared=EXCLUSIVE Priority=1 Default=YES DefaultTime=60 MaxTime=24:00:00 State=UP
diff --git a/contribs/lua/Makefile.in b/contribs/lua/Makefile.in
index 39dda3dde..29c76f439 100644
--- a/contribs/lua/Makefile.in
+++ b/contribs/lua/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -189,6 +192,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -238,8 +243,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -258,6 +267,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -301,6 +313,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -324,6 +337,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/contribs/mic/Makefile.in b/contribs/mic/Makefile.in
index ccae6e388..3d4698b39 100644
--- a/contribs/mic/Makefile.in
+++ b/contribs/mic/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -189,6 +192,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -238,8 +243,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -258,6 +267,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -301,6 +313,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -324,6 +337,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/contribs/pam/Makefile.in b/contribs/pam/Makefile.in
index 9c39683ef..974932ae1 100644
--- a/contribs/pam/Makefile.in
+++ b/contribs/pam/Makefile.in
@@ -101,6 +101,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -109,10 +110,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -125,7 +128,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -279,6 +282,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -328,8 +333,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -348,6 +357,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -391,6 +403,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -414,6 +427,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/contribs/pam_slurm_adopt/Makefile.am b/contribs/pam_slurm_adopt/Makefile.am
new file mode 100644
index 000000000..15b1cde2b
--- /dev/null
+++ b/contribs/pam_slurm_adopt/Makefile.am
@@ -0,0 +1,42 @@
+#
+# Makefile for pam_slurm_adopt
+#
+
+AUTOMAKE_OPTIONS = foreign
+
+AM_CPPFLAGS = -fPIC -I$(top_srcdir) -I$(top_srcdir)/src/common
+# -DLIBSLURM_SO=\"$(libdir)/libslurm.so\"
+PLUGIN_FLAGS = -module --export-dynamic -avoid-version
+
+pkglibdir = $(PAM_DIR)
+
+if HAVE_PAM
+pam_lib = pam_slurm_adopt.la
+else
+pam_lib =
+endif
+
+pkglib_LTLIBRARIES = $(pam_lib)
+
+if HAVE_PAM
+
+current = $(SLURM_API_CURRENT)
+age     = $(SLURM_API_AGE)
+rev     = $(SLURM_API_REVISION)
+
+pam_slurm_adopt_la_SOURCES = pam_slurm_adopt.c helper.c helper.h
+
+pam_slurm_adopt_la_LIBADD = $(top_builddir)/src/api/libslurm.la
+
+pam_slurm_adopt_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) $(LIB_LDFLAGS)
+
+force:
+$(pam_slurm_adopt_la_LIBADD) : force
+	@cd `dirname $@` && $(MAKE)
+#	Don't specify basename or version.map files in src/api will not be built
+#	@cd `dirname $@` && $(MAKE) `basename $@`
+
+else
+EXTRA_pam_slurm_adopt_la_SOURCES = pam_slurm_adopt.c helper.c
+endif
+
diff --git a/contribs/pam_slurm_adopt/Makefile.in b/contribs/pam_slurm_adopt/Makefile.in
new file mode 100644
index 000000000..ef70203fb
--- /dev/null
+++ b/contribs/pam_slurm_adopt/Makefile.in
@@ -0,0 +1,835 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+#
+# Makefile for pam_slurm_adopt
+#
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = contribs/pam_slurm_adopt
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+	$(top_srcdir)/auxdir/depcomp README
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+  test -z "$$files" \
+    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+         $(am__cd) "$$dir" && rm -f $$files; }; \
+  }
+am__installdirs = "$(DESTDIR)$(pkglibdir)"
+LTLIBRARIES = $(pkglib_LTLIBRARIES)
+@HAVE_PAM_TRUE@pam_slurm_adopt_la_DEPENDENCIES =  \
+@HAVE_PAM_TRUE@	$(top_builddir)/src/api/libslurm.la
+am__pam_slurm_adopt_la_SOURCES_DIST = pam_slurm_adopt.c helper.c \
+	helper.h
+@HAVE_PAM_TRUE@am_pam_slurm_adopt_la_OBJECTS = pam_slurm_adopt.lo \
+@HAVE_PAM_TRUE@	helper.lo
+am__EXTRA_pam_slurm_adopt_la_SOURCES_DIST = pam_slurm_adopt.c helper.c
+pam_slurm_adopt_la_OBJECTS = $(am_pam_slurm_adopt_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 = 
+pam_slurm_adopt_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \
+	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
+	$(AM_CFLAGS) $(CFLAGS) $(pam_slurm_adopt_la_LDFLAGS) \
+	$(LDFLAGS) -o $@
+@HAVE_PAM_TRUE@am_pam_slurm_adopt_la_rpath = -rpath $(pkglibdir)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo "  CC      " $@;
+am__v_CC_1 = 
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo "  CCLD    " $@;
+am__v_CCLD_1 = 
+SOURCES = $(pam_slurm_adopt_la_SOURCES) \
+	$(EXTRA_pam_slurm_adopt_la_SOURCES)
+DIST_SOURCES = $(am__pam_slurm_adopt_la_SOURCES_DIST) \
+	$(am__EXTRA_pam_slurm_adopt_la_SOURCES_DIST)
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+pkglibdir = $(PAM_DIR)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+AM_CPPFLAGS = -fPIC -I$(top_srcdir) -I$(top_srcdir)/src/common
+# -DLIBSLURM_SO=\"$(libdir)/libslurm.so\"
+PLUGIN_FLAGS = -module --export-dynamic -avoid-version
+@HAVE_PAM_FALSE@pam_lib = 
+@HAVE_PAM_TRUE@pam_lib = pam_slurm_adopt.la
+pkglib_LTLIBRARIES = $(pam_lib)
+@HAVE_PAM_TRUE@current = $(SLURM_API_CURRENT)
+@HAVE_PAM_TRUE@age = $(SLURM_API_AGE)
+@HAVE_PAM_TRUE@rev = $(SLURM_API_REVISION)
+@HAVE_PAM_TRUE@pam_slurm_adopt_la_SOURCES = pam_slurm_adopt.c helper.c helper.h
+@HAVE_PAM_TRUE@pam_slurm_adopt_la_LIBADD = $(top_builddir)/src/api/libslurm.la
+@HAVE_PAM_TRUE@pam_slurm_adopt_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) $(LIB_LDFLAGS)
+#	Don't specify basename or version.map files in src/api will not be built
+#	@cd `dirname $@` && $(MAKE) `basename $@`
+@HAVE_PAM_FALSE@EXTRA_pam_slurm_adopt_la_SOURCES = pam_slurm_adopt.c helper.c
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign contribs/pam_slurm_adopt/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign contribs/pam_slurm_adopt/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	list2=; for p in $$list; do \
+	  if test -f $$p; then \
+	    list2="$$list2 $$p"; \
+	  else :; fi; \
+	done; \
+	test -z "$$list2" || { \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(pkglibdir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" || exit 1; \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \
+	}
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	for p in $$list; do \
+	  $(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; \
+	locs=`for p in $$list; do echo $$p; done | \
+	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+	      sort -u`; \
+	test -z "$$locs" || { \
+	  echo rm -f $${locs}; \
+	  rm -f $${locs}; \
+	}
+
+pam_slurm_adopt.la: $(pam_slurm_adopt_la_OBJECTS) $(pam_slurm_adopt_la_DEPENDENCIES) $(EXTRA_pam_slurm_adopt_la_DEPENDENCIES) 
+	$(AM_V_CCLD)$(pam_slurm_adopt_la_LINK) $(am_pam_slurm_adopt_la_rpath) $(pam_slurm_adopt_la_OBJECTS) $(pam_slurm_adopt_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/helper.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pam_slurm_adopt.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+	for dir in "$(DESTDIR)$(pkglibdir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-pkglibLTLIBRARIES
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pkglibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-pkglibLTLIBRARIES cscopelist-am ctags \
+	ctags-am distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-pkglibLTLIBRARIES install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \
+	uninstall-pkglibLTLIBRARIES
+
+
+@HAVE_PAM_TRUE@force:
+@HAVE_PAM_TRUE@$(pam_slurm_adopt_la_LIBADD) : force
+@HAVE_PAM_TRUE@	@cd `dirname $@` && $(MAKE)
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/contribs/pam_slurm_adopt/README b/contribs/pam_slurm_adopt/README
new file mode 100644
index 000000000..c4658db31
--- /dev/null
+++ b/contribs/pam_slurm_adopt/README
@@ -0,0 +1,117 @@
+NAME
+  pam_slurm_adopt.so
+
+SYNOPSIS
+  Adopt incoming connections into jobs
+
+AUTHOR
+  Ryan Cox <ryan_cox@byu.edu>
+
+MODULE TYPES PROVIDED
+  account
+
+DESCRIPTION
+  This module attempts to determine the job which originated this connection.
+  The module is configurable; these are the default steps:
+
+  1) Check the local stepds for a count of jobs owned by the non-root user
+    a) If none, deny (option action_no_jobs)
+    b) If only one, adopt the process into that job
+    c) If multiple, continue
+  2) Determine src/dst IP/port of socket
+  3) Issue callerid RPC to slurmd at IP address of source
+    a) If the remote slurmd can identify the source job, adopt into that job
+    b) If not, continue
+  4) Pick a random local job from the user to adopt into (option action_unknown)
+
+  Processes are adopted into a job's allocation step.
+
+MODULE OPTIONS
+This module has the following options (* = default):
+
+    ignore_root - By default, all root connections are ignored. If the RPC call
+                  is sent to a node which drops packets to the slurmd port, the
+                  RPC call will block for some time before failing. This is
+                  unlikely to be desirable. Likewise, root may be trying to
+                  administer the system and not do work that should be in a job.
+                  The job may trigger oom-killer or just exit. If root restarts
+                  a service or similar, it will be tracked and killed by Slurm
+                  when the job exits. This sounds bad because it is bad.
+
+        1* = let the connection through without adoption
+        0  = I am crazy. I want random services to die when root jobs exit. I
+             also like it when RPC calls block for a while then time out.
+
+
+    action_no_jobs - The action to perform if the user has no jobs on the node
+
+        ignore = let the connection through without adoption
+        deny* = deny the connection
+
+
+    action_unknown - The action to perform when the RPC call does not locate the
+                     source job and the user has multiple jobs on the node to
+                     choose from
+
+        any* = pick a job in a (somewhat) random fashion
+        ignore = let the connection through without adoption
+        deny = deny the connection
+
+
+    action_adopt_failure - The action to perform if the process is unable to be
+                           adopted into a job for whatever reason
+
+        ignore = let the connection through without adoption
+        deny* = deny the connection
+
+
+    log_level - See SlurmdDebug in slurm.conf(5) for available options. The
+                default log_level is info.
+
+NOTES
+  This module and the related RPC call currently support Linux systems which
+  have network connection information available through /proc/net/tcp{,6}.  A
+  process's sockets must exist as symlinks in its /proc/self/fd directory.
+
+  The RPC data structure itself is OS-agnostic.  If support is desired for a
+  different OS, relevant code must be added to find one's socket information
+  then match that information on the remote end to a particular process which
+  Slurm is tracking.
+
+  IPv6 is supported by the RPC data structure itself and the code which sends it
+  or receives it.  Sending the RPC call to an IPv6 address is not currently
+  supported by Slurm.  Once support is added, remove the relevant check in
+  slurm_network_callerid ().
+
+  proctrack/cgroup is recommended on Linux.
+
+FIREWALLS, IP ADDRESSES, ETC.
+  slurmd should be accessible on any IP address that a user might launch ssh.
+  The RPC call to determine the source job must be able to reach the slurmd port
+  on that particular IP address.
+
+  If there is no slurmd on the source node, it is better to have the RPC call be
+  rejected rather than silently dropped.  This will allow better responsiveness
+  to the RPC initiator.
+
+EXAMPLES / SUGGESTED USAGE
+  Use of this module is recommended on any compute node.
+
+  Add the following line to the appropriate file in /etc/pam.d, such as
+  system-auth or sshd:
+
+    account    required     pam_slurm_adopt.so
+
+  If you always want to allow access for an administrative group (eg, wheel),
+  stack pam_slurm_adopt ahead of the pam_access module:
+
+    account    sufficient   pam_slurm_adopt.so
+    account    required     pam_access.so
+
+
+  Then edit the pam_access configuration file (/etc/security/access.conf):
+
+    +:wheel:ALL
+    -:ALL:ALL
+
+  When access is denied, the user will receive a relevant error message.
diff --git a/contribs/pam_slurm_adopt/helper.c b/contribs/pam_slurm_adopt/helper.c
new file mode 100644
index 000000000..04bddf42c
--- /dev/null
+++ b/contribs/pam_slurm_adopt/helper.c
@@ -0,0 +1,199 @@
+/*****************************************************************************\
+ *  $Id$
+ *****************************************************************************
+ *
+ *  Useful portions extracted from pam_slurm.c by Ryan Cox <ryan_cox@byu.edu>
+ *
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  UCRL-CODE-2002-040.
+ *
+ *  Written by Chris Dunlap <cdunlap@llnl.gov>
+ *         and Jim Garlick  <garlick@llnl.gov>
+ *         modified for SLURM by Moe Jette <jette@llnl.gov>.
+ *
+ *  This file is part of pam_slurm, a PAM module for restricting access to
+ *  the compute nodes within a cluster based on information obtained from
+ *  Simple Linux Utility for Resource Management (SLURM).  For details, see
+ *  <http://www.llnl.gov/linux/slurm/>.
+ *
+ *  pam_slurm is free software; you can redistribute it and/or modify it
+ *  under the terms of the GNU General Public License as published by the
+ *  Free Software Foundation; either version 2 of the License, or (at your
+ *  option) any later version.
+ *
+ *  pam_slurm is distributed in the hope that it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ *  for more details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with pam_slurm; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef PAM_MODULE_NAME
+#  define PAM_MODULE_NAME "pam_slurm_adopt"
+#endif
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <ctype.h>
+#include <errno.h>
+#include <pwd.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/param.h>
+#include <sys/types.h>
+#include <syslog.h>
+#include <unistd.h>
+#include <dlfcn.h>
+
+#include "slurm/slurm.h"
+#include "src/common/slurm_xlator.h"
+
+/*  Define the externally visible functions in this file.
+ */
+#define PAM_SM_ACCOUNT
+#include <security/pam_modules.h>
+#include <security/_pam_macros.h>
+
+
+/* Define the functions to be called before and after load since _init
+ * and _fini are obsolete, and their use can lead to unpredictable
+ * results.
+ */
+void __attribute__ ((constructor)) libpam_slurm_init(void);
+void __attribute__ ((destructor)) libpam_slurm_fini(void);
+
+/*
+ *  Handle for libslurm.so
+ *
+ *  We open libslurm.so via dlopen () in order to pass the
+ *   flag RTLD_GLOBAL so that subsequently loaded modules have
+ *   access to libslurm symbols. This is pretty much only needed
+ *   for dynamically loaded modules that would otherwise be
+ *   linked against libslurm.
+ *
+ */
+static void * slurm_h = NULL;
+
+/* This function is necessary because libpam_slurm_init is called without access
+ * to the pam handle.
+ */
+static void
+_log_msg(int level, const char *format, ...)
+{
+	va_list args;
+
+	openlog(PAM_MODULE_NAME, LOG_CONS | LOG_PID, LOG_AUTHPRIV);
+	va_start(args, format);
+	vsyslog(level, format, args);
+	va_end(args);
+	closelog();
+	return;
+}
+
+/*
+ *  Sends a message to the application informing the user
+ *  that access was denied due to SLURM.
+ */
+extern void
+send_user_msg(pam_handle_t *pamh, const char *mesg)
+{
+	int retval;
+	struct pam_conv *conv;
+	void *dummy;    /* needed to eliminate warning:
+			 * dereferencing type-punned pointer will
+			 * break strict-aliasing rules */
+	char str[PAM_MAX_MSG_SIZE];
+	struct pam_message msg[1];
+	const struct pam_message *pmsg[1];
+	struct pam_response *prsp;
+
+	info("send_user_msg: %s", mesg);
+	/*  Get conversation function to talk with app.
+	 */
+	retval = pam_get_item(pamh, PAM_CONV, (const void **) &dummy);
+	conv = (struct pam_conv *) dummy;
+	if (retval != PAM_SUCCESS) {
+		_log_msg(LOG_ERR, "unable to get pam_conv: %s",
+			 pam_strerror(pamh, retval));
+		return;
+	}
+
+	/*  Construct msg to send to app.
+	 */
+	memcpy(str, mesg, sizeof(str));
+	msg[0].msg_style = PAM_ERROR_MSG;
+	msg[0].msg = str;
+	pmsg[0] = &msg[0];
+	prsp = NULL;
+
+	/*  Send msg to app and free the (meaningless) rsp.
+	 */
+	retval = conv->conv(1, pmsg, &prsp, conv->appdata_ptr);
+	if (retval != PAM_SUCCESS)
+		_log_msg(LOG_ERR, "unable to converse with app: %s",
+			 pam_strerror(pamh, retval));
+	if (prsp != NULL)
+		_pam_drop_reply(prsp, 1);
+
+	return;
+}
+
+/*
+ * Dynamically open system's libslurm.so with RTLD_GLOBAL flag.
+ * This allows subsequently loaded modules access to libslurm symbols.
+ */
+extern void libpam_slurm_init (void)
+{
+	char libslurmname[64];
+
+	if (slurm_h)
+		return;
+
+	/* First try to use the same libslurm version ("libslurm.so.24.0.0"),
+	 * Second try to match the major version number ("libslurm.so.24"),
+	 * Otherwise use "libslurm.so" */
+	if (snprintf(libslurmname, sizeof(libslurmname),
+			"libslurm.so.%d.%d.%d", SLURM_API_CURRENT,
+			SLURM_API_REVISION, SLURM_API_AGE) >=
+			(signed) sizeof(libslurmname) ) {
+		_log_msg (LOG_ERR, "Unable to write libslurmname\n");
+	} else if ((slurm_h = dlopen(libslurmname, RTLD_NOW|RTLD_GLOBAL))) {
+		return;
+	} else {
+		_log_msg (LOG_INFO, "Unable to dlopen %s: %s\n",
+			libslurmname, dlerror ());
+	}
+
+	if (snprintf(libslurmname, sizeof(libslurmname), "libslurm.so.%d",
+			SLURM_API_CURRENT) >= (signed) sizeof(libslurmname) ) {
+		_log_msg (LOG_ERR, "Unable to write libslurmname\n");
+	} else if ((slurm_h = dlopen(libslurmname, RTLD_NOW|RTLD_GLOBAL))) {
+		return;
+	} else {
+		_log_msg (LOG_INFO, "Unable to dlopen %s: %s\n",
+			libslurmname, dlerror ());
+	}
+
+	if (!(slurm_h = dlopen("libslurm.so", RTLD_NOW|RTLD_GLOBAL))) {
+		_log_msg (LOG_ERR, "Unable to dlopen libslurm.so: %s\n",
+			  dlerror ());
+	}
+
+	return;
+}
+
+extern void libpam_slurm_fini (void)
+{
+	if (slurm_h)
+		dlclose (slurm_h);
+	return;
+}
diff --git a/contribs/pam_slurm_adopt/helper.h b/contribs/pam_slurm_adopt/helper.h
new file mode 100644
index 000000000..e21d64fb1
--- /dev/null
+++ b/contribs/pam_slurm_adopt/helper.h
@@ -0,0 +1,11 @@
+/* helper.h
+ *
+ * Some helper functions needed for pam_slurm_adopt.c */
+
+#define PAM_SM_ACCOUNT
+#include <security/pam_modules.h>
+#include <security/_pam_macros.h>
+
+extern void send_user_msg(pam_handle_t *pamh, const char *msg);
+extern void libpam_slurm_init (void);
+extern void libpam_slurm_fini (void);
diff --git a/contribs/pam_slurm_adopt/pam_slurm_adopt.c b/contribs/pam_slurm_adopt/pam_slurm_adopt.c
new file mode 100644
index 000000000..8af62b7ce
--- /dev/null
+++ b/contribs/pam_slurm_adopt/pam_slurm_adopt.c
@@ -0,0 +1,543 @@
+/*****************************************************************************\
+ *  pam_slurm_adopt.c - Adopt incoming connections into jobs
+ *****************************************************************************
+ *  Copyright (C) 2015, Brigham Young University
+ *  Author:  Ryan Cox <ryan_cox@byu.edu>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef PAM_MODULE_NAME
+#  define PAM_MODULE_NAME "pam_slurm_adopt"
+#endif
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <security/_pam_macros.h>
+#include <security/pam_ext.h>
+#define PAM_SM_ACCOUNT
+#include <security/pam_modules.h>
+#include <security/pam_modutil.h>
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <syslog.h>
+#include <pwd.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <arpa/inet.h>
+
+#include "helper.h"
+#include "slurm/slurm.h"
+#include "src/common/slurm_xlator.h"
+#include "src/common/slurm_protocol_api.h"
+
+/* This definition would probably be good to centralize somewhere */
+#ifndef MAXHOSTNAMELEN
+#define MAXHOSTNAMELEN    64
+#endif
+
+typedef enum {
+	CALLERID_ACTION_ANY,
+	CALLERID_ACTION_IGNORE,
+	CALLERID_ACTION_DENY
+} callerid_action_t;
+
+/* module options */
+static struct {
+	int single_job_skip_rpc;
+	int ignore_root;
+	int action_no_jobs;
+	callerid_action_t action_unknown;
+	callerid_action_t action_adopt_failure;
+	callerid_action_t action_generic_failure;
+	log_level_t log_level;
+} opts;
+
+static void _init_opts(void)
+{
+	opts.single_job_skip_rpc = 1;
+	opts.ignore_root = 1;
+	opts.action_no_jobs = CALLERID_ACTION_DENY;
+	opts.action_unknown = CALLERID_ACTION_ANY;
+	opts.action_adopt_failure = CALLERID_ACTION_IGNORE;
+	opts.action_generic_failure = CALLERID_ACTION_IGNORE;
+	opts.log_level = LOG_LEVEL_INFO;
+}
+
+static int _adopt_process(pid_t pid, uint32_t job_id)
+{
+	/* TODO:  add this pid to plugins for task, container, accounting, etc
+	*  need more code here ... */
+	info("_adopt_process(%d, %u): UNIMPLEMENTED", pid, job_id);
+
+	/* TODO:  change my primary gid to the job's group, if possible */
+	return SLURM_SUCCESS;
+}
+
+/* Returns negative number on failure. Failures are likely to occur if a step
+ * exits; this is not a problem. */
+static uid_t _get_job_uid(step_loc_t *stepd)
+{
+	/* BUG: uid_t on Linux is unsigned but stepd_get_uid can return -1 */
+	uid_t uid = -1;
+	int fd;
+	uint16_t protocol_version;
+
+	fd = stepd_connect(stepd->directory, stepd->nodename,
+			stepd->jobid, stepd->stepid, &protocol_version);
+	if (fd < 0) {
+		/* It's normal for a step to exit */
+		debug3("unable to connect to step %u.%u on %s: %m",
+				stepd->jobid, stepd->stepid, stepd->nodename);
+		return -1;
+	}
+
+	uid = stepd_get_uid(fd, stepd->protocol_version);
+	close(fd);
+
+	/* The step may have exited. Not a big concern. */
+	/* BUG: uid_t on Linux is unsigned but stepd_get_uid can return -1 */
+	if ((int32_t)uid == -1)
+		debug3("unable to determine uid of step %u.%u on %s",
+				stepd->jobid, stepd->stepid, stepd->nodename);
+
+	return uid;
+}
+
+static int _indeterminate_multiple(pam_handle_t *pamh, List steps, uid_t uid,
+		uint32_t *job_id)
+{
+	ListIterator itr = NULL;
+	int rc = SLURM_FAILURE;
+	step_loc_t *stepd = NULL;
+
+	if (opts.action_unknown == CALLERID_ACTION_DENY) {
+		debug("Denying due to action_unknown=deny");
+		send_user_msg(pamh,
+			      "Access denied by "
+			      PAM_MODULE_NAME
+			      ": unable to determine source job");
+		return PAM_PERM_DENIED;
+	} else if (opts.action_unknown == CALLERID_ACTION_IGNORE) {
+		debug("Allowing due to action_unknown=ignore");
+		return PAM_SUCCESS;
+	}
+
+	itr = list_iterator_create(steps);
+	while ((stepd = list_next(itr))) {
+		if (uid == _get_job_uid(stepd)) {
+			*job_id = stepd->jobid;
+			rc = SLURM_SUCCESS;
+			break;
+		}
+	}
+
+	/* No jobs from this user exist on this node */
+	if (rc != SLURM_SUCCESS) {
+		if (opts.action_no_jobs == CALLERID_ACTION_DENY) {
+			debug("uid %u owns no jobs => deny", uid);
+			send_user_msg(pamh,
+				      "Access denied by "
+				      PAM_MODULE_NAME
+				      ": you have no active jobs on this node");
+			rc = PAM_PERM_DENIED;
+		} else {
+			debug("uid %u owns no jobs but action_no_jobs=ignore",
+					uid);
+			rc = PAM_IGNORE;
+		}
+	}
+
+	list_iterator_destroy(itr);
+	return rc;
+}
+
+static int _single_job_check(List steps, uid_t uid, uint32_t *job_id)
+{
+	ListIterator itr = NULL;
+	int user_job_cnt = 0, rc = SLURM_FAILURE;
+	step_loc_t *stepd = NULL;
+
+	*job_id = (uint32_t)NO_VAL;
+
+	itr = list_iterator_create(steps);
+	while ((stepd = list_next(itr))) {
+		if (uid == _get_job_uid(stepd)) {
+			/* We found a job from the user but we want to ignore
+			 * duplicates due to multiple steps from the same job */
+			if (*job_id != stepd->jobid) {
+				user_job_cnt++;
+				*job_id = stepd->jobid;
+				rc = SLURM_SUCCESS;
+			}
+		}
+		if(user_job_cnt > 1) {
+			debug3("_single_job_check: uid %u has multiple jobs on this node",
+					uid);
+			rc = SLURM_FAILURE;
+			break;
+		}
+	}
+	list_iterator_destroy(itr);
+
+	return rc;
+}
+
+static int _rpc_network_callerid(struct callerid_conn *conn, char *user_name,
+		uint32_t *job_id)
+{
+	network_callerid_msg_t req;
+	char ip_src_str[INET6_ADDRSTRLEN];
+	char node_name[MAXHOSTNAMELEN];
+
+	memcpy((void *)&req.ip_src, (void *)&conn->ip_src, 16);
+	memcpy((void *)&req.ip_dst, (void *)&conn->ip_dst, 16);
+	req.port_src = conn->port_src;
+	req.port_dst = conn->port_dst;
+	req.af = conn->af;
+
+	inet_ntop(req.af, &conn->ip_src, ip_src_str, INET6_ADDRSTRLEN);
+	if (slurm_network_callerid(req, job_id, node_name, MAXHOSTNAMELEN)
+			!= SLURM_SUCCESS) {
+		debug("From %s port %d as %s: unable to retrieve callerid data from remote slurmd",
+		     ip_src_str,
+		     req.port_src,
+		     user_name);
+		return SLURM_FAILURE;
+	} else if (*job_id == (uint32_t)NO_VAL) {
+		debug("From %s port %d as %s: job indeterminate",
+		     ip_src_str,
+		     req.port_src,
+		     user_name);
+		return SLURM_FAILURE;
+	} else {
+		info("From %s port %d as %s: member of job %u",
+		      ip_src_str,
+		      req.port_src,
+		      user_name,
+		      *job_id);
+		return SLURM_SUCCESS;
+	}
+}
+
+/* Use the pam logging function for now since normal logging is not yet
+ * initialized */
+log_level_t _parse_log_level(pam_handle_t *pamh, const char *log_level_str)
+{
+	unsigned int u;
+	char *endptr;
+
+	u = (unsigned int)strtoul(log_level_str, &endptr, 0);
+	if (endptr && endptr[0]) {
+		/* not an integer */
+		if (!strcasecmp(log_level_str, "quiet"))
+			u = LOG_LEVEL_QUIET;
+		else if(!strcasecmp(log_level_str, "fatal"))
+			u = LOG_LEVEL_FATAL;
+		else if(!strcasecmp(log_level_str, "error"))
+			u = LOG_LEVEL_ERROR;
+		else if(!strcasecmp(log_level_str, "info"))
+			u = LOG_LEVEL_INFO;
+		else if(!strcasecmp(log_level_str, "verbose"))
+			u = LOG_LEVEL_VERBOSE;
+		else if(!strcasecmp(log_level_str, "debug"))
+			u = LOG_LEVEL_DEBUG;
+		else if(!strcasecmp(log_level_str, "debug2"))
+			u = LOG_LEVEL_DEBUG2;
+		else if(!strcasecmp(log_level_str, "debug3"))
+			u = LOG_LEVEL_DEBUG3;
+		else if(!strcasecmp(log_level_str, "debug4"))
+			u = LOG_LEVEL_DEBUG4;
+		else if(!strcasecmp(log_level_str, "debug5"))
+			u = LOG_LEVEL_DEBUG5;
+		else if(!strcasecmp(log_level_str, "sched"))
+			u = LOG_LEVEL_SCHED;
+		else {
+			pam_syslog(pamh,
+				   LOG_ERR,
+				   "unrecognized log level %s, setting to max",
+				   log_level_str);
+			/* We'll set it to the highest logging
+			 * level, just to be sure */
+			u = (unsigned int)LOG_LEVEL_END - 1;
+		}
+	} else {
+		/* An integer was specified */
+		if (u >= LOG_LEVEL_END) {
+			pam_syslog(pamh,
+				   LOG_ERR,
+				   "log level %u too high, lowering to max", u);
+			u = (unsigned int)LOG_LEVEL_END - 1;
+		}
+	}
+	return u;
+}
+
+/* Use the pam logging function for now, so we need pamh */
+static void _parse_opts(pam_handle_t *pamh, int argc, const char **argv)
+{
+	char *v;
+
+	for (; argc-- > 0; ++argv) {
+		if (!strncasecmp(*argv, "single_job_skip_rpc=0", 18))
+			opts.single_job_skip_rpc = 0;
+		else if (!strncasecmp(*argv, "ignore_root=0", 13))
+			opts.ignore_root = 0;
+		else if (!strncasecmp(*argv,"action_unknown=",15)) {
+			v = (char *)(15 + *argv);
+			if (!strncasecmp(v, "ignore", 6))
+				opts.action_unknown = CALLERID_ACTION_IGNORE;
+			else if (!strncasecmp(v, "any", 3))
+				opts.action_unknown = CALLERID_ACTION_ANY;
+			else if (!strncasecmp(v, "deny", 4))
+				opts.action_unknown = CALLERID_ACTION_DENY;
+			else {
+				pam_syslog(pamh,
+					   LOG_ERR,
+					   "unrecognized action_unknown=%s, setting to 'any'",
+					   v);
+			}
+		} else if (!strncasecmp(*argv, "log_level=", 10)) {
+			v = (char *)(10 + *argv);
+			opts.log_level = _parse_log_level(pamh, v);
+		}
+	}
+
+}
+
+static void _log_init(log_level_t level)
+{
+	log_options_t logopts = LOG_OPTS_INITIALIZER;
+
+	logopts.stderr_level  = LOG_LEVEL_FATAL;
+	logopts.syslog_level  = level;
+	log_init(PAM_MODULE_NAME, logopts, LOG_AUTHPRIV, NULL);
+}
+
+/* Parse arguments, etc then get my socket address/port information. Attempt to
+ * adopt this process into a job in the following order:
+ * 	1) If the user has only one job on the node, pick that one
+ * 	2) Send RPC to source IP of socket. If there is a slurmd at the IP
+ * 		address, ask it which job I belong to. On success, pick that one
+ *	3) Pick a job semi-randomly (default) or skip the adoption (if
+ *		configured)
+*/
+PAM_EXTERN int pam_sm_acct_mgmt(pam_handle_t *pamh, int flags
+		__attribute__((unused)), int argc, const char **argv)
+{
+	int retval = PAM_IGNORE, rc = PAM_IGNORE;
+	char *user_name;
+	struct callerid_conn conn;
+	uint32_t job_id;
+	char ip_src_str[INET6_ADDRSTRLEN];
+	List steps = NULL;
+	struct passwd pwd, *pwd_result;
+	char *buf = NULL;
+	int bufsize;
+
+	_init_opts();
+	_parse_opts(pamh, argc, argv);
+	_log_init(opts.log_level);
+
+	retval = pam_get_item(pamh, PAM_USER, (void *) &user_name);
+	if (user_name == NULL || retval != PAM_SUCCESS)  {
+		pam_syslog(pamh, LOG_ERR, "No username in PAM_USER? Fail!");
+		return PAM_SESSION_ERR;
+	}
+
+	/* Check for an unsafe config that might lock out root. This is a very
+	 * basic check that shouldn't be 100% relied on */
+	if (!opts.ignore_root &&
+			(opts.action_unknown == CALLERID_ACTION_DENY ||
+			opts.action_no_jobs != CALLERID_ACTION_IGNORE ||
+			opts.action_adopt_failure != CALLERID_ACTION_IGNORE ||
+			opts.action_generic_failure != CALLERID_ACTION_IGNORE
+			)) {
+		/* Let's get verbose */
+		info("===============================");
+		info("Danger!!!");
+		info("A crazy admin set ignore_root=0 and some unsafe actions");
+		info("You might lock out root!");
+		info("If this is desirable, modify the source code");
+		info("Setting ignore_root=1 and continuing");
+		opts.ignore_root = 1;
+	}
+
+	/* Ignoring root is probably best, but the admin can allow it */
+	if (!strcmp(user_name, "root")) {
+		if (opts.ignore_root) {
+			info("Ignoring root user");
+			return PAM_IGNORE;
+		} else {
+			/* This administrator is crazy */
+			info("Danger!!! This is a connection attempt by root and ignore_root=0 is set! Hope for the best!");
+		}
+	}
+
+	/* Calculate buffer size for getpwnam_r */
+	bufsize = sysconf(_SC_GETPW_R_SIZE_MAX);
+	if (bufsize == -1)
+		bufsize = 16384; /* take a large guess */
+
+	buf = xmalloc(bufsize);
+	retval = getpwnam_r(user_name, &pwd, buf, bufsize, &pwd_result);
+	if (pwd_result == NULL) {
+		if (retval == 0) {
+			error("getpwnam_r could not locate %s", user_name);
+		} else {
+			errno = retval;
+			error("getpwnam_r: %m");
+		}
+
+		xfree(buf);
+		return PAM_SESSION_ERR;
+	}
+
+	/* Check my fds for a network socket */
+	if (callerid_get_own_netinfo(&conn) != SLURM_SUCCESS) {
+		/* We could press on for the purposes of the single- or
+		 * multi-job checks, but the RPC will surely fail. If we
+		 * continued we'd have to fill in junk for lots of variables */
+		error("Unable to find network socket");
+		rc = PAM_IGNORE;
+		goto cleanup;
+	}
+
+	if (inet_ntop(conn.af, &conn.ip_src, ip_src_str, INET6_ADDRSTRLEN)
+			== NULL) {
+		/* This is really odd. If this failed, other functions are so
+		 * likely to fail that we might as well exit */
+		error("inet_ntop failed");
+		rc = PAM_IGNORE;
+		goto cleanup;
+	}
+
+	/* Get a list of steps on the node. A failure here likely means failures
+	 * everywhere so exit on failure or if no local jobs exist */
+	steps = stepd_available(NULL, NULL);
+	if (!steps) {
+		error("Error obtaining local step information. Fail.");
+		rc = PAM_IGNORE;
+		goto cleanup;
+	} else if (list_count(steps) == 0) {
+		info("No steps on this node from any user");
+		if (opts.action_no_jobs == CALLERID_ACTION_DENY) {
+			send_user_msg(pamh,
+				      "Access denied by "
+				      PAM_MODULE_NAME
+				      ": you have no active jobs on this node");
+			rc = PAM_PERM_DENIED;
+		} else {
+			info("uid %u owns no jobs but action_no_jobs=ignore",
+					pwd.pw_uid);
+			rc = PAM_IGNORE;
+		}
+
+		goto cleanup;
+	}
+
+	/* Check to see if this user has only one job on the node. If so, choose
+	 * that job and adopt this process into it (unless configured not to) */
+	if (opts.single_job_skip_rpc) {
+		if (_single_job_check(steps, pwd.pw_uid, &job_id)
+				== SLURM_SUCCESS) {
+			debug("From %s port %d as %s: _single_job_check succeeded",
+			      ip_src_str,
+			      conn.port_src,
+			      user_name);
+
+			info("From %s port %d as %s: member of job %u",
+			     ip_src_str,
+			     conn.port_src,
+			     user_name,
+			     job_id);
+			rc = _adopt_process(getpid(), job_id);
+			goto cleanup;
+		} else {
+			debug("From %s port %d as %s: _single_job_check failed",
+			      ip_src_str,
+			      conn.port_src,
+			      user_name);
+		}
+	}
+
+	/* Single job check failed or wasn't used. Ask the slurmd (if any) at
+	 * the source IP address about this connection */
+	rc = _rpc_network_callerid(&conn, user_name, &job_id);
+	if (rc == SLURM_SUCCESS) {
+		rc = _adopt_process(getpid(), job_id);
+		goto cleanup;
+	}
+
+	info("From %s port %d as %s: unable to determine source job",
+	     ip_src_str,
+	     conn.port_src,
+	     user_name);
+
+	/* Both the single job check and the RPC call have failed to ascertain
+	 * the correct job to adopt this into. Time for drastic measures */
+	rc = _indeterminate_multiple(pamh, steps, pwd.pw_uid, &job_id);
+	if (rc == SLURM_SUCCESS) {
+		info("From %s port %d as %s: picked job %u",
+		     ip_src_str,
+		     conn.port_src,
+		     user_name,
+		     job_id);
+		rc = _adopt_process(getpid(), job_id);
+	} else {
+		/* This pam module was worthless, apparently */
+		debug("_indeterminate_multiple failed to find a job to adopt this into");
+	}
+
+cleanup:
+	FREE_NULL_LIST(steps);
+	xfree(buf);
+	return rc;
+}
+
+#ifdef PAM_STATIC
+struct pam_module _pam_slurm_adopt_modstruct = {
+	 PAM_MODULE_NAME,
+	 NULL,
+	 NULL,
+	 pam_sm_acct_mgmt,
+	 NULL,
+	 NULL,
+	 NULL,
+};
+#endif
diff --git a/contribs/perlapi/Makefile.am b/contribs/perlapi/Makefile.am
index 40c6a98b0..b6736a91c 100644
--- a/contribs/perlapi/Makefile.am
+++ b/contribs/perlapi/Makefile.am
@@ -1 +1,2 @@
 SUBDIRS = libslurm libslurmdb
+EXTRA_DIST = common/msg.h
diff --git a/contribs/perlapi/Makefile.in b/contribs/perlapi/Makefile.in
index e51f8325e..92b34ad64 100644
--- a/contribs/perlapi/Makefile.in
+++ b/contribs/perlapi/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -249,6 +252,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -298,8 +303,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -318,6 +327,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -361,6 +373,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -384,6 +397,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -446,6 +460,7 @@ top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 SUBDIRS = libslurm libslurmdb
+EXTRA_DIST = common/msg.h
 all: all-recursive
 
 .SUFFIXES:
diff --git a/contribs/perlapi/common/msg.h b/contribs/perlapi/common/msg.h
index 9609d10ea..0a6d78c49 100644
--- a/contribs/perlapi/common/msg.h
+++ b/contribs/perlapi/common/msg.h
@@ -219,6 +219,20 @@ inline static int hv_store_int(HV* hv, const char *key, int val)
 	return 0;
 }
 
+/*
+ * store a double
+ */
+inline static int hv_store_double(HV* hv, const char *key, double val)
+{
+	SV* sv = newSVnv(val);
+
+	if (!key || hv_store(hv, key, (I32)strlen(key), sv, 0) == NULL) {
+		SvREFCNT_dec(sv);
+		return -1;
+	}
+	return 0;
+}
+
 /*
  * store a signed 32b int into HV
  */
diff --git a/contribs/perlapi/libslurm/Makefile.am b/contribs/perlapi/libslurm/Makefile.am
index 7441df6f1..cbcbb7e5c 100644
--- a/contribs/perlapi/libslurm/Makefile.am
+++ b/contribs/perlapi/libslurm/Makefile.am
@@ -53,6 +53,9 @@ test_sources = \
 	$(perl_dir)/t/22-list.t \
 	$(perl_dir)/t/23-bitstr.t
 
+EXTRA_DIST = $(perl_sources) $(test_sources)
+
+
 $(perl_dir)/Makefile:	$(perl_dir)/Makefile.PL
 	@if test "x${top_srcdir}" != "x${top_builddir}"; then \
 		for f in ${perl_sources}; do \
diff --git a/contribs/perlapi/libslurm/Makefile.in b/contribs/perlapi/libslurm/Makefile.in
index 8dee57319..2ea804452 100644
--- a/contribs/perlapi/libslurm/Makefile.in
+++ b/contribs/perlapi/libslurm/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -189,6 +192,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -238,8 +243,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -258,6 +267,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -301,6 +313,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -324,6 +337,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -441,6 +455,7 @@ test_sources = \
 	$(perl_dir)/t/22-list.t \
 	$(perl_dir)/t/23-bitstr.t
 
+EXTRA_DIST = $(perl_sources) $(test_sources)
 AM_CPPFLAGS = \
 	-DVERSION=\"$(VERSION)\" \
 	-I$(top_srcdir) \
diff --git a/contribs/perlapi/libslurm/perl/Slurm.xs b/contribs/perlapi/libslurm/perl/Slurm.xs
index 96885813e..41b67493a 100644
--- a/contribs/perlapi/libslurm/perl/Slurm.xs
+++ b/contribs/perlapi/libslurm/perl/Slurm.xs
@@ -8,10 +8,15 @@
 #include <signal.h>
 #include <string.h>
 #include <unistd.h>
+
 #include "slurm-perl.h"
 #include "bitstr.h"
 
+extern void slurm_conf_reinit(char *pathname);
 
+/* Custom typemap that free's memory after copying to perl stack. */
+typedef char char_xfree;
+typedef char char_free;
 
 struct slurm {
 };
@@ -84,9 +89,6 @@ slurm_get_errno(slurm_t self)
 
 char *
 slurm_strerror(slurm_t self, int errnum=0)
-	PREINIT:
-		char *tmp_str;
-		int n;
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
@@ -95,10 +97,7 @@ slurm_strerror(slurm_t self, int errnum=0)
 			    */
 		if (errnum == 0)
 			errnum = slurm_get_errno();
-		tmp_str = slurm_strerror(errnum);
-		n = strlen(tmp_str) + 1;
-		New(0, RETVAL, n, char);
-		Copy(tmp_str, RETVAL, n, char);
+		RETVAL = slurm_strerror(errnum);
 	OUTPUT:
 		RETVAL
 
@@ -111,20 +110,13 @@ slurm_strerror(slurm_t self, int errnum=0)
 
 char *
 slurm_preempt_mode_string(slurm_t self, uint16_t preempt_mode);
-	PREINIT:
-		char *tmp_str;
-		int len;
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
 			      out of the mix Slurm-> doesn't work,
 			      only Slurm::
 			    */
-		tmp_str = slurm_preempt_mode_string(preempt_mode);
-		len = strlen(tmp_str) + 1;
-		New(0, RETVAL, len, char);
-		Copy(tmp_str, RETVAL, len, char);
-		/* tmp_str is static data */
+		RETVAL = slurm_preempt_mode_string(preempt_mode);
 	OUTPUT:
 		RETVAL
 
@@ -141,58 +133,37 @@ slurm_preempt_mode_num(slurm_t self, char *preempt_mode)
 
 char *
 slurm_job_reason_string(slurm_t self, uint32_t inx)
-	PREINIT:
-		char *tmp_str;
-		int len;
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
 			      out of the mix Slurm-> doesn't work,
 			      only Slurm::
 			    */
-		tmp_str = slurm_job_reason_string(inx);
-		len = strlen(tmp_str) + 1;
-		New(0, RETVAL, len, char);
-		Copy(tmp_str, RETVAL, len, char);
-		/* tmp_str is static data */
+		RETVAL = slurm_job_reason_string(inx);
 	OUTPUT:
 		RETVAL
 
 char *
-slurm_job_state_string(slurm_t self, uint16_t inx)
-	PREINIT:
-		char *tmp_str;
-		int len;
+slurm_job_state_string(slurm_t self, uint32_t inx)
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
 			      out of the mix Slurm-> doesn't work,
 			      only Slurm::
 			    */
-		tmp_str = slurm_job_state_string(inx);
-		len = strlen(tmp_str) + 1;
-		New(0, RETVAL, len, char);
-		Copy(tmp_str, RETVAL, len, char);
-		/* tmp_str is static data */
+		RETVAL = slurm_job_state_string(inx);
 	OUTPUT:
 		RETVAL
 
 char *
-slurm_job_state_string_compact(slurm_t self, uint16_t inx)
-	PREINIT:
-		char *tmp_str;
-		int len;
+slurm_job_state_string_compact(slurm_t self, uint32_t inx)
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
 			      out of the mix Slurm-> doesn't work,
 			      only Slurm::
 			    */
-		tmp_str = slurm_job_state_string_compact(inx);
-		len = strlen(tmp_str) + 1;
-		New(0, RETVAL, len, char);
-		Copy(tmp_str, RETVAL, len, char);
-		/* tmp_str is static data */
+		RETVAL = slurm_job_state_string_compact(inx);
 	OUTPUT:
 		RETVAL
 
@@ -207,143 +178,105 @@ slurm_job_state_num(slurm_t self, char *state_name)
 			      only Slurm::
 			    */
 
-char *
+char_xfree *
 slurm_reservation_flags_string(slurm_t self, uint16_t flags)
-	PREINIT:
-		char *tmp_str;
-		int len;
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
 			      out of the mix Slurm-> doesn't work,
 			      only Slurm::
 			    */
-		tmp_str = slurm_reservation_flags_string(flags);
-		len = strlen(tmp_str) + 1;
-		New(0, RETVAL, len, char);
-		Copy(tmp_str, RETVAL, len, char);
-		xfree(tmp_str);
+		RETVAL = slurm_reservation_flags_string(flags);
 	OUTPUT:
 		RETVAL
 
 char *
 slurm_node_state_string(slurm_t self, uint32_t inx)
-	PREINIT:
-		char *tmp_str;
-		int len;
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
 			      out of the mix Slurm-> doesn't work,
 			      only Slurm::
 			    */
-		tmp_str = slurm_node_state_string(inx);
-		len = strlen(tmp_str) + 1;
-		New(0, RETVAL, len, char);
-		Copy(tmp_str, RETVAL, len, char);
-		/* tmp_str is static data */
+		RETVAL = slurm_node_state_string(inx);
 	OUTPUT:
 		RETVAL
 
 char *
 slurm_node_state_string_compact(slurm_t self, uint32_t inx)
-	PREINIT:
-		char *tmp_str;
-		int len;
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
 			      out of the mix Slurm-> doesn't work,
 			      only Slurm::
 			    */
-		tmp_str = slurm_node_state_string_compact(inx);
-		len = strlen(tmp_str) + 1;
-		New(0, RETVAL, len, char);
-		Copy(tmp_str, RETVAL, len, char);
-		/* tmp_str is static data */
+		RETVAL = slurm_node_state_string_compact(inx);
 	OUTPUT:
 		RETVAL
 
 char *
 slurm_private_data_string(slurm_t self, uint16_t private_data)
+	PREINIT:
+		char tmp_str[128];
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
 			      out of the mix Slurm-> doesn't work,
 			      only Slurm::
 			    */
-		New(0, RETVAL, 64, char);
-		slurm_private_data_string(private_data, RETVAL, 64);
+		slurm_private_data_string(private_data, tmp_str, sizeof(tmp_str));
+		RETVAL = tmp_str;
 	OUTPUT:
 		RETVAL
 
 char *
 slurm_accounting_enforce_string(slurm_t self, uint16_t enforce)
+	PREINIT:
+		char tmp_str[128];
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
 			      out of the mix Slurm-> doesn't work,
 			      only Slurm::
 			    */
-		New(0, RETVAL, 32, char);
-		slurm_accounting_enforce_string(enforce, RETVAL, 32);
+		slurm_accounting_enforce_string(enforce, tmp_str, sizeof(tmp_str));
+		RETVAL = tmp_str;
 	OUTPUT:
 		RETVAL
 
 char *
 slurm_conn_type_string(slurm_t self, uint16_t conn_type)
-	PREINIT:
-		char *tmp_str;
-		int len;
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
 			      out of the mix Slurm-> doesn't work,
 			      only Slurm::
 			    */
-		tmp_str = slurm_conn_type_string((enum connection_type)conn_type);
-		len = strlen(tmp_str) + 1;
-		New(0, RETVAL, len, char);
-		Copy(tmp_str, RETVAL, len, char);
-		/* tmp_str is static data */
+		RETVAL = slurm_conn_type_string((enum connection_type)conn_type);
 	OUTPUT:
 		RETVAL
 
 char *
 slurm_node_use_string(slurm_t self, uint16_t node_use)
-	PREINIT:
-		char *tmp_str;
-		int len;
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
 			      out of the mix Slurm-> doesn't work,
 			      only Slurm::
 			    */
-		tmp_str = slurm_node_use_string((enum node_use_type)node_use);
-		len = strlen(tmp_str) + 1;
-		New(0, RETVAL, len, char);
-		Copy(tmp_str, RETVAL, len, char);
-		/* tmp_str is static data */
+		RETVAL = slurm_node_use_string((enum node_use_type)node_use);
 	OUTPUT:
 		RETVAL
 
 char *
 slurm_bg_block_state_string(slurm_t self, uint16_t state)
-	PREINIT:
-		char *tmp_str;
-		int len;
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
 			      out of the mix Slurm-> doesn't work,
 			      only Slurm::
 			    */
-		tmp_str = slurm_bg_block_state_string(state);
-		len = strlen(tmp_str) + 1;
-		New(0, RETVAL, len, char);
-		Copy(tmp_str, RETVAL, len, char);
-		/* tmp_str is static data */
+		RETVAL = slurm_bg_block_state_string(state);
 	OUTPUT:
 		RETVAL
 
@@ -392,11 +325,10 @@ slurm_print_block_info(slurm_t self, FILE *out, HV *block_info, int one_liner=0)
 	C_ARGS:
 		out, &bi, one_liner
 
-char *
+char_xfree *
 slurm_sprint_block_info(slurm_t self, HV *block_info, int one_liner=0)
 	PREINIT:
 		block_info_t bi;
-		char *tmp_str = NULL;
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
@@ -406,10 +338,7 @@ slurm_sprint_block_info(slurm_t self, HV *block_info, int one_liner=0)
 		if(hv_to_block_info(block_info, &bi) < 0) {
 			XSRETURN_UNDEF;
 		}
-		tmp_str = slurm_sprint_block_info(&bi, one_liner);
-		New(0, RETVAL, strlen(tmp_str) + 1, char);
-		Copy(tmp_str, RETVAL, strlen(tmp_str) + 1, char);
-		xfree(tmp_str);
+		RETVAL = slurm_sprint_block_info(&bi, one_liner);
 	OUTPUT:
 		RETVAL
 
@@ -580,23 +509,17 @@ slurm_allocation_lookup_lite(slurm_t self, uint32_t job_id)
 	OUTPUT:
 		RETVAL
 
-char *
+char_free *
 slurm_read_hostfile(slurm_t self, char *filename, int n)
-	PREINIT:
-		char *hostlist;
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
 			      out of the mix Slurm-> doesn't work,
 			      only Slurm::
 			    */
-		hostlist = slurm_read_hostfile(filename, n);
-		if(hostlist == NULL) {
+		RETVAL = slurm_read_hostfile(filename, n);
+		if(RETVAL == NULL) {
 			XSRETURN_UNDEF;
-		} else {
-			New(0, RETVAL, strlen(hostlist) + 1, char);
-			Copy(hostlist, RETVAL, strlen(hostlist) + 1, char);
-			free(hostlist);
 		}
 	OUTPUT:
 		RETVAL
@@ -675,7 +598,7 @@ slurm_job_will_run(slurm_t self, HV *job_desc)
 		RETVAL
 
 HV *
-slurm_sbcast_lookup(slurm_t self, uint32_t job_id)
+slurm_sbcast_lookup(slurm_t self, uint32_t job_id, uint32_t step_id)
 	PREINIT:
 		job_sbcast_cred_msg_t *info;
 		int rc;
@@ -685,7 +608,7 @@ slurm_sbcast_lookup(slurm_t self, uint32_t job_id)
 			      out of the mix Slurm-> doesn't work,
 			      only Slurm::
 			    */
-		rc = slurm_sbcast_lookup(job_id, &info);
+		rc = slurm_sbcast_lookup(job_id, step_id, &info);
 		if (rc == SLURM_SUCCESS) {
 			RETVAL = newHV();
 			sv_2mortal((SV*)RETVAL);
@@ -1452,7 +1375,7 @@ slurm_print_job_info_msg(slurm_t self, FILE *out, HV *job_info_msg, int one_line
 	CLEANUP:
 		xfree(ji_msg.job_array);
 
-char *
+char_xfree *
 slurm_sprint_job_info(slurm_t self, HV *job_info, int one_liner=0)
 	PREINIT:
 		job_info_t ji;
@@ -1470,9 +1393,7 @@ slurm_sprint_job_info(slurm_t self, HV *job_info, int one_liner=0)
 		xfree(ji.exc_node_inx);
 		xfree(ji.node_inx);
 		xfree(ji.req_node_inx);
-		New(0, RETVAL, strlen(tmp_str) + 1, char);
-		Copy(tmp_str, RETVAL, strlen(tmp_str) + 1, char);
-		xfree(tmp_str);
+		RETVAL = tmp_str;
 	OUTPUT:
 		RETVAL
 
@@ -1568,11 +1489,10 @@ slurm_print_job_step_info(slurm_t self, FILE *out, HV *step_info, int one_liner=
 	CLEANUP:
 		xfree(si.node_inx);
 
-char *
+char_xfree *
 slurm_sprint_job_step_info(slurm_t self, HV *step_info, int one_liner=0)
 	PREINIT:
 		job_step_info_t si;
-		char *tmp_str = NULL;
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
@@ -1582,11 +1502,8 @@ slurm_sprint_job_step_info(slurm_t self, HV *step_info, int one_liner=0)
 		if(hv_to_job_step_info(step_info, &si) < 0) {
 			XSRETURN_UNDEF;
 		}
-		tmp_str = slurm_sprint_job_step_info(&si, one_liner);
+		RETVAL = slurm_sprint_job_step_info(&si, one_liner);
 		xfree(si.node_inx);
-		New(0, RETVAL, strlen(tmp_str) + 1, char);
-		Copy(tmp_str, RETVAL, strlen(tmp_str) + 1, char);
-		xfree(tmp_str);
 	OUTPUT:
 		RETVAL
 
@@ -1753,11 +1670,10 @@ slurm_print_node_table(slurm_t self, FILE *out, HV *node_info, int node_scaling=
 	C_ARGS:
 		out, &ni, node_scaling, one_liner
 
-char *
+char_xfree *
 slurm_sprint_node_table(slurm_t self, HV *node_info, int node_scaling=1, int one_liner=0)
 	PREINIT:
 		node_info_t ni;
-		char *tmp_str = NULL;
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
@@ -1767,10 +1683,7 @@ slurm_sprint_node_table(slurm_t self, HV *node_info, int node_scaling=1, int one
 		if(hv_to_node_info(node_info, &ni) < 0) {
 			XSRETURN_UNDEF;
 		}
-		tmp_str = slurm_sprint_node_table(&ni, node_scaling, one_liner);
-		New(0, RETVAL, strlen(tmp_str) + 1, char);
-		Copy(tmp_str, RETVAL, strlen(tmp_str) + 1, char);
-		xfree(tmp_str);
+		RETVAL = slurm_sprint_node_table(&ni, node_scaling, one_liner);
 	OUTPUT:
 		RETVAL
 
@@ -2062,11 +1975,10 @@ slurm_print_partition_info(slurm_t self, FILE *out, HV *part_info, int one_liner
 	CLEANUP:
 		xfree(pi.node_inx);
 
-char *
+char_xfree *
 slurm_sprint_partition_info(slurm_t self, HV *part_info, int one_liner=0)
 	PREINIT:
 		partition_info_t pi;
-		char *tmp_str = NULL;
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
@@ -2076,11 +1988,8 @@ slurm_sprint_partition_info(slurm_t self, HV *part_info, int one_liner=0)
 		if(hv_to_partition_info(part_info, &pi) < 0) {
 			XSRETURN_UNDEF;
 		}
-		tmp_str = slurm_sprint_partition_info(&pi, one_liner);
+		RETVAL = slurm_sprint_partition_info(&pi, one_liner);
 		xfree(pi.node_inx);
-		New(0, RETVAL, strlen(tmp_str) + 1, char);
-		Copy(tmp_str, RETVAL, strlen(tmp_str) + 1, char);
-		xfree(tmp_str);
 	OUTPUT:
 		RETVAL
 
@@ -2163,7 +2072,7 @@ slurm_load_reservations(slurm_t self, time_t update_time=0)
 	OUTPUT:
 		RETVAL
 
-char *
+char_free *
 slurm_create_reservation(slurm_t self, HV *res_info)
 	PREINIT:
 		resv_desc_msg_t resv_msg;
@@ -2260,11 +2169,10 @@ slurm_print_reservation_info(slurm_t self, FILE *out, HV *resv_info, int one_lin
 	CLEANUP:
 		xfree(ri.node_inx);
 
-char *
+char_xfree *
 slurm_sprint_reservation_info(slurm_t self, HV *resv_info, int one_liner=0)
 	PREINIT:
 		reserve_info_t ri;
-		char *tmp_str = NULL;
 	CODE:
 		if (self); /* this is needed to avoid a warning about
 			      unused variables.  But if we take slurm_t self
@@ -2274,11 +2182,8 @@ slurm_sprint_reservation_info(slurm_t self, HV *resv_info, int one_liner=0)
 		if(hv_to_reserve_info(resv_info, &ri) < 0) {
 			XSRETURN_UNDEF;
 		}
-		tmp_str = slurm_sprint_reservation_info(&ri, one_liner);
+		RETVAL = slurm_sprint_reservation_info(&ri, one_liner);
 		xfree(ri.node_inx);
-		New(0, RETVAL, strlen(tmp_str) + 1, char);
-		Copy(tmp_str, RETVAL, strlen(tmp_str) + 1, char);
-		xfree(tmp_str);
 	OUTPUT:
 		RETVAL
 
@@ -2608,35 +2513,23 @@ slurm_hostlist_push(hostlist_t hl, char* hosts)
 int
 slurm_hostlist_push_host(hostlist_t hl, char* host)
 
-char*
+char_xfree *
 slurm_hostlist_ranged_string(hostlist_t hl)
-	PREINIT:
-		char *tmp_str;
-		int len;
 	CODE:
-		tmp_str = slurm_hostlist_ranged_string_xmalloc(hl);
-		if (tmp_str == NULL) {
+		RETVAL = slurm_hostlist_ranged_string_xmalloc(hl);
+		if (RETVAL == NULL) {
 			XSRETURN_UNDEF;
 		}
-		len = strlen(tmp_str) + 1;
-		Newz(0, RETVAL, len, char);
-		Copy(tmp_str, RETVAL, len, char);
-		xfree(tmp_str);
 	OUTPUT:
 		RETVAL
 
-char*
+char_free *
 slurm_hostlist_shift(hostlist_t hl = NULL)
-	PREINIT:
-		char *host = NULL;
 	CODE:
-		host = slurm_hostlist_shift(hl);
-		if (host == NULL) {
+		RETVAL = slurm_hostlist_shift(hl);
+		if (RETVAL == NULL) {
 			XSRETURN_UNDEF;
 		}
-		Newz(0, RETVAL, strlen(host) + 1, char);
-		Copy(host, RETVAL, strlen(host) + 1, char);
-		free(host);
 	OUTPUT:
 		RETVAL
 
diff --git a/contribs/perlapi/libslurm/perl/alloc.c b/contribs/perlapi/libslurm/perl/alloc.c
index f1fa3a2c2..1d3c0dd21 100644
--- a/contribs/perlapi/libslurm/perl/alloc.c
+++ b/contribs/perlapi/libslurm/perl/alloc.c
@@ -151,6 +151,9 @@ hv_to_job_desc_msg(HV *hv, job_desc_msg_t *job_desc)
 	FETCH_FIELD(hv, job_desc, warn_time, uint16_t, FALSE);
 	FETCH_FIELD(hv, job_desc, work_dir, charp, FALSE);
 	/* job constraints: */
+	FETCH_FIELD(hv, job_desc, cpu_freq_min, uint32_t, FALSE);
+	FETCH_FIELD(hv, job_desc, cpu_freq_max, uint32_t, FALSE);
+	FETCH_FIELD(hv, job_desc, cpu_freq_gov, uint32_t, FALSE);
 	FETCH_FIELD(hv, job_desc, cpus_per_task, uint16_t, FALSE);
 	FETCH_FIELD(hv, job_desc, min_cpus, uint32_t, FALSE);
 	FETCH_FIELD(hv, job_desc, max_cpus, uint32_t, FALSE);
diff --git a/contribs/perlapi/libslurm/perl/bitstr.h b/contribs/perlapi/libslurm/perl/bitstr.h
index 567b787cc..bb4d89735 100644
--- a/contribs/perlapi/libslurm/perl/bitstr.h
+++ b/contribs/perlapi/libslurm/perl/bitstr.h
@@ -51,9 +51,12 @@ bitstr_t *slurm_bit_pick_cnt(bitstr_t *b, bitoff_t nbits);
 bitoff_t slurm_bit_get_bit_num(bitstr_t *b, int pos);
 int      slurm_bit_get_pos_num(bitstr_t *b, bitoff_t pos);
 
+#ifdef FREE_NULL_BITMAP
+#undef FREE_NULL_BITMAP
+#endif
+
 #define FREE_NULL_BITMAP(_X)            \
 	 do {                            \
 		if (_X) slurm_bit_free (_X);  \
 			_X      = NULL;         \
 	 } while (0)
-
diff --git a/contribs/perlapi/libslurm/perl/conf.c b/contribs/perlapi/libslurm/perl/conf.c
index 7e2c81f7b..4532a2008 100644
--- a/contribs/perlapi/libslurm/perl/conf.c
+++ b/contribs/perlapi/libslurm/perl/conf.c
@@ -17,20 +17,29 @@ int
 slurm_ctl_conf_to_hv(slurm_ctl_conf_t *conf, HV *hv)
 {
 	STORE_FIELD(hv, conf, last_update, time_t);
-	if (conf->acct_gather_profile_type)
-		STORE_FIELD(hv, conf, acct_gather_profile_type, charp);
-	if (conf->acct_gather_infiniband_type)
-		STORE_FIELD(hv, conf, acct_gather_infiniband_type, charp);
+
+	if (conf->acct_gather_conf)
+		STORE_FIELD(hv, conf, acct_gather_conf, charp);
+	if (conf->acct_gather_energy_type)
+		STORE_FIELD(hv, conf, acct_gather_energy_type, charp);
 	if (conf->acct_gather_filesystem_type)
 		STORE_FIELD(hv, conf, acct_gather_filesystem_type, charp);
-	STORE_FIELD(hv, conf, accounting_storage_enforce, uint16_t);
+	if (conf->acct_gather_infiniband_type)
+		STORE_FIELD(hv, conf, acct_gather_infiniband_type, charp);
+	STORE_FIELD(hv, conf, acct_gather_node_freq, uint16_t);
+	if (conf->acct_gather_profile_type)
+		STORE_FIELD(hv, conf, acct_gather_profile_type, charp);
+	STORE_FIELD(hv, conf, acctng_store_job_comment, uint16_t);
+
 	if (conf->accounting_storage_backup_host)
 		STORE_FIELD(hv, conf, accounting_storage_backup_host, charp);
+	STORE_FIELD(hv, conf, accounting_storage_enforce, uint16_t);
 	if (conf->accounting_storage_host)
 		STORE_FIELD(hv, conf, accounting_storage_host, charp);
 	if (conf->accounting_storage_loc)
 		STORE_FIELD(hv, conf, accounting_storage_loc, charp);
-	/* accounting_storage_pass */
+	if (conf->accounting_storage_pass)
+		STORE_FIELD(hv, conf, accounting_storage_pass, charp);
 	STORE_FIELD(hv, conf, accounting_storage_port, uint32_t);
 	if (conf->accounting_storage_type)
 		STORE_FIELD(hv, conf, accounting_storage_type, charp);
@@ -41,126 +50,217 @@ slurm_ctl_conf_to_hv(slurm_ctl_conf_t *conf, HV *hv)
 		STORE_FIELD(hv, conf, authinfo, charp);
 	if (conf->authtype)
 		STORE_FIELD(hv, conf, authtype, charp);
+
 	if (conf->backup_addr)
 		STORE_FIELD(hv, conf, backup_addr, charp);
 	if (conf->backup_controller)
 		STORE_FIELD(hv, conf, backup_controller, charp);
+
 	STORE_FIELD(hv, conf, batch_start_timeout, uint16_t);
+
+	if (conf->bb_type)
+		STORE_FIELD(hv, conf, bb_type, charp);
+
 	STORE_FIELD(hv, conf, boot_time, time_t);
+
 	if (conf->checkpoint_type)
 		STORE_FIELD(hv, conf, checkpoint_type, charp);
+
+	if (conf->chos_loc)
+		STORE_FIELD(hv, conf, chos_loc, charp);
+
+	if (conf->core_spec_plugin)
+		STORE_FIELD(hv, conf, core_spec_plugin, charp);
+
 	if (conf->cluster_name)
 		STORE_FIELD(hv, conf, cluster_name, charp);
+
 	STORE_FIELD(hv, conf, complete_wait, uint16_t);
 
 	if (conf->control_addr)
 		STORE_FIELD(hv, conf, control_addr, charp);
+
 	if (conf->control_machine)
 		STORE_FIELD(hv, conf, control_machine, charp);
+
+	STORE_FIELD(hv, conf, cpu_freq_def, uint32_t);
+
 	if (conf->crypto_type)
 		STORE_FIELD(hv, conf, crypto_type, charp);
+
 	STORE_FIELD(hv, conf, debug_flags, uint64_t);
+
 	STORE_FIELD(hv, conf, def_mem_per_cpu, uint32_t);
+
 	STORE_FIELD(hv, conf, disable_root_jobs, uint16_t);
-	STORE_FIELD(hv, conf, dynalloc_port, uint16_t);
+
+	STORE_FIELD(hv, conf, eio_timeout, uint16_t);
+
 	STORE_FIELD(hv, conf, enforce_part_limits, uint16_t);
+
 	if (conf->epilog)
 		STORE_FIELD(hv, conf, epilog, charp);
 	STORE_FIELD(hv, conf, epilog_msg_time, uint32_t);
 	if (conf->epilog_slurmctld)
 		STORE_FIELD(hv, conf, epilog_slurmctld, charp);
+
+	if (conf->ext_sensors_conf)
+		STORE_FIELD(hv, conf, ext_sensors_conf, charp);
+	STORE_FIELD(hv, conf, ext_sensors_freq, uint16_t);
 	if (conf->ext_sensors_type)
 		STORE_FIELD(hv, conf, ext_sensors_type, charp);
-	STORE_FIELD(hv, conf, ext_sensors_freq, uint16_t);
 
 	STORE_FIELD(hv, conf, fast_schedule, uint16_t);
+
 	STORE_FIELD(hv, conf, first_job_id, uint32_t);
+	STORE_FIELD(hv, conf, fs_dampening_factor, uint16_t);
+
 	STORE_FIELD(hv, conf, get_env_timeout, uint16_t);
+
 	if (conf->gres_plugins)
 		STORE_FIELD(hv, conf, gres_plugins, charp);
+
 	STORE_FIELD(hv, conf, group_info, uint16_t);
+
 	STORE_FIELD(hv, conf, hash_val, uint32_t);
+
 	STORE_FIELD(hv, conf, health_check_interval, uint16_t);
+
 	STORE_FIELD(hv, conf, health_check_node_state, uint32_t);
+
 	if (conf->health_check_program)
 		STORE_FIELD(hv, conf, health_check_program, charp);
+
 	STORE_FIELD(hv, conf, inactive_limit, uint16_t);
-	if (conf->job_acct_gather_type)
+
+	if (conf->job_acct_gather_freq)
 		STORE_FIELD(hv, conf, job_acct_gather_freq, charp);
+	if (conf->job_acct_gather_params)
+		STORE_FIELD(hv, conf, job_acct_gather_params, charp);
 	if (conf->job_acct_gather_type)
 		STORE_FIELD(hv, conf, job_acct_gather_type, charp);
 
 	if (conf->job_ckpt_dir)
 		STORE_FIELD(hv, conf, job_ckpt_dir, charp);
+
 	if (conf->job_comp_host)
 		STORE_FIELD(hv, conf, job_comp_host, charp);
 	if (conf->job_comp_loc)
 		STORE_FIELD(hv, conf, job_comp_loc, charp);
-	/* job_comp_pass */
+	if (conf->job_comp_pass)
+		STORE_FIELD(hv, conf, job_comp_pass, charp);
 	STORE_FIELD(hv, conf, job_comp_port, uint32_t);
 	if (conf->job_comp_type)
 		STORE_FIELD(hv, conf, job_comp_type, charp);
 	if (conf->job_comp_user)
 		STORE_FIELD(hv, conf, job_comp_user, charp);
+
+	if (conf->job_container_plugin)
+		STORE_FIELD(hv, conf, job_container_plugin, charp);
+
 	if (conf->job_credential_private_key)
 		STORE_FIELD(hv, conf, job_credential_private_key, charp);
 	if (conf->job_credential_public_certificate)
 		STORE_FIELD(hv, conf, job_credential_public_certificate, charp);
+
 	STORE_FIELD(hv, conf, job_file_append, uint16_t);
+
 	STORE_FIELD(hv, conf, job_requeue, uint16_t);
+
 	if (conf->job_submit_plugins)
 		STORE_FIELD(hv, conf, job_submit_plugins, charp);
 
 	STORE_FIELD(hv, conf, keep_alive_time, uint16_t);
+
 	STORE_FIELD(hv, conf, kill_on_bad_exit, uint16_t);
+
 	STORE_FIELD(hv, conf, kill_wait, uint16_t);
+
+	if (conf->launch_type)
+		STORE_FIELD(hv, conf, launch_type, charp);
+
+	if (conf->layouts)
+		STORE_FIELD(hv, conf, layouts, charp);
+
 	if (conf->licenses)
 		STORE_FIELD(hv, conf, licenses, charp);
+	if (conf->licenses_used)
+		STORE_FIELD(hv, conf, licenses_used, charp);
+
+	STORE_FIELD(hv, conf, log_fmt, uint16_t);
+
 	if (conf->mail_prog)
 		STORE_FIELD(hv, conf, mail_prog, charp);
+
 	STORE_FIELD(hv, conf, max_array_sz, uint16_t);
+
 	STORE_FIELD(hv, conf, max_job_cnt, uint16_t);
+	STORE_FIELD(hv, conf, max_job_id, uint32_t);
+
 	STORE_FIELD(hv, conf, max_mem_per_cpu, uint32_t);
+
+	if (conf->max_step_cnt)
+		STORE_FIELD(hv, conf, max_step_cnt, uint32_t);
+
 	STORE_FIELD(hv, conf, max_tasks_per_node, uint16_t);
+
+	if (conf->mem_limit_enforce)
+		STORE_FIELD(hv, conf, mem_limit_enforce, uint16_t);
+
 	STORE_FIELD(hv, conf, min_job_age, uint16_t);
+
 	if (conf->mpi_default)
 		STORE_FIELD(hv, conf, mpi_default, charp);
 	if (conf->mpi_params)
 		STORE_FIELD(hv, conf, mpi_params, charp);
+
 	STORE_FIELD(hv, conf, msg_timeout, uint16_t);
+
 	STORE_FIELD(hv, conf, next_job_id, uint32_t);
 
 	if (conf->node_prefix)
 		STORE_FIELD(hv, conf, node_prefix, charp);
+
 	STORE_FIELD(hv, conf, over_time_limit, uint16_t);
+
 	if (conf->plugindir)
 		STORE_FIELD(hv, conf, plugindir, charp);
+
 	if (conf->plugstack)
 		STORE_FIELD(hv, conf, plugstack, charp);
+
+	if (conf->power_parameters)
+		STORE_FIELD(hv, conf, power_parameters, charp);
+
 	STORE_FIELD(hv, conf, preempt_mode, uint16_t);
+
 	if (conf->preempt_type)
 		STORE_FIELD(hv, conf, preempt_type, charp);
+
 	STORE_FIELD(hv, conf, priority_calc_period, uint32_t);
 	STORE_FIELD(hv, conf, priority_decay_hl, uint32_t);
 	STORE_FIELD(hv, conf, priority_favor_small, uint16_t);
+	STORE_FIELD(hv, conf, priority_flags, uint16_t);
 	STORE_FIELD(hv, conf, priority_max_age, uint32_t);
 	if (conf->priority_params)
 		STORE_FIELD(hv, conf, priority_params, charp);
 	STORE_FIELD(hv, conf, priority_reset_period, uint16_t);
 	if (conf->priority_type)
 		STORE_FIELD(hv, conf, priority_type, charp);
-	STORE_FIELD(hv, conf, prolog_flags, uint16_t);
-
 	STORE_FIELD(hv, conf, priority_weight_age, uint32_t);
 	STORE_FIELD(hv, conf, priority_weight_fs, uint32_t);
 	STORE_FIELD(hv, conf, priority_weight_js, uint32_t);
 	STORE_FIELD(hv, conf, priority_weight_part, uint32_t);
 	STORE_FIELD(hv, conf, priority_weight_qos, uint32_t);
+	STORE_FIELD(hv, conf, priority_weight_tres, charp);
 	STORE_FIELD(hv, conf, private_data, uint16_t);
+
 	if (conf->proctrack_type)
 		STORE_FIELD(hv, conf, proctrack_type, charp);
+
 	if (conf->prolog)
 		STORE_FIELD(hv, conf, prolog, charp);
+	STORE_FIELD(hv, conf, prolog_flags, uint16_t);
 	if (conf->prolog_slurmctld)
 		STORE_FIELD(hv, conf, prolog_slurmctld, charp);
 
@@ -169,17 +269,35 @@ slurm_ctl_conf_to_hv(slurm_ctl_conf_t *conf, HV *hv)
 		STORE_FIELD(hv, conf, propagate_rlimits, charp);
 	if (conf->propagate_rlimits_except)
 		STORE_FIELD(hv, conf, propagate_rlimits_except, charp);
+
+	if (conf->reboot_program)
+		STORE_FIELD(hv, conf, reboot_program, charp);
+
 	STORE_FIELD(hv, conf, reconfig_flags, uint16_t);
+
+	if (conf->requeue_exit)
+		STORE_FIELD(hv, conf, requeue_exit, charp);
+	if (conf->requeue_exit_hold)
+		STORE_FIELD(hv, conf, requeue_exit_hold, charp);
+
 	if (conf->resume_program)
 		STORE_FIELD(hv, conf, resume_program, charp);
 	STORE_FIELD(hv, conf, resume_rate, uint16_t);
 	STORE_FIELD(hv, conf, resume_timeout, uint16_t);
+
 	if (conf->resv_epilog)
 		STORE_FIELD(hv, conf, resv_epilog, charp);
+
 	STORE_FIELD(hv, conf, resv_over_run, uint16_t);
+
 	if (conf->resv_prolog)
 		STORE_FIELD(hv, conf, resv_prolog, charp);
+
 	STORE_FIELD(hv, conf, ret2service, uint16_t);
+
+	if (conf->route_plugin)
+		STORE_FIELD(hv, conf, route_plugin, charp);
+
 	if (conf->salloc_default_command)
 		STORE_FIELD(hv, conf, salloc_default_command, charp);
 
@@ -193,19 +311,20 @@ slurm_ctl_conf_to_hv(slurm_ctl_conf_t *conf, HV *hv)
 		STORE_FIELD(hv, conf, schedtype, charp);
 	STORE_FIELD(hv, conf, schedport, uint16_t);
 	STORE_FIELD(hv, conf, schedrootfltr, uint16_t);
+
+	STORE_PTR_FIELD(hv, conf, select_conf_key_pairs, "Slurm::List"); /* TODO: Think about memory management */
 	if (conf->select_type)
 		STORE_FIELD(hv, conf, select_type, charp);
-	STORE_PTR_FIELD(hv, conf, select_conf_key_pairs, "Slurm::List"); /* TODO: Think about memory management */
 	STORE_FIELD(hv, conf, select_type_param, uint16_t);
+
 	if (conf->slurm_conf)
 		STORE_FIELD(hv, conf, slurm_conf, charp);
 
 	STORE_FIELD(hv, conf, slurm_user_id, uint32_t);
+
 	if (conf->slurm_user_name)
 		STORE_FIELD(hv, conf, slurm_user_name, charp);
-	STORE_FIELD(hv, conf, slurmd_user_id, uint32_t);
-	if (conf->slurmd_user_name)
-		STORE_FIELD(hv, conf, slurmd_user_name, charp);
+
 	STORE_FIELD(hv, conf, slurmctld_debug, uint16_t);
 	if (conf->slurmctld_logfile)
 		STORE_FIELD(hv, conf, slurmctld_logfile, charp);
@@ -222,16 +341,26 @@ slurm_ctl_conf_to_hv(slurm_ctl_conf_t *conf, HV *hv)
 		STORE_FIELD(hv, conf, slurmd_logfile, charp);
 	if (conf->slurmd_pidfile)
 		STORE_FIELD(hv, conf, slurmd_pidfile, charp);
+	if (conf->slurmd_plugstack)
+		STORE_FIELD(hv, conf, slurmd_plugstack, charp);
 	STORE_FIELD(hv, conf, slurmd_port, uint32_t);
 	if (conf->slurmd_spooldir)
 		STORE_FIELD(hv, conf, slurmd_spooldir, charp);
 	STORE_FIELD(hv, conf, slurmd_timeout, uint16_t);
+	STORE_FIELD(hv, conf, slurmd_user_id, uint32_t);
+	if (conf->slurmd_user_name)
+		STORE_FIELD(hv, conf, slurmd_user_name, charp);
+
 	if (conf->srun_epilog)
 		STORE_FIELD(hv, conf, srun_epilog, charp);
+	if (conf->srun_port_range)
+		STORE_PTR_FIELD(hv, conf, srun_port_range, "SLURM::port_range");
 	if (conf->srun_prolog)
 		STORE_FIELD(hv, conf, srun_prolog, charp);
+
 	if (conf->state_save_location)
 		STORE_FIELD(hv, conf, state_save_location, charp);
+
 	if (conf->suspend_exc_nodes)
 		STORE_FIELD(hv, conf, suspend_exc_nodes, charp);
 	if (conf->suspend_exc_parts)
@@ -266,6 +395,11 @@ slurm_ctl_conf_to_hv(slurm_ctl_conf_t *conf, HV *hv)
 		STORE_FIELD(hv, conf, version, charp);
 	STORE_FIELD(hv, conf, vsize_factor, uint16_t);
 	STORE_FIELD(hv, conf, wait_time, uint16_t);
+
+	STORE_FIELD(hv, conf, z_16, uint16_t);
+	STORE_FIELD(hv, conf, z_32, uint32_t);
+	if (conf->z_char)
+		STORE_FIELD(hv, conf, z_char, charp);
 	return 0;
 }
 
@@ -277,14 +411,19 @@ hv_to_slurm_ctl_conf(HV *hv, slurm_ctl_conf_t *conf)
 {
 	memset(conf, 0, sizeof(slurm_ctl_conf_t));
 
-	FETCH_FIELD(hv, conf, last_update, time_t, TRUE);
-	FETCH_FIELD(hv, conf, acct_gather_profile_type, charp, FALSE);
-	FETCH_FIELD(hv, conf, acct_gather_infiniband_type, charp, FALSE);
+	FETCH_FIELD(hv, conf, last_update, time_t, FALSE);
+	FETCH_FIELD(hv, conf, acct_gather_conf, charp, FALSE);
+	FETCH_FIELD(hv, conf, acct_gather_energy_type, charp, FALSE);
 	FETCH_FIELD(hv, conf, acct_gather_filesystem_type, charp, FALSE);
+	FETCH_FIELD(hv, conf, acct_gather_infiniband_type, charp, FALSE);
+	FETCH_FIELD(hv, conf, acct_gather_node_freq, uint16_t, FALSE);
+	FETCH_FIELD(hv, conf, acct_gather_profile_type, charp, FALSE);
+	FETCH_FIELD(hv, conf, acctng_store_job_comment, uint16_t, FALSE);
 	FETCH_FIELD(hv, conf, accounting_storage_enforce, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, accounting_storage_backup_host, charp, FALSE);
 	FETCH_FIELD(hv, conf, accounting_storage_host, charp, FALSE);
 	FETCH_FIELD(hv, conf, accounting_storage_loc, charp, FALSE);
+	FETCH_FIELD(hv, conf, accounting_storage_pass, charp, FALSE);
 	FETCH_FIELD(hv, conf, accounting_storage_port, uint32_t, TRUE);
 	FETCH_FIELD(hv, conf, accounting_storage_type, charp, FALSE);
 	FETCH_FIELD(hv, conf, accounting_storage_user, charp, FALSE);
@@ -294,26 +433,33 @@ hv_to_slurm_ctl_conf(HV *hv, slurm_ctl_conf_t *conf)
 	FETCH_FIELD(hv, conf, backup_addr, charp, FALSE);
 	FETCH_FIELD(hv, conf, backup_controller, charp, FALSE);
 	FETCH_FIELD(hv, conf, batch_start_timeout, uint16_t, TRUE);
+	FETCH_FIELD(hv, conf, bb_type, charp, FALSE);
 	FETCH_FIELD(hv, conf, boot_time, time_t, TRUE);
 	FETCH_FIELD(hv, conf, checkpoint_type, charp, FALSE);
+	FETCH_FIELD(hv, conf, chos_loc, charp, FALSE);
+	FETCH_FIELD(hv, conf, core_spec_plugin, charp, FALSE);
 	FETCH_FIELD(hv, conf, cluster_name, charp, FALSE);
 	FETCH_FIELD(hv, conf, complete_wait, uint16_t, TRUE);
 
 	FETCH_FIELD(hv, conf, control_addr, charp, FALSE);
 	FETCH_FIELD(hv, conf, control_machine, charp, FALSE);
+	FETCH_FIELD(hv, conf, cpu_freq_def, uint32_t, FALSE);
 	FETCH_FIELD(hv, conf, crypto_type, charp, FALSE);
 	FETCH_FIELD(hv, conf, debug_flags, uint64_t, TRUE);
 	FETCH_FIELD(hv, conf, def_mem_per_cpu, uint32_t, TRUE);
 	FETCH_FIELD(hv, conf, disable_root_jobs, uint16_t, TRUE);
-	FETCH_FIELD(hv, conf, dynalloc_port, uint16_t, TRUE);
+	FETCH_FIELD(hv, conf, eio_timeout, uint16_t, FALSE);
 	FETCH_FIELD(hv, conf, enforce_part_limits, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, epilog, charp, FALSE);
 	FETCH_FIELD(hv, conf, epilog_msg_time, uint32_t, TRUE);
 	FETCH_FIELD(hv, conf, epilog_slurmctld, charp, FALSE);
+	FETCH_FIELD(hv, conf, ext_sensors_conf, charp, FALSE);
 	FETCH_FIELD(hv, conf, ext_sensors_freq, uint16_t, TRUE);
+	FETCH_FIELD(hv, conf, ext_sensors_type, charp, FALSE);
 
 	FETCH_FIELD(hv, conf, fast_schedule, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, first_job_id, uint32_t, TRUE);
+	FETCH_FIELD(hv, conf, fs_dampening_factor, uint16_t, FALSE);
 	FETCH_FIELD(hv, conf, get_env_timeout, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, gres_plugins, charp, FALSE);
 	FETCH_FIELD(hv, conf, group_info, uint16_t, TRUE);
@@ -323,14 +469,17 @@ hv_to_slurm_ctl_conf(HV *hv, slurm_ctl_conf_t *conf)
 	FETCH_FIELD(hv, conf, health_check_program, charp, FALSE);
 	FETCH_FIELD(hv, conf, inactive_limit, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, job_acct_gather_freq, charp, TRUE);
+	FETCH_FIELD(hv, conf, job_acct_gather_params, charp, FALSE);
 	FETCH_FIELD(hv, conf, job_acct_gather_type, charp, FALSE);
 
 	FETCH_FIELD(hv, conf, job_ckpt_dir, charp, FALSE);
 	FETCH_FIELD(hv, conf, job_comp_host, charp, FALSE);
 	FETCH_FIELD(hv, conf, job_comp_loc, charp, FALSE);
+	FETCH_FIELD(hv, conf, job_comp_pass, charp, FALSE);
 	FETCH_FIELD(hv, conf, job_comp_port, uint32_t, TRUE);
 	FETCH_FIELD(hv, conf, job_comp_type, charp, FALSE);
 	FETCH_FIELD(hv, conf, job_comp_user, charp, FALSE);
+	FETCH_FIELD(hv, conf, job_container_plugin, charp, FALSE);
 	FETCH_FIELD(hv, conf, job_credential_private_key, charp, FALSE);
 	FETCH_FIELD(hv, conf, job_credential_public_certificate, charp, FALSE);
 	FETCH_FIELD(hv, conf, job_file_append, uint16_t, TRUE);
@@ -340,12 +489,19 @@ hv_to_slurm_ctl_conf(HV *hv, slurm_ctl_conf_t *conf)
 	FETCH_FIELD(hv, conf, keep_alive_time, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, kill_on_bad_exit, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, kill_wait, uint16_t, TRUE);
+	FETCH_FIELD(hv, conf, launch_type, charp, FALSE);
+	FETCH_FIELD(hv, conf, layouts, charp, FALSE);
 	FETCH_FIELD(hv, conf, licenses, charp, FALSE);
+	FETCH_FIELD(hv, conf, licenses_used, charp, FALSE);
+	FETCH_FIELD(hv, conf, log_fmt, uint16_t, FALSE);
 	FETCH_FIELD(hv, conf, mail_prog, charp, FALSE);
 	FETCH_FIELD(hv, conf, max_array_sz, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, max_job_cnt, uint16_t, TRUE);
+	FETCH_FIELD(hv, conf, max_job_id, uint32_t, FALSE);
 	FETCH_FIELD(hv, conf, max_mem_per_cpu, uint32_t, TRUE);
+	FETCH_FIELD(hv, conf, max_step_cnt, uint32_t, FALSE);
 	FETCH_FIELD(hv, conf, max_tasks_per_node, uint16_t, TRUE);
+	FETCH_FIELD(hv, conf, mem_limit_enforce, uint16_t, FALSE);
 	FETCH_FIELD(hv, conf, min_job_age, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, mpi_default, charp, FALSE);
 	FETCH_FIELD(hv, conf, mpi_params, charp, FALSE);
@@ -356,11 +512,13 @@ hv_to_slurm_ctl_conf(HV *hv, slurm_ctl_conf_t *conf)
 	FETCH_FIELD(hv, conf, over_time_limit, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, plugindir, charp, FALSE);
 	FETCH_FIELD(hv, conf, plugstack, charp, FALSE);
+	FETCH_FIELD(hv, conf, power_parameters, charp, FALSE);
 	FETCH_FIELD(hv, conf, preempt_mode, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, preempt_type, charp, FALSE);
 	FETCH_FIELD(hv, conf, priority_calc_period, uint32_t, TRUE);
 	FETCH_FIELD(hv, conf, priority_decay_hl, uint32_t, TRUE);
 	FETCH_FIELD(hv, conf, priority_favor_small, uint16_t, TRUE);
+	FETCH_FIELD(hv, conf, priority_flags, uint16_t, FALSE);
 	FETCH_FIELD(hv, conf, priority_max_age, uint32_t, TRUE);
 	FETCH_FIELD(hv, conf, priority_params, charp, FALSE);
 	FETCH_FIELD(hv, conf, priority_reset_period, uint16_t, TRUE);
@@ -371,16 +529,20 @@ hv_to_slurm_ctl_conf(HV *hv, slurm_ctl_conf_t *conf)
 	FETCH_FIELD(hv, conf, priority_weight_js, uint32_t, TRUE);
 	FETCH_FIELD(hv, conf, priority_weight_part, uint32_t, TRUE);
 	FETCH_FIELD(hv, conf, priority_weight_qos, uint32_t, TRUE);
+	FETCH_FIELD(hv, conf, priority_weight_tres, charp, TRUE);
 	FETCH_FIELD(hv, conf, private_data, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, proctrack_type, charp, FALSE);
 	FETCH_FIELD(hv, conf, prolog, charp, FALSE);
-	FETCH_FIELD(hv, conf, prolog_slurmctld, charp, FALSE);
 	FETCH_FIELD(hv, conf, prolog_flags, uint16_t, TRUE);
+	FETCH_FIELD(hv, conf, prolog_slurmctld, charp, FALSE);
 
 	FETCH_FIELD(hv, conf, propagate_prio_process, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, propagate_rlimits, charp, FALSE);
 	FETCH_FIELD(hv, conf, propagate_rlimits_except, charp, FALSE);
+	FETCH_FIELD(hv, conf, reboot_program, charp, FALSE);
 	FETCH_FIELD(hv, conf, reconfig_flags, uint16_t, TRUE);
+	FETCH_FIELD(hv, conf, requeue_exit, charp, FALSE);
+	FETCH_FIELD(hv, conf, requeue_exit_hold, charp, FALSE);
 	FETCH_FIELD(hv, conf, resume_program, charp, FALSE);
 	FETCH_FIELD(hv, conf, resume_rate, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, resume_timeout, uint16_t, TRUE);
@@ -388,6 +550,7 @@ hv_to_slurm_ctl_conf(HV *hv, slurm_ctl_conf_t *conf)
 	FETCH_FIELD(hv, conf, resv_over_run, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, resv_prolog, charp, FALSE);
 	FETCH_FIELD(hv, conf, ret2service, uint16_t, TRUE);
+	FETCH_FIELD(hv, conf, route_plugin, charp, FALSE);
 	FETCH_FIELD(hv, conf, salloc_default_command, charp, FALSE);
 
 	FETCH_FIELD(hv, conf, sched_logfile, charp, FALSE);
@@ -397,8 +560,8 @@ hv_to_slurm_ctl_conf(HV *hv, slurm_ctl_conf_t *conf)
 	FETCH_FIELD(hv, conf, schedtype, charp, FALSE);
 	FETCH_FIELD(hv, conf, schedport, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, schedrootfltr, uint16_t, TRUE);
+	FETCH_FIELD(hv, conf, select_conf_key_pairs, charp, FALSE);
 	FETCH_FIELD(hv, conf, select_type, charp, FALSE);
-	/* TODO: select_conf_key_pairs */
 	FETCH_FIELD(hv, conf, select_type_param, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, slurm_conf, charp, FALSE);
 
@@ -417,10 +580,12 @@ hv_to_slurm_ctl_conf(HV *hv, slurm_ctl_conf_t *conf)
 	FETCH_FIELD(hv, conf, slurmd_debug, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, slurmd_logfile, charp, FALSE);
 	FETCH_FIELD(hv, conf, slurmd_pidfile, charp, FALSE);
+	FETCH_FIELD(hv, conf, slurmd_plugstack, charp, FALSE);
 	FETCH_FIELD(hv, conf, slurmd_port, uint32_t, TRUE);
 	FETCH_FIELD(hv, conf, slurmd_spooldir, charp, FALSE);
 	FETCH_FIELD(hv, conf, slurmd_timeout, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, srun_epilog, charp, FALSE);
+	FETCH_PTR_FIELD(hv, conf, srun_port_range, "SLURM::port_range", FALSE);
 	FETCH_FIELD(hv, conf, srun_prolog, charp, FALSE);
 	FETCH_FIELD(hv, conf, state_save_location, charp, FALSE);
 	FETCH_FIELD(hv, conf, suspend_exc_nodes, charp, FALSE);
@@ -446,6 +611,9 @@ hv_to_slurm_ctl_conf(HV *hv, slurm_ctl_conf_t *conf)
 	FETCH_FIELD(hv, conf, version, charp, FALSE);
 	FETCH_FIELD(hv, conf, vsize_factor, uint16_t, TRUE);
 	FETCH_FIELD(hv, conf, wait_time, uint16_t, TRUE);
+	FETCH_FIELD(hv, conf, z_16, uint16_t, FALSE);
+	FETCH_FIELD(hv, conf, z_32, uint32_t, FALSE);
+	FETCH_FIELD(hv, conf, z_char, charp, FALSE);
 	return 0;
 }
 
diff --git a/contribs/perlapi/libslurm/perl/job.c b/contribs/perlapi/libslurm/perl/job.c
index 46df0a978..5247d5cda 100644
--- a/contribs/perlapi/libslurm/perl/job.c
+++ b/contribs/perlapi/libslurm/perl/job.c
@@ -8,8 +8,171 @@
 #include <slurm/slurm.h>
 #include "ppport.h"
 
+#include "src/common/job_resources.h"
+
+#include "bitstr.h"
 #include "slurm-perl.h"
 
+static node_info_msg_t *job_node_ptr = NULL;
+
+/* This set of functions loads/free node information so that we can map a job's
+ * core bitmap to it's CPU IDs based upon the thread count on each node. */
+static void _load_node_info(void)
+{
+	if (!job_node_ptr)
+		(void) slurm_load_node((time_t) NULL, &job_node_ptr, 0);
+}
+
+static void _free_node_info(void)
+{
+	if (job_node_ptr) {
+		slurm_free_node_info_msg(job_node_ptr);
+		job_node_ptr = NULL;
+	}
+}
+
+static uint32_t _threads_per_core(char *host)
+{
+	uint32_t i, threads = 1;
+
+	if (!job_node_ptr || !host)
+		return threads;
+
+	slurm_mutex_lock(&job_node_info_lock);
+	for (i = 0; i < job_node_ptr->record_count; i++) {
+		if (job_node_ptr->node_array[i].name &&
+		    !strcmp(host, job_node_ptr->node_array[i].name)) {
+			threads = job_node_ptr->node_array[i].threads;
+			break;
+		}
+	}
+	slurm_mutex_unlock(&job_node_info_lock);
+	return threads;
+}
+
+static int _job_resrcs_to_hv(job_info_t *job_info, HV *hv)
+{
+	AV *av;
+	HV *nr_hv;
+	bitstr_t *cpu_bitmap;
+	int sock_inx, sock_reps, last, cnt = 0, i, j, k;
+	char tmp1[128], tmp2[128];
+	char *host;
+	job_resources_t *job_resrcs = job_info->job_resrcs;
+	int bit_inx, bit_reps;
+	int abs_node_inx, rel_node_inx;
+	uint32_t *last_mem_alloc_ptr = NULL;
+	uint32_t last_mem_alloc = NO_VAL;
+	char *last_hosts;
+	hostlist_t hl, hl_last;
+	uint32_t threads;
+
+	if (!job_resrcs || !job_resrcs->core_bitmap
+	    || ((last = slurm_bit_fls(job_resrcs->core_bitmap)) == -1))
+		return 0;
+
+	if (!(hl = slurm_hostlist_create(job_resrcs->nodes)))
+		return 1;
+
+	if (!(hl_last = slurm_hostlist_create(NULL)))
+		return 1;
+	av = newAV();
+
+	bit_inx = 0;
+	i = sock_inx = sock_reps = 0;
+	abs_node_inx = job_info->node_inx[i];
+
+/*	tmp1[] stores the current cpu(s) allocated	*/
+	tmp2[0] = '\0';	/* stores last cpu(s) allocated */
+	for (rel_node_inx=0; rel_node_inx < job_resrcs->nhosts;
+	     rel_node_inx++) {
+
+		if (sock_reps >= job_resrcs->sock_core_rep_count[sock_inx]) {
+			sock_inx++;
+			sock_reps = 0;
+		}
+		sock_reps++;
+
+		bit_reps = job_resrcs->sockets_per_node[sock_inx] *
+			job_resrcs->cores_per_socket[sock_inx];
+		host = slurm_hostlist_shift(hl);
+		threads = _threads_per_core(host);
+		cpu_bitmap = slurm_bit_alloc(bit_reps * threads);
+		for (j = 0; j < bit_reps; j++) {
+			if (slurm_bit_test(job_resrcs->core_bitmap, bit_inx)){
+				for (k = 0; k < threads; k++)
+					slurm_bit_set(cpu_bitmap,
+						      (j * threads) + k);
+			}
+			bit_inx++;
+		}
+		slurm_bit_fmt(tmp1, sizeof(tmp1), cpu_bitmap);
+		FREE_NULL_BITMAP(cpu_bitmap);
+/*
+ *		If the allocation values for this host are not the same as the
+ *		last host, print the report of the last group of hosts that had
+ *		identical allocation values.
+ */
+		if (strcmp(tmp1, tmp2) ||
+		    (last_mem_alloc_ptr != job_resrcs->memory_allocated) ||
+		    (job_resrcs->memory_allocated &&
+		     (last_mem_alloc !=
+		      job_resrcs->memory_allocated[rel_node_inx]))) {
+			if (slurm_hostlist_count(hl_last)) {
+				last_hosts =
+					slurm_hostlist_ranged_string_xmalloc(
+						hl_last);
+				nr_hv = newHV();
+				hv_store_charp(nr_hv, "nodes", last_hosts);
+				hv_store_charp(nr_hv, "cpu_ids", tmp2);
+				hv_store_uint32_t(nr_hv, "mem",
+						  last_mem_alloc_ptr ?
+						  last_mem_alloc : 0);
+				av_store(av, cnt++, newRV_noinc((SV*)nr_hv));
+				xfree(last_hosts);
+				slurm_hostlist_destroy(hl_last);
+				hl_last = slurm_hostlist_create(NULL);
+			}
+			strcpy(tmp2, tmp1);
+			last_mem_alloc_ptr = job_resrcs->memory_allocated;
+			if (last_mem_alloc_ptr)
+				last_mem_alloc = job_resrcs->
+					memory_allocated[rel_node_inx];
+			else
+				last_mem_alloc = NO_VAL;
+		}
+		slurm_hostlist_push_host(hl_last, host);
+		free(host);
+
+		if (bit_inx > last)
+			break;
+
+		if (abs_node_inx > job_info->node_inx[i+1]) {
+			i += 2;
+			abs_node_inx = job_info->node_inx[i];
+		} else {
+			abs_node_inx++;
+		}
+	}
+
+	if (slurm_hostlist_count(hl_last)) {
+		last_hosts = slurm_hostlist_ranged_string_xmalloc(hl_last);
+		nr_hv = newHV();
+		hv_store_charp(nr_hv, "nodes", last_hosts);
+		hv_store_charp(nr_hv, "cpu_ids", tmp2);
+		hv_store_uint32_t(nr_hv, "mem",
+				  last_mem_alloc_ptr ?
+				  last_mem_alloc : 0);
+		av_store(av, cnt++, newRV_noinc((SV*)nr_hv));
+		xfree(last_hosts);
+	}
+	slurm_hostlist_destroy(hl);
+	slurm_hostlist_destroy(hl_last);
+	hv_store_sv(hv, "node_resrcs", newRV_noinc((SV*)av));
+
+	return 0;
+}
+
 /*
  * convert job_info_t to perl HV
  */
@@ -59,7 +222,7 @@ job_info_to_hv(job_info_t *job_info, HV *hv)
 		STORE_FIELD(hv, job_info, gres, charp);
 	STORE_FIELD(hv, job_info, group_id, uint32_t);
 	STORE_FIELD(hv, job_info, job_id, uint32_t);
-	STORE_FIELD(hv, job_info, job_state, uint16_t);
+	STORE_FIELD(hv, job_info, job_state, uint32_t);
 	if(job_info->licenses)
 		STORE_FIELD(hv, job_info, licenses, charp);
 	STORE_FIELD(hv, job_info, max_cpus, uint32_t);
@@ -145,6 +308,8 @@ job_info_to_hv(job_info_t *job_info, HV *hv)
 	if(job_info->work_dir)
 		STORE_FIELD(hv, job_info, work_dir, charp);
 
+	_job_resrcs_to_hv(job_info, hv);
+
 	return 0;
 }
 
@@ -194,7 +359,7 @@ hv_to_job_info(HV *hv, job_info_t *job_info)
 	FETCH_FIELD(hv, job_info, gres, charp, FALSE);
 	FETCH_FIELD(hv, job_info, group_id, uint32_t, TRUE);
 	FETCH_FIELD(hv, job_info, job_id, uint32_t, TRUE);
-	FETCH_FIELD(hv, job_info, job_state, uint16_t, TRUE);
+	FETCH_FIELD(hv, job_info, job_state, uint32_t, TRUE);
 	FETCH_FIELD(hv, job_info, licenses, charp, FALSE);
 	FETCH_FIELD(hv, job_info, max_cpus, uint32_t, TRUE);
 	FETCH_FIELD(hv, job_info, max_nodes, uint32_t, TRUE);
@@ -281,6 +446,8 @@ job_info_msg_to_hv(job_info_msg_t *job_info_msg, HV *hv)
 	HV *hv_info;
 	AV *av;
 
+	_load_node_info();
+
 	STORE_FIELD(hv, job_info_msg, last_update, time_t);
 	/* record_count implied in job_array */
 	av = newAV();
@@ -294,6 +461,9 @@ job_info_msg_to_hv(job_info_msg_t *job_info_msg, HV *hv)
 		av_store(av, i, newRV_noinc((SV*)hv_info));
 	}
 	hv_store_sv(hv, "job_array", newRV_noinc((SV*)av));
+
+	_free_node_info();
+
 	return 0;
 }
 
diff --git a/contribs/perlapi/libslurm/perl/lib/Slurm/Constant.pm b/contribs/perlapi/libslurm/perl/lib/Slurm/Constant.pm
index 3208e6ed6..2ddcda67e 100644
--- a/contribs/perlapi/libslurm/perl/lib/Slurm/Constant.pm
+++ b/contribs/perlapi/libslurm/perl/lib/Slurm/Constant.pm
@@ -944,7 +944,7 @@ head2 SLURM ERRNO
 
 =back
 
-=head3 _info.c/communcation layer RESPONSE_SLURM_RC message codes
+=head3 _info.c/communication layer RESPONSE_SLURM_RC message codes
 
 =over 2
 
diff --git a/contribs/perlapi/libslurm/perl/partition.c b/contribs/perlapi/libslurm/perl/partition.c
index 1a8d58d1b..d454d9db6 100644
--- a/contribs/perlapi/libslurm/perl/partition.c
+++ b/contribs/perlapi/libslurm/perl/partition.c
@@ -22,8 +22,22 @@ partition_info_to_hv(partition_info_t *part_info, HV *hv)
 		STORE_FIELD(hv, part_info, allow_groups, charp);
 	if (part_info->alternate)
 		STORE_FIELD(hv, part_info, alternate, charp);
+	if (part_info->cr_type)
+		STORE_FIELD(hv, part_info, cr_type, uint16_t);
+	if (part_info->def_mem_per_cpu)
+		STORE_FIELD(hv, part_info, def_mem_per_cpu, uint32_t);
 	STORE_FIELD(hv, part_info, default_time, uint32_t);
+	if (part_info->deny_accounts)
+		STORE_FIELD(hv, part_info, deny_accounts, charp);
+	if (part_info->deny_qos)
+		STORE_FIELD(hv, part_info, deny_qos, charp);
 	STORE_FIELD(hv, part_info, flags, uint16_t);
+	if (part_info->grace_time)
+		STORE_FIELD(hv, part_info, grace_time, uint32_t);
+	if (part_info->max_cpus_per_node)
+		STORE_FIELD(hv, part_info, max_cpus_per_node, uint32_t);
+	if (part_info->max_mem_per_cpu)
+		STORE_FIELD(hv, part_info, max_mem_per_cpu, uint32_t);
 	STORE_FIELD(hv, part_info, max_nodes, uint32_t);
 	STORE_FIELD(hv, part_info, max_share, uint16_t);
 	STORE_FIELD(hv, part_info, max_time, uint32_t);
@@ -51,6 +65,8 @@ partition_info_to_hv(partition_info_t *part_info, HV *hv)
 		STORE_FIELD(hv, part_info, nodes, charp);
 	STORE_FIELD(hv, part_info, preempt_mode, uint16_t);
 	STORE_FIELD(hv, part_info, priority, uint16_t);
+	if (part_info->qos_char)
+		STORE_FIELD(hv, part_info, qos_char, charp);
 	STORE_FIELD(hv, part_info, state_up, uint16_t);
 	STORE_FIELD(hv, part_info, total_cpus, uint32_t);
 	STORE_FIELD(hv, part_info, total_nodes, uint32_t);
@@ -71,16 +87,24 @@ hv_to_partition_info(HV *hv, partition_info_t *part_info)
 	memset(part_info, 0, sizeof(partition_info_t));
 
 	FETCH_FIELD(hv, part_info, allow_alloc_nodes, charp, FALSE);
+	FETCH_FIELD(hv, part_info, allow_accounts, charp, FALSE);
 	FETCH_FIELD(hv, part_info, allow_groups, charp, FALSE);
+	FETCH_FIELD(hv, part_info, allow_qos, charp, FALSE);
 	FETCH_FIELD(hv, part_info, alternate, charp, FALSE);
+	FETCH_FIELD(hv, part_info, cr_type, uint16_t, FALSE);
+	FETCH_FIELD(hv, part_info, def_mem_per_cpu, uint32_t, FALSE);
 	FETCH_FIELD(hv, part_info, default_time, uint32_t, TRUE);
+	FETCH_FIELD(hv, part_info, deny_accounts, charp, FALSE);
+	FETCH_FIELD(hv, part_info, deny_qos, charp, FALSE);
 	FETCH_FIELD(hv, part_info, flags, uint16_t, TRUE);
+	FETCH_FIELD(hv, part_info, grace_time, uint32_t, FALSE);
+	FETCH_FIELD(hv, part_info, max_cpus_per_node, uint32_t, FALSE);
+	FETCH_FIELD(hv, part_info, max_mem_per_cpu, uint32_t, FALSE);
 	FETCH_FIELD(hv, part_info, max_nodes, uint32_t, TRUE);
 	FETCH_FIELD(hv, part_info, max_share, uint16_t, TRUE);
 	FETCH_FIELD(hv, part_info, max_time, uint32_t, TRUE);
 	FETCH_FIELD(hv, part_info, min_nodes, uint32_t, TRUE);
 	FETCH_FIELD(hv, part_info, name, charp, TRUE);
-	FETCH_FIELD(hv, part_info, name, charp, TRUE);
 	svp = hv_fetch(hv, "node_inx", 8, FALSE);
 	if (svp && SvROK(*svp) && SvTYPE(SvRV(*svp)) == SVt_PVAV) {
 		av = (AV*)SvRV(*svp);
@@ -97,6 +121,7 @@ hv_to_partition_info(HV *hv, partition_info_t *part_info)
 	FETCH_FIELD(hv, part_info, nodes, charp, FALSE);
 	FETCH_FIELD(hv, part_info, preempt_mode, uint16_t, TRUE);
 	FETCH_FIELD(hv, part_info, priority, uint16_t, TRUE);
+	FETCH_FIELD(hv, part_info, qos_char, charp, TRUE);
 	FETCH_FIELD(hv, part_info, state_up, uint16_t, TRUE);
 	FETCH_FIELD(hv, part_info, total_cpus, uint32_t, TRUE);
 	FETCH_FIELD(hv, part_info, total_nodes, uint32_t, TRUE);
diff --git a/contribs/perlapi/libslurm/perl/reservation.c b/contribs/perlapi/libslurm/perl/reservation.c
index 0d9a243f2..349ea8ea6 100644
--- a/contribs/perlapi/libslurm/perl/reservation.c
+++ b/contribs/perlapi/libslurm/perl/reservation.c
@@ -86,7 +86,7 @@ hv_to_reserve_info(HV *hv, reserve_info_t *resv_info)
 		/* nothing to do */
 	}
 	FETCH_FIELD(hv, resv_info, node_list, charp, FALSE);
-	FETCH_FIELD(hv, resv_info, partition, charp, TRUE);
+	FETCH_FIELD(hv, resv_info, partition, charp, FALSE);
 	FETCH_FIELD(hv, resv_info, start_time, time_t, TRUE);
 	FETCH_FIELD(hv, resv_info, users, charp, FALSE);
 	return 0;
diff --git a/contribs/perlapi/libslurm/perl/slurm-perl.h b/contribs/perlapi/libslurm/perl/slurm-perl.h
index 8ae4e026f..05adb5f4b 100644
--- a/contribs/perlapi/libslurm/perl/slurm-perl.h
+++ b/contribs/perlapi/libslurm/perl/slurm-perl.h
@@ -9,16 +9,16 @@
 
 
 /* these declaration are not in slurm.h */
+#ifndef xfree
 #define xfree(__p) \
 	slurm_xfree((void **)&(__p), __FILE__, __LINE__, __FUNCTION__)
 #define xmalloc(__sz) \
 	slurm_xmalloc (__sz, true, __FILE__, __LINE__, __FUNCTION__)
+#endif
 
 extern void slurm_xfree(void **, const char *, int, const char *);
 extern void *slurm_xmalloc(size_t, bool, const char *, int, const char *);
 
-
-extern void slurm_conf_reinit(char *pathname);
 extern void slurm_api_clear_config(void);
 
 extern void slurm_list_iterator_destroy(ListIterator itr);
@@ -27,8 +27,8 @@ extern void slurm_list_iterator_destroy(ListIterator itr);
 extern char *slurm_preempt_mode_string(uint16_t preempt_mode);
 extern uint16_t slurm_preempt_mode_num(const char *preempt_mode);
 extern char *slurm_job_reason_string(enum job_state_reason inx);
-extern char *slurm_job_state_string(uint16_t inx);
-extern char *slurm_job_state_string_compact(uint16_t inx);
+extern char *slurm_job_state_string(uint32_t inx);
+extern char *slurm_job_state_string_compact(uint32_t inx);
 extern int   slurm_job_state_num(const char *state_name);
 extern char *slurm_node_state_string(uint32_t inx);
 extern char *slurm_node_state_string_compact(uint32_t inx);
diff --git a/contribs/perlapi/libslurm/perl/step.c b/contribs/perlapi/libslurm/perl/step.c
index 41831c567..39d670dd0 100644
--- a/contribs/perlapi/libslurm/perl/step.c
+++ b/contribs/perlapi/libslurm/perl/step.c
@@ -53,7 +53,7 @@ job_step_info_to_hv(job_step_info_t *step_info, HV *hv)
 	STORE_FIELD(hv, step_info, step_id, uint32_t);
 	STORE_FIELD(hv, step_info, time_limit, uint32_t);
 	STORE_FIELD(hv, step_info, user_id, uint32_t);
-	STORE_FIELD(hv, step_info, state, uint16_t);
+	STORE_FIELD(hv, step_info, state, uint32_t);
 
 	return 0;
 }
@@ -68,6 +68,8 @@ hv_to_job_step_info(HV *hv, job_step_info_t *step_info)
 	AV *av;
 	int i, n;
 
+	memset(step_info, 0, sizeof(job_step_info_t));
+
 	FETCH_FIELD(hv, step_info, array_job_id, uint32_t, TRUE);
 	FETCH_FIELD(hv, step_info, array_task_id, uint32_t, TRUE);
 	FETCH_FIELD(hv, step_info, ckpt_dir, charp, FALSE);
@@ -101,7 +103,7 @@ hv_to_job_step_info(HV *hv, job_step_info_t *step_info)
 	FETCH_FIELD(hv, step_info, step_id, uint32_t, TRUE);
 	FETCH_FIELD(hv, step_info, time_limit, uint32_t, TRUE);
 	FETCH_FIELD(hv, step_info, user_id, uint32_t, TRUE);
-	FETCH_FIELD(hv, step_info, state, uint16_t, TRUE);
+	FETCH_FIELD(hv, step_info, state, uint32_t, TRUE);
 
 	return 0;
 }
@@ -245,12 +247,13 @@ job_step_pids_response_msg_to_hv(job_step_pids_response_msg_t *pids_msg, HV *hv)
 
 	av = newAV();
 	itr = slurm_list_iterator_create(pids_msg->pid_list);
-	while((pids = (job_step_pids_t *)slurm_list_next(itr))) {
+	while ((pids = (job_step_pids_t *)slurm_list_next(itr))) {
 		hv_pids = newHV();
 		if (job_step_pids_to_hv(pids, hv_pids) < 0) {
 			Perl_warn(aTHX_ "failed to convert job_step_pids_t to hv for job_step_pids_response_msg_t");
 			SvREFCNT_dec(hv_pids);
 			SvREFCNT_dec(av);
+			slurm_list_iterator_destroy(itr);
 			return -1;
 		}
 		av_store(av, i++, newRV_noinc((SV*)hv_pids));
@@ -300,12 +303,13 @@ job_step_stat_response_msg_to_hv(job_step_stat_response_msg_t *stat_msg, HV *hv)
 
 	av = newAV();
 	itr = slurm_list_iterator_create(stat_msg->stats_list);
-	while((stat = (job_step_stat_t *)slurm_list_next(itr))) {
+	while ((stat = (job_step_stat_t *)slurm_list_next(itr))) {
 		hv_stat = newHV();
 		if(job_step_stat_to_hv(stat, hv_stat) < 0) {
 			Perl_warn(aTHX_ "failed to convert job_step_stat_t to hv for job_step_stat_response_msg_t");
 			SvREFCNT_dec(hv_stat);
 			SvREFCNT_dec(av);
+			slurm_list_iterator_destroy(itr);
 			return -1;
 		}
 		av_store(av, i++, newRV_noinc((SV*)hv_stat));
diff --git a/contribs/perlapi/libslurm/perl/step_ctx.c b/contribs/perlapi/libslurm/perl/step_ctx.c
index ca96e9a2b..5966c0ee3 100644
--- a/contribs/perlapi/libslurm/perl/step_ctx.c
+++ b/contribs/perlapi/libslurm/perl/step_ctx.c
@@ -20,7 +20,9 @@ hv_to_slurm_step_ctx_params(HV *hv, slurm_step_ctx_params_t *params)
 
 	FETCH_FIELD(hv, params, ckpt_interval, uint16_t, FALSE);
 	FETCH_FIELD(hv, params, cpu_count, uint32_t, FALSE);
-	FETCH_FIELD(hv, params, cpu_freq, uint32_t, FALSE);
+	FETCH_FIELD(hv, params, cpu_freq_min, uint32_t, FALSE);
+	FETCH_FIELD(hv, params, cpu_freq_max, uint32_t, FALSE);
+	FETCH_FIELD(hv, params, cpu_freq_gov, uint32_t, FALSE);
 	FETCH_FIELD(hv, params, exclusive, uint16_t, FALSE);
 	FETCH_FIELD(hv, params, features, charp, FALSE);
 	FETCH_FIELD(hv, params, immediate, uint16_t, FALSE);
@@ -190,7 +192,9 @@ hv_to_slurm_step_launch_params(HV *hv, slurm_step_launch_params_t *params)
 	FETCH_FIELD(hv, params, task_epilog, charp, FALSE);
 	FETCH_FIELD(hv, params, cpu_bind_type, uint16_t, FALSE);
 	FETCH_FIELD(hv, params, cpu_bind, charp, FALSE);
-	FETCH_FIELD(hv, params, cpu_freq, uint32_t, FALSE);
+	FETCH_FIELD(hv, params, cpu_freq_min, uint32_t, FALSE);
+	FETCH_FIELD(hv, params, cpu_freq_max, uint32_t, FALSE);
+	FETCH_FIELD(hv, params, cpu_freq_gov, uint32_t, FALSE);
 	FETCH_FIELD(hv, params, mem_bind_type, uint16_t, FALSE);
 	FETCH_FIELD(hv, params, mem_bind, charp, FALSE);
 
diff --git a/contribs/perlapi/libslurm/perl/topo.c b/contribs/perlapi/libslurm/perl/topo.c
index 0867e3c23..4b0a1ebb1 100644
--- a/contribs/perlapi/libslurm/perl/topo.c
+++ b/contribs/perlapi/libslurm/perl/topo.c
@@ -18,11 +18,11 @@ topo_info_to_hv(topo_info_t *topo_info, HV *hv)
 {
 	STORE_FIELD(hv, topo_info, level, uint16_t);
 	STORE_FIELD(hv, topo_info, link_speed, uint32_t);
-	if(topo_info->name)
+	if (topo_info->name)
 		STORE_FIELD(hv, topo_info, name, charp);
-	if(topo_info->nodes)
+	if (topo_info->nodes)
 		STORE_FIELD(hv, topo_info, nodes, charp);
-	if(topo_info->switches)
+	if (topo_info->switches)
 		STORE_FIELD(hv, topo_info, switches, charp);
 	return 0;
 }
@@ -55,7 +55,7 @@ topo_info_response_msg_to_hv(topo_info_response_msg_t *topo_info_msg, HV *hv)
 
 	/* record_count implied in node_array */
 	av = newAV();
-	for(i = 0; i < topo_info_msg->record_count; i ++) {
+	for (i = 0; i < topo_info_msg->record_count; i ++) {
 		hv_info =newHV();
 		if (topo_info_to_hv(topo_info_msg->topo_array + i, hv_info) < 0) {
 			SvREFCNT_dec((SV*)hv_info);
diff --git a/contribs/perlapi/libslurm/perl/trigger.c b/contribs/perlapi/libslurm/perl/trigger.c
index 6f99c8641..c186438df 100644
--- a/contribs/perlapi/libslurm/perl/trigger.c
+++ b/contribs/perlapi/libslurm/perl/trigger.c
@@ -18,7 +18,7 @@ trigger_info_to_hv(trigger_info_t *trigger_info, HV *hv)
 {
 	STORE_FIELD(hv, trigger_info, trig_id, uint32_t);
 	STORE_FIELD(hv, trigger_info, res_type, uint16_t);
-	if(trigger_info->res_id)
+	if (trigger_info->res_id)
 		STORE_FIELD(hv, trigger_info, res_id, charp);
 	STORE_FIELD(hv, trigger_info, trig_type, uint32_t);
 	STORE_FIELD(hv, trigger_info, offset, uint16_t);
@@ -58,7 +58,7 @@ trigger_info_msg_to_hv(trigger_info_msg_t *trigger_info_msg, HV *hv)
 
 	/* record_count implied in node_array */
 	av = newAV();
-	for(i = 0; i < trigger_info_msg->record_count; i ++) {
+	for (i = 0; i < trigger_info_msg->record_count; i ++) {
 		hv_info =newHV();
 		if (trigger_info_to_hv(trigger_info_msg->trigger_array + i, hv_info) < 0) {
 			SvREFCNT_dec((SV*)hv_info);
diff --git a/contribs/perlapi/libslurm/perl/typemap b/contribs/perlapi/libslurm/perl/typemap
index 2ad8cece1..2b4727550 100644
--- a/contribs/perlapi/libslurm/perl/typemap
+++ b/contribs/perlapi/libslurm/perl/typemap
@@ -1,6 +1,8 @@
 #####################################
 TYPEMAP
 
+char_xfree *	T_CHAR_XFREE
+char_free *	T_CHAR_FREE
 uint32_t	T_U_LONG
 uint16_t	T_U_SHORT
 pid_t		T_U_LONG
@@ -35,6 +37,14 @@ T_SLURM
 T_PTROBJ_SLURM
 	sv_setref_pv( $arg, \"${eval(`cat classmap`);\$slurm_perl_api::class_map->{$ntype}}\", (void*)$var );
 
+T_CHAR_XFREE
+	sv_setpv ((SV*)$arg, $var);
+        xfree ($var);
+
+T_CHAR_FREE
+	sv_setpv ((SV*)$arg, $var);
+        free ($var);
+
 #####################################
 INPUT
 
diff --git a/contribs/perlapi/libslurmdb/Makefile.am b/contribs/perlapi/libslurmdb/Makefile.am
index 10bc75d89..4cd57be2f 100644
--- a/contribs/perlapi/libslurmdb/Makefile.am
+++ b/contribs/perlapi/libslurmdb/Makefile.am
@@ -10,13 +10,18 @@ perl_sources = \
 	       $(perl_dir)/Slurmdb.xs \
 	       $(perl_dir)/slurmdb-perl.h \
 	       $(perl_dir)/cluster.c
+
 test_sources = \
-	$(perl_dir)/t/00-use.t \
-	$(perl_dir)/t/01-clusters_get.t \
-	$(perl_dir)/t/02-report_cluster_account_by_user.t \
-	$(perl_dir)/t/03-report_cluster_user_by_account.t \
-	$(perl_dir)/t/04-report_job_sizes_grouped_by_top_account.t \
-	$(perl_dir)/t/05-report_user_top_usage.t
+	       $(perl_dir)/t/00-use.t \
+	       $(perl_dir)/t/01-clusters_get.t \
+	       $(perl_dir)/t/02-report_cluster_account_by_user.t \
+	       $(perl_dir)/t/03-report_cluster_user_by_account.t \
+	       $(perl_dir)/t/04-report_job_sizes_grouped_by_top_account.t \
+	       $(perl_dir)/t/05-report_user_top_usage.t \
+	       $(perl_dir)/t/06-jobs_get.t \
+	       $(perl_dir)/t/07-qos_get.t
+
+EXTRA_DIST = $(perl_sources) $(test_sources)
 
 $(perl_dir)/Makefile:	$(perl_dir)/Makefile.PL
 	@if test "x${top_srcdir}" != "x${top_builddir}"; then \
diff --git a/contribs/perlapi/libslurmdb/Makefile.in b/contribs/perlapi/libslurmdb/Makefile.in
index 62374562a..0da1df7ae 100644
--- a/contribs/perlapi/libslurmdb/Makefile.in
+++ b/contribs/perlapi/libslurmdb/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -189,6 +192,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -238,8 +243,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -258,6 +267,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -301,6 +313,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -324,6 +337,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -399,13 +413,16 @@ perl_sources = \
 	       $(perl_dir)/cluster.c
 
 test_sources = \
-	$(perl_dir)/t/00-use.t \
-	$(perl_dir)/t/01-clusters_get.t \
-	$(perl_dir)/t/02-report_cluster_account_by_user.t \
-	$(perl_dir)/t/03-report_cluster_user_by_account.t \
-	$(perl_dir)/t/04-report_job_sizes_grouped_by_top_account.t \
-	$(perl_dir)/t/05-report_user_top_usage.t
-
+	       $(perl_dir)/t/00-use.t \
+	       $(perl_dir)/t/01-clusters_get.t \
+	       $(perl_dir)/t/02-report_cluster_account_by_user.t \
+	       $(perl_dir)/t/03-report_cluster_user_by_account.t \
+	       $(perl_dir)/t/04-report_job_sizes_grouped_by_top_account.t \
+	       $(perl_dir)/t/05-report_user_top_usage.t \
+	       $(perl_dir)/t/06-jobs_get.t \
+	       $(perl_dir)/t/07-qos_get.t
+
+EXTRA_DIST = $(perl_sources) $(test_sources)
 AM_CPPFLAGS = \
 	-DVERSION=\"$(VERSION)\" \
 	-I$(top_srcdir) \
diff --git a/contribs/perlapi/libslurmdb/perl/Slurmdb.xs b/contribs/perlapi/libslurmdb/perl/Slurmdb.xs
index 770f4a52b..80d9a0021 100644
--- a/contribs/perlapi/libslurmdb/perl/Slurmdb.xs
+++ b/contribs/perlapi/libslurmdb/perl/Slurmdb.xs
@@ -11,7 +11,7 @@
 #include "const-c.inc"
 
 extern void *slurm_xmalloc(size_t, const char *, int, const char *);
-extern void slurmdb_destroy_association_cond(void *object);
+extern void slurmdb_destroy_assoc_cond(void *object);
 extern void slurmdb_destroy_cluster_cond(void *object);
 extern void slurmdb_destroy_job_cond(void *object);
 extern void slurmdb_destroy_user_cond(void *object);
@@ -74,8 +74,8 @@ slurmdb_report_cluster_account_by_user(db_conn, assoc_condition)
     INIT:
 	AV*   results;
 	List  list = NULL;
-	slurmdb_association_cond_t *assoc_cond = (slurmdb_association_cond_t*)
-		slurm_xmalloc(sizeof(slurmdb_association_cond_t), __FILE__,
+	slurmdb_assoc_cond_t *assoc_cond = (slurmdb_assoc_cond_t*)
+		slurm_xmalloc(sizeof(slurmdb_assoc_cond_t), __FILE__,
 		__LINE__, "slurmdb_report_cluster_account_by_user");
 
 	if (hv_to_assoc_cond(assoc_condition, assoc_cond) < 0) {
@@ -91,7 +91,7 @@ slurmdb_report_cluster_account_by_user(db_conn, assoc_condition)
 	    slurm_list_destroy(list);
 	}
 	RETVAL = newRV((SV*)results);
-	slurmdb_destroy_association_cond(assoc_cond);
+	slurmdb_destroy_assoc_cond(assoc_cond);
     OUTPUT:
         RETVAL
 
@@ -102,8 +102,8 @@ slurmdb_report_cluster_user_by_account(db_conn, assoc_condition)
     INIT:
 	AV*   results;
 	List  list = NULL;
-	slurmdb_association_cond_t *assoc_cond = (slurmdb_association_cond_t*)
-		slurm_xmalloc(sizeof(slurmdb_association_cond_t), __FILE__,
+	slurmdb_assoc_cond_t *assoc_cond = (slurmdb_assoc_cond_t*)
+		slurm_xmalloc(sizeof(slurmdb_assoc_cond_t), __FILE__,
 		__LINE__, "slurmdb_report_cluster_user_by_account");
 
 	if (hv_to_assoc_cond(assoc_condition, assoc_cond) < 0) {
@@ -119,7 +119,7 @@ slurmdb_report_cluster_user_by_account(db_conn, assoc_condition)
 	    slurm_list_destroy(list);
 	}
 	RETVAL = newRV((SV*)results);
-	slurmdb_destroy_association_cond(assoc_cond);
+	slurmdb_destroy_assoc_cond(assoc_cond);
     OUTPUT:
         RETVAL
 
@@ -169,8 +169,8 @@ slurmdb_report_user_top_usage(db_conn, user_condition, group_accounts)
 	slurmdb_user_cond_t* user_cond = (slurmdb_user_cond_t*)
 		slurm_xmalloc(sizeof(slurmdb_user_cond_t), __FILE__,
 		__LINE__, "slurmdb_report_user_top_usage");
-	user_cond->assoc_cond =	(slurmdb_association_cond_t*)
-		slurm_xmalloc(sizeof(slurmdb_association_cond_t), __FILE__,
+	user_cond->assoc_cond =	(slurmdb_assoc_cond_t*)
+		slurm_xmalloc(sizeof(slurmdb_assoc_cond_t), __FILE__,
 		__LINE__, "slurmdb_report_user_top_usage");
 	if (hv_to_user_cond(user_condition, user_cond) < 0) {
 		XSRETURN_UNDEF;
@@ -189,3 +189,79 @@ slurmdb_report_user_top_usage(db_conn, user_condition, group_accounts)
 	slurmdb_destroy_user_cond(user_cond);
     OUTPUT:
         RETVAL
+
+SV*
+slurmdb_jobs_get(db_conn, conditions)
+	void* db_conn
+	HV*   conditions
+    INIT:
+	AV*   results;
+	HV*   rh;
+	List  list = NULL;
+	ListIterator itr;
+	slurmdb_job_cond_t *job_cond = (slurmdb_job_cond_t*)
+		slurm_xmalloc(sizeof(slurmdb_job_cond_t), __FILE__,
+		__LINE__, "slurmdb_jobs_get");
+	slurmdb_job_rec_t *rec = NULL;
+
+	if (hv_to_job_cond(conditions, job_cond) < 0) {
+		XSRETURN_UNDEF;
+	}
+	results = (AV*)sv_2mortal((SV*)newAV());
+    CODE:
+	list = slurmdb_jobs_get(db_conn, job_cond);
+	if (list) {
+	    itr = slurm_list_iterator_create(list);
+
+	    while ((rec = slurm_list_next(itr))) {
+		rh = (HV *)sv_2mortal((SV*)newHV());
+		if (job_rec_to_hv(rec, rh) < 0) {
+		    XSRETURN_UNDEF;
+		}
+		av_push(results, newRV((SV*)rh));
+	    }
+	    slurm_list_destroy(list);
+	}
+	RETVAL = newRV((SV*)results);
+	slurmdb_destroy_job_cond(job_cond);
+    OUTPUT:
+        RETVAL
+
+
+SV*
+slurmdb_qos_get(db_conn, conditions)
+	void* db_conn
+	HV*   conditions
+    INIT:
+	AV*   results;
+	HV*   rh;
+	List  list = NULL, all = NULL;
+	ListIterator itr;
+	slurmdb_qos_cond_t *qos_cond = (slurmdb_qos_cond_t*)
+		slurm_xmalloc(sizeof(slurmdb_qos_cond_t), __FILE__,
+		__LINE__, "slurmdb_qos_get");
+	slurmdb_qos_rec_t *rec = NULL;
+
+	if (hv_to_qos_cond(conditions, qos_cond) < 0) {
+		XSRETURN_UNDEF;
+	}
+	results = (AV*)sv_2mortal((SV*)newAV());
+    CODE:
+	list = slurmdb_qos_get(db_conn, qos_cond);
+	all = slurmdb_qos_get(db_conn, NULL);
+	if (list) {
+	    itr = slurm_list_iterator_create(list);
+
+	    while ((rec = slurm_list_next(itr))) {
+		rh = (HV *)sv_2mortal((SV*)newHV());
+		if (qos_rec_to_hv(rec, rh, all) < 0) {
+		    XSRETURN_UNDEF;
+		}
+		av_push(results, newRV((SV*)rh));
+	    }
+	    slurm_list_destroy(list);
+	}
+	RETVAL = newRV((SV*)results);
+	slurmdb_destroy_qos_cond(qos_cond);
+    OUTPUT:
+        RETVAL
diff --git a/contribs/perlapi/libslurmdb/perl/cluster.c b/contribs/perlapi/libslurmdb/perl/cluster.c
index e84fcec5e..0a15af4cf 100644
--- a/contribs/perlapi/libslurmdb/perl/cluster.c
+++ b/contribs/perlapi/libslurmdb/perl/cluster.c
@@ -7,6 +7,7 @@
 #include <XSUB.h>
 
 #include <slurm/slurmdb.h>
+#include "src/common/slurm_protocol_defs.h"
 #include "slurmdb-perl.h"
 
 extern char* slurm_xstrdup(const char* str);
@@ -20,8 +21,8 @@ av_to_cluster_grouping_list(AV* av, List grouping_list)
     int    i, elements = 0;
 
     elements = av_len(av) + 1;
-    for(i = 0; i < elements; i ++) {
-	if((svp = av_fetch(av, i, FALSE))) {
+    for (i = 0; i < elements; i ++) {
+	if ((svp = av_fetch(av, i, FALSE))) {
 	    str = slurm_xstrdup((char*)SvPV_nolen(*svp));
 	    slurm_list_append(grouping_list, str);
 	} else {
@@ -33,7 +34,7 @@ av_to_cluster_grouping_list(AV* av, List grouping_list)
 }
 
 int
-hv_to_assoc_cond(HV* hv, slurmdb_association_cond_t* assoc_cond)
+hv_to_assoc_cond(HV* hv, slurmdb_assoc_cond_t* assoc_cond)
 {
     AV*    element_av;
     SV**   svp;
@@ -69,23 +70,7 @@ hv_to_assoc_cond(HV* hv, slurmdb_association_cond_t* assoc_cond)
     FETCH_LIST_FIELD(hv, assoc_cond, acct_list);
     FETCH_LIST_FIELD(hv, assoc_cond, cluster_list);
     FETCH_LIST_FIELD(hv, assoc_cond, def_qos_id_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, fairshare_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, grp_cpu_mins_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, grp_cpu_run_mins_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, grp_cpus_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, grp_jobs_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, grp_mem_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, grp_nodes_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, grp_submit_jobs_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, grp_wall_list);
     FETCH_LIST_FIELD(hv, assoc_cond, id_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, max_cpu_mins_pj_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, max_cpu_run_mins_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, max_cpus_pj_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, max_jobs_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, max_nodes_pj_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, max_submit_jobs_list);
-    FETCH_LIST_FIELD(hv, assoc_cond, max_wall_pj_list);
     FETCH_LIST_FIELD(hv, assoc_cond, parent_acct_list);
     FETCH_LIST_FIELD(hv, assoc_cond, partition_list);
     FETCH_LIST_FIELD(hv, assoc_cond, qos_list);
@@ -131,6 +116,13 @@ hv_to_job_cond(HV* hv, slurmdb_job_cond_t* job_cond)
     time_t start_time = 0;
     time_t end_time = 0;
 
+    if ( (svp = hv_fetch (hv, "step_list", strlen("step_list"), FALSE)) ) {
+	char *jobids = (char *) (SvPV_nolen(*svp));
+	if (!job_cond->step_list)
+	    job_cond->step_list =
+		    slurm_list_create(slurmdb_destroy_selected_step);
+	slurm_addto_step_list(job_cond->step_list, jobids);
+    }
     if ( (svp = hv_fetch (hv, "usage_start", strlen("usage_start"), FALSE)) ) {
 	start_time = (time_t) (SV2time_t(*svp));
     }
@@ -174,7 +166,6 @@ hv_to_job_cond(HV* hv, slurmdb_job_cond_t* job_cond)
     FETCH_LIST_FIELD(hv, job_cond, resv_list);
     FETCH_LIST_FIELD(hv, job_cond, resvid_list);
     FETCH_LIST_FIELD(hv, job_cond, state_list);
-    FETCH_LIST_FIELD(hv, job_cond, step_list);
     FETCH_LIST_FIELD(hv, job_cond, userid_list);
     FETCH_LIST_FIELD(hv, job_cond, wckey_list);
 
@@ -217,15 +208,49 @@ hv_to_user_cond(HV* hv, slurmdb_user_cond_t* user_cond)
     return 0;
 }
 
+int
+tres_rec_to_hv(slurmdb_tres_rec_t* rec, HV* hv)
+{
+	STORE_FIELD(hv, rec, alloc_secs, uint64_t);
+	STORE_FIELD(hv, rec, rec_count,  uint32_t);
+	STORE_FIELD(hv, rec, count,      uint64_t);
+	STORE_FIELD(hv, rec, id,         uint32_t);
+	STORE_FIELD(hv, rec, name,       charp);
+	STORE_FIELD(hv, rec, type,       charp);
+
+	return 0;
+}
+
 int
 report_job_grouping_to_hv(slurmdb_report_job_grouping_t* rec, HV* hv)
 {
+    AV* my_av;
+    HV* rh;
+    slurmdb_tres_rec_t *tres_rec = NULL;
+    ListIterator itr = NULL;
+
     /* FIX ME: include the job list here (is is not NULL, as
      * previously thought) */
     STORE_FIELD(hv, rec, min_size, uint32_t);
     STORE_FIELD(hv, rec, max_size, uint32_t);
     STORE_FIELD(hv, rec, count,    uint32_t);
-    STORE_FIELD(hv, rec, cpu_secs, uint64_t);
+
+    my_av = (AV*)sv_2mortal((SV*)newAV());
+    if (rec->tres_list) {
+	itr = slurm_list_iterator_create(rec->tres_list);
+	while ((tres_rec = slurm_list_next(itr))) {
+	    rh = (HV*)sv_2mortal((SV*)newHV());
+	    if (tres_rec_to_hv(tres_rec, rh) < 0) {
+		Perl_warn(aTHX_ "Failed to convert a tres_rec to a hv");
+		slurm_list_iterator_destroy(itr);
+		return -1;
+	    } else {
+		av_push(my_av, newRV((SV*)rh));
+	    }
+	}
+	slurm_list_iterator_destroy(itr);
+    }
+    hv_store_sv(hv, "tres_list", newRV((SV*)my_av));
 
     return 0;
 }
@@ -233,30 +258,50 @@ report_job_grouping_to_hv(slurmdb_report_job_grouping_t* rec, HV* hv)
 int
 report_acct_grouping_to_hv(slurmdb_report_acct_grouping_t* rec, HV* hv)
 {
-    AV* group_av = (AV*)sv_2mortal((SV*)newAV());
+    AV* my_av;
     HV* rh;
     slurmdb_report_job_grouping_t* jgr = NULL;
+    slurmdb_tres_rec_t *tres_rec = NULL;
     ListIterator itr = NULL;
 
     STORE_FIELD(hv, rec, acct,     charp);
     STORE_FIELD(hv, rec, count,    uint32_t);
-    STORE_FIELD(hv, rec, cpu_secs, uint64_t);
     STORE_FIELD(hv, rec, lft,      uint32_t);
     STORE_FIELD(hv, rec, rgt,      uint32_t);
 
+    my_av = (AV*)sv_2mortal((SV*)newAV());
     if (rec->groups) {
 	itr = slurm_list_iterator_create(rec->groups);
 	while ((jgr = slurm_list_next(itr))) {
 	    rh = (HV*)sv_2mortal((SV*)newHV());
 	    if (report_job_grouping_to_hv(jgr, rh) < 0) {
 		Perl_warn(aTHX_ "Failed to convert a report_job_grouping to a hv");
+		slurm_list_iterator_destroy(itr);
 		return -1;
 	    } else {
-		av_push(group_av, newRV((SV*)rh));
+		av_push(my_av, newRV((SV*)rh));
 	    }
 	}
+	slurm_list_iterator_destroy(itr);
     }
-    hv_store_sv(hv, "groups", newRV((SV*)group_av));
+    hv_store_sv(hv, "groups", newRV((SV*)my_av));
+
+    my_av = (AV*)sv_2mortal((SV*)newAV());
+    if (rec->tres_list) {
+	itr = slurm_list_iterator_create(rec->tres_list);
+	while ((tres_rec = slurm_list_next(itr))) {
+	    rh = (HV*)sv_2mortal((SV*)newHV());
+	    if (tres_rec_to_hv(tres_rec, rh) < 0) {
+		Perl_warn(aTHX_ "Failed to convert a tres_rec to a hv");
+		slurm_list_iterator_destroy(itr);
+		return -1;
+	    } else {
+		av_push(my_av, newRV((SV*)rh));
+	    }
+	}
+	slurm_list_iterator_destroy(itr);
+    }
+    hv_store_sv(hv, "tres_list", newRV((SV*)my_av));
 
     return 0;
 }
@@ -264,28 +309,48 @@ report_acct_grouping_to_hv(slurmdb_report_acct_grouping_t* rec, HV* hv)
 int
 report_cluster_grouping_to_hv(slurmdb_report_cluster_grouping_t* rec, HV* hv)
 {
-    AV* acct_av = (AV*)sv_2mortal((SV*)newAV());
+    AV* my_av;
     HV* rh;
     slurmdb_report_acct_grouping_t* agr = NULL;
+    slurmdb_tres_rec_t *tres_rec = NULL;
     ListIterator itr = NULL;
 
     STORE_FIELD(hv, rec, cluster,  charp);
     STORE_FIELD(hv, rec, count,    uint32_t);
-    STORE_FIELD(hv, rec, cpu_secs, uint64_t);
 
+    my_av = (AV*)sv_2mortal((SV*)newAV());
     if (rec->acct_list) {
 	itr = slurm_list_iterator_create(rec->acct_list);
 	while ((agr = slurm_list_next(itr))) {
 	    rh = (HV*)sv_2mortal((SV*)newHV());
 	    if (report_acct_grouping_to_hv(agr, rh) < 0) {
 		Perl_warn(aTHX_ "Failed to convert a report_acct_grouping to a hv");
+		slurm_list_iterator_destroy(itr);
+		return -1;
+	    } else {
+		av_push(my_av, newRV((SV*)rh));
+	    }
+	}
+	slurm_list_iterator_destroy(itr);
+    }
+    hv_store_sv(hv, "acct_list", newRV((SV*)my_av));
+
+    my_av = (AV*)sv_2mortal((SV*)newAV());
+    if (rec->tres_list) {
+	itr = slurm_list_iterator_create(rec->tres_list);
+	while ((tres_rec = slurm_list_next(itr))) {
+	    rh = (HV*)sv_2mortal((SV*)newHV());
+	    if (tres_rec_to_hv(tres_rec, rh) < 0) {
+		Perl_warn(aTHX_ "Failed to convert a tres_rec to a hv");
+		slurm_list_iterator_destroy(itr);
 		return -1;
 	    } else {
-		av_push(acct_av, newRV((SV*)rh));
+		av_push(my_av, newRV((SV*)rh));
 	    }
 	}
+	slurm_list_iterator_destroy(itr);
     }
-    hv_store_sv(hv, "acct_list", newRV((SV*)acct_av));
+    hv_store_sv(hv, "tres_list", newRV((SV*)my_av));
 
     return 0;
 }
@@ -303,11 +368,13 @@ cluster_grouping_list_to_av(List list, AV* av)
 	    rh = (HV*)sv_2mortal((SV*)newHV());
 	    if (report_cluster_grouping_to_hv(rec, rh) < 0) {
 		Perl_warn(aTHX_ "Failed to convert a report_cluster_grouping to a hv");
+		slurm_list_iterator_destroy(itr);
 		return -1;
 	    } else {
 		av_push(av, newRV((SV*)rh));
 	    }
 	}
+	slurm_list_iterator_destroy(itr);
     }
 
     return 0;
@@ -316,8 +383,9 @@ cluster_grouping_list_to_av(List list, AV* av)
 int
 cluster_accounting_rec_to_hv(slurmdb_cluster_accounting_rec_t* ar, HV* hv)
 {
+    HV*   rh;
+
     STORE_FIELD(hv, ar, alloc_secs,   uint64_t);
-    STORE_FIELD(hv, ar, cpu_count,    uint32_t);
     STORE_FIELD(hv, ar, down_secs,    uint64_t);
     STORE_FIELD(hv, ar, idle_secs,    uint64_t);
     STORE_FIELD(hv, ar, over_secs,    uint64_t);
@@ -325,54 +393,86 @@ cluster_accounting_rec_to_hv(slurmdb_cluster_accounting_rec_t* ar, HV* hv)
     STORE_FIELD(hv, ar, period_start, time_t);
     STORE_FIELD(hv, ar, resv_secs,    uint64_t);
 
+    rh = (HV*)sv_2mortal((SV*)newHV());
+    if (tres_rec_to_hv(&ar->tres_rec, rh) < 0) {
+	    Perl_warn(aTHX_ "Failed to convert a tres_rec to a hv");
+	    return -1;
+    }
+    hv_store_sv(hv, "tres_rec", newRV((SV*)rh));
+
     return 0;
 }
 
 int
 cluster_rec_to_hv(slurmdb_cluster_rec_t* rec, HV* hv)
 {
-    AV* acc_av = (AV*)sv_2mortal((SV*)newAV());
+    AV* my_av;
     HV* rh;
     ListIterator itr = NULL;
     slurmdb_cluster_accounting_rec_t* ar = NULL;
 
+    my_av = (AV*)sv_2mortal((SV*)newAV());
     if (rec->accounting_list) {
 	itr = slurm_list_iterator_create(rec->accounting_list);
 	while ((ar = slurm_list_next(itr))) {
 	    rh = (HV*)sv_2mortal((SV*)newHV());
 	    if (cluster_accounting_rec_to_hv(ar, rh) < 0) {
 		Perl_warn(aTHX_ "Failed to convert a cluster_accounting_rec to a hv");
+		slurm_list_iterator_destroy(itr);
 		return -1;
 	    } else {
-		av_push(acc_av, newRV((SV*)rh));
+		av_push(my_av, newRV((SV*)rh));
 	    }
 	}
+	slurm_list_iterator_destroy(itr);
     }
-    hv_store_sv(hv, "accounting_list", newRV((SV*)acc_av));
+    hv_store_sv(hv, "accounting_list", newRV((SV*)my_av));
 
     STORE_FIELD(hv, rec, classification, uint16_t);
     STORE_FIELD(hv, rec, control_host,   charp);
     STORE_FIELD(hv, rec, control_port,   uint32_t);
-    STORE_FIELD(hv, rec, cpu_count,      uint32_t);
     STORE_FIELD(hv, rec, dimensions,     uint16_t);
     STORE_FIELD(hv, rec, flags,          uint32_t);
     STORE_FIELD(hv, rec, name,           charp);
     STORE_FIELD(hv, rec, nodes,          charp);
     STORE_FIELD(hv, rec, plugin_id_select, uint32_t);
-    /* slurmdb_association_rec_t* root_assoc; */
+    /* slurmdb_assoc_rec_t* root_assoc; */
     STORE_FIELD(hv, rec, rpc_version,    uint16_t);
+    STORE_FIELD(hv, rec, tres_str,          charp);
 
     return 0;
 }
 
 int
-report_assoc_rec_to_hv(slurmdb_report_assoc_rec_t* ar, HV* hv)
+report_assoc_rec_to_hv(slurmdb_report_assoc_rec_t* rec, HV* hv)
 {
-    STORE_FIELD(hv, ar, acct,        charp);
-    STORE_FIELD(hv, ar, cluster,     charp);
-    STORE_FIELD(hv, ar, cpu_secs,    uint64_t);
-    STORE_FIELD(hv, ar, parent_acct, charp);
-    STORE_FIELD(hv, ar, user,        charp);
+    AV* my_av;
+    HV* rh;
+    slurmdb_tres_rec_t *tres_rec = NULL;
+    ListIterator itr = NULL;
+
+    STORE_FIELD(hv, rec, acct,        charp);
+    STORE_FIELD(hv, rec, cluster,     charp);
+    STORE_FIELD(hv, rec, parent_acct, charp);
+
+    my_av = (AV*)sv_2mortal((SV*)newAV());
+    if (rec->tres_list) {
+	itr = slurm_list_iterator_create(rec->tres_list);
+	while ((tres_rec = slurm_list_next(itr))) {
+	    rh = (HV*)sv_2mortal((SV*)newHV());
+	    if (tres_rec_to_hv(tres_rec, rh) < 0) {
+		Perl_warn(aTHX_ "Failed to convert a tres_rec to a hv");
+		slurm_list_iterator_destroy(itr);
+		return -1;
+	    } else {
+		av_push(my_av, newRV((SV*)rh));
+	    }
+	}
+	slurm_list_iterator_destroy(itr);
+    }
+    hv_store_sv(hv, "tres_list", newRV((SV*)my_av));
+
+    STORE_FIELD(hv, rec, user,        charp);
 
     return 0;
 }
@@ -380,44 +480,68 @@ report_assoc_rec_to_hv(slurmdb_report_assoc_rec_t* ar, HV* hv)
 int
 report_cluster_rec_to_hv(slurmdb_report_cluster_rec_t* rec, HV* hv)
 {
-    AV* acc_av = (AV*)sv_2mortal((SV*)newAV());
-    AV* usr_av = (AV*)sv_2mortal((SV*)newAV());
+    AV* my_av;
     HV* rh;
     slurmdb_report_assoc_rec_t* ar = NULL;
     slurmdb_report_user_rec_t* ur = NULL;
+    slurmdb_tres_rec_t *tres_rec = NULL;
     ListIterator itr = NULL;
 
+    /* FIXME: do the accounting_list (add function to parse
+     * slurmdb_accounting_rec_t) */
+
+    my_av = (AV*)sv_2mortal((SV*)newAV());
     if (rec->assoc_list) {
 	itr = slurm_list_iterator_create(rec->assoc_list);
 	while ((ar = slurm_list_next(itr))) {
 	    rh = (HV*)sv_2mortal((SV*)newHV());
 	    if (report_assoc_rec_to_hv(ar, rh) < 0) {
 		Perl_warn(aTHX_ "Failed to convert a report_assoc_rec to a hv");
+		slurm_list_iterator_destroy(itr);
 		return -1;
 	    } else {
-		av_push(acc_av, newRV((SV*)rh));
+		av_push(my_av, newRV((SV*)rh));
 	    }
 	}
+	slurm_list_iterator_destroy(itr);
     }
-    hv_store_sv(hv, "assoc_list", newRV((SV*)acc_av));
+    hv_store_sv(hv, "assoc_list", newRV((SV*)my_av));
 
-    STORE_FIELD(hv, rec, cpu_count, uint32_t);
-    STORE_FIELD(hv, rec, cpu_secs,  uint64_t);
     STORE_FIELD(hv, rec, name,      charp);
 
+    my_av = (AV*)sv_2mortal((SV*)newAV());
+    if (rec->tres_list) {
+	itr = slurm_list_iterator_create(rec->tres_list);
+	while ((tres_rec = slurm_list_next(itr))) {
+	    rh = (HV*)sv_2mortal((SV*)newHV());
+	    if (tres_rec_to_hv(tres_rec, rh) < 0) {
+		Perl_warn(aTHX_ "Failed to convert a tres_rec to a hv");
+		slurm_list_iterator_destroy(itr);
+		return -1;
+	    } else {
+		av_push(my_av, newRV((SV*)rh));
+	    }
+	}
+	slurm_list_iterator_destroy(itr);
+    }
+    hv_store_sv(hv, "tres_list", newRV((SV*)my_av));
+
+    my_av = (AV*)sv_2mortal((SV*)newAV());
     if (rec->user_list) {
 	itr = slurm_list_iterator_create(rec->user_list);
 	while ((ur = slurm_list_next(itr))) {
 	    rh = (HV*)sv_2mortal((SV*)newHV());
 	    if (report_user_rec_to_hv(ur, rh) < 0) {
 		Perl_warn(aTHX_ "Failed to convert a report_user_rec to a hv");
+		slurm_list_iterator_destroy(itr);
 		return -1;
 	    } else {
-		av_push(usr_av, newRV((SV*)rh));
+		av_push(my_av, newRV((SV*)rh));
 	    }
 	}
+	slurm_list_iterator_destroy(itr);
     }
-    hv_store_sv(hv, "user_list", newRV((SV*)usr_av));
+    hv_store_sv(hv, "user_list", newRV((SV*)my_av));
 
     return 0;
 }
@@ -435,11 +559,13 @@ report_cluster_rec_list_to_av(List list, AV* av)
 	    rh = (HV*)sv_2mortal((SV*)newHV());
 	    if (report_cluster_rec_to_hv(rec, rh) < 0) {
 		Perl_warn(aTHX_ "Failed to convert a report_cluster_rec to a hv");
+		slurm_list_iterator_destroy(itr);
 		return -1;
 	    } else {
 		av_push(av, newRV((SV*)rh));
 	    }
 	}
+	slurm_list_iterator_destroy(itr);
     }
 
     return 0;
@@ -448,39 +574,207 @@ report_cluster_rec_list_to_av(List list, AV* av)
 int
 report_user_rec_to_hv(slurmdb_report_user_rec_t* rec, HV* hv)
 {
-    AV*   acc_av = (AV*)sv_2mortal((SV*)newAV());
-    AV*   char_av = (AV*)sv_2mortal((SV*)newAV());
+    AV*   my_av;
     HV*   rh;
     char* acct;
     slurmdb_report_assoc_rec_t* ar = NULL;
+    slurmdb_tres_rec_t *tres_rec = NULL;
     ListIterator itr = NULL;
 
+    my_av = (AV*)sv_2mortal((SV*)newAV());
     if (rec->acct_list) {
 	itr = slurm_list_iterator_create(rec->acct_list);
 	while ((acct = slurm_list_next(itr))) {
-	    av_push(char_av, newSVpv(acct, strlen(acct)));
+	    av_push(my_av, newSVpv(acct, strlen(acct)));
 	}
+	slurm_list_iterator_destroy(itr);
     }
-    hv_store_sv(hv, "acct_list", newRV((SV*)char_av));
+    hv_store_sv(hv, "acct_list", newRV((SV*)my_av));
 
+    my_av = (AV*)sv_2mortal((SV*)newAV());
     if (rec->assoc_list) {
 	itr = slurm_list_iterator_create(rec->assoc_list);
 	while ((ar = slurm_list_next(itr))) {
 	    rh = (HV*)sv_2mortal((SV*)newHV());
 	    if (report_assoc_rec_to_hv(ar, rh) < 0) {
 		Perl_warn(aTHX_ "Failed to convert a report_assoc_rec to a hv");
+		slurm_list_iterator_destroy(itr);
 		return -1;
 	    } else {
-		av_push(acc_av, newRV((SV*)rh));
+		av_push(my_av, newRV((SV*)rh));
 	    }
 	}
+	slurm_list_iterator_destroy(itr);
     }
-    hv_store_sv(hv, "assoc_list", newRV((SV*)acc_av));
+    hv_store_sv(hv, "assoc_list", newRV((SV*)my_av));
 
     STORE_FIELD(hv, rec, acct,     charp);
-    STORE_FIELD(hv, rec, cpu_secs, uint64_t);
     STORE_FIELD(hv, rec, name,     charp);
+
+    my_av = (AV*)sv_2mortal((SV*)newAV());
+    if (rec->tres_list) {
+	itr = slurm_list_iterator_create(rec->tres_list);
+	while ((tres_rec = slurm_list_next(itr))) {
+	    rh = (HV*)sv_2mortal((SV*)newHV());
+	    if (tres_rec_to_hv(tres_rec, rh) < 0) {
+		Perl_warn(aTHX_ "Failed to convert a tres_rec to a hv");
+		slurm_list_iterator_destroy(itr);
+		return -1;
+	    } else {
+		av_push(my_av, newRV((SV*)rh));
+	    }
+	}
+	slurm_list_iterator_destroy(itr);
+    }
+    hv_store_sv(hv, "tres_list", newRV((SV*)my_av));
+
     STORE_FIELD(hv, rec, uid,      uid_t);
 
     return 0;
 }
+
+int
+stats_to_hv(slurmdb_stats_t *stats, HV* hv)
+{
+    STORE_FIELD(hv, stats, act_cpufreq,           double);
+    STORE_FIELD(hv, stats, cpu_ave,               double);
+    STORE_FIELD(hv, stats, consumed_energy,       double);
+    STORE_FIELD(hv, stats, cpu_min,               uint32_t);
+    STORE_FIELD(hv, stats, cpu_min_nodeid,        uint32_t);
+    STORE_FIELD(hv, stats, cpu_min_taskid,        uint32_t);
+    STORE_FIELD(hv, stats, disk_read_ave,         double);
+    STORE_FIELD(hv, stats, disk_read_max,         double);
+    STORE_FIELD(hv, stats, disk_read_max_nodeid,  uint32_t);
+    STORE_FIELD(hv, stats, disk_read_max_taskid,  uint32_t);
+    STORE_FIELD(hv, stats, disk_write_ave,        double);
+    STORE_FIELD(hv, stats, disk_write_max,        double);
+    STORE_FIELD(hv, stats, disk_write_max_nodeid, uint32_t);
+    STORE_FIELD(hv, stats, disk_write_max_taskid, uint32_t);
+    STORE_FIELD(hv, stats, pages_ave,             double);
+    STORE_FIELD(hv, stats, pages_max,             uint64_t);
+    STORE_FIELD(hv, stats, pages_max_nodeid,      uint32_t);
+    STORE_FIELD(hv, stats, pages_max_taskid,      uint32_t);
+    STORE_FIELD(hv, stats, rss_ave,               double);
+    STORE_FIELD(hv, stats, rss_max,               uint64_t);
+    STORE_FIELD(hv, stats, rss_max_nodeid,        uint32_t);
+    STORE_FIELD(hv, stats, rss_max_taskid,        uint32_t);
+    STORE_FIELD(hv, stats, vsize_ave,             double);
+    STORE_FIELD(hv, stats, vsize_max,             uint64_t);
+    STORE_FIELD(hv, stats, vsize_max_nodeid,      uint32_t);
+    STORE_FIELD(hv, stats, vsize_max_taskid,      uint32_t);
+
+    return 0;
+}
+
+int
+step_rec_to_hv(slurmdb_step_rec_t *rec, HV* hv)
+{
+    HV* stats_hv = (HV*)sv_2mortal((SV*)newHV());
+
+    stats_to_hv(&rec->stats, stats_hv);
+    hv_store_sv(hv, "stats", newRV((SV*)stats_hv));
+
+    STORE_FIELD(hv, rec, elapsed,         uint32_t);
+    STORE_FIELD(hv, rec, end,             time_t);
+    STORE_FIELD(hv, rec, exitcode,        int32_t);
+    STORE_FIELD(hv, rec, nnodes,          uint32_t);
+    STORE_FIELD(hv, rec, nodes,           charp);
+    STORE_FIELD(hv, rec, ntasks,          uint32_t);
+    STORE_FIELD(hv, rec, pid_str,         charp);
+    STORE_FIELD(hv, rec, req_cpufreq_min, uint32_t);
+    STORE_FIELD(hv, rec, req_cpufreq_max, uint32_t);
+    STORE_FIELD(hv, rec, req_cpufreq_gov, uint32_t);
+    STORE_FIELD(hv, rec, requid,          uint32_t);
+    STORE_FIELD(hv, rec, start,           time_t);
+    STORE_FIELD(hv, rec, state,           uint32_t);
+    STORE_FIELD(hv, rec, stepid,          uint32_t);
+    STORE_FIELD(hv, rec, stepname,        charp);
+    STORE_FIELD(hv, rec, suspended,       uint32_t);
+    STORE_FIELD(hv, rec, sys_cpu_sec,     uint32_t);
+    STORE_FIELD(hv, rec, sys_cpu_usec,    uint32_t);
+    STORE_FIELD(hv, rec, task_dist,       uint16_t);
+    STORE_FIELD(hv, rec, tot_cpu_sec,     uint32_t);
+    STORE_FIELD(hv, rec, tot_cpu_usec,    uint32_t);
+    STORE_FIELD(hv, rec, tres_alloc_str,  charp);
+    STORE_FIELD(hv, rec, user_cpu_sec,    uint32_t);
+    STORE_FIELD(hv, rec, user_cpu_usec,   uint32_t);
+
+    return 0;
+}
+
+int
+job_rec_to_hv(slurmdb_job_rec_t* rec, HV* hv)
+{
+    slurmdb_step_rec_t *step;
+    ListIterator itr = NULL;
+    AV* steps_av = (AV*)sv_2mortal((SV*)newAV());
+    HV* stats_hv = (HV*)sv_2mortal((SV*)newHV());
+    HV* step_hv;
+
+    stats_to_hv(&rec->stats, stats_hv);
+    hv_store_sv(hv, "stats", newRV((SV*)stats_hv));
+
+    if (rec->steps) {
+	itr = slurm_list_iterator_create(rec->steps);
+	while ((step = slurm_list_next(itr))) {
+	    step_hv = (HV*)sv_2mortal((SV*)newHV());
+	    step_rec_to_hv(step, step_hv);
+	    av_push(steps_av, newRV((SV*)step_hv));
+	}
+	slurm_list_iterator_destroy(itr);
+    }
+    hv_store_sv(hv, "steps", newRV((SV*)steps_av));
+
+    STORE_FIELD(hv, rec, account,         charp);
+    STORE_FIELD(hv, rec, alloc_gres,      charp);
+    STORE_FIELD(hv, rec, alloc_nodes,     uint32_t);
+    STORE_FIELD(hv, rec, array_job_id,    uint32_t);
+    STORE_FIELD(hv, rec, array_max_tasks, uint32_t);
+    STORE_FIELD(hv, rec, array_task_id,   uint32_t);
+    STORE_FIELD(hv, rec, array_task_str,  charp);
+    STORE_FIELD(hv, rec, associd,         uint32_t);
+    STORE_FIELD(hv, rec, blockid,         charp);
+    STORE_FIELD(hv, rec, cluster,         charp);
+    STORE_FIELD(hv, rec, derived_ec,      uint32_t);
+    STORE_FIELD(hv, rec, derived_es,      charp);
+    STORE_FIELD(hv, rec, elapsed,         uint32_t);
+    STORE_FIELD(hv, rec, eligible,        time_t);
+    STORE_FIELD(hv, rec, end,             time_t);
+    STORE_FIELD(hv, rec, exitcode,        uint32_t);
+    /*STORE_FIELD(hv, rec, first_step_ptr,  void*);*/
+    STORE_FIELD(hv, rec, gid,             uint32_t);
+    STORE_FIELD(hv, rec, jobid,           uint32_t);
+    STORE_FIELD(hv, rec, jobname,         charp);
+    STORE_FIELD(hv, rec, lft,             uint32_t);
+    STORE_FIELD(hv, rec, partition,       charp);
+    STORE_FIELD(hv, rec, nodes,           charp);
+    STORE_FIELD(hv, rec, priority,        uint32_t);
+    STORE_FIELD(hv, rec, qosid,           uint32_t);
+    STORE_FIELD(hv, rec, req_cpus,        uint32_t);
+    STORE_FIELD(hv, rec, req_gres,        charp);
+    STORE_FIELD(hv, rec, req_mem,         uint32_t);
+    STORE_FIELD(hv, rec, requid,          uint32_t);
+    STORE_FIELD(hv, rec, resvid,          uint32_t);
+    STORE_FIELD(hv, rec, resv_name,       charp);
+    STORE_FIELD(hv, rec, show_full,       uint32_t);
+    STORE_FIELD(hv, rec, start,           time_t);
+    STORE_FIELD(hv, rec, state,           uint32_t);
+    STORE_FIELD(hv, rec, submit,          time_t);
+    STORE_FIELD(hv, rec, suspended,       uint32_t);
+    STORE_FIELD(hv, rec, sys_cpu_sec,     uint32_t);
+    STORE_FIELD(hv, rec, sys_cpu_usec,    uint32_t);
+    STORE_FIELD(hv, rec, timelimit,       uint32_t);
+    STORE_FIELD(hv, rec, tot_cpu_sec,     uint32_t);
+    STORE_FIELD(hv, rec, tot_cpu_usec,    uint32_t);
+    STORE_FIELD(hv, rec, track_steps,     uint16_t);
+    STORE_FIELD(hv, rec, tres_alloc_str,  charp);
+    STORE_FIELD(hv, rec, uid,             uint32_t);
+    STORE_FIELD(hv, rec, used_gres,       charp);
+    STORE_FIELD(hv, rec, user,            charp);
+    STORE_FIELD(hv, rec, user_cpu_sec,    uint32_t);
+    STORE_FIELD(hv, rec, user_cpu_usec,   uint32_t);
+    STORE_FIELD(hv, rec, wckey,           charp);
+    STORE_FIELD(hv, rec, wckeyid,         uint32_t);
+
+    return 0;
+}
diff --git a/contribs/perlapi/libslurmdb/perl/slurmdb-perl.h b/contribs/perlapi/libslurmdb/perl/slurmdb-perl.h
index bd39af511..df19a0a10 100644
--- a/contribs/perlapi/libslurmdb/perl/slurmdb-perl.h
+++ b/contribs/perlapi/libslurmdb/perl/slurmdb-perl.h
@@ -33,15 +33,18 @@
 
 
 extern int av_to_cluster_grouping_list(AV* av, List grouping_list);
-extern int hv_to_assoc_cond(HV* hv, slurmdb_association_cond_t* assoc_cond);
+extern int hv_to_assoc_cond(HV* hv, slurmdb_assoc_cond_t* assoc_cond);
 extern int hv_to_cluster_cond(HV* hv, slurmdb_cluster_cond_t* cluster_cond);
 extern int hv_to_job_cond(HV* hv, slurmdb_job_cond_t* job_cond);
 extern int hv_to_user_cond(HV* hv, slurmdb_user_cond_t* user_cond);
+extern int hv_to_qos_cond(HV* hv, slurmdb_qos_cond_t* qos_cond);
 
 extern int cluster_grouping_list_to_av(List list, AV* av);
 extern int cluster_rec_to_hv(slurmdb_cluster_rec_t *rec, HV* hv);
 extern int report_cluster_rec_list_to_av(List list, AV* av);
 extern int report_user_rec_to_hv(slurmdb_report_user_rec_t *rec, HV* hv);
+extern int job_rec_to_hv(slurmdb_job_rec_t *rec, HV* hv);
+extern int qos_rec_to_hv(slurmdb_qos_rec_t *rec, HV* hv, List all_qos);
 
 
 #endif /* _SLURMDB_PERL_H */
diff --git a/contribs/perlapi/libslurmdb/perl/t/06-jobs_get.t b/contribs/perlapi/libslurmdb/perl/t/06-jobs_get.t
new file mode 100755
index 000000000..cd73c602b
--- /dev/null
+++ b/contribs/perlapi/libslurmdb/perl/t/06-jobs_get.t
@@ -0,0 +1,37 @@
+#!/usr/bin/perl -T
+# Before `make install' is performed this script should be runnable with
+# `make test'. After `make install' it should work as `perl Slurmdb.t'
+use strict;
+use warnings;
+
+#########################
+
+use Test::More tests => 2;
+BEGIN { use_ok('Slurmdb') };
+use Data::Dumper;
+
+#########################
+
+# Insert your test code below, the Test::More module is use()ed here so read
+# its man page ( perldoc Test::More ) for help writing this test script.
+
+my $db_conn = Slurmdb::connection_get();
+
+my %job_cond = ();
+#$job_cond{'usage_start'} = 0;
+#$job_cond{'usage_end'}   = 0;
+#$job_cond{acct_list}     = ["blah"];
+#$job_cond{userid_list}   = [1003];
+#$job_cond{groupid_list}  = [500];
+#$job_cond{jobname_list}  = ["hostname","pwd"];
+#my @states = ("CA", "CD", "FAILED");
+#my @state_nums = map {$slurm->job_state_num($_)} @states;
+#$job_cond{state_list} = \@state_nums;
+#$job_cond{step_list} = "2547,2549,2550.1";
+$job_cond{'without_usage_truncation'} = 1;
+
+my $jobs = Slurmdb::jobs_get($db_conn, \%job_cond);
+print Dumper($jobs);
+
+my $rc = Slurmdb::connection_close(\$db_conn);
+ok( $rc == 0, 'connection_close' );
diff --git a/contribs/perlapi/libslurmdb/perl/t/07-qos_get.t b/contribs/perlapi/libslurmdb/perl/t/07-qos_get.t
new file mode 100755
index 000000000..1653cf6e6
--- /dev/null
+++ b/contribs/perlapi/libslurmdb/perl/t/07-qos_get.t
@@ -0,0 +1,30 @@
+#!/usr/bin/perl -T
+# Before `make install' is performed this script should be runnable with
+# `make test'. After `make install' it should work as `perl Slurmdb.t'
+use strict;
+use warnings;
+
+#########################
+
+use Test::More tests => 2;
+BEGIN { use_ok('Slurmdb') };
+use Data::Dumper;
+
+#########################
+
+# Insert your test code below, the Test::More module is use()ed here so read
+# its man page ( perldoc Test::More ) for help writing this test script.
+
+my $db_conn = Slurmdb::connection_get();
+
+my %qos_cond = ();
+#$qos_cond{description_list} = ["general","other"];
+#$qos_cond{id_list}          = ["1","2","14"];
+#$qos_cond{name_list}        = ["normal","special"];
+#$qos_cond{with_deleted}     = "1";
+
+my $qoss = Slurmdb::qos_get($db_conn, \%qos_cond);
+print Dumper($qoss);
+
+my $rc = Slurmdb::connection_close(\$db_conn);
+ok( $rc == 0, 'connection_close' );
diff --git a/contribs/phpext/Makefile.in b/contribs/phpext/Makefile.in
index 26350a868..1c241ff4f 100644
--- a/contribs/phpext/Makefile.in
+++ b/contribs/phpext/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -189,6 +192,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -238,8 +243,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -258,6 +267,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -301,6 +313,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -324,6 +337,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/contribs/pmi2/Makefile.am b/contribs/pmi2/Makefile.am
index 8ebff5144..90f305b5a 100644
--- a/contribs/pmi2/Makefile.am
+++ b/contribs/pmi2/Makefile.am
@@ -4,6 +4,7 @@
 AUTOMAKE_OPTIONS = foreign
 
 pkginclude_HEADERS = slurm/pmi2.h
+noinst_HEADERS = pmi2_util.h
 
 if WITH_GNU_LD
 PMI2_VERSION_SCRIPT = \
@@ -26,6 +27,7 @@ libpmi2_la_LDFLAGS = $(LIB_LDFLAGS) -version-info $(libpmi2_current):$(libpmi2_r
 $(PMI2_VERSION_SCRIPT) :
 	(echo "{ global:"; \
 	echo "   PMI2_*;"; \
+	echo "   PMIX_*;"; \
 	echo "  local: *;"; \
 	echo "};") > $(PMI2_VERSION_SCRIPT)
 
diff --git a/contribs/pmi2/Makefile.in b/contribs/pmi2/Makefile.in
index 0ce0a3a26..4ebf369e7 100644
--- a/contribs/pmi2/Makefile.in
+++ b/contribs/pmi2/Makefile.in
@@ -85,7 +85,8 @@ host_triplet = @host@
 target_triplet = @target@
 subdir = contribs/pmi2
 DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
-	$(top_srcdir)/auxdir/depcomp $(pkginclude_HEADERS)
+	$(top_srcdir)/auxdir/depcomp $(noinst_HEADERS) \
+	$(pkginclude_HEADERS) README
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
 am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/ax_pthread.m4 \
@@ -102,6 +103,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -110,10 +112,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -126,7 +130,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -213,7 +217,7 @@ am__can_run_installinfo = \
     n|no|NO) false;; \
     *) (install-info --version) >/dev/null 2>&1;; \
   esac
-HEADERS = $(pkginclude_HEADERS)
+HEADERS = $(noinst_HEADERS) $(pkginclude_HEADERS)
 am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
 # Read a list of newline-separated strings from the standard input,
 # and print each of them once, without duplicates.  Input order is
@@ -275,6 +279,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -324,8 +330,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -344,6 +354,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -387,6 +400,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -410,6 +424,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -473,6 +488,7 @@ top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 pkginclude_HEADERS = slurm/pmi2.h
+noinst_HEADERS = pmi2_util.h
 @WITH_GNU_LD_TRUE@PMI2_VERSION_SCRIPT = \
 @WITH_GNU_LD_TRUE@        pmi2_version.map
 
@@ -842,6 +858,7 @@ uninstall-am: uninstall-libLTLIBRARIES uninstall-pkgincludeHEADERS
 $(PMI2_VERSION_SCRIPT) :
 	(echo "{ global:"; \
 	echo "   PMI2_*;"; \
+	echo "   PMIX_*;"; \
 	echo "  local: *;"; \
 	echo "};") > $(PMI2_VERSION_SCRIPT)
 
diff --git a/contribs/pmi2/README b/contribs/pmi2/README
new file mode 100644
index 000000000..1032c6e8d
--- /dev/null
+++ b/contribs/pmi2/README
@@ -0,0 +1,10 @@
+#
+# Instructions how to compile the example programs.
+#
+#export SLURM_ROOT=slurm_install
+# for example SLURM_ROOT=/home/david/clusters/master/linux
+#
+# gcc -g -O0 -o testpmi2 testpmi2.c -I$SLURM_ROOT/include /home/david/clusters/master/linux/lib/libpmi2.so
+#
+# gcc -g -O0 -o testpmixring testpmixring.c -I$SLURM_ROOT/include /home/david/clusters/master/linux/lib/libpmi2.so
+#
diff --git a/contribs/pmi2/pmi2_api.c b/contribs/pmi2/pmi2_api.c
index d4fad3627..2789fbcee 100644
--- a/contribs/pmi2/pmi2_api.c
+++ b/contribs/pmi2/pmi2_api.c
@@ -207,6 +207,9 @@ int PMI2_Init(int *spawned, int *size, int *rank, int *appnum)
 		PMI2_size = 1;
 		PMI2_rank = 0;
 		*spawned = 0;
+		*size = PMI2_size;
+		*rank = PMI2_rank;
+		*appnum = -1;
 
 		PMI2_initialized = SINGLETON_INIT_BUT_NO_PM;
 		goto fn_exit;
@@ -641,6 +644,57 @@ fn_fail:
     goto fn_exit;
 }
 
+int PMIX_Ring(const char value[], int *rank, int *ranks, char left[], char right[], int maxvalue)
+{
+    int pmi2_errno = PMI2_SUCCESS;
+    PMI2_Command cmd = {0};
+    int rc;
+    const char *errmsg;
+    int found;
+    const char *kvsvalue;
+    int kvsvallen;
+
+    PMI2U_printf("[BEGIN PMI2_Ring]");
+
+    /* send message: cmd=ring_in, count=1, left=value, right=value */
+    pmi2_errno = PMIi_WriteSimpleCommandStr(PMI2_fd, &cmd, RING_CMD,
+	RING_COUNT_KEY,   "1",
+	RING_LEFT_KEY,  value,
+	RING_RIGHT_KEY, value,
+	NULL);
+    if (pmi2_errno) PMI2U_ERR_POP(pmi2_errno);
+
+    /* wait for reply: cmd=ring_out, rc=0|1, count=rank, left=leftval, right=rightval */
+    pmi2_errno = PMIi_ReadCommandExp(PMI2_fd, &cmd, RINGRESP_CMD, &rc, &errmsg);
+    if (pmi2_errno) PMI2U_ERR_SETANDJUMP(1, pmi2_errno, "PMIi_ReadCommandExp");
+    PMI2U_ERR_CHKANDJUMP(rc, pmi2_errno, PMI2_ERR_OTHER, "**pmi2_ring %s", errmsg ? errmsg : "unknown");
+
+    /* get our rank from the count key */
+    found = getvalint(cmd.pairs, cmd.nPairs, RING_COUNT_KEY, rank);
+    PMI2U_ERR_CHKANDJUMP(found != 1, pmi2_errno, PMI2_ERR_OTHER, "**intern");
+
+    /* set size of ring (just number of procs in job) */
+    *ranks = PMI2_size;
+
+    /* lookup left value and copy to caller's buffer */
+    found = getval(cmd.pairs, cmd.nPairs, RING_LEFT_KEY, &kvsvalue, &kvsvallen);
+    PMI2U_ERR_CHKANDJUMP(found != 1, pmi2_errno, PMI2_ERR_OTHER, "**intern");
+    MPIU_Strncpy(left, kvsvalue, maxvalue);
+
+    /* lookup right value and copy to caller's buffer */
+    found = getval(cmd.pairs, cmd.nPairs, RING_RIGHT_KEY, &kvsvalue, &kvsvallen);
+    PMI2U_ERR_CHKANDJUMP(found != 1, pmi2_errno, PMI2_ERR_OTHER, "**intern");
+    MPIU_Strncpy(right, kvsvalue, maxvalue);
+
+fn_exit:
+    free(cmd.command);
+    freepairs(cmd.pairs, cmd.nPairs);
+    PMI2U_printf("[END PMI2_Ring]");
+    return pmi2_errno;
+fn_fail:
+    goto fn_exit;
+}
+
 int PMI2_KVS_Put(const char key[], const char value[])
 {
     int pmi2_errno = PMI2_SUCCESS;
diff --git a/contribs/pmi2/slurm/pmi2.h b/contribs/pmi2/slurm/pmi2.h
index ddcc36c88..59bdad6be 100644
--- a/contribs/pmi2/slurm/pmi2.h
+++ b/contribs/pmi2/slurm/pmi2.h
@@ -52,6 +52,8 @@ static const char NAMEUNPUBLISH_CMD[]     = "name-unpublish";
 static const char NAMEUNPUBLISHRESP_CMD[] = "name-unpublish-response";
 static const char NAMELOOKUP_CMD[]        = "name-lookup";
 static const char NAMELOOKUPRESP_CMD[]    = "name-lookup-response";
+static const char RING_CMD[]              = "ring";
+static const char RINGRESP_CMD[]          = "ring-response";
 
 static const char PMIJOBID_KEY[]          = "pmijobid";
 static const char PMIRANK_KEY[]           = "pmirank";
@@ -81,6 +83,9 @@ static const char THRID_KEY[]             = "thrid";
 static const char INFOKEYCOUNT_KEY[]      = "infokeycount";
 static const char INFOKEY_KEY[]           = "infokey%d";
 static const char INFOVAL_KEY[]           = "infoval%d";
+static const char RING_COUNT_KEY[]        = "ring-count";
+static const char RING_LEFT_KEY[]         = "ring-left";
+static const char RING_RIGHT_KEY[]        = "ring-right";
 
 static const char TRUE_VAL[]              = "TRUE";
 static const char FALSE_VAL[]             = "FALSE";
@@ -412,6 +417,33 @@ int PMI2_Job_Connect(const char jobid[], PMI2_Connect_comm_t *conn);
 @*/
 int PMI2_Job_Disconnect(const char jobid[]);
 
+/*@
+  PMIX_Ring - execute ring exchange over processes in group
+
+  Input Parameters:
+  + value    - input string
+  - maxvalue - max size of input and output strings
+ 
+  Output Parameters:
+  + rank  - returns caller's rank within ring
+  - ranks - returns number of procs within ring
+  - left  - buffer to receive value provided by (rank - 1) % ranks
+  - right - buffer to receive value provided by (rank + 1) % ranks
+ 
+  Return values:
+  Returns 'MPI_SUCCESS' on success and an MPI error code on failure.
+ 
+  Notes:
+  This function is collective, but not necessarily synchronous,
+  across all processes in the process group to which the calling
+  process belongs.  All processes in the group must call this
+  function, but a process may return before all processes have called
+  the function.
+
+@*/
+#define HAVE_PMIX_RING 1 /* so one can conditionally compile with this function */
+int PMIX_Ring(const char value[], int *rank, int *ranks, char left[], char right[], int maxvalue);
+
 /*@
   PMI2_KVS_Put - put a key/value pair in the keyval space for this job
 
diff --git a/contribs/pmi2/testpmi2.c b/contribs/pmi2/testpmi2.c
index 6007eae4a..7042337ed 100644
--- a/contribs/pmi2/testpmi2.c
+++ b/contribs/pmi2/testpmi2.c
@@ -1,9 +1,9 @@
-
+/*  Copyright (C) 2014 SchedMD
+ */
 #include <stdio.h>
 #include <time.h>
 #include <stdlib.h>
 #include <string.h>
-#include <mpi.h>
 #include <slurm/pmi2.h>
 #include <sys/time.h>
 
@@ -12,100 +12,100 @@ static char *mrand(int, int);
 int
 main(int argc, char **argv)
 {
-	int rank;
-	int size;
-	int appnum;
-	int spawned;
-	int flag;
-	int len;
-	int i;
-	struct timeval tv;
-	struct timeval tv2;
-	char jobid[128];
-	char key[128];
-	char val[128];
-	char buf[128];
-
-	{
-		int x = 1;
-		while (x == 0) {
-			sleep(2);
-		}
-	}
-
-	gettimeofday(&tv, NULL);
-	srand(tv.tv_sec);
-
-	PMI2_Init(&spawned, &size, &rank, &appnum);
-
-	PMI2_Job_GetId(jobid, sizeof(buf));
-
-	memset(val, 0, sizeof(val));
-	PMI2_Info_GetJobAttr("mpi_reserved_ports",
-						 val,
-						 PMI2_MAX_ATTRVALUE,
-						 &flag);
-
-	sprintf(key, "mpi_reserved_ports");
-	PMI2_KVS_Put(key, val);
-
-	memset(val, 0, sizeof(val));
-	sprintf(buf, "PMI_netinfo_of_task");
-	PMI2_Info_GetJobAttr(buf,
-						 val,
-						 PMI2_MAX_ATTRVALUE,
-						 &flag);
-	sprintf(key, buf);
-	PMI2_KVS_Put(key, val);
-
-	memset(val, 0, sizeof(val));
-	sprintf(key, "david@%d", rank);
-	sprintf(val, "%s", mrand(97, 122));
-	PMI2_KVS_Put(key, val);
-
-	PMI2_KVS_Fence();
-
-	for (i = 0; i < size; i++) {
-
-		memset(val, 0, sizeof(val));
-		sprintf(key, "PMI_netinfo_of_task");
-		PMI2_KVS_Get(jobid,
-					 PMI2_ID_NULL,
-					 key,
-					 val,
-					 sizeof(val),
-					 &len);
-		printf("rank: %d key:%s val:%s\n", rank, key, val);
-
-		memset(val, 0, sizeof(val));
-		sprintf(key, "david@%d", rank);
-		PMI2_KVS_Get(jobid,
-					 PMI2_ID_NULL,
-					 key,
-					 val,
-					 sizeof(val),
-					 &len);
-		printf("rank: %d key:%s val:%s\n", rank, key, val);
-
-		memset(val, 0, sizeof(val));
-		sprintf(key, "mpi_reserved_ports");
-		PMI2_KVS_Get(jobid,
-					 PMI2_ID_NULL,
-					 key,
-					 val,
-					 sizeof(val),
-					 &len);
-		printf("rank: %d key:%s val:%s\n", rank, key, val);
-	}
-
-	PMI2_Finalize();
-
-	gettimeofday(&tv2, NULL);
-	printf("%f\n",
-		   ((tv2.tv_sec - tv.tv_sec) * 1000.0
-			+ (tv2.tv_usec - tv.tv_usec) / 1000.0));
-
-	return 0;
+    int rank;
+    int size;
+    int appnum;
+    int spawned;
+    int flag;
+    int len;
+    int i;
+    struct timeval tv;
+    struct timeval tv2;
+    char jobid[128];
+    char key[128];
+    char val[128];
+    char buf[128];
+
+    {
+        int x = 1;
+        while (x == 0) {
+            sleep(2);
+        }
+    }
+
+    gettimeofday(&tv, NULL);
+    srand(tv.tv_sec);
+
+    PMI2_Init(&spawned, &size, &rank, &appnum);
+
+    PMI2_Job_GetId(jobid, sizeof(buf));
+
+    memset(val, 0, sizeof(val));
+    PMI2_Info_GetJobAttr("mpi_reserved_ports",
+                         val,
+                         PMI2_MAX_ATTRVALUE,
+                         &flag);
+
+    sprintf(key, "mpi_reserved_ports");
+    PMI2_KVS_Put(key, val);
+
+    memset(val, 0, sizeof(val));
+    sprintf(buf, "PMI_netinfo_of_task");
+    PMI2_Info_GetJobAttr(buf,
+                         val,
+                         PMI2_MAX_ATTRVALUE,
+                         &flag);
+    sprintf(key, buf);
+    PMI2_KVS_Put(key, val);
+
+    memset(val, 0, sizeof(val));
+    sprintf(key, "david@%d", rank);
+    sprintf(val, "%s", mrand(97, 122));
+    PMI2_KVS_Put(key, val);
+
+    PMI2_KVS_Fence();
+
+    for (i = 0; i < size; i++) {
+
+        memset(val, 0, sizeof(val));
+        sprintf(key, "PMI_netinfo_of_task");
+        PMI2_KVS_Get(jobid,
+                     PMI2_ID_NULL,
+                     key,
+                     val,
+                     sizeof(val),
+                     &len);
+        printf("rank: %d key:%s val:%s\n", rank, key, val);
+
+        memset(val, 0, sizeof(val));
+        sprintf(key, "david@%d", rank);
+        PMI2_KVS_Get(jobid,
+                     PMI2_ID_NULL,
+                     key,
+                     val,
+                     sizeof(val),
+                     &len);
+        printf("rank: %d key:%s val:%s\n", rank, key, val);
+
+        memset(val, 0, sizeof(val));
+        sprintf(key, "mpi_reserved_ports");
+        PMI2_KVS_Get(jobid,
+                     PMI2_ID_NULL,
+                     key,
+                     val,
+                     sizeof(val),
+                     &len);
+        printf("rank: %d key:%s val:%s\n", rank, key, val);
+    }
+
+    PMI2_Finalize();
+
+    gettimeofday(&tv2, NULL);
+    printf("%f\n",
+           ((tv2.tv_sec - tv.tv_sec) * 1000.0
+            + (tv2.tv_usec - tv.tv_usec) / 1000.0));
+
+    return 0;
 }
 
 /* Generate a random number between
@@ -115,13 +115,13 @@ main(int argc, char **argv)
 static char *
 mrand(int m, int M)
 {
-	int i;
-	time_t t;
-	static char buf[64];
+    int i;
+    time_t t;
+    static char buf[64];
 
-	memset(buf, 0, sizeof(buf));
-	for (i = 0; i  < 16; i++)
-		buf[i] = rand() % (M - m + 1) + m;
+    memset(buf, 0, sizeof(buf));
+    for (i = 0; i  < 16; i++)
+        buf[i] = rand() % (M - m + 1) + m;
 
-	return buf;
+    return buf;
 }
diff --git a/contribs/pmi2/testpmi2_put.c b/contribs/pmi2/testpmi2_put.c
index 937ff2c91..0cb8ddafe 100644
--- a/contribs/pmi2/testpmi2_put.c
+++ b/contribs/pmi2/testpmi2_put.c
@@ -5,41 +5,42 @@
 
 int main(int argc, char **argv)
 {
-  int spawned, size, rank, appnum;
-  int ret;
-  char jobid[50];
-
-  ret = PMI2_Init(&spawned, &size, &rank, &appnum);
-  if (ret != PMI2_SUCCESS) {
-	  perror("PMI2_Init failed");
-	  return 1;
-  }
-
-  PMI2_Job_GetId(jobid, sizeof(jobid));
-  printf("spawned=%d, size=%d, rank=%d, appnum=%d, jobid=%s\n",
-         spawned, size, rank, appnum, jobid);
-  fflush(stdout);
-
-  PMI2_KVS_Fence();
-
-  /* broadcast msg=42 from proc 0 */
-  int msg = 0;
-  char val[20] = "0\n";
-  if (rank == 0) {
-	  msg = 42;
-	  snprintf(val, sizeof(val), "%d\n", msg);
-	  PMI2_KVS_Put("msg", val);
-	  printf("%d> send %d\n", rank, msg);
-	  fflush(stdout);
-  }
-
-  PMI2_KVS_Fence();
-  int len = 0;
-  PMI2_KVS_Get(jobid, PMI2_ID_NULL, "msg", val, sizeof(val), &len);
-  msg = atoi(val);
-  printf("%d> got %d\n", rank, msg);
-  fflush(stdout);
-
-  PMI2_Finalize();
-  return 0;
+    int spawned, size, rank, appnum;
+    int ret;
+    char jobid[50];
+    int msg = 0;
+    char val[20] = "0\n";
+    int len = 0;
+
+    ret = PMI2_Init(&spawned, &size, &rank, &appnum);
+    if (ret != PMI2_SUCCESS) {
+        perror("PMI2_Init failed");
+        return 1;
+    }
+
+    PMI2_Job_GetId(jobid, sizeof(jobid));
+    printf("spawned=%d, size=%d, rank=%d, appnum=%d, jobid=%s\n",
+           spawned, size, rank, appnum, jobid);
+    fflush(stdout);
+
+    PMI2_KVS_Fence();
+
+    /* broadcast msg=42 from proc 0 */
+    if (rank == 0) {
+        msg = 42;
+        snprintf(val, sizeof(val), "%d\n", msg);
+        PMI2_KVS_Put("msg", val);
+        printf("%d> send %d\n", rank, msg);
+        fflush(stdout);
+    }
+
+    PMI2_KVS_Fence();
+
+    PMI2_KVS_Get(jobid, PMI2_ID_NULL, "msg", val, sizeof(val), &len);
+    msg = atoi(val);
+    printf("%d> got %d\n", rank, msg);
+    fflush(stdout);
+
+    PMI2_Finalize();
+    return 0;
 }
diff --git a/contribs/pmi2/testpmixring.c b/contribs/pmi2/testpmixring.c
new file mode 100644
index 000000000..825ed654d
--- /dev/null
+++ b/contribs/pmi2/testpmixring.c
@@ -0,0 +1,63 @@
+
+#include <stdio.h>
+#include <time.h>
+#include <stdlib.h>
+#include <string.h>
+//#include <mpi.h>
+#include <slurm/pmi2.h>
+#include <sys/time.h>
+
+/*
+ * To build:
+ *
+ * gcc -g -O0 -o testpmixring testpmixring.c -I<slurm_install>/include -Wl,-rpath,<slurm_install>/lib -L<slurm_install>/lib -lpmi2
+ *
+ * To run:
+ *
+ * srun -n8 -m block ./testpmixring
+ * srun -n8 -m cyclic ./testpmixring
+ */
+
+int
+main(int argc, char **argv)
+{
+    int spawned, size, rank, appnum;
+    struct timeval tv, tv2;
+    int ring_rank, ring_size;
+    char jobid[128];
+    char val[128];
+    char buf[128];
+    char left[128];
+    char right[128];
+
+    {
+        int x = 1;
+
+        while (x) {
+            fprintf(stderr, "attachme %d\n", getpid());
+            sleep(2);
+        }
+    }
+
+    gettimeofday(&tv, NULL);
+
+    PMI2_Init(&spawned, &size, &rank, &appnum);
+
+    PMI2_Job_GetId(jobid, sizeof(buf));
+
+    /* test PMIX_Ring */
+    snprintf(val, sizeof(val), "pmi_rank=%d", rank);
+    PMIX_Ring(val, &ring_rank, &ring_size, left, right, 128);
+
+    printf("pmi_rank:%d ring_rank:%d ring_size:%d left:%s mine:%s right:%s\n",
+           rank, ring_rank, ring_size, left, val, right);
+
+    PMI2_Finalize();
+
+    gettimeofday(&tv2, NULL);
+    printf("%f\n",
+           ((tv2.tv_sec - tv.tv_sec) * 1000.0
+            + (tv2.tv_usec - tv.tv_usec) / 1000.0));
+
+    return 0;
+}
diff --git a/contribs/sgather/Makefile.in b/contribs/sgather/Makefile.in
index 599c6e634..007ed4526 100644
--- a/contribs/sgather/Makefile.in
+++ b/contribs/sgather/Makefile.in
@@ -97,6 +97,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -105,10 +106,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -121,7 +124,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -222,6 +225,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -271,8 +276,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -291,6 +300,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -334,6 +346,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -357,6 +370,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/contribs/sgi/Makefile.am b/contribs/sgi/Makefile.am
new file mode 100644
index 000000000..b0f20aef6
--- /dev/null
+++ b/contribs/sgi/Makefile.am
@@ -0,0 +1,21 @@
+#
+# Makefile for sgi programs
+#
+
+AUTOMAKE_OPTIONS = foreign
+
+if HAVE_NETLOC
+
+EXTRA_DIST = README.txt
+bin_PROGRAMS = netloc_to_topology
+netloc_to_topology_SOURCES  = netloc_to_topology.c
+netloc_to_topology_CPPFLAGS = $(NETLOC_CPPFLAGS) $(HWLOC_CPPFLAGS)
+netloc_to_topology_LDFLAGS  = $(NETLOC_LDFLAGS) $(NETLOC_LIBS) $(HWLOC_LDFLAGS) $(HWLOC_LIBS)
+
+else
+
+EXTRA_DIST = \
+	netloc_to_topology.c	\
+	README.txt
+
+endif
diff --git a/contribs/sgi/Makefile.in b/contribs/sgi/Makefile.in
new file mode 100644
index 000000000..ff59773ef
--- /dev/null
+++ b/contribs/sgi/Makefile.in
@@ -0,0 +1,817 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+#
+# Makefile for sgi programs
+#
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+@HAVE_NETLOC_TRUE@bin_PROGRAMS = netloc_to_topology$(EXEEXT)
+subdir = contribs/sgi
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+	$(top_srcdir)/auxdir/depcomp
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__installdirs = "$(DESTDIR)$(bindir)"
+PROGRAMS = $(bin_PROGRAMS)
+am__netloc_to_topology_SOURCES_DIST = netloc_to_topology.c
+@HAVE_NETLOC_TRUE@am_netloc_to_topology_OBJECTS = netloc_to_topology-netloc_to_topology.$(OBJEXT)
+netloc_to_topology_OBJECTS = $(am_netloc_to_topology_OBJECTS)
+netloc_to_topology_LDADD = $(LDADD)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 = 
+netloc_to_topology_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \
+	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
+	$(AM_CFLAGS) $(CFLAGS) $(netloc_to_topology_LDFLAGS) \
+	$(LDFLAGS) -o $@
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo "  CC      " $@;
+am__v_CC_1 = 
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo "  CCLD    " $@;
+am__v_CCLD_1 = 
+SOURCES = $(netloc_to_topology_SOURCES)
+DIST_SOURCES = $(am__netloc_to_topology_SOURCES_DIST)
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+@HAVE_NETLOC_FALSE@EXTRA_DIST = \
+@HAVE_NETLOC_FALSE@	netloc_to_topology.c	\
+@HAVE_NETLOC_FALSE@	README.txt
+
+@HAVE_NETLOC_TRUE@EXTRA_DIST = README.txt
+@HAVE_NETLOC_TRUE@netloc_to_topology_SOURCES = netloc_to_topology.c
+@HAVE_NETLOC_TRUE@netloc_to_topology_CPPFLAGS = $(NETLOC_CPPFLAGS) $(HWLOC_CPPFLAGS)
+@HAVE_NETLOC_TRUE@netloc_to_topology_LDFLAGS = $(NETLOC_LDFLAGS) $(NETLOC_LIBS) $(HWLOC_LDFLAGS) $(HWLOC_LIBS)
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign contribs/sgi/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign contribs/sgi/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+install-binPROGRAMS: $(bin_PROGRAMS)
+	@$(NORMAL_INSTALL)
+	@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+	if test -n "$$list"; then \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(bindir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(bindir)" || exit 1; \
+	fi; \
+	for p in $$list; do echo "$$p $$p"; done | \
+	sed 's/$(EXEEXT)$$//' | \
+	while read p p1; do if test -f $$p \
+	 || test -f $$p1 \
+	  ; then echo "$$p"; echo "$$p"; else :; fi; \
+	done | \
+	sed -e 'p;s,.*/,,;n;h' \
+	    -e 's|.*|.|' \
+	    -e 'p;x;s,.*/,,;s/$(EXEEXT)$$//;$(transform);s/$$/$(EXEEXT)/' | \
+	sed 'N;N;N;s,\n, ,g' | \
+	$(AWK) 'BEGIN { files["."] = ""; dirs["."] = 1 } \
+	  { d=$$3; if (dirs[d] != 1) { print "d", d; dirs[d] = 1 } \
+	    if ($$2 == $$4) files[d] = files[d] " " $$1; \
+	    else { print "f", $$3 "/" $$4, $$1; } } \
+	  END { for (d in files) print "f", d, files[d] }' | \
+	while read type dir files; do \
+	    if test "$$dir" = .; then dir=; else dir=/$$dir; fi; \
+	    test -z "$$files" || { \
+	    echo " $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files '$(DESTDIR)$(bindir)$$dir'"; \
+	    $(INSTALL_PROGRAM_ENV) $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL_PROGRAM) $$files "$(DESTDIR)$(bindir)$$dir" || exit $$?; \
+	    } \
+	; done
+
+uninstall-binPROGRAMS:
+	@$(NORMAL_UNINSTALL)
+	@list='$(bin_PROGRAMS)'; test -n "$(bindir)" || list=; \
+	files=`for p in $$list; do echo "$$p"; done | \
+	  sed -e 'h;s,^.*/,,;s/$(EXEEXT)$$//;$(transform)' \
+	      -e 's/$$/$(EXEEXT)/' \
+	`; \
+	test -n "$$list" || exit 0; \
+	echo " ( cd '$(DESTDIR)$(bindir)' && rm -f" $$files ")"; \
+	cd "$(DESTDIR)$(bindir)" && rm -f $$files
+
+clean-binPROGRAMS:
+	@list='$(bin_PROGRAMS)'; test -n "$$list" || exit 0; \
+	echo " rm -f" $$list; \
+	rm -f $$list || exit $$?; \
+	test -n "$(EXEEXT)" || exit 0; \
+	list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \
+	echo " rm -f" $$list; \
+	rm -f $$list
+
+netloc_to_topology$(EXEEXT): $(netloc_to_topology_OBJECTS) $(netloc_to_topology_DEPENDENCIES) $(EXTRA_netloc_to_topology_DEPENDENCIES) 
+	@rm -f netloc_to_topology$(EXEEXT)
+	$(AM_V_CCLD)$(netloc_to_topology_LINK) $(netloc_to_topology_OBJECTS) $(netloc_to_topology_LDADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/netloc_to_topology-netloc_to_topology.Po@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+netloc_to_topology-netloc_to_topology.o: netloc_to_topology.c
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(netloc_to_topology_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT netloc_to_topology-netloc_to_topology.o -MD -MP -MF $(DEPDIR)/netloc_to_topology-netloc_to_topology.Tpo -c -o netloc_to_topology-netloc_to_topology.o `test -f 'netloc_to_topology.c' || echo '$(srcdir)/'`netloc_to_topology.c
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/netloc_to_topology-netloc_to_topology.Tpo $(DEPDIR)/netloc_to_topology-netloc_to_topology.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='netloc_to_topology.c' object='netloc_to_topology-netloc_to_topology.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(netloc_to_topology_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o netloc_to_topology-netloc_to_topology.o `test -f 'netloc_to_topology.c' || echo '$(srcdir)/'`netloc_to_topology.c
+
+netloc_to_topology-netloc_to_topology.obj: netloc_to_topology.c
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(netloc_to_topology_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -MT netloc_to_topology-netloc_to_topology.obj -MD -MP -MF $(DEPDIR)/netloc_to_topology-netloc_to_topology.Tpo -c -o netloc_to_topology-netloc_to_topology.obj `if test -f 'netloc_to_topology.c'; then $(CYGPATH_W) 'netloc_to_topology.c'; else $(CYGPATH_W) '$(srcdir)/netloc_to_topology.c'; fi`
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/netloc_to_topology-netloc_to_topology.Tpo $(DEPDIR)/netloc_to_topology-netloc_to_topology.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='netloc_to_topology.c' object='netloc_to_topology-netloc_to_topology.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(netloc_to_topology_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) -c -o netloc_to_topology-netloc_to_topology.obj `if test -f 'netloc_to_topology.c'; then $(CYGPATH_W) 'netloc_to_topology.c'; else $(CYGPATH_W) '$(srcdir)/netloc_to_topology.c'; fi`
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(PROGRAMS)
+installdirs:
+	for dir in "$(DESTDIR)$(bindir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-binPROGRAMS
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-binPROGRAMS
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean \
+	clean-binPROGRAMS clean-generic clean-libtool cscopelist-am \
+	ctags ctags-am distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-binPROGRAMS \
+	install-data install-data-am install-dvi install-dvi-am \
+	install-exec install-exec-am install-html install-html-am \
+	install-info install-info-am install-man install-pdf \
+	install-pdf-am install-ps install-ps-am install-strip \
+	installcheck installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-compile \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	tags tags-am uninstall uninstall-am uninstall-binPROGRAMS
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/contribs/sgi/README.txt b/contribs/sgi/README.txt
new file mode 100644
index 000000000..dbb83e0a8
--- /dev/null
+++ b/contribs/sgi/README.txt
@@ -0,0 +1,56 @@
+Copyright (C) 2014 Silicon Graphics International Corp.
+All rights reserved.
+
+The SGI hypercube topology plugin for SLURM enables SLURM to understand the
+hypercube topologies on some SGI ICE InfiniBand clusters. With this
+understanding about where nodes are physically located in relation to each
+other, SLURM can make better decisions about which sets of nodes to allocate to
+jobs.
+
+The plugin requires a properly set up topology.conf file. This is built using
+the contribs/sgi/netloc_to_topology program which in turn uses the OpenMPI
+group's netloc and hwloc tools. Please execute the following steps:
+
+1) Ensure that hwloc and netloc are installed on every node in your cluster
+
+2) Create a temporary directory in a shared filesystem available to each node
+   in your cluster. In this example we'll call it /data/slurm/cluster_data/.
+
+3) Create a subdirectory called hwloc, ie. /data/slurm/cluster_data/hwloc/.
+
+4) Create the following script in /data/slurm/cluster_data/create.sh
+   #!/bin/sh
+   HN=`hostname`
+   hwloc-ls /data/slurm/cluster_data/hwloc/$HN.xml
+
+5) Run the script on each compute node
+   $ cexec /data/slurm/cluster_data/create.sh
+
+6) Ensure that hwloc output files got put into /data/slurm/cluster_data/hwloc/.
+   If you have any nodes down right now, their missing data may cause you
+   problems later.
+
+7) Run netloc discovery on the primary InfiniBand fabric
+   $ cd /data/slurm/cluster_data/
+   $ netloc_ib_gather_raw --out-dir ib-raw --sudo --force-subnet mlx4_0:1
+   $ netloc_ib_extract_dats
+
+8) Run netloc_to_topology to turn the netloc and hwloc data into a SLURM
+   topology.conf.
+   $ netloc_to_topology -d /data/slurm/cluster_data/
+   netloc_to_topology assumes a InfiniBand fabric ID of "fe80:0000:0000:0000".
+   If you have a different fabric ID, then you'll need to specify it with the
+   "-f" option. You can find the fabric ID with `ibv_devinfo -v`. E.g.
+   $ ibv_devinfo -v 
+   Look down the results and for the HCA and port that you want to key off of,
+   look at its GID field. E.g.
+   GID[ 0]: fec0:0000:0000:0000:f452:1403:0047:36d1
+   Use the first four couplets:
+   $ netloc_to_topology -d /data/slurm/cluster_data/ -f fec0:0000:0000:0000 
+
+9) Copy the resulting topology.conf file into SLURM's location for configuration
+   files. The following command copies it to the compute nodes. Make sure to
+   copy it to the node(s) running slurmctld as well.
+   $ cpush topology.conf /etc/slurm/topology.conf
+
+10) Restart SLURM
diff --git a/contribs/sgi/netloc_to_topology.c b/contribs/sgi/netloc_to_topology.c
new file mode 100644
index 000000000..33f3919d0
--- /dev/null
+++ b/contribs/sgi/netloc_to_topology.c
@@ -0,0 +1,922 @@
+/*****************************************************************************
+ *  Copyright (C) 2014 Silicon Graphics International Corp.
+ *  All rights reserved. 
+ ****************************************************************************/
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#define _GNU_SOURCE
+#include <stdlib.h>
+#include <stdio.h>
+#include <sys/param.h>
+
+#include <netloc.h>
+#ifdef HAVE_NETLOC_NOSUB
+#  include <netloc_map.h>
+#else
+#  include <netloc/map.h>
+#endif
+
+typedef struct node_group {
+	char *node_name;
+	int node_name_len; 
+	int cpus;
+	int memory;
+	int cores_per_socket;
+	int threads_per_core;
+} node_group;
+
+typedef struct switch_name {
+	const char *sw_name;
+	unsigned long physical_id; 
+} switch_name;
+
+
+// Parse the command line arguments and update variables appropriately 
+static int parse_args(int argc, char ** argv);
+
+// Check the directory parameters to make sure they are formatted correctly
+static int check_directory_parameters();
+						
+// initialize NetLoc topology to be used to lookup NetLoc information
+static netloc_topology_t setup_topology(char *data_uri);
+
+// initialize NetLoc map to be used to lookup HwLoc information
+static netloc_map_t setup_map(char *data_uri);
+
+// Generate a topology.conf file based on NetLoc topology and save it to file
+static int generate_topology_file(netloc_topology_t *topology, netloc_map_t *map);
+
+// Loop through and parse all of the switches and their connections
+static int loop_through_switches(netloc_topology_t *topology, 
+					netloc_map_t *map, netloc_dt_lookup_table_t *switches);
+						
+// Loop through and parse all of the edges for a switch
+static int loop_through_edges(netloc_topology_t *topology, netloc_map_t *map, 
+					netloc_node_t *node, const char *src_name, FILE *f_temp);
+
+
+// Add a switch connection and its link speed to the switch list
+static int add_switch_connection(netloc_edge_t **edges, int idx, int num_edges,
+			const char *src_name, const char *dst_name, char *switch_str);
+
+// calculate the link speed for an edge between two switches
+static int calculate_link_speed(netloc_edge_t *edge);
+							
+// Add a node connection to the node list
+static int add_node_connection( netloc_topology_t *topology, netloc_map_t *map, 
+						netloc_edge_t *edge, char *node_str );
+						
+// Find a node group that matches the specifications given
+static int find_node_group( int cpus, int cores_per_socket, 
+					 int threads_per_core, int memory, const char *dst_name);
+
+// Make a new node group in the table and fill in information
+static void make_new_node_group( int cpus, int cores_per_socket, 
+					 int threads_per_core, int memory, const char *dst_name);
+
+// Save Topology data of network to topology.conf file
+static int save_topology_data_to_file();
+
+// Gets the name and the hw_loc topology for a NetLoc node
+static int get_node_name_and_topology(netloc_topology_t *topology, netloc_map_t *map, 
+			netloc_node_t *node, const char **name, hwloc_topology_t *hw_topo);
+			
+// Gets the name of a switch in the network
+static int get_switch_name( netloc_topology_t *topology, netloc_map_t *map, 
+									netloc_node_t *node, const char **name );
+
+// Find a switch_name that matches the Physical ID given
+static int find_switch_name( netloc_node_t *node );
+
+// Compares switch_name with all of the names in the table
+static int check_unique_switch_name( char *sw_name);
+
+// Make a new switch_name entry in the table and fill in information
+static int make_new_switch_name( netloc_topology_t *topology, netloc_map_t *map,
+									netloc_node_t *node, const char **name );
+
+#define NETLOC_DIR "netloc"
+
+const char * ARG_OUTDIR         = "--outdir";
+const char * ARG_SHORT_OUTDIR   = "-o";
+const char * ARG_DATADIR         = "--datadir";
+const char * ARG_SHORT_DATADIR   = "-d";
+const char * ARG_VERBOSE         = "--verbose";
+const char * ARG_SHORT_VERBOSE   = "-v";
+const char * ARG_FABRIC         = "--fabric";
+const char * ARG_SHORT_FABRIC   = "-f";
+const char * ARG_HELP           = "--help";
+const char * ARG_SHORT_HELP     = "-h";
+
+static char * outdir = NULL;
+static char * datadir = NULL;
+static char * fabric = "fe80:0000:0000:0000";
+static int verbose = 0;
+
+static int max_nodes = 0, max_switches = 0;
+static node_group *node_group_table = NULL;
+static int node_group_cnt = 0;
+static int node_groups_max  = 32;
+static switch_name **switch_name_table = NULL;
+static int switch_name_cnt = 0;
+static int switch_name_max  = 256;
+static char *file_location = NULL, *file_location_temp= NULL;
+
+
+int main(int argc, char ** argv) {
+	int ret;
+	netloc_topology_t topology;
+	netloc_map_t map;
+	
+	// Parse the command line arguments and update variables appropriately 
+	if( 0 != parse_args(argc, argv) ) {
+		printf(
+"Usage: %s\n"
+"\t%s|%s <directory with hwloc and netloc data directories>\n"
+"\t[%s|%s <output directory>]\n"
+"\t[%s|%s <IB Fabric ID, eg. fec0:0000:0000:0000>]\n"
+"\t[%s|%s] [--help|-h]\n",
+			   argv[0],
+			   ARG_DATADIR, ARG_SHORT_DATADIR,
+			   ARG_OUTDIR, ARG_SHORT_OUTDIR,
+			   ARG_FABRIC, ARG_SHORT_FABRIC,
+			   ARG_VERBOSE, ARG_SHORT_VERBOSE);
+		printf("     Default %-10s = current working directory\n", ARG_OUTDIR);
+		return NETLOC_ERROR;
+	}
+  
+	asprintf(&file_location, "%stopology.conf", outdir);
+	asprintf(&file_location_temp, "%s.temp", file_location);
+	
+	// initialize NetLoc topology to be used to lookup NetLoc information
+	topology = setup_topology(datadir);
+	(verbose) ? printf("Successfully Created Network Topology \n") : 0 ;
+	
+	// initialize NetLoc map to be used to lookup HwLoc information
+	map = setup_map(datadir);
+	(verbose) ? printf("Successfully Created Network Map\n") : 0 ;
+	
+	node_group_table = malloc( sizeof(node_group) * node_groups_max );
+	switch_name_table = malloc( sizeof(switch_name *) * switch_name_max );
+
+	// Generate a topology.conf file based on NetLoc topology and save to file
+	ret = generate_topology_file(&topology, &map);
+	
+	if( NETLOC_SUCCESS == ret ) 
+		printf("\nDone generating topology.conf file from NetLoc data\n");
+	else
+		printf("Error: Couldn't Create topology.conf file from NetLoc data\n");
+
+	netloc_detach(topology);
+	netloc_map_destroy(map);
+	return ret;
+}
+
+// Parse the command line arguments and update variables appropriately 
+static int parse_args(int argc, char ** argv) {
+	int i, ret = NETLOC_SUCCESS;
+
+	for(i = 1; i < argc; ++i ) {
+		// --outdir
+		if( ( 0 == strncmp(ARG_OUTDIR, argv[i], strlen(ARG_OUTDIR)) ) ||
+		(0 == strncmp(ARG_SHORT_OUTDIR, argv[i], strlen(ARG_SHORT_OUTDIR))) ) {
+			++i;
+			if( i >= argc ) {
+				fprintf(stderr, "Error: Must supply an argument to %s\n",
+															ARG_OUTDIR );
+				return NETLOC_ERROR;
+			}
+			outdir = strdup(argv[i]);
+		}
+		// --datadir (directory with hwloc and netloc input data directories)
+		else if( 0 ==strncmp(ARG_DATADIR,       argv[i], strlen(ARG_DATADIR)) ||
+		0 == strncmp(ARG_SHORT_DATADIR, argv[i], strlen(ARG_SHORT_DATADIR)) ) {
+			++i;
+			if( i >= argc ) {
+				fprintf(stderr, "Error: Must supply an argument to %s "
+								"(input data directory)\n", ARG_DATADIR );
+				return NETLOC_ERROR;
+			}
+			datadir = strdup(argv[i]);
+		}
+		// verbose output
+		else if( 0 == strncmp(ARG_VERBOSE, argv[i], strlen(ARG_VERBOSE)) ||
+		(0 == strncmp(ARG_SHORT_VERBOSE, argv[i], strlen(ARG_SHORT_VERBOSE)))){
+			verbose = 1;
+		}
+		// Help
+		else if( 0 == strncmp(ARG_HELP,       argv[i], strlen(ARG_HELP)) ||
+		0 == strncmp(ARG_SHORT_HELP, argv[i], strlen(ARG_SHORT_HELP)) ) {
+			return NETLOC_ERROR;
+		} else if (0 == strcmp(ARG_FABRIC, argv[i]) ||
+			    0 == strcmp(ARG_SHORT_FABRIC, argv[i])) {
+			i++;
+			if (i >= argc) {
+				fprintf(stderr,
+"Error: Must supply an argument to %s (fabric ID)\n",
+					 ARG_FABRIC);
+			}
+
+			fabric = strdup(argv[i]);
+		}
+		// Unknown options throw warnings
+		else {
+			fprintf(stderr, "Warning: Unknown argument of <%s>\n", argv[i]);
+			return NETLOC_ERROR;
+		}
+	}
+	// Check the directory parameters to make sure they are formatted correctly
+	ret = check_directory_parameters();
+	return ret;
+}
+
+
+// Check the directory parameters to make sure they are formatted correctly
+static int check_directory_parameters() {
+	int ret = NETLOC_SUCCESS;
+	
+	// Check Output Directory Parameter
+	if( NULL == outdir || strlen(outdir) <= 0 ) {
+		if( NULL != outdir )
+			free(outdir);
+		// Default: current working directory
+		outdir = strdup(".");
+	}
+	if( '/' != outdir[strlen(outdir)-1] ) {
+		outdir = (char *)realloc(outdir, sizeof(char) * (strlen(outdir)+1));
+		outdir[strlen(outdir)+1] = '\0';
+		outdir[strlen(outdir)]   = '/';
+	}
+	
+	// Check Input Data Directory Parameter
+	if( NULL == datadir || strlen(datadir) <= 0 ) {
+		fprintf(stderr, "Error: Must supply an argument to %s|%s (input data"
+						" directory)\n", ARG_DATADIR, ARG_SHORT_DATADIR );
+		return NETLOC_ERROR;
+	}
+	else if( '/' != datadir[strlen(datadir)-1] ) {
+		datadir = (char *)realloc(datadir, sizeof(char) * (strlen(datadir)+1));
+		datadir[strlen(datadir)+1] = '\0';
+		datadir[strlen(datadir)]   = '/';
+	}
+	
+	// Display Parsed Arguments
+	(verbose) ? printf("  Input Data Directory: %s\n", datadir) : 0 ;
+	(verbose) ? printf("  Output Directory    : %s\n", outdir) : 0 ;
+	return ret;
+}
+
+
+// initialize NetLoc topology to be used to lookup NetLoc information
+static netloc_topology_t setup_topology(char *data_uri)
+{
+	int ret;
+	netloc_topology_t topology;
+	netloc_network_t *tmp_network = NULL;
+	char *search_uri = NULL;
+
+	// Setup a Network connection
+	tmp_network = netloc_dt_network_t_construct();
+	tmp_network->network_type = NETLOC_NETWORK_TYPE_INFINIBAND;
+	tmp_network->subnet_id    = strdup(fabric);
+
+	asprintf(&search_uri, "file://%s%s", data_uri, NETLOC_DIR);
+	ret = netloc_find_network(search_uri, tmp_network);
+	free(search_uri);
+	if (NETLOC_SUCCESS != ret) {
+		fprintf(stderr,
+			 "Error: netloc_find_network return error (%d)\n"
+			 "\tConsider passing a different IB fabric ID with -f\n",
+			 ret);
+		exit(ret);
+	}
+
+	// Attach to the topology context
+	ret = netloc_attach(&topology, *tmp_network);
+	netloc_dt_network_t_destruct(tmp_network);
+	if( NETLOC_SUCCESS != ret ) {
+		fprintf(stderr, "Error: netloc_attach returned an error (%d)\n", ret);
+		exit(ret);
+	}
+	return topology;
+}
+
+// initialize NetLoc map to be used to lookup HwLoc information
+static netloc_map_t setup_map(char *data_uri)
+{
+	int err;
+	netloc_map_t map;
+	char *path;
+
+	err = netloc_map_create(&map);
+	if (err) {
+		fprintf(stderr, "Failed to create the map\n");
+		exit(EXIT_FAILURE);
+	}
+
+	asprintf(&path, "%shwloc", data_uri);
+
+	err = netloc_map_load_hwloc_data(map, path);
+	free(path);
+	if (err) {
+		fprintf(stderr, "Failed to load hwloc data\n");
+		exit(EXIT_FAILURE);
+	}
+
+	asprintf(&path, "file://%s%s", data_uri, NETLOC_DIR);
+
+	err = netloc_map_load_netloc_data(map, path);
+	free(path);
+	if (err) {
+		fprintf(stderr, "Failed to load netloc data\n");
+		exit(EXIT_FAILURE);
+	}
+
+	err = netloc_map_build(map, 0);
+	if (err) {
+		fprintf(stderr, "Failed to build map data\n");
+		exit(EXIT_FAILURE);
+	}
+
+	return map;
+}
+
+
+// Generate a topology.conf file based on NetLoc topology and save it to file
+static int generate_topology_file(netloc_topology_t *topology, 
+							netloc_map_t *map)
+{
+	int ret;
+	netloc_dt_lookup_table_t switches = NULL;
+	
+	// Get all of the switches
+	ret = netloc_get_all_switch_nodes(*topology, &switches);
+	if( NETLOC_SUCCESS != ret ) {
+		fprintf(stderr, "Error: get_all_switch_nodes returned %d\n", ret);
+		return ret;
+	}
+	
+	// Loop through and parse all of the switches and their connections
+	ret = loop_through_switches(topology, map, &switches);
+	if( NETLOC_SUCCESS != ret ) {
+		fprintf(stderr, "Error: loop_through_switches returned %d\n", ret);
+		return ret;
+	}
+	
+	// Save Topology data of network to topology.conf file
+	save_topology_data_to_file();
+	
+	// Cleanup 
+	netloc_lookup_table_destroy(switches);
+	free(switches);
+
+	free(file_location);
+	free(file_location_temp);
+	int i;
+	for ( i = 0; i < node_group_cnt; i++)
+		free(node_group_table[i].node_name);
+	free(node_group_table);
+	for ( i = 0; i < switch_name_cnt; i++)
+		free(switch_name_table[i]);
+	free(switch_name_table);
+	return NETLOC_SUCCESS;
+}
+
+
+// Loop through and parse all of the switches and their connections
+static int
+loop_through_switches(netloc_topology_t *topology, 
+			 netloc_map_t *map, netloc_dt_lookup_table_t *switches)
+{
+	int ret;
+	netloc_dt_lookup_table_iterator_t hti = NULL;
+	FILE *f_temp = fopen(file_location_temp, "w");
+
+	/* Loop through all of the switches */
+	hti = netloc_dt_lookup_table_iterator_t_construct(*switches);
+	while (!netloc_lookup_table_iterator_at_end(hti)) {
+		const char * key = netloc_lookup_table_iterator_next_key(hti);
+		if (NULL == key) {break;}
+
+		netloc_node_t *node = (netloc_node_t *)
+			netloc_lookup_table_access(*switches, key);
+		if (NETLOC_NODE_TYPE_SWITCH != node->node_type) {
+			fprintf(stderr, "Error: Returned unexpected node: %s\n", 
+				 netloc_pretty_print_node_t(node));
+			return NETLOC_ERROR;
+		}
+
+		// Get the Switch Name
+		const char *src_name;
+		ret = get_switch_name(topology, map, node, &src_name);
+		if (NETLOC_SUCCESS != ret) {
+			if (verbose) {
+				fprintf(stderr,
+"Did not find data for any nodes attached to switch %s\n",
+					 netloc_pretty_print_node_t(node));
+			}
+			continue;
+		}
+
+		// Loop through and parse all of the edges for a switch
+		loop_through_edges(topology, map, node, src_name, f_temp);
+	}
+
+	// Cleanup
+	fclose(f_temp); 
+	netloc_dt_lookup_table_iterator_t_destruct(hti);
+	return NETLOC_SUCCESS;
+}
+
+
+// Loop through and parse all of the edges for a switch
+static int
+loop_through_edges(netloc_topology_t *topology, netloc_map_t *map, 
+		     netloc_node_t *node, const char *src_name, FILE *f_temp)
+{
+	int ret, i, num_edges, nodes_cnt = 0, switches_cnt = 0;
+	netloc_edge_t **edges = NULL;
+	size_t slen = 4096;
+   	char *switch_str = malloc(sizeof(char) * slen);
+	char *node_str = malloc(sizeof(char) * slen);
+
+	strcpy(switch_str, "");
+	strcpy(node_str, "");
+
+	// Get all of the edges
+	ret = netloc_get_all_edges(*topology, node, &num_edges, &edges);
+	if (NETLOC_SUCCESS != ret) {
+		fprintf(stderr,
+			 "Error: get_all_edges_by_id returned %d for"
+			 " node %s\n", ret, node->description);
+		return ret;
+	}
+
+	(verbose) ? printf("\nFound Switch: %s - %s which has %d edges \n",
+			     src_name, node->physical_id, num_edges) : 0;
+
+	// Loop through all of the edges
+	for (i = 0; i < num_edges; i++) {
+		(verbose) ? printf("\tEdge %2d - Speed: %s, Width: %s - " , i, 
+				     edges[i]->speed, edges[i]->width) : 0;
+
+		if (NETLOC_NODE_TYPE_SWITCH == edges[i]->dest_node->node_type) {
+			// get the dest_node name
+			const char *dst_name;
+			ret = get_switch_name(
+				topology, map, edges[i]->dest_node, &dst_name);
+			if (NETLOC_SUCCESS != ret) {
+				if (verbose) {
+					fprintf(stderr,
+"Did not find data for any nodes attached to switch %s\n",
+						 netloc_pretty_print_node_t(node));
+				}
+				continue;
+			}
+
+			// Add name and link_speed to switch_str
+			ret = add_switch_connection(edges, i, num_edges, src_name,
+							dst_name, switch_str);
+			if (NETLOC_SUCCESS == ret) {switches_cnt++;}
+		} else if (NETLOC_NODE_TYPE_HOST == edges[i]->dest_node->node_type) {
+			// if edge goes to a node, add name to node_str and put in a group
+			ret = add_node_connection(topology, map, edges[i], node_str);
+			if (NETLOC_SUCCESS == ret) {nodes_cnt++;}
+		} else {
+			fprintf(stderr,
+				 "Error: Returned unexpected node: %s\n", 
+				 netloc_pretty_print_node_t(edges[i]->dest_node));
+			return NETLOC_ERROR;
+		}
+	}
+
+	// update maximum totals needed later
+	max_switches = MAX(switches_cnt, max_switches);
+	max_nodes = MAX(max_nodes, nodes_cnt);
+	
+	// Erase any trailing commas
+	assert(0 < strlen(switch_str) && slen > strlen(switch_str));
+	assert(0 < strlen(node_str) && slen > strlen(node_str));
+	switch_str[strlen(switch_str) - 1] = '\0';
+	node_str[strlen(node_str) - 1] = '\0';
+
+	// combine strings together and output to topology file
+	fprintf(f_temp, "SwitchName=%s Switches=%s Nodes=%s\n",
+		 src_name, switch_str, node_str);			
+	
+	free(switch_str);
+	free(node_str);
+	return NETLOC_SUCCESS;
+}
+
+
+// Add a switch connection and its link speed to the switch list
+static int
+add_switch_connection(netloc_edge_t **edges, int idx, int num_edges,
+			 const char *src_name, const char *dst_name, char *switch_str)
+{	
+	netloc_node_t* dn = edges[idx]->dest_node;
+	char * pch = strstr(switch_str, dst_name);
+	int i, total_link_speed = 0;
+	unsigned long current_ID = dn->physical_id_int;
+
+	// Print out node information
+	(verbose) ? printf("Dst:%9s - (%s - %s) [%20s][%18lu]/[%7s] - (%d edges)\n",
+			     dst_name, netloc_decode_network_type(dn->network_type),
+			     netloc_decode_node_type(dn->node_type), dn->physical_id, 
+			     dn->physical_id_int, dn->logical_id, dn->num_edges) : 0;
+
+	// Check to see if this switch is already on the switch connection list	
+	if (pch != NULL) {return NETLOC_ERROR;}
+
+	// Total up the link speed for all the connections between the two switches
+	for (i = idx; i < num_edges; i++) {
+		// If the IDs match then the connections go to the same switch 
+		if (edges[i]->dest_node->physical_id_int == current_ID) {
+			int link_speed = calculate_link_speed(edges[i]);
+
+			if (0 >= link_speed) {
+				fprintf(stderr,
+					 "\nError: invalid connection width %s or "
+					 "speed %s between %s and %s\n",
+					 edges[idx]->width,
+					 edges[idx]->speed, src_name, dst_name);
+				return NETLOC_ERROR;
+			}
+
+			total_link_speed += link_speed;
+		}
+	}
+
+	// Put the switch and its link_speed on the switch string		
+	sprintf(switch_str, "%s%s-%d,", switch_str, dst_name, total_link_speed);
+	return NETLOC_SUCCESS;
+}
+
+
+// calculate the link speed for an edge between two switches
+static int calculate_link_speed(netloc_edge_t *edge)
+{	
+	// calculate the link speed between the two switches
+	int link_speed = atoi(edge->width);
+	if (link_speed < 1 || (link_speed > 24 ) ){
+		return -1;
+	}
+	if ( strcasecmp(edge->speed, "SDR" ) == 0 )
+		link_speed *= 2;
+	else if ( strcasecmp(edge->speed, "DDR" ) == 0 )
+		link_speed *= 4;
+	else if ( strcasecmp(edge->speed, "QDR" ) == 0 )
+		link_speed *= 8;
+	else if ( strcasecmp(edge->speed, "FDR-10" ) == 0 )
+		link_speed *= 10;
+	else if ( strcasecmp(edge->speed, "FDR" ) == 0 )
+		link_speed *= 14;
+	else if ( strcasecmp(edge->speed, "EDR" ) == 0 )
+		link_speed *= 25;
+	else if ( strcasecmp(edge->speed, "HDR" ) == 0 )
+		link_speed *= 50;
+	else{
+		return -1;
+	}
+	return link_speed;
+}
+
+
+// Add a node connection to the node list
+static int
+add_node_connection(netloc_topology_t *topology, netloc_map_t *map, 
+		      netloc_edge_t *edge, char *node_str)
+{
+	int ret;
+	hwloc_topology_t dst_hw_topo;
+	const char *dst_name;
+
+	ret = get_node_name_and_topology(topology, map, edge->dest_node, 
+					     &dst_name, &dst_hw_topo);
+	if (NETLOC_SUCCESS != ret) {return NETLOC_ERROR;}
+
+	(verbose) ? printf( "Dst:%9s - ", dst_name) : 0;
+
+	sprintf(node_str, "%s%s,",node_str, dst_name);
+	
+	// get and calculate needed node information
+	hwloc_obj_t hw_obj = hwloc_get_root_obj(dst_hw_topo);
+	int cpus = hwloc_get_nbobjs_by_type(dst_hw_topo, HWLOC_OBJ_PU);
+	int sockets = hwloc_get_nbobjs_by_type(dst_hw_topo, HWLOC_OBJ_SOCKET);
+	int cores = hwloc_get_nbobjs_by_type(dst_hw_topo, HWLOC_OBJ_CORE);
+	int cores_per_socket = cores / sockets;
+	int threads_per_core = cpus / cores;
+	int memory = hw_obj->memory.total_memory/1024/1024;
+	
+	// Find a node group that matches the specifications given
+	ret = find_node_group(cpus, cores_per_socket, threads_per_core,
+				 memory, dst_name);
+	
+	// if we couldn't find a matching node group, create a new one
+	if (ret == node_group_cnt) {
+		// Make a new node group in the table and fill in information
+		make_new_node_group(cpus, cores_per_socket, threads_per_core,
+				      memory, dst_name);
+	}
+
+	netloc_node_t* dn = edge->dest_node;
+	( verbose ) ? printf("(%s - %s) [%20s][%18lu]/[%7s] - (%d edges)\n",
+		netloc_decode_network_type(dn->network_type),
+			netloc_decode_node_type(dn->node_type), dn->physical_id, 
+				dn->physical_id_int, dn->logical_id, dn->num_edges) : 0;
+
+	return NETLOC_SUCCESS;
+}
+
+
+// Find a node group that matches the specifications given
+static int find_node_group( int cpus, int cores_per_socket, 
+					 int threads_per_core, int memory, const char *dst_name)
+{
+	int j;
+	for ( j=0; j < node_group_cnt; j++){
+		// Check to make sure all of the numbers are the same
+		if ((node_group_table[j].cpus == cpus) && 
+			(node_group_table[j].memory == memory) && 
+			(node_group_table[j].cores_per_socket == cores_per_socket) && 
+			(node_group_table[j].threads_per_core == threads_per_core)){
+			// Make node_name string bigger if there isn't enough space 
+			if ((strlen(node_group_table[j].node_name) + strlen(dst_name) + 3)
+								>= node_group_table[j].node_name_len ){
+				node_group_table[j].node_name_len *= 2;
+				char *temp_node_name = 
+						(char *) realloc( node_group_table[j].node_name, 
+							sizeof(char) * node_group_table[j].node_name_len);
+				if (temp_node_name == NULL) {
+					printf("Error (re)allocating memory - node_name string\n");
+					exit(-1);
+				}
+				node_group_table[j].node_name = temp_node_name;		
+			}
+			sprintf(node_group_table[j].node_name, "%s,%s",
+					node_group_table[j].node_name, dst_name);
+			return j;
+		}
+	}
+	return j;
+}
+
+
+// Make a new node group in the table and fill in information
+static void make_new_node_group( int cpus, int cores_per_socket, 
+					 int threads_per_core, int memory, const char *dst_name)
+{
+	node_group_table[node_group_cnt].node_name = malloc( sizeof(char) * 2048);
+	node_group_table[node_group_cnt].node_name_len = 2048;
+	strcpy(node_group_table[node_group_cnt].node_name, dst_name);
+	node_group_table[node_group_cnt].cpus = cpus;
+	node_group_table[node_group_cnt].memory = memory;
+	node_group_table[node_group_cnt].cores_per_socket = cores_per_socket;
+	node_group_table[node_group_cnt].threads_per_core = threads_per_core;
+	node_group_cnt++;
+	// if there aren't any more empty groups, make new ones
+	if ( node_group_cnt >= node_groups_max){
+		node_groups_max *= 2;
+		node_group *temp_node_group = realloc(node_group_table, 
+									sizeof(node_group) * node_groups_max);
+		if ( temp_node_group == NULL){
+			printf("Error (re)allocating memory for more node groups");
+			exit(-1);
+		}
+		node_group_table = temp_node_group;
+	}
+}
+
+
+// Save Topology data of network to topology.conf file
+int save_topology_data_to_file()
+{
+	int j;
+	// open up files to save data to topology.conf
+	FILE *f = fopen(file_location, "w");
+	FILE *f_temp = fopen(file_location_temp, "r");
+	if ( (f == NULL) || (f_temp == NULL) ){
+		printf("Error opening file!\n");
+		exit(1);
+	}
+	
+	// print hypercube topology configuration information for reference
+	fprintf(f,"#############################################################"
+		"#####\n# SLURM's network topology configuration file for use with the"
+		" topology/hypercube plugin\n#########################################"
+		"#########################\n# Hypcube topology information:\n# Maximum "
+		"Number of Dimensions: %d \n# Maximum Number of Nodes per Switch: %d\n"
+		"\n##################################################################\n"
+		,max_switches, max_nodes); 
+
+	/*
+	 * Print out compute nodes info and partitions nodes list for slurm.conf
+	 * in case the user wants to use this tool to fill in their node list for
+	 * that config file.
+	 */
+	fprintf(f, "# Compute Nodes information for slurm.conf:\n");
+	for ( j=0; j < node_group_cnt; j++){
+		fprintf(f,"# NodeName=%s CPUs=%d RealMemory=%d CoresPerSocket=%d " 
+			"ThreadsPerCore=%d State=UNKNOWN\n", node_group_table[j].node_name,
+				node_group_table[j].cpus, node_group_table[j].memory,
+					node_group_table[j].cores_per_socket, 
+						node_group_table[j].threads_per_core);
+	}
+	fprintf(f,"\n###########################################################"
+			"#######\n# Partition nodes list for slurm.conf: \n" "# Nodes=" );
+	for ( j=0; j < node_group_cnt-1; j++){
+		fprintf(f, "%s,", node_group_table[j].node_name );
+	}
+	fprintf(f, "%s \n", node_group_table[j].node_name );
+	// copy switch information from temp file to topology.conf
+	fprintf(f,	"\n#########################################################"
+				"#########\n# Switch Hypercube Topology Information: \n");
+	char ch;
+	while ( ( ch = fgetc(f_temp) ) != EOF )
+		fputc(ch, f);
+	
+	// Cleanup 
+	fclose(f);
+	fclose(f_temp);
+	remove(file_location_temp);
+	return NETLOC_SUCCESS;
+}
+
+
+// Gets the name and the hw_loc topology for a NetLoc node
+static int
+get_node_name_and_topology(
+	netloc_topology_t *topology, netloc_map_t *map, 
+	netloc_node_t *node, const char **name, hwloc_topology_t *hw_topo)
+{
+	netloc_map_port_t port = NULL;
+	hwloc_obj_t hw_obj = NULL;
+	netloc_map_server_t server = NULL;
+	int ret;
+
+	ret = netloc_map_netloc2port(*map, *topology, node, NULL, &port);
+	if( NETLOC_SUCCESS != ret ) {
+		if (verbose) {
+			printf( "\n    Error: netloc_map_netloc2port could not find"
+				 " port info for %s\n", netloc_pretty_print_node_t(node) );
+		}
+		return ret;
+	}
+	ret = netloc_map_port2hwloc(port, hw_topo, &hw_obj);
+	if( NETLOC_SUCCESS != ret ) {
+		fprintf(stderr, "Error: netloc_map_port2hwloc returned an error");
+		return ret;
+	}
+	ret = netloc_map_hwloc2server(*map, *hw_topo, &server);
+	if( NETLOC_SUCCESS != ret ) {
+		fprintf(stderr, "Error: netloc_map_hwloc2server returned an error");
+		return ret;
+	}
+	ret = netloc_map_server2name(server, name);
+	if( NETLOC_SUCCESS != ret ) {
+		fprintf(stderr, "Error: netloc_map_server2name returned an error");
+		return ret;
+	}
+
+	return NETLOC_SUCCESS;
+}
+
+
+// Gets the name of a switch in the network
+static int get_switch_name(netloc_topology_t *topology, netloc_map_t *map, 
+			      netloc_node_t *node, const char **name)
+{
+	// Find a switch_name that matches the Physical ID given
+	int ret = find_switch_name(node);
+	
+	// If there is already a switch_name assigned to the physical ID
+	if  (ret != switch_name_cnt) {
+		*name = switch_name_table[ret]->sw_name; 
+	}
+	// Else if we couldn't find a matching switch_name, create a new one
+	else{
+		// Make a switch_name entry in the table and fill in information
+		ret = make_new_switch_name(topology, map, node, name);
+		if (NETLOC_SUCCESS != ret) {return ret;}
+
+		switch_name *sw_name_entry = malloc(sizeof(switch_name));
+		sw_name_entry->sw_name = *name;
+		sw_name_entry->physical_id = node->physical_id_int;
+		switch_name_table[switch_name_cnt] = sw_name_entry;
+		switch_name_cnt++;
+		
+		// If no more room for more switch_names, then make more space
+		if (switch_name_cnt == switch_name_max) {
+			switch_name_max *= 2;
+			switch_name **temp_switch_name_table = realloc(
+				switch_name_table, 
+				sizeof(switch_name) * switch_name_max);
+			if (temp_switch_name_table == NULL){
+				printf("Error (re)allocating memory for more switch_names");
+				exit(-1);
+			}
+			switch_name_table = temp_switch_name_table;
+		}
+	}
+	return NETLOC_SUCCESS;
+}
+
+
+// Find a switch_name that matches the Physical ID given
+static int find_switch_name( netloc_node_t *node )
+{
+	int j;
+	for ( j=0; j < switch_name_cnt; j++){
+		// Check to see if the numbers are the same
+		if ( switch_name_table[j]->physical_id == node->physical_id_int ) {
+			return j;
+		}
+	}
+	return j;
+}
+
+
+// Compares switch_name with all of the names in the table
+static int check_unique_switch_name( char *sw_name)
+{
+	int j;
+	for ( j=0; j < switch_name_cnt; j++){
+		// Check to see if the names are the same
+		if ( strcmp( switch_name_table[j]->sw_name, sw_name ) == 0 ) {
+			break;
+		}
+	}
+	// if the name already exists return NETLOC_ERROR, else NETLOC_SUCCESS
+	if ( j < switch_name_cnt )
+		return NETLOC_ERROR;
+	else 
+		return NETLOC_SUCCESS;
+}
+
+
+// Make a new switch_name entry in the table and fill in information
+static int
+make_new_switch_name(netloc_topology_t *topology, netloc_map_t *map, 
+			netloc_node_t *node, const char **name )
+{	
+	int ret, i, num_edges;
+	netloc_edge_t **edges = NULL;
+	const char *node_name;
+
+	//Get all of the edges
+	ret = netloc_get_all_edges(*topology, node, &num_edges, &edges);
+	if (NETLOC_SUCCESS != ret) {
+		fprintf(stderr,
+			 "Error: netloc_get_all_edges returned %d for"
+			 " node %s\n", ret, netloc_pretty_print_node_t(node));
+		return ret;
+	}
+
+	// get the node name of the first host connected to the switch
+	for (i = 0; i < num_edges; i++) {
+		if (NETLOC_NODE_TYPE_HOST == edges[i]->dest_node->node_type) {
+			hwloc_topology_t dst_hw_topo;
+
+			ret = get_node_name_and_topology(
+				topology, map, 
+				edges[i]->dest_node, &node_name, &dst_hw_topo);
+			if (NETLOC_SUCCESS == ret) {break;}
+		}	
+	}
+
+	/*
+	 * If we couldn't find hwloc data for any host attached to the switch,
+	 * let's issue a warning but otherwise assume that the switch won't be
+	 * used
+	 */
+	if (num_edges == i) {
+		if (verbose) {
+			fprintf(stderr,
+				 "Skipping switch because no data was available for attached nodes:\n"
+				 "\t%s\n",
+				 netloc_pretty_print_node_t(node));
+		}
+		return NETLOC_ERROR_EMPTY;
+	}
+
+	// Use the node name to create the switch name
+	char * temp_node_name = strdup(node_name);
+	char * temp_name = strtok (temp_node_name,"n");
+	char * sw_name;
+	int switch_cnt = 0;
+	asprintf( &sw_name, "%ss%d", temp_name, switch_cnt);
+
+	// Check to see if the switch name is unique, change it if it isn't
+	while (check_unique_switch_name(sw_name) == NETLOC_ERROR) {
+		free(sw_name);
+		switch_cnt++;
+		asprintf( &sw_name, "%ss%d", temp_name, switch_cnt);
+	}
+
+	free(temp_node_name);
+	*name = sw_name;
+	return NETLOC_SUCCESS;
+}
diff --git a/contribs/sjobexit/Makefile.in b/contribs/sjobexit/Makefile.in
index 500961d4d..7c7e41bd1 100644
--- a/contribs/sjobexit/Makefile.in
+++ b/contribs/sjobexit/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -222,6 +225,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -271,8 +276,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -291,6 +300,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -334,6 +346,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -357,6 +370,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/contribs/slurm_completion_help/slurm_completion.sh b/contribs/slurm_completion_help/slurm_completion.sh
index 476e8690a..6f57163ea 100644
--- a/contribs/slurm_completion_help/slurm_completion.sh
+++ b/contribs/slurm_completion_help/slurm_completion.sh
@@ -40,10 +40,10 @@
 function compute_set_diff(){
     res=""
     for i in $1; do
-        [[ "$2" =~ ${i%%=*} ]] && continue 
-        res="$i $res"
+	[[ "$2" =~ ${i%%=*} ]] && continue
+	res="$i $res"
     done
-    echo $res 
+    echo $res
 }
 
 _split_long_opt() {
@@ -54,88 +54,88 @@ _split_long_opt() {
 function find_first_partial_occurence(){
     res=""
     for item1 in $1; do
-        for item2 in $2; do
-            if [[ $item2 == "$item1=" ]]; then
-                res="$item1"
-                break
-            fi
-        done
-        if [[ $res != "" ]]; then
-            break
-        fi
+	for item2 in $2; do
+	    if [[ $item2 == "$item1=" ]]; then
+		res="$item1"
+		break
+	    fi
+	done
+	if [[ $res != "" ]]; then
+	    break
+	fi
     done
-    echo $res 
+    echo $res
 }
 
 function find_first_occurence(){
     res=""
     for item1 in $1; do
-        for item2 in $2; do
-            if [[ $item1 = $item2 ]]; then
-                res="$item1"
-                break
-            fi
-        done
-        if [[ $res != "" ]]; then
-            break
-        fi
+	for item2 in $2; do
+	    if [[ $item1 = $item2 ]]; then
+		res="$item1"
+		break
+	    fi
+	done
+	if [[ $res != "" ]]; then
+	    break
+	fi
     done
-    echo $res 
+    echo $res
 }
 
 function offer (){
     remainings=$(compute_set_diff "$1" "${COMP_WORDS[*]}")
-    COMPREPLY=( $( compgen -W "$remainings" -- $cur ) ) 
+    COMPREPLY=( $( compgen -W "$remainings" -- $cur ) )
     if [[ "$1" == *=* || "$1" == *%* || "$1" == *:* ]];
     then
-        #echo "NO SPACE $1" >> loglog
-        compopt -o nospace
+	#echo "NO SPACE $1" >> loglog
+	compopt -o nospace
     fi
 }
 
 function offer_list () {
-    curlist=${cur%,*} 
-    curitem=${cur##*,} 
+    curlist=${cur%,*}
+    curitem=${cur##*,}
 
-    if [[ $curlist == $curitem ]]      
+    if [[ $curlist == $curitem ]]
     then
-        COMPREPLY=( $( compgen -W "${1}" -- $cur ) ) ; return
-    elif [[ $cur == *,  ]]  ; 
-    then 
-        compvalues=""
-        for i in $1;do
-            [[ $cur =~ $i ]] && continue 
-            compvalues="$i $compvalues "
-        done
-        uniqueprefix=1
-        prefix=${compvalues:0:1}
-        for i in $compvalues;do
-            [[ ${i:0:1} == $prefix ]] || uniqueprefix=0
-        done
-        if [[ $uniqueprefix == 1  ]]
-        then
-            compvalues=""
-            for i in $1;do
-                [[ $cur =~ $i ]] && continue 
-                compvalues="$compvalues $curlist,$i"
-            done
-        fi
-        COMPREPLY=( $( compgen -W "${compvalues}" -- "" ) ) ; return
-    else                                 
-        compvalues=""
-        for i in $1;do
-            [[ $cur =~ $i ]] && continue 
-            compvalues="$compvalues $curlist,$i"
-        done
-        COMPREPLY=( $( compgen -W "${compvalues}" -- $cur ) ) ; 
+	COMPREPLY=( $( compgen -W "${1}" -- $cur ) ) ; return
+    elif [[ $cur == *,  ]]  ;
+    then
+	compvalues=""
+	for i in $1;do
+	    [[ $cur =~ $i ]] && continue
+	    compvalues="$i $compvalues "
+	done
+	uniqueprefix=1
+	prefix=${compvalues:0:1}
+	for i in $compvalues;do
+	    [[ ${i:0:1} == $prefix ]] || uniqueprefix=0
+	done
+	if [[ $uniqueprefix == 1  ]]
+	then
+	    compvalues=""
+	    for i in $1;do
+		[[ $cur =~ $i ]] && continue
+		compvalues="$compvalues $curlist,$i"
+	    done
+	fi
+	COMPREPLY=( $( compgen -W "${compvalues}" -- "" ) ) ; return
+    else
+	compvalues=""
+	for i in $1;do
+	    [[ $cur =~ $i ]] && continue
+	    compvalues="$compvalues $curlist,$i"
+	done
+	COMPREPLY=( $( compgen -W "${compvalues}" -- $cur ) ) ;
     fi
-} 
+}
 
 function offer_many () {
     availablevalues=""
     for i in $1;do
-        [[ $cur =~ $i ]] && continue 
-        availablevalues="$i $availablevalues"
+	[[ $cur =~ $i ]] && continue
+	availablevalues="$i $availablevalues"
     done
 
     # Check that there is no unique prefix for all remaining options (God knows why I have to do this. Must be missing something)
@@ -143,7 +143,7 @@ function offer_many () {
     uniqueprefix=1
     prefix=${availablevalues:0:1}
     for i in $availablevalues;do
-        [[ ${i:0:1} == $prefix ]] || uniqueprefix=0
+	[[ ${i:0:1} == $prefix ]] || uniqueprefix=0
     done
 
 
@@ -153,37 +153,37 @@ function offer_many () {
     #fi #added for --format in squeue
 
     if [[  ${COMP_WORDS[COMP_CWORD-1]} == "$argname" ]]; then
-        # echo  "The first value is about to be entered" >> loglog
-        cur=""
-        COMPREPLY=( $( compgen -W "${1}" -- $cur ) ) ; return
+	# echo  "The first value is about to be entered" >> loglog
+	cur=""
+	COMPREPLY=( $( compgen -W "${1}" -- $cur ) ) ; return
     fi
     if [[ ${COMP_WORDS[COMP_CWORD-1]} == '='  && "$cur" != *,* ]]; then
-        # echo  "A supplementary value is being entered" >> loglog
-        COMPREPLY=( $( compgen -W "${1}" -- $cur ) ) ; return
+	# echo  "A supplementary value is being entered" >> loglog
+	COMPREPLY=( $( compgen -W "${1}" -- $cur ) ) ; return
     fi
     if [[ ${cur:${#cur}-1:1} == "," && $uniqueprefix == 0  ]]; then
-         echo  "A supplementary value is about to be entered and there is a no unique suffix" >> loglog
-        compvalues=""
-        for i in $1;do
-            [[ $cur =~ $i ]] && continue 
-            compvalues="$i $compvalues"
-        done
-        cur=""
-        COMPREPLY=( $( compgen -W "${compvalues}" -- $cur ) ) ;
-        return
+	 echo  "A supplementary value is about to be entered and there is a no unique suffix" >> loglog
+	compvalues=""
+	for i in $1;do
+	    [[ $cur =~ $i ]] && continue
+	    compvalues="$i $compvalues"
+	done
+	cur=""
+	COMPREPLY=( $( compgen -W "${compvalues}" -- $cur ) ) ;
+	return
     fi
-    if [[ "$cur" =~ ","  ]] ; then 
-         echo  "A supplementary value is about to be entered and there is a unique prefix or we are in the middle of one" >> loglog
-        compvalues=""
-        for i in $1;do
-            [[ $cur =~ $i ]] && continue 
-            compvalues="$compvalues ${cur%,*},$i"
-            #compvalues="$compvalues $i"
-        done
-        COMPREPLY=( $( compgen -W "${compvalues}" -- $cur ) ) ; 
-
-        # This is lame, we show complete list rather than last element
-        return
+    if [[ "$cur" =~ ","  ]] ; then
+	 echo  "A supplementary value is about to be entered and there is a unique prefix or we are in the middle of one" >> loglog
+	compvalues=""
+	for i in $1;do
+	    [[ $cur =~ $i ]] && continue
+	    compvalues="$compvalues ${cur%,*},$i"
+	    #compvalues="$compvalues $i"
+	done
+	COMPREPLY=( $( compgen -W "${compvalues}" -- $cur ) ) ;
+
+	# This is lame, we show complete list rather than last element
+	return
     fi
     return 255
 }
@@ -197,49 +197,55 @@ function param () {
 }
 
 function _jobs() {
-echo $( scontrol -o show jobs | cut -d' ' -f 1 | cut -d'=' -f 2 ) ; 
+echo $( scontrol -o show jobs | cut -d' ' -f 1 | cut -d'=' -f 2 ) ;
 }
 function _wckeys() {
-echo $(sacctmgr -p -n list wckeys | cut -d'|' -f1) ; 
+echo $(sacctmgr -p -n list wckeys | cut -d'|' -f1) ;
 }
 function _qos() {
-echo $(sacctmgr -p -n list qos | cut -d'|' -f1) ; 
+echo $(sacctmgr -p -n list qos | cut -d'|' -f1) ;
 }
 function _clusters() {
-echo $(sacctmgr -p -n list clusters | cut -d'|' -f1) ; 
+echo $(sacctmgr -p -n list clusters | cut -d'|' -f1) ;
 }
 function _jobnames() {
-echo $( scontrol -o show jobs | cut -d' ' -f 2 | cut -d'=' -f 2 ) ; 
+echo $( scontrol -o show jobs | cut -d' ' -f 2 | cut -d'=' -f 2 ) ;
 }
 function _partitions() {
-echo $(scontrol show partitions|grep PartitionName|cut -c 15- |cut -f 1 -d' '|paste -s -d ' ') ; 
+echo $(scontrol show partitions|grep PartitionName|cut -c 15- |cut -f 1 -d' '|paste -s -d ' ') ;
 }
 function _nodes() {
-echo $(scontrol show nodes | grep NodeName | cut -c 10- | cut -f 1 -d' ' | paste -s -d ' ') ; 
+echo $(scontrol show nodes | grep NodeName | cut -c 10- | cut -f 1 -d' ' | paste -s -d ' ') ;
 }
 function _accounts() {
-echo $(sacctmgr -pn list accounts | cut -d'|' -f1 | paste -s -d' ') ; 
+echo $(sacctmgr -pn list accounts | cut -d'|' -f1 | paste -s -d' ') ;
 }
 function _licenses() {
-echo $(scontrol show config| grep Licenses | sed 's/Licenses *=//'| paste -s -d' ') ; 
+echo $(scontrol show config| grep Licenses | sed 's/Licenses *=//'| paste -s -d' ') ;
 }
 function _nodes() {
-echo $(scontrol show nodes | grep NodeName | cut -c 10- | cut -f 1 -d' ' | paste -s -d ' ') ; 
+echo $(scontrol show nodes | grep NodeName | cut -c 10- | cut -f 1 -d' ' | paste -s -d ' ') ;
 }
 function _features() {
-echo $(scontrol -o show nodes|cut -d' ' -f7|sed 's/Features=//'|sort -u|tr -d '()'|paste -d, -s) ; 
+echo $(scontrol -o show nodes|cut -d' ' -f7|sed 's/Features=//'|sort -u|tr -d '()'|paste -d, -s) ;
 }
 function _users() {
-echo $(sacctmgr -pn list users | cut -d'|' -f1) ; 
+echo $(sacctmgr -pn list users | cut -d'|' -f1) ;
 }
 function _reservations() {
-echo $(scontrol -o show reservations | cut -d' ' -f1 | cut -d= -f2) ; 
+echo $(scontrol -o show reservations | cut -d' ' -f1 | cut -d= -f2) ;
 }
 function _gres() {
 echo $(scontrol show config | grep GresTypes | cut -d= -f2)
 }
 function _jobname() {
-echo $(scontrol show -o jobs | cut -d' ' -f 2 | sed 's/Name=//') 
+echo $(scontrol show -o jobs | cut -d' ' -f 2 | sed 's/Name=//')
+}
+function _resource() {
+echo $(sacctmgr -pn list resource | cut -d'|' -f1 | paste -s -d' ')
+}
+function _step() {
+echo $( scontrol -o show step | cut -d' ' -f 1 | cut -d'=' -f 2 ) ;
 }
 
 _sacctmgr()
@@ -247,22 +253,30 @@ _sacctmgr()
     _get_comp_words_by_ref cur prev words cword
     _split_long_opt
 
-    local subopts="" 
-    local commands="add create delete dump list load modify show "
-    local entities="account association cluster configuration coordinator\
-            event job qos transaction user wckeys"
-    
-    local shortoptions="-h -i -n -p -P -Q -r -s -s -v -V"
+    local subopts=""
+    local commands="add archive create delete dump list load modify show "
+    local shortoptions="-h -i -n -p -P -Q -r -s -v -V"
     local longoptions="--help --immediate --noheader --parsable \
-        --parsable2 --quiet --readonly --associations --verbose --version"
+	--parsable2 --quiet --readonly --associations --verbose --version"
 
 
     local assocparams="clusters= accounts= users= partition= "
-    local assocbasedparams="defaultqos= fairshare= grpcpumins= grpcpus= \
-        grpjobs= grpnodes= grpsubmitjobs= grpwall= maxcpumins= maxcpus= maxjobs= \
-        maxnodes= maxsubmitjobs= maxwall= qoslevel="
 
-    # Check whether we are in the middle of an option. If so serve them. 
+    local assocbasedparams="defaultqos= fairshare= gracetime=\
+			    grpcpumins= grpcpurunmins= grpcpus=\
+			    grpjobs= grpmemory= grpnodes= grpsubmitjobs=\
+			    grpwall= maxcpumins= maxcpus= maxjobs= maxnodes=\
+			    maxsubmitjobs= maxwall= qoslevel="
+
+    local qosflags="DenyOneLimit EnforceUsageThreshold NoReserve\
+		    PartitionMaxNodes PartitionMinNodes PartitionQos\
+		    PartitionTimeLimit"
+    local qospreempt="cluster cancel checkpoint requeue suspend"
+
+    local clusflags="aix bgl bgq bluegene crayxt frontend multipleslumd\
+		     sunconstellation xcpu"
+
+    # Check whether we are in the middle of an option. If so serve them.
     remainings=$(compute_set_diff "$longoptions" "${COMP_WORDS[*]}")
     [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
     [[ $cur == --* ]] && { offer "$remainings" ; return ; }
@@ -270,259 +284,306 @@ _sacctmgr()
     # Search for a command in the argument list (first occurence)
     # the command might be in any position because of the options
     command=$(find_first_occurence "${COMP_WORDS[*]}" "$commands")
-    
+
     # If no command has been entered, serve the list of valid commands
     [[ $command == "" ]] && { offer "$commands" ; return ; }
 
     # Load command has a specific syntax. Treat it first
     [[ $command == "load" ]] && { _filedir ; return ; }
 
-    entity=$(find_first_occurence "${COMP_WORDS[*]}" "$entities")
-
-    [[ $entity == "" ]] && { offer "$entities" ; return ; }
-
     case $command in
-    add|create) 
-        objects="account cluster coordinator qos user "
-        object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
-        case $object in
-        account)  
-            params="cluster= description= name= organization= parent= "
-            if param "cluster" ; then  offer_list "$(_clusters)" ; 
-            elif param "parent" ; then  offer_list "$(_accounts)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        cluster)
-            params="classification= flags= name= rpc="
-            if param "flags" ; then  offer_list "aix bgl bgq bluegene crayxt frontend \
-                multipleslumd sunconstellation xcpu" ; 
-            else offer "$params" 
-            fi
-            ;;
-        coordinator)
-            params="accounts= names="
-            if param "names" ; then  offer_list "$(_users)" ; 
-            elif param "accounts" ; then  offer_list "$(_accounts)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        qos)  
-            params="flags= grpcpumins= grpcpus= grpjobs= grpnodes= grpsubmitjobs= grpwall= \
-                maxcpumins= maxcpus= maxjobs= maxsubmitjobs= maxwall= name= preempt= \
-                preemptmode= priority= usagefactor= usagethreshold= "
-            if param "flags" ; then  offer_list "EnforceUsageThreshold NoReserve \
-                                            PartitionMaxNodes PartitionMinNodes PartitionTimeLimit" ; 
-            elif param "preemptmode" ; then  offer_list "cluster cancel checkpoint requeue suspend" ; 
-            elif param "flags" ; then  offer_list "enforceusagethreshold noreserve \
-                parittionmaxnodes partitionminnodes partitiontimelimit" ; 
-            elif param "preempt" ; then  offer_list "$(_qos)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        user)  
-            params="account= adminlevel= cluster= defaultaccount= defaultwckey= name= "
-            if param "defaultaccount" ; then  offer_list "$(_accounts)" ; 
-            elif param "account" ; then  offer_list "$(_accounts)"; 
-            elif param "adminlevel" ; then  offer_list "none operator admin" ; 
-            elif param "cluster" ; then  offer_list "$(_cluster)" ; 
-            elif param "defaultwckey" ; then  offer_list "$(_wckey)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        *) offer "$objects" ;;
-        esac
-        ;;
+    add|create)
+	objects="account cluster coordinator qos user "
+	object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
+	case $object in
+	account)
+	    params="cluster= description= name= organization= parent=\
+		    rawusage= withassoc withcoord withdeleted"
+	    if param "cluster" ; then  offer_list "$(_clusters)" ;
+	    elif param "parent" ; then  offer_list "$(_accounts)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	cluster)
+	    params="classification= flags= name= rpc= wolimits"
+	    if param "flags" ; then  offer_list "$clusflags" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	coordinator)
+	    params="accounts= names="
+	    if param "names" ; then  offer_list "$(_users)" ;
+	    elif param "accounts" ; then  offer_list "$(_accounts)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	qos)
+	    params="flags= gracetime= grpcpumins= grpcpurunmins= grpcpus=\
+		    grpjobs= grpnodes= grpsubmitjobs= grpwall= maxcpumins=\
+		    maxcpus= maxcpusperuser= maxjobs= maxnodes= mincpus=\
+		    maxnodesperuser= maxsubmitjobs= maxwall= name= preempt=\
+		    preemptmode= priority= usagefactor= usagethreshold=\
+		    withdeleted"
+	    if param "preemptmode" ; then  offer_list "$qospreempt" ;
+	    elif param "flags" ; then  offer_list "$qosflags" ;
+	    elif param "preempt" ; then  offer_list "$(_qos)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	user)
+	    params="account= adminlevel= cluster= defaultaccount=\
+	       defaultwckey= name= partition= rawusage= wckey= withassoc\
+	       withcoord withdeleted"
+	    if param "defaultaccount" ; then  offer_list "$(_accounts)" ;
+	    elif param "account" ; then  offer_list "$(_accounts)";
+	    elif param "adminlevel" ; then  offer_list "none operator admin" ;
+	    elif param "cluster" ; then  offer_list "$(_clusters)" ;
+	    elif param "defaultwckey" ; then  offer_list "$(_wckey)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	*) offer "$objects" ;;
+	esac
+	;;
+    archive)
+	objects="dump load"
+	object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
+	case $object in
+	dump)
+            _filedir
+	    ;;
+	load)
+            _filedir
+	    ;;
+	*) offer "$objects"
+	    ;;
+	esac
+	;;
     delete)
-        objects="account cluster coordinator qos user"
-        object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
-        case $object in
-        account)  
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            params="cluster= description= name= organization= parent="
-            if param "cluster" ; then  offer_list "$(_clusters)" ; 
-            elif param "parent" ; then  offer_list "$(_accounts)" ; 
-            elif param "name" ; then  offer_list "$(_accounts)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        cluster)
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            params="classification= flags= name= rpc= $assocbasedparams"
-            if param "flags" ; then  offer_list "aix bgl bgq bluegene crayxt frontend \
-                multipleslumd sunconstellation xcpu" ; 
-            elif param "defaultqos" ; then  offer_list "$(_qos)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        coordinator)
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            params="accounts= names="
-            if param "names" ; then  offer_list "$(_users)" ; 
-            elif param "accounts" ; then  offer_list "$(_accounts)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        user)
-            params="account= adminlevel= cluster= defaultaccount= defaultwckey= name= wckeys= withassoc"
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            if param "defaultaccount" ; then  offer_list "$(_accounts)" ; 
-            elif param "account" ; then  offer_list "$(_accounts)"; 
-            elif param "adminlevel" ; then  offer_list "none operator admin" ; 
-            elif param "cluster" ; then  offer_list "$(_cluster)" ; 
-            elif param "wckeys" ; then  offer_list "$(_wckeys)" ; 
-            elif param "defaultwckey" ; then  offer_list "$(_wckey)" ; 
-            else offer "$params" ;
-            fi
-            ;;
-        *) offer "$objects" 
-            ;;
-        esac
-        ;;
+	objects="account cluster coordinator qos user"
+	object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
+	case $object in
+	account)
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
+	    params="cluster= description= name= organization= parent="
+	    if param "cluster" ; then  offer_list "$(_clusters)" ;
+	    elif param "parent" ; then  offer_list "$(_accounts)" ;
+	    elif param "name" ; then  offer_list "$(_accounts)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	cluster)
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    params="classification= flags= name= rpc= $assocbasedparams"
+	    if param "flags" ; then  offer_list "$clusflags" ;
+	    elif param "defaultqos" ; then  offer_list "$(_qos)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	coordinator)
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    params="accounts= names="
+	    if param "names" ; then  offer_list "$(_users)" ;
+	    elif param "accounts" ; then  offer_list "$(_accounts)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	user)
+	    params="account= adminlevel= cluster= defaultaccount=\
+		    defaultwckey= name= wckeys= withassoc"
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    if param "defaultaccount" ; then  offer_list "$(_accounts)" ;
+	    elif param "account" ; then  offer_list "$(_accounts)";
+	    elif param "adminlevel" ; then  offer_list "none operator admin" ;
+	    elif param "cluster" ; then  offer_list "$(_clusters)" ;
+	    elif param "wckeys" ; then  offer_list "$(_wckeys)" ;
+	    elif param "defaultwckey" ; then  offer_list "$(_wckey)" ;
+	    else offer "$params" ;
+	    fi
+	    ;;
+	*) offer "$objects"
+	    ;;
+	esac
+	;;
     list|show)
-        objects="account association cluster configuration \
-            event problem qos transaction user wckey"
-        object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
-        case $object in
-        account)  
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            params="cluster= description= name= organization= parent= withassoc \ 
-                withcoord withdeleted $assocparams $assocbasedparams"
-            if param "cluster" ; then  offer_list "$(_clusters)" ; 
-            elif param "parent" ; then  offer_list "$(_accounts)" ; 
-            elif param "users" ; then  offer_list "$(_users)" ; 
-            elif param "partition" ; then  offer_list "$(_partition)" ; 
-            elif param "defaultqos" ; then  offer_list "$(_qos)" ; 
-            elif param "name" ; then  offer_list "$(_accounts)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        association) 
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            params="$assocparams onlydefaults tree withdeleted withsubaccounts \
-                wolimits wopinfo woplimits"
-            if param "clusters" ; then  offer_list "$(_clusters)" ; 
-            elif param "accounts" ; then  offer_list "$(_accounts)" ; 
-            elif param "users" ; then  offer_list "$(_users)" ; 
-            elif param "partition" ; then  offer_list "$(_partitions)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        cluster)
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            params="classification= flags= name= rpc= $assocbasedparams"
-            if param "flags" ; then  offer_list "aix bgl bgq bluegene crayxt frontend \
-                multipleslumd sunconstellation xcpu" ; 
-            elif param "defaultqos" ; then  offer_list "$(_qos)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        event)
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            params="all_clusters all_time clusters= end= event= maxcpu= mincpus= \
-                nodes= reason= start= states= user= "
-            if param "clusters" ; then  offer_list "$(_clusters)" ; 
-            elif param "nodes" ; then  offer_list "$(_nodes)" ; 
-            elif param "event" ; then  offer_list "cluster node" ; 
-            elif param "states" ; then  offer_list "alloc allocated down drain \
-                        fail failing idle mixed maint power_down power_up resume" ; 
-            elif param "users" ; then  offer_list "$(_users)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        qos)  
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            params="flags= grpcpumins= grpcpus= grpjobs= grpnodes= grpsubmitjobs= grpwall= \
-                maxcpumins= maxcpus= maxjobs= maxsubmitjobs= maxwall= name= preempt= \
-                preemptmode= priority= usagefactor= usagethreshold= withdeleted"
-            if param "flags" ; then  offer_list "EnforceUsageThreshold NoReserve \
-                                            PartitionMaxNodes PartitionMinNodes PartitionTimeLimit" ; 
-            elif param "preemptmode" ; then  offer_list "cluster cancel checkpoint requeue suspend" ; 
-            elif param "flags" ; then  offer_list "enforceusagethreshold noreserve \
-                parittionmaxnodes partitionminnodes partitiontimelimit" ; 
-            elif param "preempt" ; then  offer_list "$(_qos)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        transaction)
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            params="accounts= action= actor= clusters= endtime= startime= users= withassoc"
-            if param "accounts" ; then  offer_list "$(_accounts)" ; 
-            elif param "actor" ; then  offer_list "$(_users)" ; 
-            elif param "clusters" ; then  offer_list "$(_clusters)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        user)  
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            params="account= adminlevel= cluster= defaultaccount= defaultwckey= name= wckeys= withassoc"
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            if param "defaultaccount" ; then  offer_list "$(_accounts)" ; 
-            elif param "account" ; then  offer_list "$(_accounts)"; 
-            elif param "adminlevel" ; then  offer_list "none operator admin" ; 
-            elif param "cluster" ; then  offer_list "$(_cluster)" ; 
-            elif param "wckeys" ; then  offer_list "$(_wckeys)" ; 
-            elif param "defaultwckey" ; then  offer_list "$(_wckey)" ; 
-            else offer "$params" ;
-            fi
-            ;;
-        *) offer "$objects" ;;
-        esac
-        ;;
+	objects="account association cluster configuration \
+	    event problem qos resource transaction user wckey"
+	object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
+	case $object in
+	account)
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    params="cluster= description= name= organization= parent=\
+		    withassoc withcoord withdeleted $assocparams\
+		    $assocbasedparams"
+	    if param "cluster" ; then  offer_list "$(_clusters)" ;
+	    elif param "parent" ; then  offer_list "$(_accounts)" ;
+	    elif param "users" ; then  offer_list "$(_users)" ;
+	    elif param "partition" ; then  offer_list "$(_partitions)" ;
+	    elif param "defaultqos" ; then  offer_list "$(_qos)" ;
+	    elif param "name" ; then  offer_list "$(_accounts)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	association)
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    params="$assocparams onlydefaults tree withdeleted withsubaccounts\
+		    wolimits wopinfo woplimits"
+	    if param "clusters" ; then  offer_list "$(_clusters)" ;
+	    elif param "accounts" ; then  offer_list "$(_accounts)" ;
+	    elif param "users" ; then  offer_list "$(_users)" ;
+	    elif param "partition" ; then  offer_list "$(_partitions)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	cluster)
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    params="classification= flags= name= rpc= $assocbasedparams\
+		    wolimits"
+	    if param "flags" ; then  offer_list "$clusflags" ;
+	    elif param "defaultqos" ; then  offer_list "$(_qos)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	event)
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    params="all_clusters all_time clusters= end= event= maxcpu=\
+		    mincpus= nodes= reason= start= states= user= "
+	    if param "clusters" ; then  offer_list "$(_clusters)" ;
+	    elif param "nodes" ; then  offer_list "$(_nodes)" ;
+	    elif param "event" ; then  offer_list "cluster node" ;
+	    elif param "states" ; then  offer_list "alloc allocated down drain\
+			fail failing idle mixed maint power_down power_up\
+			resume" ;
+	    elif param "users" ; then  offer_list "$(_users)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	qos)
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    params="flags= gracetime= grpcpumins= grpcpus= grpcpurunmins=\
+		    grpjobs= grpnodes= grpsubmitjobs= grpwall= id= maxcpumins=\
+		    maxcpusperuser= maxcpus= maxjobs= maxnodes= mincpus=\
+		    maxnodesperuser= maxsubmitjobs= maxwall= name= preempt=\
+		    preemptmode= priority= rawusage= usagefactor=\
+		    usagethreshold= withdeleted"
+	    if param "preemptmode" ; then  offer_list "cluster cancel\
+							 checkpoint requeue\
+							 suspend" ;
+	    elif param "flags" ; then  offer_list "$qosflags" ;
+	    elif param "preempt" ; then  offer_list "$(_qos)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	resource)
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    params="cluster= count= description= flags= servertype= name=\
+		    percentallowed= server= type="
+	    if param "name" ; then  offer_list "$(_resource)" ;
+	    elif param "cluster" ; then offer_list "$(_clusters)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	transaction)
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    params="accounts= action= actor= clusters= endtime= starttime=\
+		users= withassoc"
+	    if param "accounts" ; then  offer_list "$(_accounts)" ;
+	    elif param "actor" ; then  offer_list "$(_users)" ;
+	    elif param "clusters" ; then  offer_list "$(_clusters)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	user)
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    params="account= adminlevel= cluster= defaultaccount=\
+		    defaultwckey= name= partition= wckeys= withassoc withcoord \
+		    withdeleted"
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    if param "defaultaccount" ; then  offer_list "$(_accounts)" ;
+	    elif param "account" ; then  offer_list "$(_accounts)";
+	    elif param "adminlevel" ; then  offer_list "none operator admin" ;
+	    elif param "cluster" ; then  offer_list "$(_clusters)" ;
+	    elif param "wckeys" ; then  offer_list "$(_wckeys)" ;
+	    elif param "defaultwckey" ; then  offer_list "$(_wckey)" ;
+	    else offer "$params" ;
+	    fi
+	    ;;
+	*) offer "$objects" ;;
+	esac
+	;;
     modify)
-        objects="account cluster job qos user"
-        object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
-        case $object in
-        account)
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            params="cluster= description= name= organization= parent="
-            if param "cluster" ; then  offer_list "$(_clusters)" ; 
-            elif param "parent" ; then  offer_list "$(_accounts)" ; 
-            elif param "name" ; then  offer_list "$(_accounts)" ; 
-            else offer "$params set" 
-            fi
-            ;;
-        cluster)
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            params="classification= flags= name= rpc= $assocbasedparams"
-            if param "flags" ; then  offer_list "aix bgl bgq bluegene crayxt frontend \
-                multipleslumd sunconstellation xcpu" ; 
-            elif param "defaultqos" ; then  offer_list "$(_qos)" ; 
-            else offer "$params set" 
-            fi
-            ;;
-        qos)
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            params="flags= grpcpumins= grpcpus= grpjobs= grpnodes= grpsubmitjobs= grpwall= \
-                maxcpumins= maxcpus= maxjobs= maxsubmitjobs= maxwall= name= preempt= \
-                preemptmode= priority= usagefactor= usagethreshold= withdeleted"
-            if param "flags" ; then  offer_list "EnforceUsageThreshold NoReserve \
-                                            PartitionMaxNodes PartitionMinNodes PartitionTimeLimit" ; 
-            elif param "preemptmode" ; then  offer_list "cluster cancel checkpoint requeue suspend" ; 
-            elif param "flags" ; then  offer_list "enforceusagethreshold noreserve \
-                parittionmaxnodes partitionminnodes partitiontimelimit" ; 
-            elif param "preempt" ; then  offer_list "$(_qos)" ; 
-            else offer "$params set" 
-            fi
-            ;;
-        user)
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            params="account= adminlevel= cluster= defaultaccount= defaultwckey= name= wckeys= withassoc"
-            if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ; return ;fi
-            if param "defaultaccount" ; then  offer_list "$(_accounts)" ; 
-            elif param "account" ; then  offer_list "$(_accounts)"; 
-            elif param "adminlevel" ; then  offer_list "none operator admin" ; 
-            elif param "cluster" ; then  offer_list "$(_cluster)" ; 
-            elif param "wckeys" ; then  offer_list "$(_wckeys)" ; 
-            elif param "defaultwckey" ; then  offer_list "$(_wckey)" ; 
-            else offer "$params" ;
-            fi
-            ;;
-        *) offer "$objects" 
-            ;;
-        esac
-        ;;
-            
+	objects="account cluster job qos user"
+	object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
+	case $object in
+	account)
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    params="cluster= description= name= organization= parent=\
+		rawusage= $assocbasedparams"
+	    if param "cluster" ; then  offer_list "$(_clusters)" ;
+	    elif param "parent" ; then  offer_list "$(_accounts)" ;
+	    elif param "name" ; then  offer_list "$(_accounts)" ;
+	    else offer "$params set"
+	    fi
+	    ;;
+	cluster)
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    params="classification= flags= name= rpc= $assocbasedparams"
+	    if param "flags" ; then  offer_list "$clusflags" ;
+	    elif param "defaultqos" ; then  offer_list "$(_qos)" ;
+	    else offer "$params set"
+	    fi
+	    ;;
+	qos)
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    params="flags= gracetime= grpcpumins= grpcpurunmins= grpcpus=\
+		    grpjobs= grpnodes= grpsubmitjobs= grpwall= maxcpumins=\
+		    maxcpus= maxcpusperuser= maxjobs= maxnodes= mincpus=\
+		    maxnodesperuser= maxsubmitjobs= maxwall= name= preempt=\
+		    preemptmode= priority= rawusage= usagefactor=\
+		    usagethreshold= withdeleted"
+	    if param "flags" ; then  offer_list "$qosflags" ;
+	    elif param "name" ; then offer_list "$(_qos)" ;
+	    elif param "preemptmode" ; then  offer_list "$qospreempt" ;
+	    elif param "preempt" ; then  offer_list "$(_qos)" ;
+	    else offer "$params set"
+	    fi
+	    ;;
+	user)
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    params="account= adminlevel= cluster= defaultaccount=\
+		    defaultwckey= name= partition= rawusage= wckeys= withassoc"
+	    if [[ "${COMP_WORDS[*]}" != *where* ]] ; then offer "where" ;
+		    return ;fi
+	    if param "defaultaccount" ; then  offer_list "$(_accounts)" ;
+	    elif param "account" ; then  offer_list "$(_accounts)";
+	    elif param "adminlevel" ; then  offer_list "none operator admin" ;
+	    elif param "cluster" ; then  offer_list "$(_clusters)" ;
+	    elif param "wckeys" ; then  offer_list "$(_wckeys)" ;
+	    elif param "defaultwckey" ; then  offer_list "$(_wckey)" ;
+	    else offer "$params" ;
+	    fi
+	    ;;
+	*) offer "$objects"
+	    ;;
+	esac
+	;;
+
     esac
 }
 complete -F _sacctmgr sacctmgr
@@ -532,14 +593,14 @@ _sreport()
     _get_comp_words_by_ref cur prev words cword
     _split_long_opt
 
-    local subopts="" 
+    local subopts=""
     local commands="cluster job user reservation"
-    
-    local shortoptions="-a -n -h -p -P -t -v -V"
+
+    local shortoptions="-a -n -h -p -P -Q -t -v -V"
     local longoptions="--all_clusters --help --noheader --parsable\
-                        --parsable2--quiet --verbose --version"
+			--parsable2 --quiet --verbose --version"
 
-    # Check whether we are in the middle of an option. If so serve them. 
+    # Check whether we are in the middle of an option. If so serve them.
     remainings=$(compute_set_diff "$longoptions" "${COMP_WORDS[*]}")
     [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
     [[ $cur == --* ]] && { offer "$remainings" ; return ; }
@@ -547,116 +608,118 @@ _sreport()
     # Search for a command in the argument list (first occurence)
     # the command might be in any position because of the options
     command=$(find_first_occurence "${COMP_WORDS[*]}" "$commands")
-    
+
     # If no command has been entered, serve the list of valid commands
     [[ $command == "" ]] && { offer "$commands" ; return ; }
 
     opts_all="All_Clusters Clusters= End= Format= Start="
 
     case $command in
-    user) 
-        objects="TopUsage"
-        object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
-        case $object in
-        TopUsage)  
-            params="$opts_all Accounts= Group TopCount= Users="
-            if param "Clusters" ; then  offer_list "$(_clusters)" ; 
-            elif param "Format" ; then  offer_list "Account Cluster Login Proper User" ; 
-            elif param "Accounts" ; then  offer_list "$(_accounts)" ; 
-            elif param "Users" ; then  offer_list "$(_users)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        *) offer "$objects" ;;
-        esac
-        ;;
-    reservation) 
-        objects="Utilization"
-        object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
-        case $object in
-        Utilization)  
-            params="$opts_all Names= Nodes="
-            if param "Clusters" ; then  offer_list "$(_clusters)" ; 
-            elif param "Format" ; then  offer_list "Allocated Associations \
-                Clusters CPUCount CPUTime End Idle Name Nodes Start TotalTime" ;  
-            elif param "Nodes" ; then  offer_list "$(_nodes)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        *) offer "$objects" ;;
-        esac
-        ;;
-    job) 
-        objects="SizesByAccount SizesByAccountAndWckey SizesByWckey" 
-        object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
-        case $object in
-        SizesByAccount|SizesByAccountAndWckey)  
-            params="$opts_all Accounts= FlatView GID= Grouping= \
-                Jobs= Nodes= OPartitions= PrintJobCount Users= Wckeys="
-            if param "Clusters" ; then  offer_list "$(_clusters)" ; 
-            elif param "Format" ; then  offer_list "Account Cluster" ;  
-            elif param "Accounts" ; then  offer_list "$(_accounts)" ; 
-            elif param "GID" ; then  _gids ; 
-            elif param "Users" ; then  offer_list "$(_users)" ; 
-            elif param "Wckeys" ; then  offer_list "$(_wckeys)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        SizesByWckey)  
-            params="$opts_all Accounts= FlatView GID= Grouping= \
-                Jobs= Nodes= OPartitions= PrintJobCount Users= Wckeys="
-            if param "Clusters" ; then  offer_list "$(_clusters)" ; 
-            elif param "Format" ; then  offer_list "Wckey Cluster" ; 
-            elif param "Accounts" ; then  offer_list "$(_accounts)" ; 
-            elif param "GID" ; then  _gids ; 
-            elif param "Users" ; then  offer_list "$(_users)" ; 
-            elif param "Wckeys" ; then  offer_list "$(_wckeys)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        *) offer "$objects" ;;
-        esac
-        ;;
-    cluster) 
-        objects="AccountUtilizationByUser UserUtilizationByAccount \
-                    UserUtilizationByWCKey Utilization WCKeyUtilizationByUser"
-        object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
-        case $object in
-        Utilization)
-            params="$opts_all Names= Nodes="
-            if param "Clusters" ; then  offer_list "$(_clusters)" ; 
-            elif param "Format" ; then  offer_list "Allocated Cluster \
-                CPUCount Down Idle Overcommited PlannedDown Reported Reserved" ;
-            elif param "Nodes" ; then  offer_list "$(_nodes)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        AccountUtilizationByUser|UserUtilizationByAccount)
-            params="$opts_all Accounts= Tree Users= Wckeys="
-            if param "Clusters" ; then  offer_list "$(_clusters)" ; 
-            elif param "Format" ; then  offer_list "Accounts Cluster CPUCount \
-                                                    Login Proper Used" ;
-            elif param "Accounts" ; then  offer_list "$(_accounts)" ; 
-            elif param "Users" ; then  offer_list "$(_users)" ; 
-            elif param "Wckeys" ; then  offer_list "$(_wckeys)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        UserUtilizationByWCKey|WCKeyUtilizationByUser)
-            params="$opts_all Accounts= Tree Users= Wckeys="
-            if param "Clusters" ; then  offer_list "$(_clusters)" ; 
-            elif param "Format" ; then  offer_list "Cluster CPUCount Login \
-                                                    Proper Used Wckey" ;  
-            elif param "Accounts" ; then  offer_list "$(_accounts)" ; 
-            elif param "Users" ; then  offer_list "$(_users)" ; 
-            elif param "Wckeys" ; then  offer_list "$(_wckeys)" ; 
-            else offer "$params" 
-            fi
-            ;;
-        *) offer "$objects" ;;
-        esac
-        ;;
-            
+    user)
+	objects="TopUsage"
+	object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
+	case $object in
+	TopUsage)
+	    params="$opts_all Accounts= Group TopCount= Users="
+	    if param "Clusters" ; then  offer_list "$(_clusters)" ;
+	    elif param "Format" ; then  offer_list "Account Cluster Login\
+						    Proper User" ;
+	    elif param "Accounts" ; then  offer_list "$(_accounts)" ;
+	    elif param "Users" ; then  offer_list "$(_users)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	*) offer "$objects" ;;
+	esac
+	;;
+    reservation)
+	objects="Utilization"
+	object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
+	case $object in
+	Utilization)
+	    params="$opts_all Names= Nodes= Accounts= Group TopCount= Users= "
+	    if param "Clusters" ; then  offer_list "$(_clusters)" ;
+	    elif param "Format" ; then  offer_list "Allocated Associations \
+		Clusters CPUCount CPUTime End Flags Idle Name Nodes\
+		ReservationId Start TotalTime";
+	    elif param "Nodes" ; then  offer_list "$(_nodes)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	*) offer "$objects" ;;
+	esac
+	;;
+    job)
+	objects="SizesByAccount SizesByAccountAndWckey SizesByWckey"
+	object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
+	case $object in
+	SizesByAccount|SizesByAccountAndWckey)
+	    params="$opts_all Accounts= FlatView GID= Grouping= \
+		Jobs= Nodes= Partitions= PrintJobCount Users= Wckeys="
+	    if param "Clusters" ; then  offer_list "$(_clusters)" ;
+	    elif param "Format" ; then  offer_list "Account Cluster" ;
+	    elif param "Accounts" ; then  offer_list "$(_accounts)" ;
+	    elif param "GID" ; then  _gids ;
+	    elif param "Users" ; then  offer_list "$(_users)" ;
+	    elif param "Wckeys" ; then  offer_list "$(_wckeys)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	SizesByWckey)
+	    params="$opts_all Accounts= FlatView GID= Grouping= \
+		Jobs= Nodes= Partitions= PrintJobCount Users= Wckeys="
+	    if param "Clusters" ; then  offer_list "$(_clusters)" ;
+	    elif param "Format" ; then  offer_list "Wckey Cluster" ;
+	    elif param "Accounts" ; then  offer_list "$(_accounts)" ;
+	    elif param "GID" ; then  _gids ;
+	    elif param "Users" ; then  offer_list "$(_users)" ;
+	    elif param "Wckeys" ; then  offer_list "$(_wckeys)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	*) offer "$objects" ;;
+	esac
+	;;
+    cluster)
+	objects="AccountUtilizationByUser UserUtilizationByAccount \
+		    UserUtilizationByWCKey Utilization WCKeyUtilizationByUser"
+	object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
+	case $object in
+	Utilization)
+	    params="$opts_all Names= Nodes="
+	    if param "Clusters" ; then  offer_list "$(_clusters)" ;
+	    elif param "Format" ; then  offer_list "Allocated Cluster \
+		CPUCount Down Idle OverCommitted PlannedDown Reported Reserved";
+	    elif param "Nodes" ; then  offer_list "$(_nodes)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	AccountUtilizationByUser|UserUtilizationByAccount)
+	    params="$opts_all Accounts= Tree Users= Wckeys="
+	    if param "Clusters" ; then  offer_list "$(_clusters)" ;
+	    elif param "Format" ; then  offer_list "Accounts Cluster CPUCount\
+						    Login Proper Used" ;
+	    elif param "Accounts" ; then  offer_list "$(_accounts)" ;
+	    elif param "Users" ; then  offer_list "$(_users)" ;
+	    elif param "Wckeys" ; then  offer_list "$(_wckeys)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	UserUtilizationByWCKey|WCKeyUtilizationByUser)
+	    params="$opts_all Accounts= Tree Users= Wckeys="
+	    if param "Clusters" ; then  offer_list "$(_clusters)" ;
+	    elif param "Format" ; then  offer_list "Cluster CPUCount Login\
+						    Proper Used Wckey" ;
+	    elif param "Accounts" ; then  offer_list "$(_accounts)" ;
+	    elif param "Users" ; then  offer_list "$(_users)" ;
+	    elif param "Wckeys" ; then  offer_list "$(_wckeys)" ;
+	    else offer "$params"
+	    fi
+	    ;;
+	*) offer "$objects" ;;
+	esac
+	;;
+
     esac
 }
 complete -F _sreport sreport
@@ -665,250 +728,316 @@ _scontrol()
 {
     local cur=${COMP_WORDS[COMP_CWORD]}
     local prev=${COMP_WORDS[COMP_CWORD-1]}
-    
-    local commands="abort checkpoint create completing delete hold notify \
-                    pidinfo listpids ping reconfigure release requeue resume\
-                    setdebug show shutdown suspend takeover uhold update version"
-    
+
+    local commands="abort checkpoint cluster create completing delete details\
+		    errnumstr help hold notify oneliner\
+		    pidinfo listpids ping quit reboot_nodes reconfigure release\
+		    requeue requeuehold schedloglevel resume\
+		    script setdebug setdebugflags show shutdown suspend\
+		    takeover uhold update verbose version wait_job"
+
     local shortoptions="-a -d -h -M -o -Q -v -V "
     local longoptions="--all --details --help --hide --cluster --oneliner \
-                        --quiet --verbose --version"
-    
-    # Check whether we are in the middle of an option. If so serve them. 
+			--quiet --verbose --version"
+
+    # Check whether we are in the middle of an option. If so serve them.
     remainings=$(compute_set_diff "$longoptions" "${COMP_WORDS[*]}")
     [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
     [[ $cur == -- ]] && { offer "$remainings" ; return ; }
     [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $remainings)"; return ; }
-    
+
     # Search for a command in the argument list (first occurence)
     # the command might be in any position because of the options
     command=$(find_first_occurence "${COMP_WORDS[*]}" "$commands")
-    
+
     # If no command has been entered, serve the list of valid commands
     [[ $command == "" ]] && { offer "$commands" ; return ; }
-    
+
     # Otherwise process command
-    case $command in 
+    case $command in
     shutdown) # scontrol shutdown object
-        offer "slurmctld controller" 
-        ;;
+	offer "slurmctld controller"
+	;;
     setdebug) # scontrol setdebug value
-        offer "quiet info warning error debug debug2 debug3 debug4 debug5 " # FIXME
-        ;;
-    uhold | suspend | release | requeue | resume | hold ) 
-        offer "$(_jobs)" 
-        ;; #TODO notify
+	offer "quiet info warning error debug debug2 debug3 debug4 debug5 " # FIXME
+	;;
+    uhold | suspend | release | requeue | resume | hold )
+	offer "$(_jobs)"
+	;; #TODO notify
     checkpoint) # scontrol checkpoint create jobid [parameter1=value1,...]
-        # This one has unsusual ordering: object is before command. 
-        # command subcommand argument #TODO add support for additional options cfr manpage
-        objects="disable enable able create vacate error restart"
-    
-        if [[ $prev == checkpoint ]]; then 
-            offer "$objects"; 
-        elif [[ $objects == *$prev* ]]; then 
-            offer "$(_jobs)"; 
-        else 
-            echo todo
-            #TODO
-        fi
-        ;;
+	# This one has unusual ordering: object is before command.
+	# command subcommand argument #TODO add support for additional options cfr manpage
+	objects="able create disable enable error restart requeue vacate"
+
+	if [[ $prev == checkpoint ]]; then
+	    offer "$objects";
+	elif [[ $objects == *$prev* ]]; then
+	    offer "$(_jobs)";
+	else
+	    echo todo
+	    #TODO
+	fi
+	;;
     show) # scontrol show object [id]
-        objects="config daemons job nodes partitions reservations \
-                 slurmd steps topology hostlist hostnames"
-    
-        # Search for the current object in the argument list
-        object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
-    
-        # If no object has yet been (fully) typed in, serve the list of objects
-        [[ $object == "" ]] && { offer "$objects" ; return ; }
-    
-        # Otherwise, offer the ids depending on the object
-        if param "job"          ; then offer  "$(_jobs)"         ; fi
-        if param "nodes"        ; then offer_list "$(_nodes)"        ; fi
-        if param "partitions"   ; then offer "$(_partitions)"   ; fi
-        if param "reservations" ; then offer "$(_reservations)"  ; fi
-        #TODO if object "steps"    
-        ;;
+	objects="aliases config block daemons frontend hostlist hostlistsorted\
+		 hostnames job nodes partitions reservations slurmd steps\
+		 submp topology"
+
+	# Search for the current object in the argument list
+	object=$(find_first_occurence "${COMP_WORDS[*]}" "$objects")
+
+	# If no object has yet been (fully) typed in, serve the list of objects
+	[[ $object == "" ]] && { offer "$objects" ; return ; }
+
+	# Otherwise, offer the ids depending on the object
+	if param "job"          ; then offer  "$(_jobs)"         ; fi
+	if param "nodes"        ; then offer_list "$(_nodes)"        ; fi
+	if param "partitions"   ; then offer "$(_partitions)"   ; fi
+	if param "reservations" ; then offer "$(_reservations)"  ; fi
+	#TODO if object "steps"
+	;;
     delete) # scontrol delete objectname=id
-        parameters="partitionname= reservationname="
-    
-        # If a parameter has been fully typed in, serve the corresponding
-        # values, otherwise, serve the list of parameters.
-        if   param "partitionname"   ; then offer_many "$(_partitions)"  
-        elif param "reservationname" ; then offer_many "$(_reservations)" 
-        else offer "$parameters" ; fi
-        ;;
-    update) 
-        parameters="jobid= step= nodename= partitionname= reservationname="
-    
-        param=$(find_first_partial_occurence "${COMP_WORDS[*]}" "$parameters")
-        [[ $param == "" ]] && { offer "$parameters" ; return ; }
-    
-        # If a parameter has been fully typed in, serve the corresponding
-        # values, if it is the first one. 
-        if   param "jobid"   ; then offer_many "$(_jobs)" ; return  
-        elif param "nodename" ; then offer_many "$(_nodes)"  ; return
-        elif param "partitionname" ; then offer_many "$(_partitions)" ; return 
-        elif param "reservationname" ; then offer_many "$(_reservations)"  ; return
-        fi
-    
-        # Otherwise, process the others based on the first one
-        case $param in
-        jobid)
-            local parameters="account=<account> conn-type=<type> \
-              contiguous=<yes|no> dependency=<dependency_list> \
-              eligibletime=yyyy-mm-dd excnodelist=<nodes>\
-              features=<features> geometry=<geo> gres=<list> \
-              licenses=<name> mincpusnode=<count> minmemorycpu=<megabytes> \
-              mintmpdisknode=<megabytes> name=<name> nice[=delta] \
-              nodelist=<nodes> numcpus=<min_count[-max_count] \
-              numnodes=<min_count[-max_count]> numtasks=<count> \
-              partition=<name> priority=<number> qos=<name> \
-              reqcores=<count> reqthreads=<count> requeue=<0|1> \
-              reservationname=<name> rotate=<yes|no> shared=<yes|no> \
-              starttime=yyyy-mm-dd timelimit=[d-]h:m:s wckey=<key>"
-    
-            remainings=$(compute_set_diff "$parameters" "${COMP_WORDS[*]}")
-    
-            # If a new named argument is about to be entered, serve the list of options
-            [[ $cur == "" && $prev != "=" ]] && { offer "$remainings" ; return ; }
-    
-            # Test all potential arguments and server corresponding values
-            if   param "account"         ; then offer_many "$(_accounts)"
-            elif param "excnodelist"     ; then offer_many "$(_nodes)"
-            elif param "nodelist"        ; then offer_many "$(_nodes)"
-            elif param "features"        ; then offer_many "$(_features)"
-            elif param "gres"            ; then offer_many "$(_gres)"
-            elif param "licences"        ; then offer_many "$(_licenses)"
-            elif param "partition"       ; then offer_many "$(_partitions)"
-            elif param "reservationname" ; then offer_many "$(_reservations)"
-            elif param "qos"             ; then offer_many "$(_qos)"
-            elif param "wckey"           ; then offer_many "$(wckeys)"
-            elif param "conn-type"       ; then offer_many "MESH TORUS NAV"
-            elif param "rotate"          ; then offer_many "yes no"
-            elif param "shared"          ; then offer_many "yes no"
-            else offer "$(sed 's/\=[^ ]*/\=/g' <<< $remainings)"
-            fi
-            ;;
-        nodename)  
-            local parameters="features=<features> gres=<gres> \
-               reason=<reason> state=<state> weight=<weight>"
-    
-            remainings=$(compute_set_diff "$parameters" "${COMP_WORDS[*]}")
-    
-            # If a new named argument is about to be entered, serve the list of options
-            [[ $cur == "" && $prev != "=" ]] && { offer "$remainings" ; return ; }
-    
-            # Test all potential arguments and server corresponding values
-            if param "features"   ; then offer_many "$(_features)"
-            elif param "gres"     ; then offer_many "$(_gres)"
-            elif param "state"    ; then offer_many "alloc allocated down drain \
-                        fail failing idle mixed maint power_down power_up resume"
-            else offer "$(sed 's/\=[^ ]*/\=/g' <<< $remainings)"
-            fi
-            ;;
-        partitionname) 
-            local parameters="nodes=<node_list> alternate=<partition_name> default=yes|no 
-              defaulttime=d-h:m:s|unlimited disablerootjobs=yes|no hidden=yes|no \
-              maxnodes=<count> maxtime=d-h:m:s|unlimited minnodes=<count> \
-              allocnodes=<node_list>  preemptmode=off|cancel|checkpoint|requeue|suspend \
-              priority=count rootonly=yes|no shared=yes|no|exclusive|force \
-              state=up|down|drain|inactive allowgroups=<name>" 
-    
-            remainings=$(compute_set_diff "$parameters" "${COMP_WORDS[*]}")
-            # If a new named argument is about to be entered, serve the list of options
-            [[ $cur == "" && $prev != "=" ]] && { offer "$remainings" ; return ; }
-    
-            # Test all potential arguments and server corresponding values
-            if   param "allocnodes"  ; then offer_many "$(_nodes)"
-            elif param "alternate"   ; then offer_many "$(_partitions)"
-            elif param "default"     ; then offer_many  "yes no"
-            elif param "preemptmode" ; then offer_many "off cancel checkpoint requeue suspend"
-            elif param "shared"      ; then offer_many "yes no exclusive force"
-            elif param "state"       ; then offer_many "up down drain inactive"
-            else offer "$(sed 's/\=[^ ]*/\=/g' <<< $remainings)"
-            fi
-            ;;
-        reservationname)
-            local parameters="users=<user_list> nodecnt=<count> \
-              nodes=<node_list> starttime=yyyy-mm-dd[thh:mm[:ss]] \
-              endtime=yyyy-mm-dd[thh:mm[:ss]] duration=[days-]hours:minutes:seconds \
-              flags=maint,overlap,ignore_jobs,daily,weekly \
-              partitionname=<partition_list> features=<feature_list> \
-              accounts=<account_list> licenses=<license>"
-    
-            remainings=$(compute_set_diff "$parameters" "${COMP_WORDS[*]}")
-            # If a new named argument is about to be entered, serve the list of options
-            [[ $cur == "" && $prev != "=" ]] && { offer "$remainings" ; return ; }
-            
-            # test all potential arguments and server corresponding values
-            if   param "accounts" ; then offer_many  "$(_accounts)" 
-            elif param "licences" ; then offer_many "$(_licenses)"
-            elif param "nodes"    ; then offer_many "$(_nodes)"
-            elif param "features" ; then offer_many "$(_features)"
-            elif param "users"    ; then offer_many "$(_users)"
-            elif param "flags"    ; then offer_many " maint overlap ignore_jobs daily weekly "
-            else offer "$(sed 's/\=[^ ]*/\=/g' <<< $remainings)"
-            fi
-            ;;
-        esac
-        ;;
+	parameters="partitionname= reservationname="
+
+	# If a parameter has been fully typed in, serve the corresponding
+	# values, otherwise, serve the list of parameters.
+	if   param "partitionname"   ; then offer_many "$(_partitions)"
+	elif param "reservationname" ; then offer_many "$(_reservations)"
+	else offer "$parameters" ; fi
+	;;
+    update)
+	parameters="jobid= step= nodename= partitionname=\
+		    reservationname="
+
+	param=$(find_first_partial_occurence "${COMP_WORDS[*]}" "$parameters")
+	[[ $param == "" ]] && { offer "$parameters" ; return ; }
+
+	# If a parameter has been fully typed in, serve the corresponding
+	# values, if it is the first one.
+	if   param "jobid"   ; then offer_many "$(_jobs)" ; return
+	elif param "nodename" ; then offer_many "$(_nodes)"  ; return
+	elif param "partitionname" ; then offer_many "$(_partitions)" ; return
+	elif param "reservationname" ; then offer_many "$(_reservations)"  ; return
+	elif param "step" ; then offer_many "$(_step)" ; return
+	fi
+
+	# Otherwise, process the others based on the first one
+	case $param in
+	jobid)
+	    local parameters="account=<account> conn-type=<type> \
+			      contiguous=<yes|no> dependency=<dependency_list>\
+			      eligibletime=yyyy-mm-dd excnodelist=<nodes>\
+			      features=<features> geometry=<geo> gres=<list>\
+			      jobid=<job_id> licenses=<name>\
+			      mincpusnode=<count> minmemorycpu=<megabytes>\
+			      minmemorynode=<megabytes>\
+			      mintmpdisknode=<megabytes> name=<name>\
+			      nice[=delta] nodelist=<nodes>\
+			      numcpus=<min_count[-max_count]\
+			      numnodes=<min_count[-max_count]>\
+			      numtasks=<count> partition=<name>\
+			      priority=<number> qos=<name> reqcores=<count>\
+			      reqnodelist=<nodes> reqsockets=<count>\
+			      reqthreads=<count> requeue=<0|1>\
+			      reservationname=<name> rotate=<yes|no>\
+			      shared=<yes|no> starttime=yyyy-mm-dd\
+			      switches=<count>[@<max-time-to-wait>]\
+			      timelimit=[d-]h:m:s userid=<UID or name>\
+			      wckey=<key>"
+
+	    remainings=$(compute_set_diff "$parameters" "${COMP_WORDS[*]}")
+
+	    # If a new named argument is about to be entered, serve the list of options
+	    [[ $cur == "" && $prev != "=" ]] && { offer "$remainings" ; return ; }
+
+	    # Test all potential arguments and serve corresponding values
+	    if   param "account"         ; then offer_many "$(_accounts)"
+	    elif param "excnodelist"     ; then offer_many "$(_nodes)"
+	    elif param "nodelist"        ; then offer_many "$(_nodes)"
+	    elif param "features"        ; then offer_many "$(_features)"
+	    elif param "gres"            ; then offer_many "$(_gres)"
+	    elif param "licences"        ; then offer_many "$(_licenses)"
+	    elif param "partition"       ; then offer_many "$(_partitions)"
+	    elif param "reservationname" ; then offer_many "$(_reservations)"
+	    elif param "qos"             ; then offer_many "$(_qos)"
+	    elif param "wckey"           ; then offer_many "$(_wckeys)"
+	    elif param "conn-type"       ; then offer_many "MESH TORUS NAV"
+	    elif param "rotate"          ; then offer_many "yes no"
+	    elif param "shared"          ; then offer_many "yes no"
+	    else offer "$(sed 's/\=[^ ]*/\=/g' <<< $remainings)"
+	    fi
+	    ;;
+	nodename)
+	    local parameters="features=<features> gres=<gres> \
+	       reason=<reason> state=<state> weight=<weight>"
+
+	    remainings=$(compute_set_diff "$parameters" "${COMP_WORDS[*]}")
+
+	    # If a new named argument is about to be entered, serve the list of options
+	    [[ $cur == "" && $prev != "=" ]] && { offer "$remainings" ;
+		    return ; }
+
+	    # Test all potential arguments and serve corresponding values
+	    if param "features"   ; then offer_many "$(_features)"
+	    elif param "gres"     ; then offer_many "$(_gres)"
+	    elif param "state"    ; then offer_many "noresp drain fail future\
+						     resume power_down\
+						     power_up undrain"
+	    else offer "$(sed 's/\=[^ ]*/\=/g' <<< $remainings)"
+	    fi
+	    ;;
+	partitionname)
+	    local parameters="allowgroups=<name> allocnodes=<node_list>\
+			      alternate=<partition_name> default=yes|no\
+			      defaulttime=d-h:m:s|unlimited defmempercpu=<MB>\
+			      defmempernode=<MB> disablerootjobs=yes|no\
+			      gracetime=<seconds> hidden=yes|no\
+			      maxmempercpu=<MB> maxmempernode=<MB>\
+			      maxnodes=<count> maxtime=d-h:m:s|unlimited\
+			      minnodes=<count> nodes=<name>\
+			      preemptmode=off|cancel|checkpoint|requeue|suspend\
+			      priority=count rootonly=yes|no reqresv=<yes|no>\
+			      shared=yes|no|exclusive|force\
+			      state=up|down|drain|inactive"
+
+	    remainings=$(compute_set_diff "$parameters" "${COMP_WORDS[*]}")
+	    # If a new named argument is about to be entered, serve the list of options
+	    [[ $cur == "" && $prev != "=" ]] && { offer "$remainings" ;
+		    return ; }
+
+	    # Test all potential arguments and serve corresponding values
+	    if   param "allocnodes"  ; then offer_many "$(_nodes)"
+	    elif param "nodes"       ; then offer_many "$(_nodes)"
+	    elif param "alternate"   ; then offer_many "$(_partitions)"
+	    elif param "default"     ; then offer_many  "yes no"
+	    elif param "preemptmode" ; then offer_many "off cancel checkpoint\
+							requeue suspend"
+	    elif param "shared"      ; then offer_many "yes no exclusive force"
+	    elif param "state"       ; then offer_many "up down drain inactive"
+	    elif param "disablerootjobs" ; then offer_many "yes no"
+	    elif param "hidden"      ; then offer_many "yes no"
+	    elif param "rootonly"    ; then offer_many "yes no"
+	    elif param "reqresv"     ; then offer_many "yes no"
+	    else offer "$(sed 's/\=[^ ]*/\=/g' <<< $remainings)"
+	    fi
+	    ;;
+	reservationname)
+	    local parameters="accounts=<account_list> corecnt=<num>\
+			      duration=[days-]hours:minutes:seconds\
+			      endtime=yyyy-mm-dd[thh:mm[:ss]]\
+			      features=<feature_list>\
+			      flags=maint,overlap,ignore_jobs,daily,weekly\
+			      licenses=<license> nodecnt=<count>\
+			      nodes=<node_list> users=<user_list>\
+			      partitionname=<partition_list>\
+			      starttime=yyyy-mm-dd[thh:mm[:ss]]"
+
+	    remainings=$(compute_set_diff "$parameters" "${COMP_WORDS[*]}")
+	    # If a new named argument is about to be entered, serve the list of options
+	    [[ $cur == "" && $prev != "=" ]] && { offer "$remainings" ;
+		    return ; }
+
+	    # test all potential arguments and serve corresponding values
+	    if   param "accounts" ; then offer_many  "$(_accounts)"
+	    elif param "licences" ; then offer_many "$(_licenses)"
+	    elif param "nodes"    ; then offer_many "$(_nodes)"
+	    elif param "features" ; then offer_many "$(_features)"
+	    elif param "users"    ; then offer_many "$(_users)"
+	    elif param "flags"    ; then offer_many "daily first_cores\
+						     ignore_jobs license_only\
+						     maint overlap part_nodes\
+						     spec_nodes static_alloc\
+						     time_float weekly"
+	    elif param "partitionname" ; then offer_many "$(_partitions)"
+	    else offer "$(sed 's/\=[^ ]*/\=/g' <<< $remainings)"
+	    fi
+	    ;;
+	step)
+	    local parameters="stepid=<job_id>[.<step_id>]\
+			     CompFile=<completion_file> TimeLimit=<time>"
+
+	    remainings=$(compute_set_diff "$parameters" "${COMP_WORDS[*]}")
+	    [[ $cur == "" && $prev != "=" ]] && { offer "$remainings" ;
+		    return ; }
+	    if param "stepid" ; then offer_list "$(_step)" ;
+	    fi
+	    ;;
+
+	esac
+	;;
     create) # command object attribute1=value1 etc.
-        parameters="partition reservation"
-    
-        param=$(find_first_occurence "${COMP_WORDS[*]}" "$parameters")
-        [[ $param == "" ]] && { offer "$parameters" ; return ; }
-    
-        # Process object
-        case $param in
-        partition)
-            local parameters="partitionname=<name> nodes=<node_list> \
-              alternate=<partition_name> default=yes|no \
-              defaulttime=days-hours:minutes:seconds|unlimited \
-              disablerootjobs=yes|no hidden=yes|no maxnodes=<count> \
-              maxtime=days-hours:minutes:seconds|unlimited minnodes=<count> \
-              allocnodes=<node_list>  \
-              preemptmode=off|cancel|checkpoint|requeue|suspend \
-              priority=count rootonly=yes|no shared=yes|no|exclusive|force \
-              state=up|down|drain|inactive allowgroups=<name>" 
-    
-            remainings=$(compute_set_diff "$parameters" "${COMP_WORDS[*]}")
-            # If a new named argument is about to be entered, serve the list of options
-            [[ $cur == "" && $prev != "=" ]] && { offer "$remainings" ; return ; }
-            
-            if   param "allocnodes" ; then offer_many  "$(_nodes)" 
-            elif param "alternate" ; then offer_many "$(_partitions)"
-            elif param "partitionname" ; then offer_many "$(_partitions)"
-            elif param "nodes"    ; then offer_many "$(_nodes)"
-            elif param "preemptmode" ; then offer_many "off cancel checkpoint requeue suspend"
-            elif param "shared" ; then offer_many "yes no exclusive force"
-            else offer "$(sed 's/\=[^ ]*/\=/g' <<< $remainings)"
-            fi
-            ;;
-        reservation)
-            local parameters="reservation=<name> users=<user_list> nodecnt=<count> \
-              nodes=<node_list> starttime=yyyy-mm-dd[thh:mm[:ss]] \
-              endtime=yyyy-mm-dd[thh:mm[:ss]] duration=[days-]hours:minutes:seconds \
-              flags=maint,overlap,ignore_jobs,daily,weekly \
-              partitionname=<partition_list> features=<feature_list> \
-              accounts=<account_list> licenses=<license>"
-    
-            remainings=$(compute_set_diff "$parameters" "${COMP_WORDS[*]}")
-            # If a new named argument is about to be entered, serve the list of options
-            [[ $cur == "" && $prev != "=" ]] && { offer "$remainings" ; return ; }
-            
-            # Test all potential arguments and server corresponding values
-            if   param "accounts" ; then offer  "$(_accounts)" 
-            elif param "licences" ; then offer_many "$(_licenses)"
-            elif param "nodes"    ; then offer_many "$(_nodes)"
-            elif param "features" ; then offer_many "$(_features)"
-            elif param "users"    ; then offer_many "$(_users)"
-            elif param "flags"    ; then offer_many " maint overlap ignore_jobs daily weekly "
-            else offer "$(sed 's/\=[^ ]*/\=/g' <<< $remainings)"
-            fi
-            ;;
-        esac
-        ;;
+	parameters="partition reservation"
+
+	param=$(find_first_occurence "${COMP_WORDS[*]}" "$parameters")
+	[[ $param == "" ]] && { offer "$parameters" ; return ; }
+
+	# Process object
+	case $param in
+	partition)
+	    local parameters="allocnodes=<node_list> allowgroups=<name>\
+			      alternate=<partition_name> default=yes|no\
+			      defaulttime=d-h:m:s|unlimited defmempercpu=<MB>\
+			      defmempernode=<MB> disablerootjobs=yes|no\
+			      gracetime=<seconds> hidden=yes|no\
+			      maxmempercpu=<MB> maxmempernode=<MB>\
+			      maxnodes=<count> maxtime=d-h:m:s|unlimited\
+			      minnodes=<count> nodes=<name>\
+			      preemptmode=off|cancel|checkpoint|requeue|suspend\
+			      priority=count rootonly=yes|no reqresv=<yes|no>\
+			      shared=yes|no|exclusive|force\
+			      state=up|down|drain|inactive"
+
+	    remainings=$(compute_set_diff "$parameters" "${COMP_WORDS[*]}")
+	    # If a new named argument is about to be entered, serve the list of options
+	    [[ $cur == "" && $prev != "=" ]] && { offer "$remainings" ; return ; }
+	    if   param "allocnodes"  ; then offer_many "$(_nodes)"
+	    elif param "nodes"       ; then offer_many "$(_nodes)"
+	    elif param "alternate"   ; then offer_many "$(_partitions)"
+	    elif param "default"     ; then offer_many  "yes no"
+	    elif param "preemptmode" ; then offer_many "off cancel checkpoint\
+							requeue suspend"
+	    elif param "shared"      ; then offer_many "yes no exclusive force"
+	    elif param "state"       ; then offer_many "up down drain inactive"
+	    elif param "disablerootjobs" ; then offer_many "yes no"
+	    elif param "hidden"      ; then offer_many "yes no"
+	    elif param "rootonly"    ; then offer_many "yes no"
+	    elif param "reqresv"     ; then offer_many "yes no"
+	    fi
+	    ;;
+	reservation)
+	    local parameters="accounts=<account_list> corecnt=<num>\
+	                      duration=[days-]hours:minutes:seconds\
+	                      endtime=yyyy-mm-dd[thh:mm[:ss]]\
+                              features=<feature_list>\
+                              flags=maint,overlap,ignore_jobs,daily,weekly\
+                              licenses=<license> nodecnt=<count>\
+                              nodes=<node_list> users=<user_list>\
+	                      partitionname=<partition_list>\
+                              starttime=yyyy-mm-dd[thh:mm[:ss]]"
+
+	    remainings=$(compute_set_diff "$parameters" "${COMP_WORDS[*]}")
+	    # If a new named argument is about to be entered, serve the list of
+	    # options
+	    [[ $cur == "" && $prev != "=" ]] && { offer "$remainings" ;
+		    return ; }
+
+	    # Test all potential arguments and serve corresponding values
+	    if   param "accounts" ; then offer_many  "$(_accounts)"
+	    elif param "licences" ; then offer_many "$(_licenses)"
+	    elif param "nodes"    ; then offer_many "$(_nodes)"
+	    elif param "features" ; then offer_many "$(_features)"
+	    elif param "users"    ; then offer_many "$(_users)"
+	    elif param "flags"    ; then offer_many "daily first_cores\
+						     ignore_jobs license_only\
+						     maint overlap part_nodes\
+						     spec_nodes static_alloc\
+						     time_float weekly"
+	    elif param "partitionname" ; then offer_many "$(_partitions)"
+	    else offer "$(sed 's/\=[^ ]*/\=/g' <<< $remainings)"
+	    fi
+	    ;;
+	esac
+	;;
     esac
 }
 complete -F _scontrol scontrol
@@ -917,40 +1046,48 @@ _squeue()
 {
     _get_comp_words_by_ref cur prev words cword
     _split_long_opt
-    
-    local shortoptions="-A -i -j -M -n -o -p -q -s -S -t -u -a -h -l -s -V -v"
-    local longoptions="--help --hide --steps --start --usage --verbose \
-            --version --noheader --account<account_list> \
-            --iterate<seconds> --jobs<job_id_list> \
-            --clusters<string> --nodes<hostlist> --format<fmtstring> \
-            --partition<part_list> --qos<qos_list> --sort<sort_list> \
-            --state<state_list> --user<user_list> "
-    
+
+    local shortoptions="-A -a -h -i -j -l -L -M -n -O -o -P -p -q -R -r -S -s\
+			-t -u -V -v -w"
+    local longoptions="--account<account_list> --all --array --noheader --help\
+		       --hide --iterate<seconds> --jobs<job_id_list> --long\
+		       --licenses=<license_list> --clusters<string>\
+		       --name=<name_list> --format<fmtstring>\
+		       --Format<fmtstring> --partition<part_list>\
+		       --priority --qos<qos_list>\
+		       --reservation=<reservation_name> --steps\
+		       --sort<sort_list> --start --state<state_list> --usage\
+		       --verbose --version --nodelist<hostlist>"
+
     [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
     [[ $cur == -- ]] && { offer "$longoptions" ; return ; }
     [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $longoptions)"; return ; }
 
-    if [[ $cur == *% ]] ; 
-    then 
-        offer "%a(Account) %A(NTasks) %b(gres) %c(mincpu) %C(Ncpus) %d(minTmp) \
-            %D(NNodes) %e(end) %E(dependency) %f(features) %g(group) %G(gID) %h(shared) \
-            %H(Nsockets) %i(id) %I(Ncores/socket) %j(name) %k(comment) %l(limit) \
-            %L(timeleft) %m(mem) %M(time) %n(reqnodes) %N(alloc_nodes) %O(contiguous) \
-            %p(priority) %r(reason)  %R(reason) %s(selecplugin) %t(state) %T(state) \
-            %u(user) %U(uID) %v(reservation) %x(excnodes)" ; 
-        return; 
+    if [[ $cur == *% ]] ;
+    then
+	offer "%a(Account) %A(NTasks) %b(gres) %c(mincpu) %C(Ncpus) %d(minTmp) \
+	       %D(NNodes) %e(end) %E(dependency) %f(features) %g(group) %G(gID)\
+	       %h(shared) %H(Nsockets) %i(id) %I(Ncores/socket) %j(name)\
+	       %k(comment) %l(limit) %L(timeleft) %m(mem) %M(time) %n(reqnodes)\
+	       %N(alloc_nodes) %O(contiguous) %p(priority) %r(reason)\
+	       %R(reason) %s(selecplugin) %t(state) %T(state) \
+	       %u(user) %U(uID) %v(reservation) %x(excnodes)" ;
+	return;
     fi
 
-    case $prev in 
-    --partition|-p) offer_list "$(_partitions)" ;;
-    --jobs|-j) offer_list "$(_jobs)" ;;
+    case $prev in
     --account|-A) offer_list "$(_accounts)" ;;
+    --jobs|-j) offer_list "$(_jobs)" ;;
     --clusters|-M) offer_list "$(_clusters)" ;;
-    --nodes|-N) offer_list "$(_nodes)" ;;
+    --name|-n) offer_list "$(_jobnames)" ;;
     --qos) offer_list "$(_qos)" ;;
     --user|-u) offer_list "$(_users)" ;;
-    --state|-s) offer_list "pending running suspended completing completed" ;;
-    --format|-o) offer "\\\"%" ;;
+    --state|-t) offer_list "pending running suspended completing completed" ;;
+    --format|-o|--Format|-O) offer "\\\"%" ;;
+    --partition|-p) offer_list "$(_partitions)" ;;
+    --reservation|-R) offer_list "$(_reservations)" ;;
+    --sort|-S) offer_list "\\\"%" ;;
+    --nodelist|-w) offer_list "$(_nodes)" ;;
     esac
 }
 complete -F _squeue squeue
@@ -959,29 +1096,30 @@ _scancel()
 {
     _get_comp_words_by_ref cur prev words cword
     _split_long_opt
-    
-    local shortoptions=" -a -i -v -V -A -b -M -n -p -q -R -s -t -u -w"
-    local longoptions="--batch --ctld --help --quiet --usage --verbose --version \
-            --account<account_list>  --name<job_name> \
-            --clusters<string> --nodelist<hostlist> --reservation<reservation_name>\
-            --partition<part_list> --qos<qos_list> --signal<SIGXXX>\
-            --state<state_list> --user<user_list> --wckeys<wckey>"
-    
+
+    local shortoptions=" -A -b -i -M -n -p -q -Q -R -s -t -u -v -V -w"
+    local longoptions="--account<account_list> --batch --ctld --help\
+		       --interactive --clusters<string> --name<job_name>\
+		       --nodelist<hostlist> --partition<part_list>\
+		       --qos<qos_list> --quiet --reservation<reservation_name>\
+		       --signal<SIGXXX> --state<state_list> --user<user_list>\
+		       --usage --verbose --version --wckeys<wckey>"
+
     [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
     [[ $cur == -- ]] && { offer "$longoptions" ; return ; }
     [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $longoptions)"; return ; }
 
     case $prev in
-    --partition|-p) offer_list "$(_partitions)" ;;
     --account|-A) offer_list "$(_accounts)" ;;
     --clusters|-M) offer_list "$(_clusters)" ;;
+    --name|-n) offer_list "$(_jobnames)" ;;
+    --partition|-p) offer_list "$(_partitions)" ;;
     --qos) offer_list "$(_qos)" ;;
-    --wckeys) offer_list "$(_wckeys)" ;;
-    --user|-u) offer_list "$(_users)" ;;
-    --nodelist|-w) offer_list "$(_nodes)" ;;
-    --name) offer_list "$(_jobnames)" ;;
     --reservation|-R) offer_list "$(_reservations)" ;;
     --state) offer_list "pending running suspended completing completed" ;;
+    --user|-u) offer_list "$(_users)" ;;
+    --wckeys) offer_list "$(_wckeys)" ;;
+    --nodelist|-w) offer_list "$(_nodes)" ;;
     *) offer_list "$(_jobs)";;
     esac
 }
@@ -991,11 +1129,12 @@ _sshare()
 {
     _get_comp_words_by_ref cur prev words cword
     _split_long_opt
-    
-    local shortoptions="-a -h -l -p -P -v -V -A -M -u"
-    local longoptions="--noheader --parsable --parsable2 --verbose --version \
-                --accounts<accounts> --clusters<string> --users<user_list>"
-    
+
+    local shortoptions="-A -a -h -l -M -p -P -u -v -V"
+    local longoptions="--accounts<accounts> --all --noheader --long\
+		       --clusters<string> --parsable --parsable2\
+			--users<user_list> --verbose --version --help"
+
     [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
     [[ $cur == -- ]] && { offer "$longoptions" ; return ; }
     [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $longoptions)"; return ; }
@@ -1012,11 +1151,12 @@ _sbcast()
 {
     _get_comp_words_by_ref cur prev words cword
     _split_long_opt
-    
-    local shortoptions="-C -f -p -v -V -F -s -t"
-    local longoptions="--compress --force --preserve --verbose --version \
-                    fanout<number> --size<bytes> --timeout<seconds>"
-    
+
+    local shortoptions="-C -f -F -j -p -s -t -v -V"
+    local longoptions="--compress --force --fanout<number> --jobid<number>\
+		       --preserve --size<bytes> --timeout<seconds> --verbose\
+		       --version"
+
     [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
     [[ $cur == -- ]] && { offer "$longoptions" ; return ; }
     [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $longoptions)"; return ; }
@@ -1029,32 +1169,39 @@ _sinfo()
 {
     _get_comp_words_by_ref cur prev words cword
     _split_long_opt
-    
-    local shortoptions="-a -b -d -e -h -i -l -n -N -o -p -R -s -S -t -v -V"
-    local longoptions="--all --exact --noheader --help --hide --iterate<seconds> \
-            --long --clusters<clusternames> --nodes<nodelist> --Node --format<fmtstr> \
-            --partition<partition> --summarize --sort<sortlist> --states<statelist> \
-            --usage --verbose --version"
-    
+
+    local shortoptions="-a -b -d -e -h -i -l -M -n -N -o -p -r -R -s -S -t -T\
+			-v -V"
+    local longoptions="--all --bgl --dead --exact --noheader --help --hide\
+		       --iterate<seconds> --long --clusters<clusternames>\
+		       --nodes<nodelist> --Node --format<fmtstr>\
+		       --partition<partition> --responding --list-reasons\
+		       --summarize --sort<sortlist> --state<statelist>\
+		       --reservation --usage --verbose --version"
+
     [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
     [[ $cur == -- ]] && { offer "$longoptions" ; return ; }
     [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $longoptions)"; return ; }
 
-    if [[ $cur == *% ]] ; 
-    then 
-        offer "%a(Availabilit) %A(cpu_usage) %c(cpus_per_node) %C(cpu_usage) %d(diskspace) \
-            %D(NNodes) %E(reason) %f(features) %F(nodes_usage) %g(group) %G(Gres) %h(shared) \
-            %H(timestamp) %l(time_limit)  %L(default_time) %m(mem) %M(preemt_mode) \
-            %N(node_names) %P(partition)  %r(root_jobs)  %R(reason) %s(max_job_size) \
-            %S(allowed_allocating_nodes) %t(state) %T(state) %u(user) %U(uID) %w(weight)\
-            %X(sockets_per_node) %Y(cores_per_socket) %Z(threads_per_core)" ; 
-        return; 
+    if [[ $cur == *% ]] ;
+    then
+	offer "%all %a(Availability) %A(cpu_usage) %B(max_cpus)\
+	       %c(cpus_per_node) %C(cpu_usage) %d(diskspace) %D(NNodes)\
+	       %E(reason) %f(features) %F(nodes_usage) %g(group) %G(Gres)\
+	       %h(shared) %H(timestamp) %l(time_limit) %L(default_time) %m(mem)\
+	       %M(preempt_mode) %n(hostnames) %N(node_names) %o(node_addr)\
+	       %O(cpu_load) %e(free_mem) %p(partition_prio) %P(partition) %r(root_jobs)\
+	       %R(reason) %s(max_job_size) %S(allowed_allocating_nodes)\
+	       %t(state) %T(state) %u(user) %U(uID) %w(weight)\
+	       %X(sockets_per_node) %Y(cores_per_socket)\
+	       %z(extend_process_info) %Z(threads_per_core)" ;
+	return;
     fi
 
-    case $prev in 
-    --partition|-p) offer_list "$(_partitions)" ;;
+    case $prev in
     --clusters|-M) offer_list "$(_clusters)" ;;
     --nodes|-n) offer_list "$(_nodes)" ;;
+    --partition|-p) offer_list "$(_partitions)" ;;
     --state) offer_list "pending running suspended completing completed" ;;
     --format|-o) offer "\\\"%" ;;
     esac
@@ -1065,24 +1212,26 @@ _sprio()
 {
     _get_comp_words_by_ref cur prev words cword
     _split_long_opt
-    
+
     local shortoptions="-h -j -l -M -n -o -u -v -V -w"
-    local longoptions="--noheader --help --job<jobids> --long --clusters<clustername> \
-            --norm --format<fmtstr> --user<userlist> --usage --verbose --version --weights"
-    
+    local longoptions="--noheader --help --job<jobids> --long\
+		       --clusters<clustername> --norm --format<fmtstr>\
+		       --user<userlist> --usage --verbose --version --weights"
+
     [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
     [[ $cur == -- ]] && { offer "$longoptions" ; return ; }
     [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $longoptions)"; return ; }
 
-    if [[ $cur == *% ]] ; 
-    then 
-        offer "%a(n_age) %A(w_age) %f(n_fair-share) %F(w_fair-share) %i(JobId) \
-            %j(n_job_size) %J(w_job_size) %N(Nice adjustmen) %p(n_partition) \
-            %P(w_partition) %q(n_qos) %Q(w_qos) %u(User) %Y(priority) %y(n_priority) " ; 
-        return; 
+    if [[ $cur == *% ]] ;
+    then
+	offer "%a(n_age) %A(w_age) %f(n_fair-share) %F(w_fair-share) %i(JobId)\
+	       %j(n_job_size) %J(w_job_size) %N(Nice_adjustment) %p(n_partition)\
+	       %P(w_partition) %q(n_qos) %Q(w_qos) %u(User) %Y(priority)\
+	       %y(n_priority)" ;
+	return;
     fi
 
-    case $prev in 
+    case $prev in
     --jobs|-j) offer_list "$(_jobs)" ;;
     --clusters|-M) offer_list "$(_clusters)" ;;
     --format|-o) offer "\\\"%" ;;
@@ -1095,35 +1244,40 @@ _sacct()
 {
     _get_comp_words_by_ref cur prev words cword
     _split_long_opt
-    
-    local shortoptions="-a -A -b -c -d -e -E -f -g -h -j -k -K -l -L -M -n \
-                        -N -o -O -p -P -q -r -s -S -T -u -v -V -W -x -X"
-    local longoptions="--allusers --accounts<accountlist> --brief --completion \
-        --dump --duplicates --helpformat --endtime<time> --file<path> --group<gidlist> \
-        --help -- jobs<joblist> --timelimit-min<time> --timelimit-max<time> --long \
-        --allclusters --clusters<clusterlist> --noheader --nodes<nodes> \
-        --format<itemlist> --formatted_dump --parsable --parsable2 --qos<qos> \
-        --partition<partitionlist> --state<statelist> --starttime<time> --truncate \
-        --user<userlist> --usage --verbose --version --wckeys<wckeyslist> \
-        --associations<assoclist> --allocations"
-    
+
+    local shortoptions="-a -A -b -c -D -e -E -f -g -h -j -k -K -l -L -M -n\
+			-N -o -p -P -q -r -s -S -T -u -v -V -W -x -X"
+    local longoptions="--allusers --accounts<accountlist> --brief --completion\
+	               --duplicates --helpformat --endtime<time> --file<path>\
+                       --group<gidlist> --help --jobs<joblist>\
+                       --timelimit-min<time> --timelimit-max<time> --long\
+                       --allclusters --clusters<clusterlist> --noheader\
+	               --nodelist<nodes> --name<jobname> --format<itemlist>\
+                       --parsable --parsable2 --qos<qos>\
+                       --partition<partitionlist> --state<statelist>\
+	               --starttime<time> --truncate --user<userlist> --usage\
+                       --verbose --version --wckeys<wckeyslist>\
+                       --associations<assoclist> --allocations"
+
     [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
     [[ $cur == -- ]] && { offer "$longoptions" ; return ; }
-    [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $longoptions)"; return ; }
+    [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $longoptions)";
+	    return ; }
 
-    case $prev in 
-    --group|--gid|-g) _gids ;;
-    --partition) offer_list "$(_partitions)" ;;
-    --jobs) offer_list "$(_jobs)" ;;
+    case $prev in
     --accounts|-A) offer_list "$(_accounts)" ;;
+    --group|--gid|-g) _gids ;;
+    --jobs|-j) offer_list "$(_jobs)" ;;
     --clusters|-M) offer_list "$(_clusters)" ;;
-    --nodes) offer_list "$(_nodes)" ;;
+    --nodes|-N) offer_list "$(_nodes)" ;;
+    --name) offer_list "$(_jobnames)" ;;
+    --partition) offer_list "$(_partitions)" ;;
+    --format|-o) offer_list "$(sacct -e)" ;;
+    --state|-s) offer_list "pending running suspended completing completed" ;;
     --qos) offer_list "$(_qos)" ;;
+    --user|-u) offer_list "$(_users)" ;;
     --wckeys|-W) offer_list "$(_wckeys)" ;;
     --associations|-x) offer_list "$(_associations)" ;;
-    --user|-u) offer_list "$(_users)" ;;
-    --state|-s) offer_list "pending running suspended completing completed" ;;
-    --format) offer_list "$(sacct -e)" ;;
     esac
 }
 complete -F _sacct sacct
@@ -1132,122 +1286,167 @@ _salloc()
 {
     _get_comp_words_by_ref cur prev words cword
     _split_long_opt
-    
-    local shortoptions="-A -B -C -c -d -D -F -h -H -I -J -K -l -L -m -N \
-                        -n -O -Q -s -t -u -V -v -W -w -x"
-    local longoptions="--account<account> --acctg-freq<seconds> \
-        --extra-node-info<sockets[:cores[:threads]]> --sockets-per-node<number>\
-        --cores-per-sopcket<number> --threads-per-core<number> --begin<time> --bell \
-        --comment<string> --constraint<list> --contiguous --cpu-bind<type> \
-        --cpus-per-task<number> --dependency<deplist> --chdir<path> --exclusive \
-        --nodefile<nodefile> --get-user-env --gid<group> --gres<list> --hold \
-        --help --hint<type> --immediate[<seconds>] --jobid<jobid> --killcommand \
-        --no-kill --licenses<licenses> --distribution<dist> --mail-type<type> \
-        --mail-user<email> --mem<MB> --mem-per-cpu<MB> --mem-bind<type> \
-        --min-cpus<number> --nodes<minnodes[-maxnodes]> --ntasks<number> \
-        --network<type> --nice<[adjustment]> --ntasks-per-core<number> \
-        --no-bell --no-shell --overcommit --partition<partitionname> --quiet \
-        --qos<qos> --reservation<name> --share --signal<sig_num>[@<sig_time>] \
-        --time<time> --time-min<time> --tmp<MB> --usage --uid<user> --version \
-        --verbose --wait<seconds> --nodelist<nodelist> --wait-all-nodes<0|1> \
-        --wckey<wckey> --exclude<nodelist>"
-   
+
+    local shortoptions="-A -B -bb -C -c -d -D -F -g -h -H -I -J -k -K -L -m -n\
+			-N -O -p -Q -R -s -S -t -u -V -v -W -w -x"
+    local longoptions="--account=<account> --acctg-freq=<seconds>\
+		       --extra-node-info=<sockets[:cores[:threads]]>\
+		       --begin=<time> --bell --comment=<string>\
+		       --constraint=<list> --contiguous\
+		       --cores-per-socket=<number> --cpu-freq=<p1[-p2[:p3]]>\
+		       --cpus-per-task=<ncpus> --dependency=<deplist>\
+		       --chdir=<path> --exclusive=[user] --nodefile=<nodefile>\
+		       --get-user-env --gid=<group> --gres=<list> --hold\
+		       --help --hint=<type> --immediate=<seconds>\
+		       --job-name=<jobname> --jobid=<jobid>\
+		       --kill-command=[signal] --no-kill --licenses=<licenses>\
+		       --distribution=<dist> --mail-type=<type>\
+		       --mail-user=<email> --mem=<MB> --mem-per-cpu=<MB>\
+		       --mem_bind=<type> --mincpus=<number>\
+		       --nodes=<minnodes[-maxnodes]> --ntasks=<number>\
+		       --network=<type> --nice=<[adjustment]>\
+		       --ntasks-per-core=<number> --ntasks-per-socket=<ntasks>\
+		       --ntasks-per-node=<ntasks> --no-bell --no-shell\
+		       --overcommit --power=<flags> --priority=<value>\
+		       --profile=<all|none|energy|task|lustre|network>\
+		       --partition=<partitionname> --quiet --qos=<qos> --reboot\
+		       --reservation=<name> --share --core-spec=<num>\
+		       --signal=<sig_num><sig_time>\
+		       --sockets-per-node=<number>\
+		       --switches=<count><max-time> --time-min=<time>\
+		       --threads-per-core=<number> --time=<time>\
+		       --tmp=<MB> --usage --uid=<user> --version --verbose\
+		       --wait=<seconds> --nodelist=<nodelist>\
+		       --wait-all-nodes=<0|1> --wckey=<wckey>\
+		       --exclude=<nodelist> --blrts-image=<path>\
+		       --cnload-image=<path> --conn-type=<type>\
+		       --geometry=<XxYxZ> --ioload-image=<path>\
+		       --linux-image=<path> --mloader-image=<path> --no-rotate\
+		       --ramdisk-image=<path>"
+
+
     [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
     [[ $cur == -- ]] && { offer "$longoptions" ; return ; }
-    [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $longoptions)"; return ; }
+    [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $longoptions)";
+	    return ; }
 
-    case $prev in 
+    case $prev in
     --account|-A) offer_list "$(_accounts)" ;;
     --constraint|-C) offer_list "$(_features)" ;;
     --cpu-bind) offer_list "none rank map_cpu: mask_cpu: sockets cores \
-                             threads ldoms" ;;
-    --dependency) offer_list "after: afterany: afternotok: 
-                             afterok: singleton" ;; 
-    --gid) _gids ;; 
+			     threads ldoms" ;;
+    --dependency) offer_list "after: afterany: afternotok:
+			     afterok: expand: singleton" ;;
+    --gid) _gids ;;
     --partition|-p) offer_list "$(_partitions)" ;;
-    --gres) offer_list "$(_gres)" ;; 
+    --gres) offer_list "$(_gres)" ;;
     --hint) offer "compute_bound memory_bound multithread nomultithread" ;;
     --jobid) offer_list "$(_jobs)" ;;
     --licenses|-L) offer_list "$(_licenses)" ;;
-    --distribution|-d) offer "block cyclic plane arbitrary" ;;
-    --mail-type) offer_list "BEGIN END FAIL REQUEUE ALL" ;;
+    --distribution|-m) offer "block cyclic plane arbitrary" ;;
+    --mail-type) offer_list "BEGIN END FAIL REQUEUE ALL STAGE_OUT TIME_LIMIT\
+			     TIME_LIMIT_90 TIME_LIMIT_80 TIME_LIMIT_50" ;;
     --mem-bind) offer "none rank local map_mem: mask_mem:" ;;
-    # TODO --network) _configured_interfaces ;; 
+    # TODO --network) _configured_interfaces ;;
     --reservation) offer_list "$(_reservations)" ;;
     --clusters) offer_list "$(_clusters)" ;;
     --nodelist) offer_list "$(_nodes)" ;;
     --exclude) offer_list "$(_nodes)" ;;
     --qos) offer_list "$(_qos)" ;;
-    :|afterany|after|afternotok|afterok) offer_list "$(_jobs)" ;;
+    :|afterany|after|afternotok|afterok|expand) offer_list "$(_jobs)" ;;
+    --profile) offer_list "all none energy task lustre network" ;;
+    --wait-all-nodes) offer_list "1 0" ;;
+    --conn-type) offer_list "MESH TORUS NAV" ;;
     esac
     #TODO options for blue gene systems
 }
 complete -F _salloc salloc
-    
+
 _sbatch()
 {
     _get_comp_words_by_ref cur prev words cword
     _split_long_opt
 
-    local shortoptions="-A -B -C -c -d -D -F -h -H -I -J -K -l -L -m -N -n -O \
-                        -Q -s -t -u -V -v -W -w -x -q -r -T -X -Z"
-    local longoptions="--account<account> --acctg-freq<seconds> \
-        --extra-node-info<sockets[:cores[:threads]]> --sockets-per-node<number> \
-        --cores-per-sopcket<number> --threads-per-core<number> --begin<time> \
-        --bell --comment<string> --constraint<list> --contiguous --cpu-bind<type> \
-        --cpus-per-task<number> --dependency<deplist> --chdir<path> --exclusive \
-        --nodefile<nodefile> --get-user-env --gid<group> --gres<list> --hold \
-        --help --hint<type> --immediate[<seconds>] --jobid<jobid> --no-kill \
-        --licenses<licenses> --distribution<dist> --mail-type<type> \
-        --mail-user<email> --mem<MB> --mem-per-cpu<MB> --mem-bind<type> \
-        --min-cpus<number> --nodes<minnodes[-maxnodes]> --ntasks<number> \
-        --network<type> --nice<[adjustment]> --ntasks-per-core<number>  \
-        --overcommit --partition<partitionname> --quiet --qos<qos> \
-        --reservation<name> --share --signal<sig_num>[@<sig_time>] \
-        --time<time> --time-min<time> --tmp<MB> --usage --uid<user> \
-        --version --verbose --wait<seconds> --nodelist<nodelist> \
-        --wait-all-nodes<0|1> --wckey<wckey> --exclude<nodelist> \
-        --checkpoint<time> --checkpoint-dir<directory> --error<file> \
-        --preserve-env --epilog<path> --input<file> --job-name<name> \
-        --kill-on-bad-exit --label --msg-timeout --mpi<type> \
-        --multi-prog --output<file> --open-mode<append|truncate> \ 
-        --prolog<path> --propagate<rlimits> --pty --quit-on-interrupt \
-        --relative<number> --resv-ports --restart-dir<dir> --slurmd-debug<level> \
-        --threads<number> --task-epilog<path> --task-prolog<path> --test-only \
-        --unbuffered --disable-status --no-allocate --export<env_var> \
-        --ntasks-per-socket<number> --ntasks-per-node<number> --tasks-per-node<number>"
-   
+    local shortoptions="-a -A -B -C -c -d -D -e -F -g -h -H -i -I -J -k -K -L\
+			-m -M -n -N -o -O -p -Q -s -S -t -u -v -V -w -x -q -R"
+    local longoptions="--array<indexes> --account<account>\
+		       --acctg-freq<seconds>\
+		       --extra-node-info<sockets[:cores[:threads]]>\
+		       --bb<spec> --begin=<time> --checkpoint<time>\
+		       --checkpoint-dir<directory> --comment<string>\
+		       --constraint<list> --contiguous\
+		       --cores-per-socket<number> --cpus-per-task<number>\
+		       --dependency<deplist> --workdir<directory>\
+		       --error<filename pattern> --exclusive<user>\
+		       --export<environment variables|ALL|NONE>\
+		       --export-file<filename|fd> --nodefile<nodefile>\
+		       --get-user-env --gid<group>\
+		       --gres<list> --hold --help --hint<type> --immediate\
+		       --ignore-pbs --input<filename>\
+		       --job-name<jobname> --jobid<jobid> --no-kill\
+		       --licenses<license> --clusters<string>\
+		       --distribution<dist>\
+		       --mail-type<type> --mail-user<user> --mem<MB>\
+		       --mem-per-cpu<MB> --mem_bind<type> --mincpus<n>\
+		       --nodes<minnodes[-maxnodes]> --ntasks<number>\
+		       --network<type> --nice[adjustment] --no-requeue\
+		       --ntasks-per-core<ntasks>  --ntasks-per-socket<ntasks>\
+		       --ntasks-per-node<ntasks> --overcommit\
+		       --output<filename> --open-mode\
+		       --parsable --partition<partition_names> --power<flags>\
+		       --priority<value>\
+		       --profile<type>\
+		       --propagate<limit> --quiet --qos<qos> --reboot\
+		       --requeue --reservation<name> --share --core-spec<num>\
+		       --sicp --signal<signal> --sockets-per-node<sockets>\
+		       --switches<type> --time<time>\
+		       --tasks-per-node<n> --test-only\
+		       --threads-per-core<threads> --time-min<time>\
+		       --tmp<MB> --usage --uid=<user> --version --verbose\
+		       --nodelist<node name list> --wait-all-nodes<value>\
+		       --wckey<wckey> --wrap<command string>\
+		       --exclude<node name list> --blrts-image<path>\
+		       --cnload-image<path> --conn-type<type>\
+		       --geometry<XxYxZ> --ioload-image<path>\
+		       --linux-image<path> --mloader-image<path> --no-rotate\
+		       --ramdisk-image<path>"
+
     [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
     [[ $cur == -- ]] && { offer "$longoptions" ; return ; }
     [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $longoptions)"; return ; }
 
-    case $prev in 
+    case $prev in
     --account|-A) offer_list "$(_accounts)" ;;
     --constraint|-C) offer_list "$(_features)" ;;
     --cpu-bind) offer "none rank map_cpu: mask_cpu: sockets \
-                            cores threads ldoms" ;;
+			    cores threads ldoms" ;;
     --dependency|-d) offer "after: afterany: afternotok: \
-                            afterok: singleton" ;; 
-    --gid) _gids ;; 
+			    afterok: expand: singleton" ;;
+    --gid) _gids ;;
     --partition|-p) offer_list "$(_partitions)" ;;
-    --gres) offer_list "$(_gres)" ;; 
+    --gres) offer_list "$(_gres)" ;;
     --hint) offer "compute_bound memory_bound multithread \
-                        nomultithread" ;;
+			nomultithread" ;;
     --jobid) offer_list "$(_jobs)" ;;
     --licenses|-L) offer_list "$(_licenses)" ;;
     --distribution|-m) offer_list "block cyclic plane arbitrary" ;;
-    --mail-type) offer_list "begin end fail requeue all" ;;
-    --mem-bind) offer "none rank local map_mem: mask_mem:" ;;
+    --mail-type) offer_list "BEGIN END FAIL REQUEUE ALL STAGE_OUT TIME_LIMIT\
+			     TIME_LIMIT_90 TIME_LIMIT_80 TIME_LIMIT_50" ;;
+    --mem-bind) offer "quiet verbose none rank local map_mem: mask_mem:" ;;
     --mpi) offer "lam mpich1_shmem mpichgm mvapich openmpi none" ;;
     --propagate) offer_list "all as core cpu data fsize memlock \
-                              nofile nproc rss stack" ;;
+			      nofile nproc rss stack" ;;
     # TODO --network) _configured_interfaces ;;
     --reservation) offer_list "$(_reservations)" ;;
     --clusters|-M) offer_list "$(_clusters)" ;;
-    --nodelist) offer_list "$(_nodes)" ;;
+    --nodelist|-w) offer_list "$(_nodes)" ;;
     --exclude|-x) offer_list "$(_nodes)" ;;
     --qos) offer_list "$(_qos)" ;;
     :|afterany|after|afternotok|afterok) offer_list "$(_jobs)" ;;
+    --profile) offer_list "all none energy task lustre network" ;;
+    --propagate) offer_list "ALL AS CORE CPU DATA FSIZE MEMLOCK NOFILE NPROC\
+			     RSS STACK" ;;
+    --wait-all-nodes) offer_list "1 0" ;;
     *)  _filedir
     esac
     #TODO options for blue gene systems
@@ -1258,74 +1457,146 @@ _srun()
 {
     _get_comp_words_by_ref cur prev words cword
     _split_long_opt
-    
-    local shortoptions="-A -B -C -c -d -D -F -h -H -I -J -K -l -L -m -N -n -O \
-                        -Q -s -t -u -V -v -W -w -x -q -r -T -X -Z"
-    local longoptions=" --open-mode<append|truncate> --account<account> --acctg-freq<seconds> \
-        --extra-node-info<sockets[:cores[:threads]]> --sockets-per-node<number> \
-        --cores-per-socket<number> --threads-per-core<number> --begin<time> \
-        --bell --comment<string> --constraint<list> --contiguous --cpu-bind<type> \
-        --cpus-per-task<number> --dependency<deplist> --chdir<path> --exclusive \
-        --nodefile<nodefile> --get-user-env --gid<group> --gres<list> --hold \
-        --help --hint<type> --immediate<[seconds>] --jobid<jobid> --no-kill \
-        --licenses<licenses> --distribution<dist> --mail-type<type> \
-        --mail-user<email> --mem<MB> --mem-per-cpu<MB> --mem-bind<type> \
-        --min-cpus<number> --nodes<minnodes[-maxnodes]> --ntasks<number> \
-        --network<type> --nice<[adjustment]> --ntasks-per-core<number>  \
-        --overcommit --partition<partitionname> --quiet --qos<qos> \
-        --reservation<name> --share --signal<sig_num[@sig_time]> \
-        --time<time> --time-min<time> --tmp<MB> --usage --uid<user> \
-        --version --verbose --wait<seconds> --nodelist<nodelist> \
-        --wait-all-nodes<0|1> --wckey<wckey> --exclude<nodelist> \
-        --checkpoint<time> --checkpoint-dir<directory> --error<file> \
-        --preserve-env --epilog<path> --input<file> --job-name<name> \
-        --kill-on-bad-exit --label --msg-timeout --mpi<type> \
-        --multi-prog --output<file> \
-        --prolog<path> --propagate<rlimits> --pty --quit-on-interrupt \
-        --relative<number> --resv-ports --restart-dir<dir> --slurmd-debug<level> \
-        --threads<number> --task-epilog<path> --task-prolog<path> --test-only \
-        --unbuffered --disable-status --no-allocate --export<env_var> \
-        --ntasks-per-socket<number> --ntasks-per-node<number> --tasks-per-node<number>"
-   
+
+    local shortoptions="-A -B -C -c -d -D -e -E -g  -h -H -i -I -J -k -K -l -L\
+                        -m -N -n -o -O -p\
+			-q -Q -r -s -S -t -T -u -V -v -W -w -x"
+    local longoptions=" --account<account> --acctg-freq\
+			--extra-node-info<spec>\
+			--bb<spec> --begin<time> --checkpoint<time>\
+			--checkpoint-dir<directory> --comment<string>\
+			--constraint<list> --contiguous\
+			--cores-per-socket<cores> --cpu_bind=<type>\
+			--cpu-freq<freq> --cpus-per-task<ncpus>\
+			--dependency=<dependency_list> --chdir=<path>\
+			--error<mode> --preserve-env --epilog<executable>\
+			--exclusive=<user> --export<var> --gid<group>\
+			--gres<list> --hold --help --hint<type>\
+			--immediate=<seconds> --input<mode>\
+			--job-name<jobname> --jobid<jobid>\
+			--kill-on-bad-exit<0|1> --no-kill --launch-cmd\
+			--launcher-opts<options> --label --licenses<license>\
+			--distribution<type> --mail-type<type>\
+			--mail-user<user> --mem<MB> --mem-per-cpu<MB>\
+			--mem_bind<type> --mincpus<n> --msg-timeout<seconds>\
+			--mpi<mpi_type> --multi-prog\
+			--nodes<minnodes[-maxnodes]> --ntasks<number>\
+			--network<type> --nice<adjustment>\
+			--ntasks-per-core<ntasks> --ntasks-per-node<ntasks>\
+			--ntasks-per-socket<ntasks> --overcommit\
+			--output<mode> --open-mode<append|truncate>\
+			--partition<partition_names> --power<flags>\
+			--priority<value> --profile<type> --prolog<executable>\
+			--propagate<limits> --pty --quiet --quit-on-interrupt\
+			--qos<qos> --relative<n> --reboot --resv-ports\
+			--reservation<name> --restart-dir<directory> --share\
+			--core-spec<num> --sicp --signal=<num>\
+			--slurmd-debug<level> --sockets-per-node<sockets>\
+			--switches<type> --threads<nthreads> --time<time>\
+			--task-epilog<executable> --task-prolog<executable>\
+			--test-only --threads-per-core<threads>\
+			--time-min<time> --tmp<MB> --unbuffered --usage\
+			--uid<user> --version --verbose --wait<seconds>\
+			--nodelist=<hostlist> --wckey<wckey> --disable-status\
+			--exclude=<hostlist> --no-allocate --blrts-image<path>\
+			--cnload-image<path> --conn-type<type>\
+			--geometry<XxYxZ> --ioload-image<path>\
+			--linux-image<path> --mloader-image<path> --no-rotate\
+			--ramdisk-image<path>
+"
+
     [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
     [[ $cur == -- ]] && { offer "$longoptions" ; return ; }
     [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $longoptions)"; return ; }
 
-    case $prev in 
-    --gid) _gids ;; 
-    --nodefile) _filedir ;;
-    # TODO --network) _configured_interfaces ;; 
-    --prolog|--task-epilog|--task-prolog) _filedir ;;
+    case $prev in
+    --account|-A) offer_list "$(_accounts)" ;;
+    --begin) offer $(date -dtomorrow +"%Y-%m-%d");;
     --chdir|--restart-dir|--checkpoint-dir) _filedir ;;
-    --reservation) offer_list "$(_reservations)" ;;
-    --constraint|-C) offer_list "$(_features)" ;;
     --clusters) offer_list "$(_clusters)" ;;
-    --account|-A) offer_list "$(_accounts)" ;;
-    --qos) offer_list "$(_qos)" ;;
-    --gres) offer_list "$(_gres)" ;; 
-    --jobid) offer_list "$(_jobs)" ;;
+    --constraint|-C) offer_list "$(_features)" ;;
+    --cpu-bind) offer "none rank map_cpu: mask_cpu: sockets \
+			   cores threads ldoms" ;;
+    --dependency|-d) offer "after: afterany: afternotok: afterok: expand:\
+			    singleton" ;;
+    :|afterany|after|afternotok|afterok) offer_list "$(_jobs)" ;;
     --exclude|-x) offer_list "$(_nodes)" ;;
-    --nodelist|-w) offer_list "$(_nodes)" ;;
-    --licenses|-L) offer_list "$(_licenses)" ;; 
-    --partition|-p) offer_list "$(_partitions)" ;;
-    --begin) offer $(date -dtomorrow +"%Y-%m-%d");; 
-    --open-mode) offer "append truncate" ;; 
-    --mail-type) offer_list "begin end fail requeue all" ;;
-    --distribution|-m) offer "block cyclic plane arbitrary" ;;
-    --mpi) offer "lam mpich1_shmem mpichgm mvapich openmpi none" ;;
+    --gid) _gids ;;
+    --gres) offer_list "$(_gres)" ;;
     --hint) offer "compute_bound memory_bound multithread \
-                       nomultithread" ;;
-    --propagate) offer_list "all as core cpu data fsize memlock \
-                              nofile nproc rss stack" ;;
+		       nomultithread" ;;
+    --job-name|-J) offer_list "$(_jobnames)" ;;
+    --jobid) offer_list "$(_jobs)" ;;
+    --distribution|-m) offer "block cyclic plane arbitrary pack nopack" ;;
+    --licenses|-L) offer_list "$(_licenses)" ;;
+    --mail-type) offer_list "begin end fail requeue all" ;;
     --mem-bind) offer "none rank local map_mem: mask_mem:" ;;
-    --cpu-bind) offer "none rank map_cpu: mask_cpu: sockets \
-                           cores threads ldoms" ;;
-    --dependency|-d) offer "after: afterany: afternotok: afterok: singleton" ;; 
-    :|afterany|after|afternotok|afterok) offer_list "$(_jobs)" ;;
+    --mpi) offer "lam mpich1_shmem mpichgm mvapich openmpi pmi2 none" ;;
+    --partition|-p) offer_list "$(_partitions)" ;;
+    --profile) offer_list "all none energy task filesystem network" ;;
+    --propagate) offer_list "all as core cpu data fsize memlock \
+			      nofile nproc rss stack" ;;
+    --qos) offer_list "$(_qos)" ;;
+    --reservation) offer_list "$(_reservations)" ;;
+    --slurmd-debug) offer_list "quiet fatal error info verbose" ;;
+    --nodefile) _filedir ;;
+    # TODO --network) _configured_interfaces ;;
+    --prolog|--task-epilog|--task-prolog) _filedir ;;
+    --nodelist|-w) offer_list "$(_nodes)" ;;
+    --open-mode) offer "append truncate" ;;
+    --wait-all-nodes) offer_list "1 0" ;;
+    --conn-type) offer_list "MESH TORUS NAV" ;;
     *)  COMPREPLY=( $( compgen -c -- "$cur" ) ) ; return
     esac
     #TODO options for blue gene systems
 }
 complete -F _srun srun
 
+_sattach()
+{
+    _get_comp_words_by_ref cur prev words cword
+    _split_long_opt
+
+    local shortoptions=" -h -l -Q -u -V -v"
+    local longoptions=" --help --input-filter[=]<task number>\
+                        --output-filter[=]<task number>\
+                        --error-filter[=]<task number> --label --layout --pty\
+                        --quiet --usage --version --verbose"
+
+    [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
+    [[ $cur == -- ]] && { offer "$longoptions" ; return ; }
+    [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $longoptions)"; return ; }
+}
+complete -F _sattach sattach
+
+_sdiag()
+{
+    _get_comp_words_by_ref cur prev words cword
+    _split_long_opt
+
+    local shortoptions="-a -h -i -r -t -T -V"
+    local longoptions="--all --help --sort-by-id --reset --sort-by-time\
+                       --sort-by-time2 --usage --version"
+
+    [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
+    [[ $cur == -- ]] && { offer "$longoptions" ; return ; }
+    [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $longoptions)"; return ; }
+}
+complete -F _sdiag sdiag
+
+_sstat()
+{
+    _get_comp_words_by_ref cur prev words cword
+    _split_long_opt
+
+    local shortoptions="-a -e -h -i -j -n -o -p -P -v -V"
+    local longoptions="--allstep --helpformat --help --pidformat --jobs\
+                       --noheader --format --parsable --parsable2 --usage\
+                       --verbose --version"
+
+    [[ $cur == - ]] && { offer "$shortoptions" ; return ; }
+    [[ $cur == -- ]] && { offer "$longoptions" ; return ; }
+    [[ $cur == --* ]] && { offer "$(sed 's/<[^>]*>//g' <<< $longoptions)"; return ; }
+}
+complete -F _sstat sstat
 # vim: sw=4:ts=4:expandtab
diff --git a/contribs/slurmdb-direct/Makefile.am b/contribs/slurmdb-direct/Makefile.am
index 210ff14bc..f30993a0c 100644
--- a/contribs/slurmdb-direct/Makefile.am
+++ b/contribs/slurmdb-direct/Makefile.am
@@ -5,6 +5,7 @@ AUTOMAKE_OPTIONS = foreign
 
 sbin_SCRIPTS = moab_2_slurmdb
 extra = config.slurmdb.pl
+EXTRA_DIST = $(extra)
 
 moab_2_slurmdb:
 
diff --git a/contribs/slurmdb-direct/Makefile.in b/contribs/slurmdb-direct/Makefile.in
index d22f80bde..90c77a2d9 100644
--- a/contribs/slurmdb-direct/Makefile.in
+++ b/contribs/slurmdb-direct/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -222,6 +225,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -271,8 +276,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -291,6 +300,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -334,6 +346,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -357,6 +370,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -421,6 +435,7 @@ top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 sbin_SCRIPTS = moab_2_slurmdb
 extra = config.slurmdb.pl
+EXTRA_DIST = $(extra)
 _perldir = $(exec_prefix)`perl -e 'use Config; $$T=$$Config{installsitearch}; $$P=$$Config{installprefix}; $$P1="$$P/local"; $$T =~ s/$$P1//; $$T =~ s/$$P//; print $$T;'`
 all: all-am
 
diff --git a/contribs/torque/Makefile.in b/contribs/torque/Makefile.in
index 803f3da97..0f0d62450 100644
--- a/contribs/torque/Makefile.in
+++ b/contribs/torque/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -222,6 +225,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -271,8 +276,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -291,6 +300,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -334,6 +346,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -357,6 +370,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/doc/Makefile.in b/doc/Makefile.in
index f96b8767c..734297568 100644
--- a/doc/Makefile.in
+++ b/doc/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -249,6 +252,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -298,8 +303,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -318,6 +327,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -361,6 +373,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -384,6 +397,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/doc/html/Makefile.am b/doc/html/Makefile.am
index 0449f7532..1be23af4b 100644
--- a/doc/html/Makefile.am
+++ b/doc/html/Makefile.am
@@ -9,8 +9,10 @@ generated_html = \
 	add.html \
 	api.html \
 	authplugins.html \
+	bb_plugins.html \
 	big_sys.html \
 	bluegene.html \
+	burst_buffer.html \
 	cgroups.html \
 	checkpoint_blcr.html \
 	checkpoint_plugins.html \
@@ -27,7 +29,6 @@ generated_html = \
 	dist_plane.html \
 	documentation.html \
 	download.html \
-	dynalloc.html \
 	elastic_computing.html \
 	ext_sensorsplugins.html \
 	faq.html \
@@ -62,11 +63,12 @@ generated_html = \
 	overview.html \
 	platforms.html \
 	plugins.html \
+	power_mgmt.html \
 	power_save.html \
+	power_plugins.html \
 	preempt.html \
 	preemption_plugins.html \
 	priority_multifactor.html \
-	priority_multifactor2.html \
 	priority_multifactor3.html \
 	priority_plugins.html \
 	proctrack_plugins.html \
diff --git a/doc/html/Makefile.in b/doc/html/Makefile.in
index 86d826bf7..c54e0b9f6 100644
--- a/doc/html/Makefile.in
+++ b/doc/html/Makefile.in
@@ -99,6 +99,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -107,10 +108,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -123,7 +126,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -221,6 +224,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -270,8 +275,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -290,6 +299,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -333,6 +345,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -356,6 +369,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -425,8 +439,10 @@ generated_html = \
 	add.html \
 	api.html \
 	authplugins.html \
+	bb_plugins.html \
 	big_sys.html \
 	bluegene.html \
+	burst_buffer.html \
 	cgroups.html \
 	checkpoint_blcr.html \
 	checkpoint_plugins.html \
@@ -443,7 +459,6 @@ generated_html = \
 	dist_plane.html \
 	documentation.html \
 	download.html \
-	dynalloc.html \
 	elastic_computing.html \
 	ext_sensorsplugins.html \
 	faq.html \
@@ -478,11 +493,12 @@ generated_html = \
 	overview.html \
 	platforms.html \
 	plugins.html \
+	power_mgmt.html \
 	power_save.html \
+	power_plugins.html \
 	preempt.html \
 	preemption_plugins.html \
 	priority_multifactor.html \
-	priority_multifactor2.html \
 	priority_multifactor3.html \
 	priority_plugins.html \
 	proctrack_plugins.html \
diff --git a/doc/html/accounting.shtml b/doc/html/accounting.shtml
index f22ec9e29..1031affa8 100644
--- a/doc/html/accounting.shtml
+++ b/doc/html/accounting.shtml
@@ -278,9 +278,9 @@ prevent users from accessing invalid accounts.
   enforced you need to use the 'limits' option.
 </li>
 <li>safe - This will ensure a job will only be launched when using an
-  association or qos that has a GrpCPUMins limit set if the job will be
+  association or qos that has a GrpTRESMins limit set if the job will be
   able to run to completion. Without this option set, jobs will be
-  launched as long as their usage hasn't reached the cpu-minutes limit
+  launched as long as their usage hasn't reached the TRES-minutes limit
   which can lead to jobs being launched but then killed when the limit is
   reached. By setting this option, both the 'associations' option and the
   'limits' option are set automatically.
@@ -653,132 +653,16 @@ is specified when a job is submitted. (Only used when tracking wckeys.)</li>
 
 <h2>Limit Enforcement</h2>
 
-<p>Slurm's hierarchical limits are enforced in the following order:
-<ol>
-	<li>QOS limit</li>
-	<li>User association</li>
-	<li>Account association(s)</li>
-	<li>Root/Cluster association</li>
-	<li>Partition limit</li>
-	<li>None</li>
-</ol>
-Note: Higher precedent limits override lower.
-
-<p>All of the above entities can include limits as described below and
-in the <a href="resource_limits.html">Resource Limits</a> document.
-To enable any limit enforcement you must at least have
+<p>Various limits and limit enforcement are described in
+  the <a href="resource_limits.html">Resource Limits</a> web page.</p>
+
+<p>To enable any limit enforcement you must at least have
 <b>AccountingStorageEnforce=limits</b> in your slurm.conf,
 otherwise, even if you have limits set, they will not be enforced.
 Other options for AccountingStorageEnforce and the explanation for
 each are found on the <a href="resource_limits.html">Resource
 Limits</a> document.</p>
 
-<ul>
-
-<li><b>Fairshare=</b> Integer value used for determining priority.
-  Essentially this is the amount of claim this association and it's
-  children have to the above system. Can also be the string "parent",
-  when used on a user this means that the parent association is used
-  for fairshare.  If Fairshare=parent is set on an account, that
-  account's children will be effectively reparented for fairshare
-  calculations to the first parent of their parent that is not
-  Fairshare=parent.  Limits remain the same, only it's fairshare value
-  is affected.
-</li>
-
-<li><b>GrpCPUMins=</b> The total number of cpu minutes that can
-  possibly be used by past, present and future jobs
-  running from this association and its children.  If this limit is
-  reached all jobs running in this group will be killed, and no new
-  jobs will be allowed to run.  This usage is decayed (at a rate of
-  PriorityDecayHalfLife).  It can also be reset (according to
-  PriorityUsageResetPeriod) in order to allow jobs to run against the
-  association tree again.  This limit only applies when using the Priority
-  Multifactor plugin.
-</li>
-
-<li><b>GrpCPURunMins=</b> Used to limit the combined total number of CPU
-  minutes used by all jobs running with this association and its
-  children.  This takes into consideration time limit of
-  running jobs and consumes it, if the limit is reached no new jobs
-  are started until other jobs finish to allow time to free up.
-</li>
-
-<li><b>GrpCPUs=</b> The total count of cpus able to be used at any given
-  time from jobs running from this association and its children.  If
-  this limit is reached new jobs will be queued but only allowed to
-  run after resources have been relinquished from this group.
-</li>
-
-<li><b>GrpJobs=</b> The total number of jobs able to run at any given
-  time from this association and its children.  If
-  this limit is reached new jobs will be queued but only allowed to
-  run after previous jobs complete from this group.
-</li>
-
-<li><b>GrpMemory=</b> The total amount of memory (MB) able to be used
-  at any given time from jobs running from this association and its
-  children.  If this limit is reached new jobs will be queued but only
-  allowed to run after resources have been relinquished from this group.
-</li>
-
-<li><b>GrpNodes=</b> The total count of nodes able to be used at any given
-  time from jobs running from this association and its children.  If
-  this limit is reached new jobs will be queued but only allowed to
-  run after resources have been relinquished from this group.
-  Each job's node allocation is counted separately
-  (i.e. if a single node has resources allocated to two jobs, this is counted
-  as two allocated nodes).
-</li>
-
-<li><b>GrpSubmitJobs=</b> The total number of jobs able to be submitted
-  to the system at any given time from this association and its children.  If
-  this limit is reached new submission requests will be denied until
-  previous jobs complete from this group.
-</li>
-
-<li><b>GrpWall=</b> The maximum wall clock time any job submitted to
-  this group can run for.  If this limit is reached submission requests
-  will be denied.
-</li>
-
-<li><b>MaxCPUMinsPerJob=</b> A limit of cpu minutes to be used by jobs
-  running from this association.  If this limit is
-  reached the job will be killed.
-</li>
-
-<li><b>MaxCPUsPerJob=</b> The maximum size in cpus any given job can
-  have from this association.  If this limit is reached the job will
-  be denied at submission.
-</li>
-
-<li><b>MaxJobs=</b> The total number of jobs able to run at any given
-  time from this association.  If this limit is reached new jobs will
-  be queued but only allowed to run after previous jobs complete from
-  this association.
-</li>
-
-<li><b>MaxNodesPerJob=</b> The maximum size in nodes any given job can
-  have from this association.  If this limit is reached the job will
-  be denied at submission.
-</li>
-
-<li><b>MaxSubmitJobs=</b> The maximum number of jobs able to be submitted
-  to the system at any given time from this association.  If
-  this limit is reached new submission requests will be denied until
-  previous jobs complete from this association.
-</li>
-
-<li><b>MaxWallDurationPerJob=</b> The maximum wall clock time any job
-  submitted to this association can run for.  If this limit is reached
-  the job will be denied at submission.
-</li>
-
-<li><b>QOS=</b> comma separated list of QOS's this association is
-  able to run.
-</li>
-</ul>
-
 <h2>Modifying Entities</h2>
 
 <p>When modifying entities, you can specify many different options in
diff --git a/doc/html/accounting_storageplugins.shtml b/doc/html/accounting_storageplugins.shtml
index dae590bb4..7a17872c7 100644
--- a/doc/html/accounting_storageplugins.shtml
+++ b/doc/html/accounting_storageplugins.shtml
@@ -29,6 +29,16 @@ for the type of accounting package. We currently use
   Daemon (SlurmDBD).  Extra configuration is needed and described <a href="accounting.html">here</a>.
 <li><b>none</b>&#151; Information is not stored anywhere.
 </ul>
+
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
 <p>The programmer is urged to study
 <span class="commandline">src/plugins/accounting_storage/mysql</span>
 for a sample implementation of a Slurm Accounting Storage plugin.
@@ -43,10 +53,10 @@ information.
 
 <h2>API Functions</h2>
 
-The Job Accounting Storage API uses hooks in the slurmctld.
+<p>The Job Accounting Storage API uses hooks in the slurmctld.</p>
 
 <p>All of the following functions are required. Functions which are not
-implemented must be stubbed.
+implemented must be stubbed.</p>
 
 <p class="commandline"> int init (void)
 <p style="margin-left:.2in"><b>Description</b>:<br>
@@ -179,15 +189,15 @@ acct_cluster_rec_t *'s containing information about the clusters to add. <br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
 <p class="commandline">
-int acct_storage_p_add_associations(void *db_conn, uint32_t uid, List association_list)
+int acct_storage_p_add_assocs(void *db_conn, uint32_t uid, List assoc_list)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 Called to add associations to the storage type.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
 <span class="commandline">uid</span> (input) uid of user calling the function.<br>
-<span class="commandline">association_list</span> (input) list of
-acct_association_rec_t *'s containing information about the
+<span class="commandline">assoc_list</span> (input) list of
+acct_assoc_rec_t *'s containing information about the
 associations to add. <br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
@@ -301,8 +311,8 @@ modified on success, or<br>
 <span class="commandline">NULL</span> on failure.
 
 <p class="commandline">
-List acct_storage_p_modify_associations(void *db_conn, uint32_t uid,
-acct_association_cond_t *assoc_cond, acct_association_rec_t *assoc)
+List acct_storage_p_modify_assocs(void *db_conn, uint32_t uid,
+acct_assoc_cond_t *assoc_cond, acct_assoc_rec_t *assoc)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 Used to modify existing associations in the storage type.  The condition
   could include very vague information about the association, so this
@@ -474,8 +484,8 @@ removed on success, or<br>
 <span class="commandline">NULL</span> on failure.
 
 <p class="commandline">
-List acct_storage_p_remove_associations(void *db_conn, uint32_t uid,
-acct_association_cond_t *assoc_cond)
+List acct_storage_p_remove_assocs(void *db_conn, uint32_t uid,
+acct_assoc_cond_t *assoc_cond)
 <p style="margin-left:.2in"><b>Description</b>:<br>
 Used to remove associations from the storage type.  You need to make
   sure no jobs are running with any association that is to be removed.
@@ -591,10 +601,10 @@ on success, or<br>
 <span class="commandline">NULL</span> on failure.
 
 <p class="commandline">
-List acct_storage_p_get_associations(void *db_conn, uint32_t uid,
-acct_association_cond_t *assoc_cond)
+List acct_storage_p_get_assocs(void *db_conn, uint32_t uid,
+acct_assoc_cond_t *assoc_cond)
 <p style="margin-left:.2in"><b>Description</b>:<br>
-Get a list of acct_association_rec_t *'s based on the conditional sent.
+Get a list of acct_assoc_rec_t *'s based on the conditional sent.
 <p style="margin-left:.2in"><b>Arguments</b>: <br>
 <span class="commandline">db_conn</span> (input) connection to
 the storage type. <br>
@@ -604,7 +614,7 @@ function.<br>
 which associations are to be returned.  Association names should not need to
 be stated.<br>
 <p style="margin-left:.2in"><b>Returns</b>: <br>
-<span class="commandline">List</span> containing acct_association_rec_t *'s
+<span class="commandline">List</span> containing acct_assoc_rec_t *'s
 on success, or<br>
 <span class="commandline">NULL</span> on failure.
 
@@ -690,7 +700,7 @@ the storage type.<br>
 <span class="commandline">uid</span> (input) uid of user calling the
 function.<br>
 <span class="commandline">in</span> (input/out) can be anything that
-gathers usage like acct_association_rec_t * or acct_wckey_rec_t *.<br>
+gathers usage like acct_assoc_rec_t * or acct_wckey_rec_t *.<br>
 <span class="commandline">type</span> (input) really
 slurmdbd_msg_type_t should let the plugin know what the structure is
 that was sent in some how.<br>
@@ -1012,13 +1022,8 @@ database with.
   of those listed.
 </dl>
 
-<h2>Versioning</h2>
-<p> This document describes version 1 of the Slurm Accounting Storage API. Future
-releases of Slurm may revise this API. An Accounting Storage plugin conveys its
-ability to implement a particular API version using the mechanism outlined
-for Slurm plugins.
 <p class="footer"><a href="#top">top</a>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/acct_gather_energy_plugins.shtml b/doc/html/acct_gather_energy_plugins.shtml
index 7e1361acc..9152908f6 100644
--- a/doc/html/acct_gather_energy_plugins.shtml
+++ b/doc/html/acct_gather_energy_plugins.shtml
@@ -31,6 +31,16 @@ IPMI (Intelligent Platform Management Interface) tool.
 core/socket, using RAPL (Running Average Power Limit) sensors. Note that
 enabling RAPL may require the execution of the command "sudo modprobe msr".
 </ul>
+
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
 <p>The programmer is urged to study
 <span class="commandline">src/plugins/acct_gather_energy/rapl</span> and 
 <span class="commandline">src/common/slurm_acct_gather_energy.c</span>
@@ -112,15 +122,9 @@ plugin and the frequency at which to gather node energy data.</p>
 <dd>Time interval between pollings in seconds.
 </dl>
 
-<h2>Versioning</h2>
-<p>This document describes version 1 of the Slurm Energy Accounting API. Future
-releases of Slurm may revise this API. A, energy accounting plugin conveys its
-ability to implement a particular API version using the mechanism outlined
-for Slurm plugins.</p>
-
 <p class="footer"><a href="#top">top</a>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
 
diff --git a/doc/html/acct_gather_profile_plugins.shtml b/doc/html/acct_gather_profile_plugins.shtml
index e47765487..ece2fbd35 100644
--- a/doc/html/acct_gather_profile_plugins.shtml
+++ b/doc/html/acct_gather_profile_plugins.shtml
@@ -66,6 +66,16 @@ for the type of profile accounting. We currently use
 <li><b>hdf5</b>&#151;Gets profile data about energy use, i/o sources
 (Lustre, network) and task data such as local disk i/o,  CPU and memory usage.
 </ul>
+
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
 <p>The programmer is urged to study
 <span class="commandline">
 src/plugins/acct_gather_profile/acct_gather_profile_hdf5.c</span> and
@@ -229,8 +239,8 @@ It this parameter is not specified, no profiling will occur.
 <dt><span class="commandline">ProfileDefaultProfile</span>
 <dd>Default setting for --profile command line option for srun, salloc, sbatch.
 </dl>
-The default profile value is <b>none</b> which means no profiling will be done
-for jobs. The hdf5 plugin also includes;
+<p>The default profile value is <b>none</b> which means no profiling will be done
+for jobs. The hdf5 plugin also includes;</p>
 <ul>
 <li>
 <b>energy</b> sample energy use for the node.
@@ -249,31 +259,33 @@ for the node.
 <b>all</b> all of the above.
 </li>
 </ul>
-Use caution when setting the default to values other than none as a file for
-each job will be created. This option is provided for test systems.
+<p>Use caution when setting the default to values other than none as a file for
+each job will be created. This option is provided for test systems.</p>
+
 <p>Most of the sources of profile data are associated with various
 acct_gather plugins. The acct_gather.conf file has setting for various
 sampling mechanisms that can be used to change the frequency at which
-samples occur.
+samples occur.</p>
 
 <h2>Data Types</h2>
-A plugin-like structure is implemented to generalize HDF5 data operations from
+<p>A plugin-like structure is implemented to generalize HDF5 data operations from
 various sources. A <i>C</i> <b>typedef</b> is defined for each datatype. These
 declarations are in /common/slurm_acct_gather_profile.h so the datatype are
-common to all profile plugins.
-<p>
-The operations are defined via structures of function pointers, and they are
+common to all profile plugins.</p>
+
+<p>The operations are defined via structures of function pointers, and they are
 defined in /plugins/acct_gather_profile/common/profile_hdf5.h and should work
-on any HDF5 implementation, not only hdf5.
-<p>
-Functions must be implemented to perform various operations for the datatype.
+on any HDF5 implementation, not only hdf5.</p>
+
+<p>Functions must be implemented to perform various operations for the datatype.
 The api for the plugin includes an argument for the datatype so that the
-implementation of that api can call the specific operation for that datatype.
+implementation of that api can call the specific operation for that datatype.</p>
+
 <p>Groups in the HDF5 file containing a dataset will include an attribute for
 the datatype so that the program that merges step files into the job can
-discover the type of the group and do the right thing.
-<p>
-For example, the typedef for the energy sample datatype;
+discover the type of the group and do the right thing.</p>
+
+<p>For example, the typedef for the energy sample datatype;</p>
 <pre>
 typedef struct profile_energy {
     char     tod[TOD_LEN];	// Not used in node-step
@@ -333,18 +345,18 @@ typedef struct profile_hdf5_ops {
 } profile_hdf5_ops_t;
 </pre>
 
-Note there are two different data types for supporting time series.<br>
+<p>Note there are two different data types for supporting time series.<br>
 1) A primary type is defined for gathering data in the node step file.
 It is typically named profile_{series_name}_t.<br>
 2) Another type is defined for summarizing series totals.
 It is typically named profile_{series_name}_s_t. It does not have a 'factory'.
 It is only used in the functions of the primary data type and the
-primaries structure has operations to create appropriate hdf5 objects.
+primaries structure has operations to create appropriate hdf5 objects.</p>
 
 <p>When adding a new type, the <b>profile_factory</b> function has to be
-modified to return an <i>ops</i> for the type.
+modified to return an <i>ops</i> for the type.</p>
 
-<p>Interaction between type and hdf5.
+<p>Interaction between type and hdf5.</p>
 <ul>
 <li>
 The profile_{type}_t structure is used by callers of the <b>add_sample_data</b>
@@ -362,14 +374,9 @@ dataset.) The <i>create_file_datatype</i> function creates
 the appropriate object.
 </li>
 </ul>
-<h2>Versioning</h2>
-<p>This document describes version 1 of the Slurm Profile Accounting API.
-Future releases of Slurm may revise this API. A profile accounting plugin
-conveys its ability to implement a particular API version using the mechanism
-outlined for Slurm plugins.</p>
 
 <p class="footer"><a href="#top">top</a>
 
-<p style="text-align:center;">Last modified 9 June 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/authplugins.shtml b/doc/html/authplugins.shtml
index 68cc9ed2e..02918ce55 100644
--- a/doc/html/authplugins.shtml
+++ b/doc/html/authplugins.shtml
@@ -5,7 +5,7 @@
 <h2> Overview</h2>
 <p> This document describes Slurm authentication plugins and the API that defines
 them. It is intended as a resource to programmers wishing to write their own SLURM
-authentication plugins. This is version 100 of the API.</p>
+authentication plugins.</p>
 <p>Slurm authentication plugins are Slurm plugins that implement the Slurm authentication
 API described herein. They must conform to the Slurm Plugin API with the following
 specifications:</p>
@@ -19,9 +19,19 @@ production use due to lack of effective security.</li>
 <li><b>authd</b>&#151;Brett Chun's Linux authd.</li>
 <li><b>munge</b>&#151;LLNL's Munge protocol (recommended plugin for production use).</li>
 </ul>
-<p>The <span class="commandline">plugin_name</span> and <span class="commandline">plugin_version</span>
-symbols required by the Slurm Plugin API require no specialization for authentication.
-Note carefully, however, the versioning discussion below.</p>
+
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
 <p>The programmer is urged to study <span class="commandline">src/plugins/auth/none/auth_none.c</span>
 for an example implementation of a Slurm authentication plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
@@ -223,16 +233,8 @@ by the API.</p>
 <p style="margin-left:.2in"><b>Returns</b>: A pointer to a static error message.
 This function must always return a pointer to a string, even if the string is
 empty or ambiguous such as &quot;unknown error.&quot;</p>
-
-<h2>Versioning</h2>
-<p> This document describes version 0 of the Slurm Authentication API. Future
-releases of Slurm may revise this API. An authentication plugin conveys its ability
-to implement a particular API version using the mechanism outlined for Slurm plugins.
-In addition, the credential is transmitted along with the version number of the
-plugin that transmitted it. It is at the discretion of the plugin author whether
-to maintain data format compatibility across different versions of the plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/bb_plugins.shtml b/doc/html/bb_plugins.shtml
new file mode 100644
index 000000000..496f0f9d6
--- /dev/null
+++ b/doc/html/bb_plugins.shtml
@@ -0,0 +1,234 @@
+<!--#include virtual="header.txt"-->
+
+<h1><a name="top">Burst Buffer Plugin Programmer Guide</a></h1>
+
+<h2> Overview</h2>
+<p> This document describes the Slurm burst buffer plugins and the
+APIs that define them. It is intended as a resource to programmers
+wishing to write their own Slurm burst buffer plugin.
+
+<p>Slurm burst buffer plugins must conform to the
+Slurm Plugin API with the following specifications:
+
+<p><span class="commandline">const char
+plugin_name[]="<i>full&nbsp;text&nbsp;name</i>"</span>
+<p style="margin-left:.2in">
+A free-formatted ASCII text string that identifies the plugin.
+
+<p><span class="commandline">const char
+plugin_type[]="<i>major/minor</i>"</span><br>
+<p style="margin-left:.2in">
+
+The major type must be &quot;burst_buffer&quot;.
+The minor type can be any suitable name for the type of burst buffer
+package.
+The following burst buffer plugins are included in the Slurm distribution
+<ul>
+<li><b>cray</b>&#151;Use Cray APIs to provide burst buffer.</li>
+<li><b>generic</b>&#151;Use generic burst buffer plugin.</li>
+</ul>
+
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
+<h2>API Functions</h2>
+<p>All of the following functions are required. Functions which are not
+implemented must be stubbed.
+
+<p class="commandline"> int init (void)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Called when the plugin is loaded, before any other functions are
+called. Put global initialization here.
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+<span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+<span class="commandline">SLURM_ERROR</span> on failure.</p>
+
+<p class="commandline"> void fini (void)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Called when the plugin is removed. Clear any allocated storage here.
+<p style="margin-left:.2in"><b>Returns</b>: None.</p>
+
+<p><b>Note</b>: These init and fini functions are not the same as those
+described in the <span class="commandline">dlopen (3)</span> system library.
+The C run-time system co-opts those symbols for its own initialization.
+The system <span class="commandline">_init()</span> is called before the Slurm
+<span class="commandline">init()</span>, and the Slurm
+<span class="commandline">fini()</span> is called before the system's
+<span class="commandline">_fini()</span>.</p>
+
+<p class="commandline">
+int bb_p_load_state(bool init_config)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+This function loads the current state of the burst buffer.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">init_config</span>
+(input) true if called as part of slurmctld initialization.
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+A Slurm errno
+
+<p class="commandline">
+int bb_p_state_pack(uid_t uid, Buf buffer, uint16_t protocol_version)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Pack current burst buffer state information for network transmission.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">uid</span>
+(input) Owning user ID.<br>
+<span class="commandline">buffer</span>
+(input) buffer that will be packed.<br>
+<span class="commandline">protocol_version</span>
+(input) Version number of the data packing mechanism.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+A Slurm errno
+
+<p class="commandline">
+int bb_p_reconfig(void)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Reread the burst buffer config file when it is updated.
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+A Slurm errno
+
+<p class="commandline">
+uint64_t bb_p_get_systm_size(char *name)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Get the total burst buffer size in MB of a given plugin name.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">name</span>
+(input) Plugin name of the burst buffer. If name is NULL, return the total
+space of all burst buffer plugins.
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+The size of the burst buffer in MB.
+
+<p class="commandline">
+int bb_p_job_validate(struct job_descriptor *job_desc, uid_t submit_uid)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Validation of a job submit request with respect to burst buffer option.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">job_desc</span>
+(input) Job submission request.<br>
+<span class="commandline">submit_uid</span>
+(input) ID of the user submitting the job.
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+A Slurm errno.
+
+<p class="commandline">
+int bb_p_job_validate2(struct job_record *job_ptr, char **err_msg)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Validation of a job submit request with respect to burst buffer option.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">job_ptr</span>
+(input) Job record for the job request with respect to burst buffer.<br>
+<span class="commandline">err_msg</span>
+(output) Error message, sent directly to job submission command<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+A Slurm errno.
+
+<p class="commandline">
+void bb_p_job_set_tres_cnt(struct job_record *job_ptr,
+uint64_t *tres_cnt, bool locked);
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Set the tres count in the job record.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">job_ptr</span>
+(input) Job record to be set.<br>
+<span class="commandline">tres_cnt</span>
+(input/output) Fill in this already allocated array with tres_cnts<br>
+<span class="commandline">locked</span>
+(input) If tres read lock is locked or not.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+None
+
+<p class="commandline">
+time_t bb_p_job_get_est_start(struct job_record *job_ptr)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Get an estimation of when a job can start.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">job_ptr</span>
+(input) Start time of this job.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+Estimated start time of job_ptr.
+
+<p class="commandline">
+int bb_p_job_try_stage_in(void)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Allocate burst buffers to jobs expected to start soonest.
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+A Slurm errno
+
+<p class="commandline">
+int bb_p_job_test_stage_in(struct job_record *job_ptr, bool test_only)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Determine if a job's burst buffer stage-in is complete.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">job_ptr</span>
+(input) Job record to test.<br>
+<span class="commandline">test_only</span>
+(input) If false, then attempt to load burst buffer if possible.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+0 stage-in is underway<br>
+1 stage-in complete<br>
+-1 stage-in not started or burst buffer in some unexpected state.
+
+<p class="commandline">
+int bb_p_job_begin(struct job_record *job_ptr)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Attempt to claim burst buffer resources.<br>
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">job_ptr</span>
+(input) Job record to test.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+A Slurm errno
+
+<p class="commandline">
+int bb_p_job_start_stage_out(struct job_record *job_ptr)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Trigger a job's burst buffer stage out to begin.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">job_ptr</span>
+(input) Job to stage out.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+A Slurm errno
+
+<p class="commandline">
+int bb_p_job_test_stage_out(struct job_record *job_ptr)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Determine if a job's stage out is complete.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">job_ptr</span>
+(input) Job to check if stage out is complete.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+0 - stage-out is underway<br>
+1 - stage-out complete<br>
+-1 - fatal error
+
+<p class="commandline">
+int bb_p_job_cancel(struct job_record *job_ptr)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Terminate any file staging and release burst buffer resources.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">job_ptr</span>
+(input) Job to cancel.<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+A Slurm errno
+
+<p class="commandline">
+char *bb_p_xlate_bb_2_tres_str(char *burst_buffer)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Translate burst buffer string to TRES string.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">burst_buffer</span>
+(input) Burst buffer to translate to TRES string<br>
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+The TRES string of the given burst buffer (Note: User must xfree the
+return value).
+
+<p class="footer"><a href="#top">top</a>
+
+<p style="text-align:center;">Last modified 25 August 2015</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/big_sys.shtml b/doc/html/big_sys.shtml
index 786b6e83d..d1e22fc31 100644
--- a/doc/html/big_sys.shtml
+++ b/doc/html/big_sys.shtml
@@ -102,6 +102,12 @@ will both make for easier administration and better performance.</p>
 
 <h2>Timers</h2>
 
+<p>The <i>EioTimeout</i> configuration parameter controls how long the srun
+command will wait for the slurmstepd to close the TCP/IP connection used to
+relay data between the user application and srun when the user application
+terminates. The default value is 60 seconds. Larger systems and/or slower
+networks may need a higher value.</p>
+
 <p>If a high throughput of jobs is anticipated (i.e. large numbers of jobs
 with brief execution times) then configure <i>MinJobAge</i> to the smallest
 interval practical for your environment. <i>MinJobAge</i> specifies the
@@ -159,6 +165,6 @@ the hard limit in order to process all of the standard input and output
 connections to the launched tasks. It is recommended that you set the
 open file hard limit to 8192 across the cluster.</p>
 
-<p style="text-align:center;">Last modified 13 November 2013</p>
+<p style="text-align:center;">Last modified 22 July 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/burst_buffer.shtml b/doc/html/burst_buffer.shtml
new file mode 100644
index 000000000..4bf3c08e3
--- /dev/null
+++ b/doc/html/burst_buffer.shtml
@@ -0,0 +1,273 @@
+<!--#include virtual="header.txt"-->
+
+<h1>Slurm Burst Buffer Guide</h1>
+
+<ul>
+<li><a href="#overview">Overview</a></li>
+<li><a href="#configuration">Configuration (for system administrators)</a></li>
+<li><a href="#submit">Job Submission Commands</a></li>
+<li><a href="#persist">Persistent Burst Buffer Creation and Deletion Directives</a></li>
+<li><a href="#interactive">Interactive Job Options</a></li>
+<li><a href="#status">Status Commands</a></li>
+<li><a href="#reservation">Advanced Reservations</a></li>
+</ul>
+
+<h2><a name="overview">Overview</a></h2>
+
+<p>Slurm version 15.08 includes support for
+<a href="http://www.mcs.anl.gov/papers/P2070-0312.pdf">burst buffers</a>,
+a shared high-speed storage resource.
+Slurm provides support for allocating these resources, staging files in,
+scheduling compute nodes for jobs using these resources, then staging files out.
+Burst buffers can also be used as temporary storage during a job's lifetime,
+without file staging.
+Another typical use case is for persistent storage, not associated with any
+specific job.
+This support is provided using a plugin mechanism so that various burst
+buffer infrastructures may be easily configured.
+Two plugins are provided initially:</p>
+<ol>
+<li><b>cray</b> - Uses Cray APIs to perform underlying management functions</li>
+<li><b>generic</b> - Uses system administrator defined scripts to perform
+underlying management functions, currently under development</li>
+</ol>
+<p>Additional plugins may be provided in future releases of Slurm.</p>
+
+<p>Slurm's mode of operation follows this general pattern:</p>
+<ol start="0">
+<li>Read configuration information and initial state information</li>
+<li>After expected start times for pending jobs are established, allocate
+    burst buffers to those jobs expected to start earliest and start stage-in
+    of required files</li>
+<li>After stage-in has completed, jobs can be allocated compute nodes and begin
+    execution</li>
+<li>After job has completed execution, begin file stage-out from burst buffer</li>
+<li>After file stage-out has completed, burst buffer can be released and the
+    job record purged</li>
+</ol>
+
+<h2><a name="configuration">Configuration  (for system administrators)</a></h2>
+
+<p>Burst buffer support in Slurm is enabled by specifying the plugin(s) to be
+loaded for managing these resources using the <i>BurstBufferType</i>
+configuration parameter in the <i>slurm.conf</i> file.
+Multiple plugin names may be specified in a comma separated list.
+Detailed logging of burst buffer specific actions may be generated for debugging
+purposes by using the <i>DebugFlags=BurstBuffer</i> configuration parameter.
+The BurstBuffer DebugFlags (like many other DebugFlags) can result in very
+verbose logging and is only intended for diagnostic purposes rather than for
+use in a production system.</p>
+
+<pre>
+# Excerpt of example slurm.conf file
+BurstBufferType=burst_buffer/generic
+# DebugFlags=BurstBuffer # Commented out
+</pre>
+
+<p>Burst buffer specific options should be defined in a <i>burst_buffer.conf</i>
+file.
+If multiple burst buffer plugins are configured, an independent configuration
+file can be specified for each plugin with a file name including the plugin name.
+For example files named "burst_buffer_cray.conf" and
+"burst_buffer_generic.conf" can be used independently by the cray and generic
+burst buffer plugins respectively.
+This file can contain information about who can or can not use burst buffers,
+timeouts, and paths of scripts used to perform various functions, etc.
+<a href="resource_limits.html">TRES limits</a> can be configured to establish
+limits by association, QOS, etc.
+The size of a job's burst buffer requirements can be used as a factor in
+setting the job priority as described in the
+<a href="priority_multifactor.html">multifactor priority</a> document.</p>
+
+<p>Note that sample scripts for performing burst buffer operations for the
+burst_buffer/generic plugin are included with the Slurm distribution.
+These scripts were intended for Slurm development and testing purposes and
+are <b>not</b> currently intended for customer use.
+Contributions of usable scripts/programs are invited.
+For more information about the functionality that might be included in the
+scripts, please see
+<a href="http://www.k9mach3.org/wickberg-thesis.pdf">
+The Ramdisk Storage Accelerator</a> by Timothy B. Wickberg.
+Note that these scripts/programs are executed as SlurmUser rather than user
+root.
+Please see the <i>burst_buffer.conf</i> man page for more configuration
+information.</p>
+
+<pre>
+# Excerpt of burst_buffer.conf file for generic plugin
+AllowUsers=alan:brenda
+Flags=EnablePersistent,PrivateData
+Granularity=1GB
+StageInTimeout=30
+StageOutTimeout=30
+#
+GetSysState=/usr/local/slurm/15.08/sbin/GSS
+StartStageIn=/usr/local/slurm/15.08/sbin/SSI
+StartStageOut=/usr/local/slurm/15.08/sbin/SSO
+StopStageIn=/usr/local/slurm/15.08/sbin/PSI
+StopStageOut=/usr/local/slurm/15.08/sbin/PSO
+</pre>
+
+<p><b>Note for Cray systems:</b> The JSON-C library must be installed in order
+to build Slurm's burst_buffer/cray plugin, which must parse JSON format data.
+See Slurm's <a href="download.html#json">JSON installation information</a>
+for details.</p>
+
+<h2><a name="submit">Job Submission Commands</a></h2>
+
+<p>The normal mode of operation is for batch jobs to specify burst buffer
+requirements within the batch script.
+Batch script lines containing a prefix of "#BB" identify the job's burst buffer
+space requirements, files to be staged in, files to be staged out, etc.
+when using the <u>burst_buffer/generic</u> plugin.
+The prefix of "#DW" (for "DataWarp") is used for burst buffer directives when
+using the <u>burst_buffer/cray</u> plugin.
+Please reference Cray documentation for details about the DataWarp options.
+For DataWarp systems, the prefix of "#BB" can be used to create or delete
+persistent burst buffer storage (NOTE: The "#BB" prefix is used since the
+command is interpreted by Slurm and not by the Cray Datawarp software).
+Interactive jobs (those submitted using the <i>salloc</i> and <i>srun</i>
+commands) can specify their burst buffer space requirements using the "--bb"
+or "--bbf" command line options, as described later in this document.</p>
+
+<p>A basic validation is performed on the job's burst buffer options at job
+submit time.
+If the options are invalid, the job will be rejected and an error message will
+be returned directly to the user.
+If the job is accepted, but later fails (e.g. some problem staging files), the
+job will be held and its "Reason" field will be set to error message provided
+by the underlying infrastructure.</p>
+
+<p>Users may also request to be notified by email upon completion of burst
+buffer stage out using the "--mail-type=stage_out" or "--mail-type=all" option.
+The subject line of the email will be of this form:</p>
+<pre>
+SLURM Job_id=12 Name=my_app Staged Out, StageOut time 00:05:07
+</pre>
+
+<p><b>NOTE:</b> Burst buffer support is currently not available for job arrays.</p>
+
+<h2><a name="persist">Persistent Burst Buffer Creation and Deletion Directives</a></h2>
+
+<p>These options are used for both the <u>burst_buffer/cray</u> and
+<u>burst_buffer/generic</u> plugins to create and delete persistent burst
+buffers, which have a lifetime independent of the job.
+These options are only available for batch jobs.</p>
+<ul>
+<li>#BB create_persistent name=&lt;name&gt; capacity=&lt;number&gt; [access=&lt;access&gt;] [type=&lt;type&gt;]</li>
+<li>#BB destroy_persistent name=&lt;name&gt; [hurry]</li>
+</ul>
+<p>The persistent burst buffer name may not start with a numeric value (numeric
+names are reserved for job-specific burst buffers).
+The size specification can include a suffix of M, G, T, P, etc. for megabytes,
+gigabytes, terabytes, petabytes, etc.
+Multiple persistent burst buffers may be created or deleted within a single
+job.
+A sample batch script follows:</p>
+<pre>
+#!/bin/bash
+#BB create_persistent name=alpha capacity=32GB access=striped
+#DW jobdw type=scratch capacity=1GB access_mode=striped
+#DW stage_in  type=file source=/home/alan/data.in  destination=/ss/data
+#DW stage_out type=file destination=/home/alan/data.out source=/ss/data
+/home/alan/a.out
+</pre>
+
+<p><b>NOTE:</b> The ability to create and destroy persistent burst buffers may be
+limited by the "Flags" option in the burst_buffer.conf file.
+By default only privileged users can create or destroy persistent burst buffers.</p>
+
+<h2><a name="interactive">Interactive Job Options</a></h2>
+
+<p>Interactive jobs may include directives for creating job specific burst
+buffers as well as file staging.
+These options may be specified using either the "--bb" or "--bbf" option of
+the salloc or srun command.
+The "--bbf" option take as an argument a filename and that file should contain
+a collection of burst buffer operations identical to that used for batch jobs.
+This file may contain file staging directives.
+Alternately the "--bb" option may be used to specify burst buffer directives
+as the option argument. The format of those directives can either be identical
+to those used in a batch script OR a very limited set of directives can be used,
+which are translated to the equivalent script for later processing.
+Multiple directives should be space separated.</p>
+<ul>
+<li>access=&lt;access&gt;</li>
+<li>capacity=&lt;number&gt;</li>
+<li>swap=&lt;number&gt;</li>
+<li>type=&lt;type&gt;</li>
+</ul>
+<p>If a swap option is specified, the job must also specify the required
+node count.
+The capacity specification can include a suffix of M, G, T, P, etc. for
+megabytes, gigabytes, terabytes, petabytes, etc.
+A sample command line follows and we also show the equivalent burst buffer
+script generated by the options:</p>
+<pre>
+# Sample execute line:
+srun --bb="capacity=1G access=striped type=scratch" a.out
+
+# Equivalent script as generated by Slurm's burst_buffer/cray plugin
+#DW jobdw capacity=1GiB access_mode=striped type=scratch
+</pre>
+
+<h2><a name="status">Status Commands</a></h2>
+
+<p>Slurm's current burst buffer state information is available using the 
+<i>scontrol show burst</i> command or by using the <i>sview</i> command's
+<i>Burst Buffer</i> tab. A sample scontrol output is shown below. The scontrol
+"-v" option may be used for a more verbose output format.</p>
+
+<pre>
+$ scontrol show burst
+Name=generic DefaultPool=ssd Granularity=100G TotalSpace=50T UsedSpace=42T
+  StageInTimeout=30 StageOutTimeout=30 Flags=EnablePersistent,PrivateData
+  AllowUsers=alan:brenda
+  CreateBuffer=/usr/local/slurm/15.08/sbin/CB
+  DestroyBuffer=/usr/local/slurm/15.08/sbin/DB
+  GetSysState=/usr/local/slurm/15.08/sbin/GSS
+  StartStageIn=/usr/local/slurm/15.08/sbin/SSI
+  StartStageIn=/usr/local/slurm/15.08/sbin/SSO
+  StopStageIn=/usr/local/slurm/15.08/sbin/PSI
+  StopStageIn=/usr/local/slurm/15.08/sbin/PSO
+  Allocated Buffers:
+    JobID=18 CreateTime=2015-08-19T16:46:05 Size=10T State=allocated UserID=alan(1000)
+    JobID=20 CreateTime=2015-08-19T16:46:45 Size=10T State=allocated UserID=alan(1000)
+    Name=DB1 CreateTime=2015-08-19T16:46:45 Size=22T State=allocated UserID=brenda(1001)
+  Per User Buffer Use:
+    UserID=alan(1000) Used=20T
+    UserID=brenda(1001) Used=22T
+</pre>
+
+<h2><a name="reservation">Advanced Reservations</a></h2>
+
+<p>Burst buffer resources can be placed in an advanced reservation using the
+<i>BurstBuffer</i> option.
+The argument consists of four elements:<br>
+[plugin:][type:]#[units]<br><br>
+<i>plugin</i> is the burst buffer plugin name, currently either "cray" or "generic".
+If no plugin is specified, the reservation applies to all configured burst
+buffer plugins.<br><br>
+<i>type</i> specifies a Cray generic burst buffer resource, for example "nodes".
+If "type" is not specified, the number is a measure of storage space.<br><br>
+<i>units</i> may be "N" (nodes), "M" (megabytes), "G" (gigabytes),
+"T" (terabytes), "P" (petabytes), etc. with the default units being bytes for
+reservations of storage space.<br><br>
+
+Jobs using this reservation are not restricted to these burst buffer resources,
+but may use these reserved resources plus any which are generally available.
+Some examples follow.</p>
+
+<pre>
+$ scontrol create reservation starttime=now duration=60 \
+  users=alan flags=any_nodes \
+  burstbuffer=cray:100G,generic:20G
+
+$ scontrol create reservation StartTime=noon duration=60 \
+  users=brenda NodeCnt=8 \
+  BurstBuffer=cray:20G
+</pre>
+
+<p style="text-align:center;">Last modified 25 August 2015</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/checkpoint_plugins.shtml b/doc/html/checkpoint_plugins.shtml
index 83e6105de..a26eee51a 100644
--- a/doc/html/checkpoint_plugins.shtml
+++ b/doc/html/checkpoint_plugins.shtml
@@ -5,7 +5,7 @@
 <h2> Overview</h2>
 <p> This document describes Slurm job checkpoint plugins and the API that defines
 them. It is intended as a resource to programmers wishing to write their own SLURM
-job checkpoint plugins. This is version 100 of the API.</p>
+job checkpoint plugins.</p>
 
 <p>Slurm job checkpoint plugins are Slurm plugins that implement the SLURM
 API for checkpointing and restarting jobs.
@@ -24,11 +24,18 @@ Berkeley Lab Checkpoint/Restart (BLCR)</a></li>
 <li><b>ompi</b>&#151;OpenMPI checkpoint (requires OpenMPI version 1.3 or higher).</li>
 </ul></p>
 
-<p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span>
-symbols required by the Slurm Plugin API require no specialization for
-job checkpoint support.
-Note carefully, however, the versioning discussion below.</p>
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
 
 <p>The programmer is urged to study
 <span class="commandline">src/plugins/checkpoint/checkpoint_aix.c</span>
@@ -208,14 +215,8 @@ of a tasks from a checkpoint image, called by <b>slurmstepd</b>.</p>
 On failure, the plugin should return SLURM_ERROR and set the error_code
 and error_msg to an appropriate value to indicate the reason for failure.</p>
 
-
-<h2>Versioning</h2>
-<p> This document describes version 100 of the Slurm checkpoint API.
-Future releases of Slurm may revise this API.
-A checkpoint plugin conveys its ability to implement a particular API
-version using the mechanism outlined for Slurm plugins.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/configurator.html.in b/doc/html/configurator.html.in
index ad6e8c5bc..50e21cef8 100644
--- a/doc/html/configurator.html.in
+++ b/doc/html/configurator.html.in
@@ -336,7 +336,7 @@ Define the hostname of the computer on which the Slurm controller and
 optional backup controller will execute. You can also specify addresses
 of these computers if desired (defaults to their hostnames).
 The IP addresses can be either numeric IP addresses or names.
-Hostname values should should not be the fully qualified domain
+Hostname values should not be the fully qualified domain
 name (e.g. use <I>tux</I> rather than <I>tux.abc.com</I>).
 <P>
 <input type="text" name="control_machine" value="linux0"> <B>ControlMachine</B>:
diff --git a/doc/html/core_spec_plugins.shtml b/doc/html/core_spec_plugins.shtml
index e33e4c0d7..c094d6459 100644
--- a/doc/html/core_spec_plugins.shtml
+++ b/doc/html/core_spec_plugins.shtml
@@ -3,9 +3,9 @@
 <h1><a name="top">Core Specialization Plugin Programmer Guide</a></h1>
 
 <h2> Overview</h2>
-<p> This document describe. Slurm core specialization plugins and the API that
-defines them. It is intended as a resource to programmers wishing to write
-their own Slurm core specialization plugins. This is version 100 of the API.
+<p> This document describes the Slurm core specialization plugins and the APIs
+that define them. It is intended as a resource to programmers wishing to write
+their own Slurm core specialization plugin. This is version 100 of the API.
 
 <p>Slurm core specialization plugins must conform to the
 Slurm Plugin API with the following specifications:
@@ -29,6 +29,15 @@ otherwise does nothing.</li>
 </ul>
 <p>Slurm can be configured to use multiple core specialization plugins if desired.</p>
 
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
 <p><b>NOTE:</b> These functions all accept as an argument the job step's
 container ID (as set by the proctrack plugin).
 Each job step will have a different container ID.
@@ -133,11 +142,8 @@ node.
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
-<h2>Versioning</h2>
-<p> This document describes version 100 of the Slurm core specialization API.
-Future releases of Slurm may revise this API.
 <p class="footer"><a href="#top">top</a>
 
-<p style="text-align:center;">Last modified 12 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/cpu_management.shtml b/doc/html/cpu_management.shtml
index 99953dd5f..e91105ab8 100644
--- a/doc/html/cpu_management.shtml
+++ b/doc/html/cpu_management.shtml
@@ -3,8 +3,8 @@
 <h1> CPU Management User and Administrator Guide</h1>
 <a name="Overview"></a>
 <h2>Overview</h2>
-<p>The purpose of this guide is to assist Slurm users and administrators in selecting configuration options 
-and composing command lines to manage the use of CPU resources by jobs, steps and tasks. The document 
+<p>The purpose of this guide is to assist Slurm users and administrators in selecting configuration options
+and composing command lines to manage the use of CPU resources by jobs, steps and tasks. The document
 is divided into the following sections:</p>
 <ul>
 <li><a href="#Overview">Overview</a></li>
@@ -15,18 +15,18 @@ is divided into the following sections:</p>
 </ul>
 
 <p>CPU Management through user commands is constrained by the configuration parameters
-chosen by the Slurm administrator. The interactions between different CPU management options are complex 
-and often difficult to predict. Some experimentation may be required to discover the exact combination 
-of options  needed to produce a desired outcome. Users and administrators should refer to the man pages 
+chosen by the Slurm administrator. The interactions between different CPU management options are complex
+and often difficult to predict. Some experimentation may be required to discover the exact combination
+of options  needed to produce a desired outcome. Users and administrators should refer to the man pages
 for <a href="slurm.conf.html">slurm.conf</a>, <a href="cgroup.conf.html">cgroup.conf</a>,
-<a href="salloc.html">salloc</a>, 
-<a href="sbatch.html">sbatch</a> and <a href="srun.html">srun</a> for detailed explanations of each 
+<a href="salloc.html">salloc</a>,
+<a href="sbatch.html">sbatch</a> and <a href="srun.html">srun</a> for detailed explanations of each
 option. The following html documents may also be useful:</p>
 
 <p>
 <a href="cons_res.html">Consumable Resources in Slurm</a><br>
 <a href="cons_res_share.html">Sharing Consumable Resources</a><br>
-<a href="mc_support.html">Support for Multi-core/Multi-thread 
+<a href="mc_support.html">Support for Multi-core/Multi-thread
 Architectures</a><br>
 <a href="dist_plane.html">Plane distribution</a></p>
 
@@ -39,19 +39,19 @@ information on Cray and IBM BlueGene systems, please refer to the appropriate do
 <li><a href="#Step1">Step 1: Selection of Nodes</a>
 </li><li><a href="#Step2">Step 2: Allocation of CPUs from the selected Nodes</a>
 </li><li><a href="#Step3">Step 3: Distribution of Tasks to the selected Nodes</a>
-</li><li><a href="#Step4">Step 4: Optional Distribution and Binding of Tasks to CPUs within a Node 
+</li><li><a href="#Step4">Step 4: Optional Distribution and Binding of Tasks to CPUs within a Node
 </a>
 </li></ul>
 <a name="Step1"></a>
 <h3>Step 1: Selection of Nodes</h3>
-<p>In Step 1, Slurm selects the set of nodes from which CPU resources are to be allocated to a job or 
+<p>In Step 1, Slurm selects the set of nodes from which CPU resources are to be allocated to a job or
 job step.  Node selection is therefore influenced by many of the configuration and command line options
 that control the allocation of CPUs (Step 2 below).
 If <font face="Courier New, monospace">
 SelectType=select/linear</font> is configured, all resources on the selected nodes will be allocated
-to the job/step. If <font face="Courier New, monospace">SelectType=select/cons_res</font> is configured, 
-individual sockets, cores and threads may be allocated from the selected nodes as 
-<a href="cons_res.html">consumable resources</a>. The consumable resource type is defined by 
+to the job/step. If <font face="Courier New, monospace">SelectType=select/cons_res</font> is configured,
+individual sockets, cores and threads may be allocated from the selected nodes as
+<a href="cons_res.html">consumable resources</a>. The consumable resource type is defined by
 <font face="Courier New, monospace">SelectTypeParameters.</font>
 <br>
 <br>
@@ -59,7 +59,7 @@ Step 1 is performed by slurmctld and the select plugin.
 <br>
 </p><center>
 	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
-                <caption style="font-size: 8pt"><b>slurm.conf options that control Step 1</b></caption>
+		<caption style="font-size: 8pt"><b>slurm.conf options that control Step 1</b></caption>
 		<colgroup><col width="20%">
 		<col width="20%">
 		<col width="60%">
@@ -126,7 +126,7 @@ Step 1 is performed by slurmctld and the select plugin.
 			</td>
 			<td height="18" width="20%">
 				<p align="LEFT"><font style="font-size: 8pt" size="1">
-                               <font face="Courier New, monospace">select/linear | select/cons_res</font></font></p>
+			       <font face="Courier New, monospace">select/linear | select/cons_res</font></font></p>
 			</td>
 			<td width="60%">
 				<p align="LEFT"><font style="font-size: 8pt" size="1">Controls
@@ -151,26 +151,26 @@ CR_Core_Memory | CR_Socket | CR_Socket_Memory</font><br><br>Plus additional opti
 			</td>
 		</tr>
 	</tbody></table>
-</center> 
+</center>
 <br>
 <center>
 	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
-                <caption style="font-size: 8pt"><b>srun/salloc/sbatch command line options that control Step 1</b></caption>
+		<caption style="font-size: 8pt"><b>srun/salloc/sbatch command line options that control Step 1</b></caption>
 		<colgroup><col width="20%">
 		<col width="20%">
 		<col width="60%">
 		</colgroup><tbody><tr>
 			<td bgcolor="#e0e0e0" height="17" width="20%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-                    <b style="font-family: Arial, Helvetica, sans-serif">Command
+		    <b style="font-family: Arial, Helvetica, sans-serif">Command
 				line option</b></font></p>
 			</td>
 			<td bgcolor="#e0e0e0" width="20%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-                    <b style="font-family: Arial, Helvetica, sans-serif">Possible values</b></font></p>
+		    <b style="font-family: Arial, Helvetica, sans-serif">Possible values</b></font></p>
 			</td><td bgcolor="#e0e0e0" width="60%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-                    <b style="font-family: Arial, Helvetica, sans-serif">Description</b></font></p>
+		    <b style="font-family: Arial, Helvetica, sans-serif">Description</b></font></p>
 			</td>
 		</tr>
 		<tr>
@@ -275,7 +275,7 @@ CR_Core_Memory | CR_Socket | CR_Socket_Memory</font><br><br>Plus additional opti
 			</td>
 			<td width="20%">
 				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">compute_bound |
-                                memory_bound | [no]multithread</font></font></p>
+				memory_bound | [no]multithread</font></font></p>
 			</td>
 			<td width="60%">
 				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Additional
@@ -448,7 +448,7 @@ CR_Core_Memory | CR_Socket | CR_Socket_Memory</font><br><br>Plus additional opti
 			</td>
 			<td width="20%">
 				<p align="LEFT"><font style="font-size: 8pt" size="1">
-                                &lt;host1,host2,... or filename&gt;</font></p>
+				&lt;host1,host2,... or filename&gt;</font></p>
 			</td>
 			<td width="60%">
 				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">List
@@ -474,21 +474,21 @@ CR_Core_Memory | CR_Socket | CR_Socket_Memory</font><br><br>Plus additional opti
 <br>
 <a name="Step2"></a>
 <h3>Step 2: Allocation of CPUs from the selected Nodes</h3>
-<p>In Step 2, Slurm allocates CPU resources to a job/step from the set of nodes selected 
+<p>In Step 2, Slurm allocates CPU resources to a job/step from the set of nodes selected
 in Step 1. CPU allocation is therefore influenced by the configuration and command line options
 that relate to node selection.
 If <font face="Courier New, monospace">
 SelectType=select/linear</font> is configured, all resources on the selected nodes will be allocated
-to the job/step. If <font face="Courier New, monospace">SelectType=select/cons_res</font> is configured, 
-individual sockets, cores and threads may be allocated from the selected nodes as 
-<a href="cons_res.html">consumable resources</a>. The consumable resource type is defined by 
+to the job/step. If <font face="Courier New, monospace">SelectType=select/cons_res</font> is configured,
+individual sockets, cores and threads may be allocated from the selected nodes as
+<a href="cons_res.html">consumable resources</a>. The consumable resource type is defined by
 <font face="Courier New, monospace">SelectTypeParameters.</font>
 <br>
-</p><p>When using <font face="Courier New, monospace">SelectType=select/cons_res</font>, 
-the default allocation method across nodes is block allocation (allocate all available CPUs in 
-a node before using another node). The default allocation method within a node is cyclic 
-allocation (allocate available CPUs in a round-robin fashion across the sockets within a node). 
-Users may override the default behavior using the appropriate command 
+</p><p>When using <font face="Courier New, monospace">SelectType=select/cons_res</font>,
+the default allocation method across nodes is block allocation (allocate all available CPUs in
+a node before using another node). The default allocation method within a node is cyclic
+allocation (allocate available CPUs in a round-robin fashion across the sockets within a node).
+Users may override the default behavior using the appropriate command
 line options described below.  The choice of allocation methods may influence which specific
 CPUs are allocated to the job/step.
 <br><br>
@@ -496,7 +496,7 @@ Step 2 is performed by slurmctld and the select plugin.
 <br>
 </p><center>
 	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
-                <caption style="font-size: 8pt"><b>slurm.conf options that control Step 2</b></caption>
+		<caption style="font-size: 8pt"><b>slurm.conf options that control Step 2</b></caption>
 		<colgroup><col width="20%">
 		<col width="20%">
 		<col width="60%">
@@ -563,7 +563,7 @@ Step 2 is performed by slurmctld and the select plugin.
 			</td>
 			<td height="18" width="20%">
 				<p align="LEFT"><font style="font-size: 8pt" size="1">
-                               <font face="Courier New, monospace">select/linear | select/cons_res</font></font></p>
+			       <font face="Courier New, monospace">select/linear | select/cons_res</font></font></p>
 			</td>
 			<td width="60%">
 				<p align="LEFT"><font style="font-size: 8pt" size="1">Controls
@@ -588,26 +588,26 @@ CR_Core_Memory | CR_Socket | CR_Socket_Memory</font><br><br>Plus additional opti
 			</td>
 		</tr>
 	</tbody></table>
-</center> 
+</center>
 <br>
 <center>
 	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
-                <caption style="font-size: 8pt"><b>srun/salloc/sbatch command line options that control Step 2</b></caption>
+		<caption style="font-size: 8pt"><b>srun/salloc/sbatch command line options that control Step 2</b></caption>
 		<colgroup><col width="20%">
 		<col width="20%">
 		<col width="60%">
 		</colgroup><tbody><tr>
 			<td bgcolor="#e0e0e0" height="17" width="20%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-                    <b style="font-family: Arial, Helvetica, sans-serif">Command
+		    <b style="font-family: Arial, Helvetica, sans-serif">Command
 				line option</b></font></p>
 			</td>
 			<td bgcolor="#e0e0e0" width="20%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-                    <b style="font-family: Arial, Helvetica, sans-serif">Possible values</b></font></p>
+		    <b style="font-family: Arial, Helvetica, sans-serif">Possible values</b></font></p>
 			</td><td bgcolor="#e0e0e0" width="60%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-                    <b style="font-family: Arial, Helvetica, sans-serif">Description</b></font></p>
+		    <b style="font-family: Arial, Helvetica, sans-serif">Description</b></font></p>
 			</td>
 		</tr>
 		<tr>
@@ -726,7 +726,7 @@ CR_Core_Memory | CR_Socket | CR_Socket_Memory</font><br><br>Plus additional opti
 			</td>
 			<td width="20%">
 				<p align="LEFT"><font face="Courier New, monospace"><font style="font-size: 8pt" size="1">compute_bound |
-                                memory_bound | [no]multithread</font></font></p>
+				memory_bound | [no]multithread</font></font></p>
 			</td>
 			<td width="60%">
 				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">Additional
@@ -899,7 +899,7 @@ CR_Core_Memory | CR_Socket | CR_Socket_Memory</font><br><br>Plus additional opti
 			</td>
 			<td width="20%">
 				<p align="LEFT"><font style="font-size: 8pt" size="1">
-                                &lt;host1,host2,... or filename&gt;</font></p>
+				&lt;host1,host2,... or filename&gt;</font></p>
 			</td>
 			<td width="60%">
 				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">List
@@ -925,19 +925,19 @@ CR_Core_Memory | CR_Socket | CR_Socket_Memory</font><br><br>Plus additional opti
 <br>
 <a name="Step3"></a>
 <h3>Step 3: Distribution of Tasks to the selected Nodes</h3>
-<p>In Step 3, Slurm distributes tasks to the nodes that were selected for 
-the job/step in Step 1. Each task is distributed to only one node, but more than one 
+<p>In Step 3, Slurm distributes tasks to the nodes that were selected for
+the job/step in Step 1. Each task is distributed to only one node, but more than one
 task may be distributed to each node.  Unless overcommitment of CPUs to tasks is
 specified for the job, the number of tasks distributed to a node is
 constrained by the number of CPUs allocated on the node and the number of CPUs per
-task. If consumable resources is configured, or resource sharing is allowed, tasks from 
-more than one job/step may run on the same node concurrently.  
+task. If consumable resources is configured, or resource sharing is allowed, tasks from
+more than one job/step may run on the same node concurrently.
 <br><br>
 Step 3 is performed by slurmctld.
 <br>
 </p><center>
 	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
-                <caption style="font-size: 8pt"><b>slurm.conf options that control Step 3</b></caption>
+		<caption style="font-size: 8pt"><b>slurm.conf options that control Step 3</b></caption>
 		<colgroup><col width="20%">
 		<col width="20%">
 		<col width="60%">
@@ -972,22 +972,22 @@ Step 3 is performed by slurmctld.
 <br>
 <center>
 	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
-                <caption style="font-size: 8pt"><b>srun/salloc/sbatch command line options that control Step 3</b></caption>
+		<caption style="font-size: 8pt"><b>srun/salloc/sbatch command line options that control Step 3</b></caption>
 		<colgroup><col width="20%">
 		<col width="20%">
 		<col width="60%">
 		</colgroup><tbody><tr>
 			<td bgcolor="#e0e0e0" height="17" width="20%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-                    <b style="font-family: Arial, Helvetica, sans-serif">Command
+		    <b style="font-family: Arial, Helvetica, sans-serif">Command
 				line option</b></font></p>
 			</td>
 			<td bgcolor="#e0e0e0" width="20%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-                    <b style="font-family: Arial, Helvetica, sans-serif">Possible values</b></font></p>
+		    <b style="font-family: Arial, Helvetica, sans-serif">Possible values</b></font></p>
 			</td><td bgcolor="#e0e0e0" width="60%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-                    <b style="font-family: Arial, Helvetica, sans-serif">Description</b></font></p>
+		    <b style="font-family: Arial, Helvetica, sans-serif">Description</b></font></p>
 			</td>
 		</tr>
 		<tr>
@@ -1001,8 +1001,8 @@ Step 3 is performed by slurmctld.
 			</td>
 			<td width="60%">
 				<p align="LEFT"><font style="font-size: 8pt" size="1">The first specified distribution (before the ":")
-				controls the sequence in which tasks are distributed to each of the selected nodes. Note that 
-				this option does not affect the number of tasks distributed to each node, but only the sequence of 
+				controls the sequence in which tasks are distributed to each of the selected nodes. Note that
+				this option does not affect the number of tasks distributed to each node, but only the sequence of
 				distribution.</font></p>
 			</td>
 		</tr>
@@ -1064,16 +1064,16 @@ Step 3 is performed by slurmctld.
 <br>
 <a name="Step4"></a>
 <h3>Step 4: Optional Distribution and Binding of Tasks to CPUs within a Node</h3>
-<p>In optional Step 4, Slurm distributes and binds each task to a specified subset of 
-the allocated CPUs on the node to which the task was distributed in Step 3. Different 
-tasks distributed to the same node may be bound to the same subset of CPUs or to 
+<p>In optional Step 4, Slurm distributes and binds each task to a specified subset of
+the allocated CPUs on the node to which the task was distributed in Step 3. Different
+tasks distributed to the same node may be bound to the same subset of CPUs or to
 different subsets. This step is known as task affinity or task/CPU binding.
 <br><br>
 Step 4 is performed by slurmd and the task plugin.
 <br>
 </p><center>
 	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
-                <caption style="font-size: 8pt"><b>slurm.conf options that control Step 4</b></caption>
+		<caption style="font-size: 8pt"><b>slurm.conf options that control Step 4</b></caption>
 		<colgroup><col width="20%">
 		<col width="20%">
 		<col width="60%">
@@ -1114,15 +1114,15 @@ Step 4 is performed by slurmd and the task plugin.
 			</td>
 			<td width="60%">
 				<p align="LEFT"><font style="font-size: 8pt" size="1">
-				For task/affinity, controls the binding unit (sockets, cores or threads) and the 
-                                binding method (sched or cpusets)</font></p>
+				For task/affinity, controls the binding unit (sockets, cores or threads) and the
+				binding method (sched or cpusets)</font></p>
 			</td>
 		</tr>
 	</tbody></table>
 </center>
 </p><center>
 	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
-                <caption style="font-size: 8pt"><b>cgroup.conf options that control Step 4 (task/cgroup plugin only)</b></caption>
+		<caption style="font-size: 8pt"><b>cgroup.conf options that control Step 4 (task/cgroup plugin only)</b></caption>
 		<colgroup><col width="20%">
 		<col width="20%">
 		<col width="60%">
@@ -1172,22 +1172,22 @@ Step 4 is performed by slurmd and the task plugin.
 <br>
 <center>
 	<table style="page-break-inside: avoid; font-family: Arial,Helvetica,sans-serif;" border="1" bordercolor="#000000" cellpadding="6" cellspacing="0" width="100%">
-                <caption style="font-size: 8pt"><b>srun/salloc/sbatch command line options that control Step 4</b></caption>
+		<caption style="font-size: 8pt"><b>srun/salloc/sbatch command line options that control Step 4</b></caption>
 		<colgroup><col width="20%">
 		<col width="20%">
 		<col width="60%">
 		</colgroup><tbody><tr>
 			<td bgcolor="#e0e0e0" height="17" width="20%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-                    <b style="font-family: Arial, Helvetica, sans-serif">Command
+		    <b style="font-family: Arial, Helvetica, sans-serif">Command
 				line option</b></font></p>
 			</td>
 			<td bgcolor="#e0e0e0" width="20%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-                    <b style="font-family: Arial, Helvetica, sans-serif">Possible values</b></font></p>
+		    <b style="font-family: Arial, Helvetica, sans-serif">Possible values</b></font></p>
 			</td><td bgcolor="#e0e0e0" width="60%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-                    <b style="font-family: Arial, Helvetica, sans-serif">Description</b></font></p>
+		    <b style="font-family: Arial, Helvetica, sans-serif">Description</b></font></p>
 			</td>
 		</tr>
 		<tr>
@@ -1228,7 +1228,7 @@ Step 4 is performed by slurmd and the task plugin.
 			</td>
 			<td width="60%">
 				<p align="LEFT"><font face="Arial, sans-serif"><font style="font-size: 8pt" size="1">
-				The second specified distribution (after the ":") controls the sequence in which tasks are 
+				The second specified distribution (after the ":") controls the sequence in which tasks are
 				distributed to allocated CPUs within a node for binding of tasks to CPUs</font></font></p>
 			</td>
 		</tr>
@@ -1237,24 +1237,24 @@ Step 4 is performed by slurmd and the task plugin.
 </center>
 <br><br>
 <h2>Additional Notes on CPU Management Steps</h2>
-<p>For consumable resources, it is important for users to understand the difference between 
-cpu allocation (Step 2) and task affinity/binding (Step 4).  Exclusive (unshared) allocation 
-of CPUs as consumable resources limits the number of jobs/steps/tasks that 
-can use a node concurrently.  But it does not limit the set of CPUs on the node that each 
-task distributed to the node can use.  Unless some form of CPU/task binding is used 
-(e.g., a task or spank plugin), all tasks distributed to a node can use all of 
-the CPUs on the node, including CPUs not allocated to their job/step.  This may have 
-unexpected adverse effects on performance, since it allows one job to use CPUs allocated 
-exclusively to another job.  For this reason, it may not be advisable to configure 
-consumable resources without also configuring task affinity.  Note that task affinity 
-can also be useful when select/linear (whole node allocation) is configured, to improve 
-performance by restricting each task to a particular socket or other subset of CPU 
+<p>For consumable resources, it is important for users to understand the difference between
+cpu allocation (Step 2) and task affinity/binding (Step 4).  Exclusive (unshared) allocation
+of CPUs as consumable resources limits the number of jobs/steps/tasks that
+can use a node concurrently.  But it does not limit the set of CPUs on the node that each
+task distributed to the node can use.  Unless some form of CPU/task binding is used
+(e.g., a task or spank plugin), all tasks distributed to a node can use all of
+the CPUs on the node, including CPUs not allocated to their job/step.  This may have
+unexpected adverse effects on performance, since it allows one job to use CPUs allocated
+exclusively to another job.  For this reason, it may not be advisable to configure
+consumable resources without also configuring task affinity.  Note that task affinity
+can also be useful when select/linear (whole node allocation) is configured, to improve
+performance by restricting each task to a particular socket or other subset of CPU
 resources on a node.</p>
 <br><br>
 <a name="Section2"></a>
 <h2>Getting Information about CPU usage by Jobs/Steps/Tasks</h2>
-<p>There is no easy way to generate a comprehensive set of CPU management information 
-for a job/step (allocation, distribution and binding). However, several 
+<p>There is no easy way to generate a comprehensive set of CPU management information
+for a job/step (allocation, distribution and binding). However, several
 commands/options provide limited information about CPU usage.
 </p>
 <center>
@@ -1270,13 +1270,13 @@ commands/options provide limited information about CPU usage.
 		<tr>
 			<td width="30%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-scontrol show job option: 
+scontrol show job option:
 <font face="Courier New, monospace">--details</font></font></p>
 			</td>
 			<td width="70%">
 				<p align="LEFT"><font style="font-size: 8pt" size="1">
-This option provides a list of the nodes selected for the job and the CPU ids allocated to the job on each 
-node. Note that the CPU ids reported by this command are Slurm abstract CPU ids, not Linux/hardware CPU ids 
+This option provides a list of the nodes selected for the job and the CPU ids allocated to the job on each
+node. Note that the CPU ids reported by this command are Slurm abstract CPU ids, not Linux/hardware CPU ids
 (as reported by, for example, /proc/cpuinfo).
 </font></p>
 			</td>
@@ -1314,13 +1314,13 @@ SLURMD_NODENAME<br>
 		<tr>
 			<td width="30%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-srun/salloc/sbatch option: 
+srun/salloc/sbatch option:
 <font face="Courier New, monospace">--cpu_bind=verbose</font></font></p>
 			</td>
 			<td width="70%">
 				<p align="LEFT"><font style="font-size: 8pt" size="1">
-This option provides a list of the CPU masks used by task affinity to bind tasks to CPUs. 
-Note that the CPU ids represented by these masks are Linux/hardware CPU ids, not Slurm 
+This option provides a list of the CPU masks used by task affinity to bind tasks to CPUs.
+Note that the CPU ids represented by these masks are Linux/hardware CPU ids, not Slurm
 abstract CPU ids as reported by scontrol, etc.
 </font></p>
 			</td>
@@ -1328,13 +1328,13 @@ abstract CPU ids as reported by scontrol, etc.
 		<tr>
 			<td width="30%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-srun/salloc/sbatch option: 
+srun/salloc/sbatch option:
 <font face="Courier New, monospace">-l</font></font></p>
 			</td>
 			<td width="70%">
 				<p align="LEFT"><font style="font-size: 8pt" size="1">
-This option adds the task id as a prefix to each line of output from a task sent to stdout/stderr. 
-This can be useful for distinguishing node-related and CPU-related information by task id 
+This option adds the task id as a prefix to each line of output from a task sent to stdout/stderr.
+This can be useful for distinguishing node-related and CPU-related information by task id
 for multi-task jobs/steps.
 </font></p>
 			</td>
@@ -1342,13 +1342,13 @@ for multi-task jobs/steps.
 		<tr>
 			<td width="30%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">
-Linux command:<br> 
+Linux command:<br>
 <font face="Courier New, monospace">cat /proc/&lt;pid&gt;/status | grep Cpus_allowed_list</font></font></p>
 			</td>
 			<td width="70%">
 				<p align="LEFT"><font style="font-size: 8pt" size="1">
-Given a task's pid (or "self" if the command is executed by the task itself), this command 
-produces a list of the CPU ids bound to the task. This is the same information that is 
+Given a task's pid (or "self" if the command is executed by the task itself), this command
+produces a list of the CPU ids bound to the task. This is the same information that is
 provided by <font face="Courier New, monospace">--cpu_bind=verbose</font>, but in a more readable format.
 </font></p>
 			</td>
@@ -1372,7 +1372,7 @@ usage at the level of users, groups and clusters. For details, see the sacctmgr
 <p>The following examples illustrate some scenarios for managing CPU
 resources using Slurm. Many additional scenarios are possible. In
 each example, it is assumed that all CPUs on each node are available
-for allocation.</p> 
+for allocation.</p>
 <ul>
 <li><a href="#Example">Example Node and Partition Configuration</a><br>
 </li><li><a href="#Example1">Example 1: Allocation of whole nodes</a><br>
@@ -1389,7 +1389,7 @@ for allocation.</p>
 </li><li><a href="#Example12">Example 12: Consumable resources with task affinity and socket binding, Case 2</a>
 </li><li><a href="#Example13">Example 13: Consumable resources with task affinity and socket binding, Case 3</a>
 </li><li><a href="#Example14">Example 14: Consumable resources with task affinity and customized allocation and distribution</a>
-</li><li><a href="#Example15">Example 15: Consumable resources with task affinity to optimize the performance of a multi-task, 
+</li><li><a href="#Example15">Example 15: Consumable resources with task affinity to optimize the performance of a multi-task,
 multi-thread job</a>
 </li><li><a href="#Example16">Example 16: Consumable resources with task cgroup and core binding</a>
 </li></ul><br>
@@ -1559,7 +1559,7 @@ multi-thread job</a>
 Nodename=n1 NodeAddr=node1 Sockets=2 CoresPerSocket=4 ThreadsPerCore=1 Procs=8 State=IDLE
 Nodename=n2 NodeAddr=node2 Sockets=2 CoresPerSocket=4 ThreadsPerCore=1 Procs=8 State=IDLE
 Nodename=n3 NodeAddr=node3 Sockets=2 CoresPerSocket=4 ThreadsPerCore=2 Procs=16 State=IDLE
-PartitionName=regnodes Nodes=n0,n1,n2 Shared=YES Default=YES State=UP 
+PartitionName=regnodes Nodes=n0,n1,n2 Shared=YES Default=YES State=UP
 PartitionName=hypernode Nodes=n3 State=UP
 </pre>
 <br>
@@ -1580,7 +1580,7 @@ Slurm to allocate at least 2 nodes to the job.<p></p>
 <br>
 <a name="Example2"></a>
 <h3>Example 2: Simple allocation of cores as consumable resources</h3>
-<p>A job requires 6 CPUs (2 tasks and 3 CPUs per task with no overcommitment). Allocate the 6 CPUs as consumable resources 
+<p>A job requires 6 CPUs (2 tasks and 3 CPUs per task with no overcommitment). Allocate the 6 CPUs as consumable resources
 from a single node in the default partition.</p>
 <p>slurm.conf options:</p>
 <pre>SelectType=select/cons_res
@@ -1590,7 +1590,7 @@ SelectTypeParameters=CR_Core
 <pre>srun --nodes=1-1 --ntasks=2 --cpus-per-task=3 ...
 </pre>
 <p>Comments:</p>
-<p>The <font face="Courier New, monospace">SelectType</font> configuration options define cores as consumable resources. 
+<p>The <font face="Courier New, monospace">SelectType</font> configuration options define cores as consumable resources.
 The <font face="Courier New, monospace">--nodes=1-1</font> srun option
  restricts the job to a single node. The following table shows a possible pattern of allocation
   for this job.
@@ -1650,7 +1650,7 @@ The <font face="Courier New, monospace">--nodes=1-1</font> srun option
 <br>
 <a name="Example3"></a>
 <h3>Example 3: Consumable resources with balanced allocation across nodes</h3>
-<p>A job requires 9 CPUs (3 tasks and 3 CPUs per task with no overcommitment). 
+<p>A job requires 9 CPUs (3 tasks and 3 CPUs per task with no overcommitment).
 Allocate 3 CPUs from each of the 3 nodes in the default partition.
 </p>
 <p>slurm.conf options:</p>
@@ -1721,8 +1721,8 @@ SelectTypeParameters=CR_Core
 <br>
 <a name="Example4"></a>
 <h3>Example 4: Consumable resources with minimization of resource fragmentation</h3>
-<p>A job requires 12 CPUs (12 tasks and 1 CPU per task with no overcommitment). Allocate 
-CPUs using the minimum number of nodes and the minimum number of sockets required for 
+<p>A job requires 12 CPUs (12 tasks and 1 CPU per task with no overcommitment). Allocate
+CPUs using the minimum number of nodes and the minimum number of sockets required for
 the job in order to minimize fragmentation of allocated/unallocated CPUs in the cluster.
 </p>
 <p>slurm.conf options:</p>
@@ -1734,11 +1734,11 @@ SelectTypeParameters=CR_Core,CR_CORE_DEFAULT_DIST_BLOCK
 </pre>
 <p>Comments:</p>
 <p>The default allocation method across nodes is block. This minimizes the number of nodes
- used for the job. The configuration option <font face="Courier New, monospace"> 
- CR_CORE_DEFAULT_DIST_BLOCK</font> sets the default allocation method within a 
- node to block. This minimizes the number of sockets used for the job within a node. 
- The combination of these two methods causes Slurm to allocate the 12 CPUs using the 
- minimum required number of nodes (2 nodes) and sockets (3 sockets).The following 
+ used for the job. The configuration option <font face="Courier New, monospace">
+ CR_CORE_DEFAULT_DIST_BLOCK</font> sets the default allocation method within a
+ node to block. This minimizes the number of sockets used for the job within a node.
+ The combination of these two methods causes Slurm to allocate the 12 CPUs using the
+ minimum required number of nodes (2 nodes) and sockets (3 sockets). The following
  table shows a possible pattern of allocation for this job.
 </p>
 <center>
@@ -1831,7 +1831,7 @@ SelectTypeParameters=CR_Core,CR_CORE_DEFAULT_DIST_BLOCK
 <br>
 <a name="Example5"></a>
 <h3>Example 5: Consumable resources with cyclic distribution of tasks to nodes</h3>
-<p>A job requires 12 CPUs (6 tasks and 2 CPUs per task with no overcommitment). Allocate 
+<p>A job requires 12 CPUs (6 tasks and 2 CPUs per task with no overcommitment). Allocate
 6 CPUs each from 2 nodes in the default partition. Distribute tasks to nodes cyclically.
 </p>
 <p>slurm.conf options:</p>
@@ -1843,11 +1843,11 @@ SelectTypeParameters=CR_Core
 --ntasks=6 --cpus-per-task=2 ...
 </pre>
 <p>Comments:</p>
-<p>The options specify the following conditions for the job: 6 tasks, 2 unique CPUs per task, 
-using exactly 2 nodes, and with 3 tasks per node. To satisfy these conditions, Slurm 
+<p>The options specify the following conditions for the job: 6 tasks, 2 unique CPUs per task,
+using exactly 2 nodes, and with 3 tasks per node. To satisfy these conditions, Slurm
 must allocate 6 CPUs from each of the 2 nodes. The <font face="Courier New, monospace">
---distribution=cyclic</font> option causes the tasks to be distributed to the nodes in a 
-round-robin fashion. The following table shows a possible pattern of allocation and 
+--distribution=cyclic</font> option causes the tasks to be distributed to the nodes in a
+round-robin fashion. The following table shows a possible pattern of allocation and
 distribution for this job.
 </p>
 <center>
@@ -1920,7 +1920,7 @@ distribution for this job.
 <br>
 <a name="Example6"></a>
 <h3>Example 6: Consumable resources with default allocation and plane distribution of tasks to nodes</h3>
-<p>A job requires 16 CPUs (8 tasks and 2 CPUs per task with no overcommitment). 
+<p>A job requires 16 CPUs (8 tasks and 2 CPUs per task with no overcommitment).
 Use all 3 nodes in the default partition. Distribute tasks to each node in blocks of two in a round-robin fashion.
 </p>
 <p>slurm.conf options:</p>
@@ -1931,15 +1931,15 @@ SelectTypeParameters=CR_Core
 <pre>srun --nodes=3-3 --distribution=plane=2 --ntasks=8 --cpus-per-task=2 ...
 </pre>
 <p>Comments:</p>
-<p>The options specify the following conditions for the job: 8 tasks, 2 unique CPUs 
-per task, using all 3 nodes in the partition. To satisfy these conditions using 
-the default allocation method across nodes (block), Slurm allocates 8 CPUs from 
-the first node, 6 CPUs from the second node and 2 CPUs from the third node. 
-The <font face="Courier New, monospace">--distribution=plane=2</font> option causes Slurm 
+<p>The options specify the following conditions for the job: 8 tasks, 2 unique CPUs
+per task, using all 3 nodes in the partition. To satisfy these conditions using
+the default allocation method across nodes (block), Slurm allocates 8 CPUs from
+the first node, 6 CPUs from the second node and 2 CPUs from the third node.
+The <font face="Courier New, monospace">--distribution=plane=2</font> option causes Slurm
 to distribute tasks in blocks of two to each of the nodes in a round-robin fashion,
 subject to the number of CPUs allocated on each node.  So, for example, only 1 task
 is distributed to the third node because only 2 CPUs were allocated on that node and
-each task requires 2 CPUs. The following table shows a possible pattern of allocation 
+each task requires 2 CPUs. The following table shows a possible pattern of allocation
 and distribution for this job.
 </p>
 <center>
@@ -2006,7 +2006,7 @@ and distribution for this job.
 				 3<br>7</font></p>
 			</td>
 			<td width="19">
-				<p align="CENTER"><font style="font-size: 8pt" size="1">4<br> 
+				<p align="CENTER"><font style="font-size: 8pt" size="1">4<br>
 				</font></p>
 			</td>
 		</tr>
@@ -2025,8 +2025,8 @@ SelectTypeParameters=CR_Core
 <pre>srun --nodes=1-1 --ntasks=20 --overcommit ...
 </pre>
 <p>Comments:</p>
-<p>The 
-<font face="Courier New, monospace">--overcommit</font> option allows the job to 
+<p>The
+<font face="Courier New, monospace">--overcommit</font> option allows the job to
 run in only one node by overcommitting CPUs to tasks.The following table shows
  a possible pattern of allocation and distribution for this job.
 </p>
@@ -2102,7 +2102,7 @@ run in only one node by overcommitting CPUs to tasks.The following table shows
 <br>
 <a name="Example8"></a>
 <h3>Example 8: Consumable resources with resource sharing between jobs</h3>
-<p>2 jobs each require 6 CPUs (6 tasks per job with no overcommitment). 
+<p>2 jobs each require 6 CPUs (6 tasks per job with no overcommitment).
 Run both jobs simultaneously in a single node.
 </p>
 <p>slurm.conf options:</p>
@@ -2114,15 +2114,15 @@ SelectTypeParameters=CR_Core
 srun --nodes=1-1 --nodelist=n0 --ntasks=6 --share ...
 </pre>
 <p>Comments:</p>
-<p>The <font face="Courier New, monospace">--nodes=1-1</font> and <font face="Courier New, monospace">--nodelist=n0</font> 
-srun options together restrict both jobs to node n0. The 
-<font face="Courier New, monospace">Shared=YES</font> option in the partition definition plus 
-the <font face="Courier New, monospace">--share</font> srun option allows the two 
+<p>The <font face="Courier New, monospace">--nodes=1-1</font> and <font face="Courier New, monospace">--nodelist=n0</font>
+srun options together restrict both jobs to node n0. The
+<font face="Courier New, monospace">Shared=YES</font> option in the partition definition plus
+the <font face="Courier New, monospace">--share</font> srun option allows the two
 jobs to share CPUs on the node.
 </p><br>
 <a name="Example9"></a>
 <h3>Example 9: Consumable resources on multithreaded node, allocating only one thread per core</h3>
-<p>A job requires 8 CPUs (8 tasks with no overcommitment). Run the job on node n3, 
+<p>A job requires 8 CPUs (8 tasks with no overcommitment). Run the job on node n3,
 allocating only one thread per core.
 </p>
 <p>slurm.conf options:</p>
@@ -2133,11 +2133,11 @@ SelectTypeParameters=CR_CPU
 <pre>srun --partition=hypernode --ntasks=8 --hint=nomultithread ...
 </pre>
 <p>Comments:</p>
-<p>The <font face="Courier New, monospace">CR_CPU</font> configuration 
-option enables the allocation of only one thread per core. 
-The <font face="Courier New, monospace">--hint=nomultithread</font> 
-srun option causes Slurm to allocate only one thread from each core to 
-this job. The following table shows a possible pattern of allocation 
+<p>The <font face="Courier New, monospace">CR_CPU</font> configuration
+option enables the allocation of only one thread per core.
+The <font face="Courier New, monospace">--hint=nomultithread</font>
+srun option causes Slurm to allocate only one thread from each core to
+this job. The following table shows a possible pattern of allocation
 for this job.
 </p>
  <center>
@@ -2291,7 +2291,7 @@ for this job.
 <br>
 <a name="Example10"></a>
 <h3>Example 10: Consumable resources with task affinity and core binding</h3>
-<p>A job requires 6 CPUs (6 tasks with no overcommitment). Run the job in a 
+<p>A job requires 6 CPUs (6 tasks with no overcommitment). Run the job in a
 single node in the default partition. Apply core binding to each task.
 </p>
 <p>slurm.conf options:</p>
@@ -2304,11 +2304,11 @@ TaskPluginParam=sched
 <pre>srun --nodes=1-1 --ntasks=6 --cpu_bind=cores ...
 </pre>
 <p>Comments:</p>
-<p>Using the default allocation method within nodes (cyclic), Slurm allocates 
-3 CPUs on each socket of 1 node. Using the default distribution method 
-within nodes (cyclic), Slurm distributes and binds each task to an allocated 
-core in a round-robin fashion across the sockets. The following table shows 
-a possible pattern of allocation, distribution and binding for this job. 
+<p>Using the default allocation method within nodes (cyclic), Slurm allocates
+3 CPUs on each socket of 1 node. Using the default distribution method
+within nodes (cyclic), Slurm distributes and binds each task to an allocated
+core in a round-robin fashion across the sockets. The following table shows
+a possible pattern of allocation, distribution and binding for this job.
 For example, task id 2 is bound to CPU id 1.
 </p><p>
 </p><center>
@@ -2433,7 +2433,7 @@ For example, task id 2 is bound to CPU id 1.
 <br>
 </a><a name="Example11"></a>
 <h3>Example 11: Consumable resources with task affinity and socket binding, Case 1</h3>
-<p>A job requires 6 CPUs (6 tasks with no overcommitment). Run the job in 
+<p>A job requires 6 CPUs (6 tasks with no overcommitment). Run the job in
 a single node in the default partition. Apply socket binding to each task.
 </p>
 <p>slurm.conf options:</p>
@@ -2446,11 +2446,11 @@ TaskPluginParam=sched
 <pre>srun --nodes=1-1 --ntasks=6 --cpu_bind=sockets ...
 </pre>
 <p>Comments:</p>
-<p>Using the default allocation method within nodes (cyclic), Slurm allocates 3 
-CPUs on each socket of 1 node. Using the default distribution method within nodes 
-(cyclic), Slurm distributes and binds each task to all of the allocated CPUs in 
-one socket in a round-robin fashion across the sockets. The following table shows 
-a possible pattern of allocation, distribution and binding for this job. For 
+<p>Using the default allocation method within nodes (cyclic), Slurm allocates 3
+CPUs on each socket of 1 node. Using the default distribution method within nodes
+(cyclic), Slurm distributes and binds each task to all of the allocated CPUs in
+one socket in a round-robin fashion across the sockets. The following table shows
+a possible pattern of allocation, distribution and binding for this job. For
 example, task ids 1, 3 and 5 are all bound to CPU ids 4, 5 and 6.
 </p>
 <center>
@@ -2565,7 +2565,7 @@ example, task ids 1, 3 and 5 are all bound to CPU ids 4, 5 and 6.
 <br>
 <a name="Example12"></a>
 <h3>Example 12: Consumable resources with task affinity and socket binding, Case 2</h3>
-<p>A job requires 6 CPUs (2 tasks with 3 cpus per task and no overcommitment). Run the job in 
+<p>A job requires 6 CPUs (2 tasks with 3 cpus per task and no overcommitment). Run the job in
 a single node in the default partition. Allocate cores using the block allocation method.
 Distribute cores using the block distribution method. Apply socket binding to each task.
 </p>
@@ -2580,14 +2580,14 @@ TaskPluginParam=sched
 --distribution=block:block ...
 </pre>
 <p>Comments:</p>
-<p>Using the block allocation method, Slurm allocates 4 
-CPUs on one socket and 2 CPUs on the other socket of one node. Using the block distribution method within  
+<p>Using the block allocation method, Slurm allocates 4
+CPUs on one socket and 2 CPUs on the other socket of one node. Using the block distribution method within
 nodes, Slurm distributes 3 CPUs to each task.  Applying socket binding, Slurm binds each task to all
-allocated CPUs in all sockets in which the task has a distributed CPU. The following table shows 
+allocated CPUs in all sockets in which the task has a distributed CPU. The following table shows
 a possible pattern of allocation, distribution and binding for this job. In this example, using the
-block allocation method CPU ids 0-3 are allocated on socket id 0 and CPU ids 4-5 are allocated on 
-socket id 1.  Using the block distribution method, CPU ids 0-2 were distributed to task id 0, and CPU ids 
-3-5 were distributed to task id 1.  Applying socket binding, task id 0 is therefore bound to the allocated 
+block allocation method CPU ids 0-3 are allocated on socket id 0 and CPU ids 4-5 are allocated on
+socket id 1.  Using the block distribution method, CPU ids 0-2 were distributed to task id 0, and CPU ids
+3-5 were distributed to task id 1.  Applying socket binding, task id 0 is therefore bound to the allocated
 CPUs on socket 0, and task id 1 is bound to the allocated CPUs on both sockets.
 </p>
 <center>
@@ -2697,7 +2697,7 @@ CPUs on socket 0, and task id 1 is bound to the allocated CPUs on both sockets.
 <br>
 <a name="Example13"></a>
 <h3>Example 13: Consumable resources with task affinity and socket binding, Case 3</h3>
-<p>A job requires 6 CPUs (2 tasks with 3 cpus per task and no overcommitment). Run the job in 
+<p>A job requires 6 CPUs (2 tasks with 3 cpus per task and no overcommitment). Run the job in
 a single node in the default partition. Allocate cores using the block allocation method.
 Distribute cores using the cyclic distribution method. Apply socket binding to each task.
 </p>
@@ -2712,14 +2712,14 @@ TaskPluginParam=sched
 --distribution=block:cyclic ...
 </pre>
 <p>Comments:</p>
-<p>Using the block allocation method, Slurm allocates 4 
-CPUs on one socket and 2 CPUs on the other socket of one node. Using the cyclic distribution method within  
+<p>Using the block allocation method, Slurm allocates 4
+CPUs on one socket and 2 CPUs on the other socket of one node. Using the cyclic distribution method within
 nodes, Slurm distributes 3 CPUs to each task.  Applying socket binding, Slurm binds each task to all
-allocated CPUs in all sockets in which the task has a distributed CPU. The following table shows 
+allocated CPUs in all sockets in which the task has a distributed CPU. The following table shows
 a possible pattern of allocation, distribution and binding for this job. In this example, using the
-block allocation method CPU ids 0-3 are allocated on socket id 0 and CPU ids 4-5 are allocated on 
-socket id 1.  Using the cyclic distribution method, CPU ids 0, 1 and 4 were distributed to task id 0, and CPU ids 
-2, 3 and 5 were distributed to task id 1.  Applying socket binding, both tasks are therefore bound to the 
+block allocation method CPU ids 0-3 are allocated on socket id 0 and CPU ids 4-5 are allocated on
+socket id 1.  Using the cyclic distribution method, CPU ids 0, 1 and 4 were distributed to task id 0, and CPU ids
+2, 3 and 5 were distributed to task id 1.  Applying socket binding, both tasks are therefore bound to the
 allocated CPUs on both sockets.
 </p>
 <center>
@@ -2829,9 +2829,9 @@ allocated CPUs on both sockets.
 <br>
 <a name="Example14"></a>
 <h3>Example 14: Consumable resources with task affinity and customized allocation and distribution</h3>
-<p>A job requires 18 CPUs (18 tasks with no overcommitment). Run the job in the 
-default partition. Allocate 6 CPUs on each node using block allocation within 
-nodes. Use cyclic distribution of tasks to nodes and block distribution of 
+<p>A job requires 18 CPUs (18 tasks with no overcommitment). Run the job in the
+default partition. Allocate 6 CPUs on each node using block allocation within
+nodes. Use cyclic distribution of tasks to nodes and block distribution of
 tasks for CPU binding.
 </p>
 <p>slurm.conf options:</p>
@@ -2845,17 +2845,17 @@ TaskPluginParam=sched
 --distribution=cyclic:block --cpu_bind=cores ...
 </pre>
 <p>Comments:</p>
-<p>This example shows the use of task affinity with customized allocation of CPUs and 
-distribution of tasks across nodes and within nodes for binding. The srun options 
-specify the following conditions for the job: 18 tasks, 1 unique CPU per task, using 
-all 3 nodes in the partition, with 6 tasks per node. 
-The <font face="Courier New, monospace">CR_CORE_DEFAULT_DIST_BLOCK</font> 
-configuration option specifies block allocation within nodes. To satisfy these 
-conditions, Slurm allocates 6 CPUs on each node, with 4 CPUs allocated on one socket 
+<p>This example shows the use of task affinity with customized allocation of CPUs and
+distribution of tasks across nodes and within nodes for binding. The srun options
+specify the following conditions for the job: 18 tasks, 1 unique CPU per task, using
+all 3 nodes in the partition, with 6 tasks per node.
+The <font face="Courier New, monospace">CR_CORE_DEFAULT_DIST_BLOCK</font>
+configuration option specifies block allocation within nodes. To satisfy these
+conditions, Slurm allocates 6 CPUs on each node, with 4 CPUs allocated on one socket
 and 2 CPUs on the other socket. The <font face="Courier New, monospace">
---distribution=cyclic:block</font> option specifies cyclic distribution of 
-tasks to nodes and block distribution of tasks to CPUs within nodes for binding. 
-The following table shows a possible pattern of allocation, distribution and binding 
+--distribution=cyclic:block</font> option specifies cyclic distribution of
+tasks to nodes and block distribution of tasks to CPUs within nodes for binding.
+The following table shows a possible pattern of allocation, distribution and binding
 for this job. For example, task id 10 is bound to CPU id 3 on node n1.
 </p>
 <center>
@@ -2958,30 +2958,30 @@ for this job. For example, task id 10 is bound to CPU id 3 on node n1.
 			</td>
 			<td colspan="8" width="24%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">0<br>
-                        3<br>
-                        6<br>
-                        9<br>
-                        12<br>
-                        15
-                        </font></p>
+			3<br>
+			6<br>
+			9<br>
+			12<br>
+			15
+			</font></p>
 			</td>
 			<td colspan="8" width="24%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">1<br>
-                        4<br>
-                        7<br>
-                        10<br>
-                        13<br>
-                        16
-                        </font></p>
+			4<br>
+			7<br>
+			10<br>
+			13<br>
+			16
+			</font></p>
 			</td>
 			<td colspan="8" width="24%">
 				<p align="CENTER"><font style="font-size: 8pt" size="1">2<br>
-                        5<br>
-                        8<br>
-                        11<br>
-                        14<br>
-                        17
-                         </font></p>
+			5<br>
+			8<br>
+			11<br>
+			14<br>
+			17
+			 </font></p>
 			</td>
 		</tr>
 		<tr>
@@ -3146,10 +3146,10 @@ for this job. For example, task id 10 is bound to CPU id 3 on node n1.
 </center>
 <br>
 <a name="Example15"></a>
-<h3>Example 15: Consumable resources with task affinity to optimize the performance of a multi-task, 
+<h3>Example 15: Consumable resources with task affinity to optimize the performance of a multi-task,
 multi-thread job</h3>
-<p>A job requires 9 CPUs (3 tasks and 3 CPUs per task with no overcommitment). Run 
-the job in the default partition, managing the CPUs to optimize the performance 
+<p>A job requires 9 CPUs (3 tasks and 3 CPUs per task with no overcommitment). Run
+the job in the default partition, managing the CPUs to optimize the performance
 of the job.</p>
 <p>slurm.conf options:</p>
 <pre>SelectType=select/cons_res
@@ -3161,18 +3161,18 @@ TaskPluginParam=sched
 <pre>srun --ntasks=3 --cpus-per-task=3 --ntasks-per-node=1 --cpu_bind=cores ...
 </pre>
 <p>Comments:</p>
-<p>To optimize the performance of this job, the user wishes to allocate 3 CPUs from each of 
-3 sockets and bind each task to the 3 CPUs in a single socket. The 
-<font face="Courier New, monospace">SelectTypeParameters</font> configuration option specifies 
-a consumable resource type of cores and block allocation within nodes. The 
-<font face="Courier New, monospace">TaskPlugin</font> 
-<font face="Courier New, monospace">and TaskPluginParam</font> 
-configuration options enable task affinity. The srun options specify the following 
-conditions for the job: 3 tasks, with 3 unique CPUs per task, with 1 task per node. To satisfy 
-these conditions, Slurm allocates 3 CPUs from one socket in each of the 3 nodes in the default partition. The 
-<font face="Courier New, monospace">--cpu_bind=cores</font> option causes Slurm to bind 
-each task to the 3 allocated CPUs on the node to which it is distributed. The 
-following table shows a possible pattern of allocation, distribution and binding 
+<p>To optimize the performance of this job, the user wishes to allocate 3 CPUs from each of
+3 sockets and bind each task to the 3 CPUs in a single socket. The
+<font face="Courier New, monospace">SelectTypeParameters</font> configuration option specifies
+a consumable resource type of cores and block allocation within nodes. The
+<font face="Courier New, monospace">TaskPlugin</font>
+<font face="Courier New, monospace">and TaskPluginParam</font>
+configuration options enable task affinity. The srun options specify the following
+conditions for the job: 3 tasks, with 3 unique CPUs per task, with 1 task per node. To satisfy
+these conditions, Slurm allocates 3 CPUs from one socket in each of the 3 nodes in the default partition. The
+<font face="Courier New, monospace">--cpu_bind=cores</font> option causes Slurm to bind
+each task to the 3 allocated CPUs on the node to which it is distributed. The
+following table shows a possible pattern of allocation, distribution and binding
 for this job. For example, task id 2 is bound to CPU ids 0, 1 and 2 on socket id 0 of node n2.
 </p>
 
@@ -3393,7 +3393,7 @@ for this job. For example, task id 2 is bound to CPU ids 0, 1 and 2 on socket id
 <br>
 <a name="Example16"></a>
 <h3>Example 16: Consumable resources with task cgroup and core binding</h3>
-<p>A job requires 6 CPUs (6 tasks with no overcommitment). Run the job in a 
+<p>A job requires 6 CPUs (6 tasks with no overcommitment). Run the job in a
 single node in the default partition. Apply core binding to each task using the task/cgroup plugin.
 </p>
 <p>slurm.conf options:</p>
@@ -3411,7 +3411,7 @@ TaskAffinity=yes
 <p>Comments:</p>
 <p>The task/cgroup plugin currently supports only the block method for
 allocating cores within nodes and distributing tasks to CPUs for binding.
-The following table shows a possible pattern of allocation, distribution 
+The following table shows a possible pattern of allocation, distribution
 and binding for this job. For example, task id 2 is bound to CPU id 2.
 </p><p>
 </p><center>
diff --git a/doc/html/cray.shtml b/doc/html/cray.shtml
index 5ed8b58b0..9258760b1 100644
--- a/doc/html/cray.shtml
+++ b/doc/html/cray.shtml
@@ -7,6 +7,7 @@
 <li><a href="#features">Cray Specific Features</a></li>
 <li><a href="#admin_guide">Administrator Guide</a></li>
 <li><a href="#setup">Cray System Setup</a></li>
+<li><a href="#ha">High Availability</a></li>
 <li><a href="http://www.cray.com">Cray</a></li>
 </ul>
 
@@ -59,19 +60,32 @@
   able to be used by other jobs using NPC, if idle their state will appear as
   PerfCnts.  These nodes are still available for other jobs not using NPC.
 </p>
+
 <li>Core Specialization</li>
 <p>
+  To use set <b><i>CoreSpecPlugin=core_spec/cray</i></b>.
   Ability to reserve a number of cores allocated to the job for system
   operations and not used by the application. The application will not
   use these cores, but will be charged for their allocation.
 </p>
 </ul>
+
 <h2><a name="admin_guide">Admin Guide</a></h2>
 <p>
   Many new plugins were added to utilize the Cray system without
   ALPS.  These should be set up in your slurm.conf outside of your
   normal configuration.
 <ul>
+
+<li>BurstBuffer</li>
+<p>
+  Set <b><i>BurstBufferPlugins=burst_buffer/cray</i></b> to use.
+  The burst buffer capability on Cray systems is also known by the name
+  <i>DataWarp</i>.
+  For more information, see
+  <a href="burst_buffer.html">Slurm Burst Buffer Guide</a>.
+</p>
+
 <li>CoreSpec</li>
 <p>
   To use set <b><i>CoreSpecPlugin=core_spec/cray</i></b>.
@@ -79,8 +93,8 @@
 
 <li>JobSubmit</li>
 <p>
-  To use set <b><i>JobSubmitPlugins=job_submit/cray</i></b>.
-  Primarily this plugin is used to set a gres=craynetwork value which
+  Set <b><i>JobSubmitPlugins=job_submit/cray</i></b> to use.
+  This plugin is primarily used to set a gres=craynetwork value which
   is used to limit the number of applications that can run on a node
   at once.  For a node without MICs on it that number at most is 4.
   Nodes with MICs the number drops to 2.  This craynetwork gres needs
@@ -94,13 +108,23 @@
     ...
   </pre>
 </p>
+
+<li>Power</li>
+<p>
+  Set <b><i>PowerPlugin=power/cray</i></b> to use.
+  <b><i>PowerParameters</i></b> is also typically configured.
+  For more information, see
+  <a href="power_mgmt.html">Slurm Power Management Guide</a>.
+</p>
+
 <li>Proctrack</li>
 <p>
-  To use set <b><i>ProctrackType=proctrack/cray</i></b>.
+  Set <b><i>ProctrackType=proctrack/cray</i></b> to use.
 </p>
+
 <li>Select</li>
 <p>
-  To use set <b><i>SelectType=select/cray</i></b>.  This plugin is
+  Set <b><i>SelectType=select/cray</i></b> to use.  This plugin is
   a layered plugin.  Which means it enhances a lower layer select
   plugin.  By default it is layered on top of the <i>select/linear</i>
   plugin.  It can also be layered on top of the <i>select/cons_res</i> plugin
@@ -111,13 +135,15 @@
   to allocate (e.g. cores, sockets, memory, etc.). See the slurm.conf man
   page for details.
 </p>
+
 <li>Switch</li>
 <p>
-  To use set <b><i>SwitchType=switch/cray</i></b>.
+  Set <b><i>SwitchType=switch/cray</i></b> to use.
 </p>
+
 <li>Task</li>
 <p>
-  To use set <b><i>TaskPlugin=cray</i></b>.
+  Set <b><i>TaskPlugin=cray</i></b> to use.
   It is advised to use this in conjunction with other task plugins
   such as the <i>task/cgroup</i> plugin.  This can be done in this
   manner, <b><i>TaskPlugin=cgroup,cray</i></b>, you can also
@@ -129,7 +155,13 @@
 </p>
 </ul>
 
-<h2><a name="setup">Cray system Setup</a></h2>
+<h2><a name="setup">Cray system setup</a></h2>
+<p>Some Slurm plugins (burst_buffer/cray and power/cray)
+parse JSON format data.
+These plugins are designed to make use of the JSON-C library for this purpose.
+See <a href="download.html#json">JSON-C installation instructions</a> for
+details.</p>
+
 <p>
   Some services on the system need to be set up to run correctly with
   Slurm.  Below is how to restart the service and the nodes they run
@@ -144,8 +176,9 @@
     <li>WLM_DETECT_ACTIVE=SLURM /etc/init.d/ncmd restart</li>
     <lI>WLM_DETECT_ACTIVE=SLURM /etc/init.d/apptermd restart</li>
   </ul>
+</ul>
 <p>
-  As with linux clusters you will need to start a slurmd on each of your
+  As with Linux clusters you will need to start a slurmd on each of your
   compute nodes.  If you choose to use munge authentication, advised,
   you will also need munge installed and a munged running on each of
   your compute nodes as well.  See the <a href="quickstart_admin.html">
@@ -154,8 +187,20 @@
   Slurm natively.
 </p>
 
+<h2><a name="ha">High Availability</a></h2>
+<p>
+A backup controller can be setup in or outside the Cray. However, when the
+backup is within the Cray, both the primary and the backup controllers will go
+down when the Cray is rebooted. It is best to setup the backup controller on a
+Cray external node so that the controller can still receive new jobs when the
+Cray is down.  When the backup is configured on an external node the
+<b><i>no_backup_scheduling</i></b> <b><i>SchedulerParameter</i></b> should be
+specified in the slurm.conf. This allows new jobs to be submitted while the Cray
+is down and prevents any new jobs from being started.
+</p>
+
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 5 April 2014</p>
+<p style="text-align:center;">Last modified 26 February 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/crypto_plugins.shtml b/doc/html/crypto_plugins.shtml
index 210a15c47..b020303c0 100644
--- a/doc/html/crypto_plugins.shtml
+++ b/doc/html/crypto_plugins.shtml
@@ -6,8 +6,7 @@
 <p> This document describe. Slurm cryptographic plugins and the API that
 defines them.
 It is intended as a resource to programmers wishing to write their own
-Slurm cryptographic plugins.
-This is version 0 of the API.</p>
+Slurm cryptographic plugins.</p>
 
 <p>Slurm cryptographic plugins are Slurm plugins that implement
 a digital signature mechanism.
@@ -32,11 +31,18 @@ We recommend, for example:</p>
 <li><b>openssl</b>&#151;Open SSL.</li>
 </ul></p>
 
-<p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span>
-symbols required by the Slurm Plugin API require no specialization for
-cryptographic support.
-Note carefully, however, the versioning discussion below.</p>
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
 
 <h2>Data Objects</h2>
 <p>The implementation must maintain (though not necessarily directly export) an
@@ -158,14 +164,8 @@ Size of the signature as returned in sig_size_p by crypto_sign().</p>
 On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
-
-<h2>Versioning</h2>
-<p> This document describes version 0 of the Slurm cryptographic API.
-Future releases of Slurm may revise this API.
-A cryptographic plugin conveys its ability to implement a particular
-API version using the mechanism outlined for Slurm plugins.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/dist_plane.shtml b/doc/html/dist_plane.shtml
index 4ed5e3c17..4b97d4820 100644
--- a/doc/html/dist_plane.shtml
+++ b/doc/html/dist_plane.shtml
@@ -51,23 +51,6 @@ following allocation of the task ids:
 
 <p class="footer"><a href="#top">top</a></p>
 
-<h2>Assumptions and Limitations</h2>
-
-<p>Slurm assumes that the number of tasks divided by the plane_size is
-greater or equal to the number of nodes specified.
-
-<p>In other words if the following is true Slurm will generate an
-error message:
-<pre>
-if ((n/plane_size < min_nodes) ((N-1)*plane_size >= n))
-            generate an error message: Too few processes for the
-	           requested {plane, node} distribution.
-
-where min_nodes is the minimum number of nodes requested for the job.
-</pre>
-
-<p class="footer"><a href="#top">top</a></p>
-
 <h2>Plane distribution and task affinity</h2>
 
 <p>The concept behind this distribution is to divide the clusters into
@@ -150,6 +133,6 @@ following allocation of the task ids:
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 1 April 2009</p>
+<p style="text-align:center;">Last modified 14 July 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/documentation.shtml b/doc/html/documentation.shtml
index 0fb542c1c..bc20b322e 100644
--- a/doc/html/documentation.shtml
+++ b/doc/html/documentation.shtml
@@ -37,6 +37,7 @@ Documentation for other versions of Slurm is distributed with the code</b></p>
 <ul>
 <li><a href="quickstart_admin.html">Quick Start Administrator Guide</a></li>
 <li><a href="accounting.html">Accounting</a></li>
+<li><a href="burst_buffer.html">Burst Buffer Guide</a></li>
 <li><a href="cgroups.html">Cgroups Guide</a></li>
 <li><a href="configurator.html">Configuration Tool (Full version)</a></li>
 <li><a href="configurator.easy.html">Configuration Tool (Simplified version)</a></li>
@@ -44,14 +45,14 @@ Documentation for other versions of Slurm is distributed with the code</b></p>
 <li><a href="nonstop.html">Failure Management Support</a></li>
 <li><a href="big_sys.html">Large Cluster Administration Guide</a></li>
 <li><a href="licenses.html">License Management</a></li>
-<li><a href="power_save.html">Power Saving Guide</a></li>
+<li><a href="power_mgmt.html">Power Management Guide (power capping)</a></li>
+<li><a href="power_save.html">Power Saving Guide (power down idle nodes)</a></li>
 <li><a href="prolog_epilog.html">Prolog and Epilog Guide</a></li>
 <li><a href="troubleshoot.html">Troubleshooting Guide</a></li>
 <li><a href="wckey.html">WCKey Management</a></li>
 <li>Workload Prioritization</li>
 <ul>
 <li><a href="priority_multifactor.html">Multifactor Job Priority</a></li>
-<li><a href="priority_multifactor2.html">Ticket-Based Multifactor Job Priority</a></li>
 <li><a href="priority_multifactor3.html">Depth-Oblivious Fair-share Factor</a></li>
 <li><a href="fair_tree.html">Fair Tree Fairshare Algorithm</a></li>
 </ul>
@@ -60,7 +61,6 @@ Documentation for other versions of Slurm is distributed with the code</b></p>
 <li><a href="sched_config.html">Scheduling Configuration Guide</a></li>
 <li><a href="cons_res.html">Consumable Resources Guide</a></li>
 <li><a href="core_spec.html">Core Specialization</a></li>
-<li><a href="dynalloc.html">Dynamic Resources Allocation (dynalloc)</a></li>
 <li><a href="elastic_computing.html">Elastic Computing</a></li>
 <li><a href="gang_scheduling.html">Gang Scheduling</a></li>
 <li><a href="gres.html">Generic Resource (GRES) Scheduling</a></li>
@@ -106,6 +106,7 @@ Documentation for other versions of Slurm is distributed with the code</b></p>
 <li>Plugin Interface Details</li>
 <ul>
 <li><a href="authplugins.html">Authentication Plugin Programmer Guide</a></li>
+<li><a href="bb_plugins.html">Burst Buffer Plugin Programmer Guide</a></li>
 <li><a href="core_spec_plugins.html">Core Specialization Plugin Programmer Guide</a></li>
 <li><a href="crypto_plugins.html">Cryptographic Plugin Programmer Guide</a></li>
 <li><a href="ext_sensorsplugins.html">External Sensors Plugin Programmer Guide</a></li>
@@ -119,6 +120,7 @@ Documentation for other versions of Slurm is distributed with the code</b></p>
 <li><a href="launch_plugins.html">Launch Plugin Programmer Guide</a></li>
 <li><a href="mpiplugins.html">MPI Plugin Programmer Guide</a></li>
 <li><a href="acct_gather_energy_plugins.html">Energy Accounting Plugin Programmer Guide</a></li>
+<li><a href="power_plugins.html">Power Management Plugin Programmer Guide</a></li>
 <li><a href="preemption_plugins.html">Preemption Plugin Programmer Guide</a></li>
 <li><a href="priority_plugins.html">Priority Plugin Programmer Guide</a></li>
 <li><a href="proctrack_plugins.html">Process Tracking Plugin Programmer Guide</a></li>
@@ -132,6 +134,6 @@ Documentation for other versions of Slurm is distributed with the code</b></p>
 </li>
 </ul>
 
-<p style="text-align:center;">Last modified 25 March 2015</p>
+<p style="text-align:center;">Last modified 25 August 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/download.shtml b/doc/html/download.shtml
index 579aacfd3..4e676c18b 100644
--- a/doc/html/download.shtml
+++ b/doc/html/download.shtml
@@ -50,8 +50,10 @@ for the application to manage Kerberos V credentials.</li>
 <ul>
 <li><a href="https://upc-bugs.lbl.gov/blcr/doc/html/">
 <b> BLCR (Berkeley Lab Checkpoint/Restart)</a></b>
-<li><a href="http://dmtcp.sourceforge.net//">
+<li><a href="http://dmtcp.sourceforge.net/">
 <b>DMTCP (Distributed MultiThreaded CheckPointing)</a></b>
+<li><a href="https://computation.llnl.gov/project/scr/">
+<b>Scalable Checkpoint/Restart (SCR) for MPI</a></b>
 </ul><br>
 
 <li><b>Databases</b> can be used to store accounting information.
@@ -359,10 +361,54 @@ easy and elegantly manner.
 <li><a href="mslurm/mslurm_overview.pdf">Overview</a></li>
 <li><a href="mslurm/mslurm_install_instructions.pdf">Installation Instructions</a></li>
 <li><a href="mslurm/mslurm.tgz">Code</a></li>
+</ul></li><br>
+
+<li><a name="json"><b>JSON</b></a><br>
+Some Slurm plugins (burst_buffer/cray and power/cray)
+parse JSON format data.
+These plugins are designed to make use of the JSON-C library for this purpose.
+Instructions for the build are as follows:
+<ol>
+<li>Download json-c version 0.12 (or higher) from<br>
+<a href="https://github.com/json-c/json-c/wiki">
+https://github.com/json-c/json-c/wiki</a></li>
+
+<li>Unpackage json-c<br>
+gunzip json-c-0.12.tar.gz<br>
+tar -xf json-c-0.12.tar</li>
+
+<li>Build and install json-c
 <ul>
+<li>If you have current build tools<br>
+cd json-c-0.12<br>
+./configure --prefix=DESIRED_PATH<br>
+make<br>
+make install</li>
+
+<li>If you have old build tools<br>
+cd json-c-0.12<br>
+mv aclocal.m4 aclocal.m4.orig<br>
+mv ltmain.sh ltmain.sh.orig<br>
+./autogen.sh<br>
+./configure --prefix=DESIRED_JSON_PATH<br>
+make<br>
+make install</li>
+</ul>
+
+<li>Build and install Slurm<br>
+./configure --with-json=DESIRED_JSON_PATH ...<br>
+make -j</li>
+</ol><br>
+
+<li><b>Slurm-web</b><br>
+Slurm-web is a free software, distributed under the GPL version 2 license,
+that provides both a HTTP REST API (based on JSON format) and a web GUI
+with dashboards and graphical views of the current state of your
+Slurm-based HPC supercomputers. The website of Slurm-web, with screenshots:<br>
+<a href="http://edf-hpc.github.io/slurm-web">http://edf-hpc.github.io/slurm-web</a></li>
 
 </ul>
 
-<p style="text-align:center;">Last modified 24 June 2015</p>
+<p style="text-align:center;">Last modified 27 July 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/ext_sensorsplugins.shtml b/doc/html/ext_sensorsplugins.shtml
index ff0f0b118..78299c430 100644
--- a/doc/html/ext_sensorsplugins.shtml
+++ b/doc/html/ext_sensorsplugins.shtml
@@ -1,6 +1,6 @@
 <!--#include virtual="header.txt"-->
 
-<h1><a name="top">Slurm External Sensors Plugin API (ExtSensorsType)
+<h1><a name="top">External Sensors Plugin API (ExtSensorsType)
 </a></h1>
 
 <h2> Overview</h2>
@@ -27,6 +27,16 @@ for the type of external sensors. We currently use
 <li><b>rrd</b>&#151;Gets external sensors data from the
 RRD database.
 </ul>
+
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
 <p>The programmer is urged to study
 <span class="commandline">src/plugins/ext_sensors/rrd</span> and
 <span class="commandline">src/common/slurm_ext_sensors.c</span>
@@ -118,16 +128,10 @@ plugin and the frequency at which to gather external sensors data.</p>
 <dd>Specifies which external sensors plugin should be used.
 <dt><span class="commandline">ExtSensorsFreq</span>
 <dd>Time interval between pollings in seconds.
-</dl>
-
-<h2>Versioning</h2>
-<p>This document describes version 1 of the Slurm External Sensors Plugin API.
-Future releases of Slurm may revise this API. A, energy accounting plugin
-conveys its ability to implement a particular API version using the mechanism
-outlined for Slurm plugins.</p>
+</dl>
 
 <p class="footer"><a href="#top">top</a>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/faq.shtml b/doc/html/faq.shtml
index 1be5266a3..611255c98 100644
--- a/doc/html/faq.shtml
+++ b/doc/html/faq.shtml
@@ -59,6 +59,8 @@
 <li><a href="#req">How can a job in complete or failed state be requeued?</a></li>
 <li><a href="#cpu_count">Slurm documentation refers to CPUs, cores and threads.
   What exactly is considered a CPU?</a></li>
+<li><a href="#sbatch_srun">What is the difference between the sbatch
+  and srun commands?</a></li>
 </ol>
 
 <h2>For Administrators</h2>
@@ -177,6 +179,10 @@ script for Slurm?</a></li>
 <li><a href="#delete_partition">How do I safely remove partitions?</a></li>
 <li><a href="#cpu_freq">Why is Slurm unable to set the CPU frequency for jobs?</a></li>
 <li><a href="#mic_config">How can Slurm be configured to support Intel Phi (MIC)?</a></li>
+<li><a href="#cluster_acct">When adding a new cluster, how can the Slurm cluster
+    configuration be copied from an existing cluster to the new cluster?</a></li>
+<li><a href="#cray_dvs">How can I update Slurm on a Cray DVS file system without
+rebooting the nodes?</a></li>
 </ol>
 
 <h2>For Management</h2>
@@ -1004,7 +1010,32 @@ resource allocation with respect to base boards, sockets, cores and threads.</p>
 <p>(<b>NOTE:</b> An exception to this would be if the system administrator
 configured SelectTypeParameters=CR_CPU and each node's CPU count without its
 socket/core/thread specification. In that case, each thread would be
-independently scheduled as a CPU. This is not a typical configuration.)
+independently scheduled as a CPU. This is not a typical configuration.)</p>
+
+<p><a name="sbatch_srun"><b>31. What is the difference between the sbatch
+  and srun commands?</b></a><br>
+The srun command has two different modes of operation. First, if not run within
+an existing job (i.e. not within a Slurm job allocation created by salloc or
+sbatch), then it will create a job allocation and spawn an application.
+If run within an existing allocation, the srun command only spawns the
+application.
+For this question, we will only address the first mode of operation and compare
+creating a job allocation using the sbatch and srun commands.</p>
+
+<p>The srun command is designed for interactive use, with someone monitoring
+the output.
+The output of the application is seen as output of the srun command,
+typically at the user's terminal.
+The sbatch command is designed to submit a script for later execution and its
+output is written to a file.
+Command options used in the job allocation are almost identical.
+The most noticeable difference in options is that the sbatch command supports
+the concept of <a href="job_array.html">job arrays</a>, while srun does not.
+Another significant difference is in fault tolerance.
+Failures involving sbatch jobs typically result in the job being requeued
+and executed again, while failures involving srun typically result in an
+error message being generated with the expectation that the user will respond
+in an appropriate fashion.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
@@ -1780,6 +1811,13 @@ slurm.conf file as shown below.</p>
 SallocDefaultCommand="srun -n1 -N1 --mem-per-cpu=0 --pty --preserve-env --mpi=none $SHELL"
 </pre>
 
+<p>
+For cray systems, add --gres=craynetwork:0 to the options.
+<pre>
+SallocDefaultCommand="srun -n1 -N1 --mem-per-cpu=0 --gres=craynetwork:0 --pty --preserve-env --mpi=none $SHELL"
+</pre>
+</p>
+
 <p><a name="upgrade"><b>50. What should I be aware of when upgrading Slurm?</b></a></br>
 See the Quick Start Administrator Guide <a href="quickstart_admin.html#upgrade">Upgrade</a>
 section for details.</p>
@@ -2015,8 +2053,42 @@ install the latest MPSS and Slurm packages from yum/zypper,
 add new MICs (via console utility or GUI),
 add MICs to Slurm queues if necessary, restart the host, use MICs via Slurm.</p>
 
+<p><a name="cluster_acct"><b>63. When adding a new cluster, how can the Slurm cluster
+    configuration be copied from an existing cluster to the new cluster?</b></a><br>
+Accounts need to be configured on the cluster. An easy way to copy information from
+an existing cluster is to use the sacctmgr command to dump that cluster's information,
+modify it using some editor, then load the new information using the sacctmgr
+command. See the sacctmgr man page for details, including an example.</p>
+
+<p><a name="cray_dvs"><b>64. How can I update Slurm on a Cray DVS file system
+   without rebooting the nodes?</b></a><br>
+The problem with DVS caching is related to the fact that the dereferenced value
+of /opt/slurm/default symlink is cached in the DVS attribute cache, and that
+cache is not dropped when the rest of the VM caches are.</p>
+
+<p>The Cray Native SLURM installation manual indicates that slurm should
+have a "default" symlink run through /etc/alternatives.
+As an alternative to that:
+<ol>
+<li>Institute a policy that all changes to files which could be open
+persistently (i.e., .so files) are always modified by creating a new access
+path.  I.e., installations go to a new directory.</li>
+<li>Dump the /etc/alternatives stuff, just use a regular symlink, e.g., default
+points to 15.8.0-1.</li>
+<li>Add a new mountpoint on all the compute nodes for /dsl/opt/slurm where the
+attrcache_timeout attribute is reduced from 14440s to 60s (or 15s -- whatever):<br>
+mount -t dvs /opt/slurm /dsl/opt/slurm -o<br>
+path=/dsl/opt/slurm,nodename=c0-0c0s0n0,loadbalance,cache,ro,attrcache_timeout=15<br>
+In the example above, c0-0c0s0n0 is the single DVS server for the system.</li>
+</ol>
+<p>Using this strategy avoids the caching problems, making upgrades simple.
+One just has to wait for about 20 seconds after changing the default symlinks
+before starting the slurmds again.</p>
+<p>(Information courtesy of Douglas Jacobsen, NERSC,
+Lawrence Berkeley National Laboratory)</p>
+
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 16 June 2015</p>
+<p style="text-align:center;">Last modified 25 August 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/gres_plugins.shtml b/doc/html/gres_plugins.shtml
index b0ff01cfa..71d7e6665 100644
--- a/doc/html/gres_plugins.shtml
+++ b/doc/html/gres_plugins.shtml
@@ -12,14 +12,9 @@ Slurm Plugin API with the following specifications:
 <p><span class="commandline">const char
 gres_name[]="<i>gres_name</i>"</span><br>
 <p style="margin-left:.2in">
-The <i>gres_name</i> should matc <i>minor</i> in <i>plugin_version</i>
+The <i>gres_name</i> should match <i>minor</i> in <i>plugin_type</i>
 described below.</p>
 
-<p><span class="commandline">const char
-plugin_name[]="<i>full&nbsp;text&nbsp;name</i>"</span>
-<p style="margin-left:.2in">
-A free-formatted ASCII text string that identifies the plugin.
-
 <p><span class="commandline">const char
 plugin_type[]="<i>major/minor</i>"</span><br>
 <p style="margin-left:.2in">
@@ -27,10 +22,17 @@ The major type must be &quot;gres.&quot;
 The minor type can be any suitable name
 for the type of accounting package.</p>
 
-<p><span class="commandline">const uint32_t
-plugin_version="<i>version_number</i>"</span><br>
-<p style="margin-left:.2in">
-The version number should be 120.</p>
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
 
 <p>We include samples in the Slurm distribution for
 <ul>
@@ -171,11 +173,8 @@ Data type depends upon the value of gres_step_data_type data_type.<br>
 <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
 <span class="commandline">SLURM_ERROR</span> on failure.
 
-<h2>Versioning</h2>
-<p> This document describes version 120 of the Slurm Generic Resource API.
-Future releases of Slurm may revise this API.
 <p class="footer"><a href="#top">top</a>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/high_throughput.shtml b/doc/html/high_throughput.shtml
index 3dc312a82..618efe3ec 100644
--- a/doc/html/high_throughput.shtml
+++ b/doc/html/high_throughput.shtml
@@ -64,7 +64,7 @@ site with a very large cluster
 <h2>Munge configuration</h2>
 
 <p>By default the Munge daemon runs with two threads, but a higher thread count
-can improve its throughput. We suggest startint the Munge daemon with ten
+can improve its throughput. We suggest starting the Munge daemon with ten
 threads for high throughput support (e.g. <i>"munged --num-threads 10"</i>).</p>
 
 <h2>User limits</h2>
diff --git a/doc/html/job_container_plugins.shtml b/doc/html/job_container_plugins.shtml
index fc0e30b24..f1f185dc1 100644
--- a/doc/html/job_container_plugins.shtml
+++ b/doc/html/job_container_plugins.shtml
@@ -1,6 +1,6 @@
 <!--#include virtual="header.txt"-->
 
-<h1><a name="top">Slurm Job Container Plugin API</a></h1>
+<h1><a name="top">Job Container Plugin API</a></h1>
 
 <h2> Overview</h2>
 <p> This document describes Slurm job container plugins and the API
@@ -10,8 +10,7 @@ own Slurm job container plugins.
 Note that job container plugin is designed for use with Slurm jobs.
 It also applies to the sbcast server process on compute nodes.
 There is a <a href="proctrack_plugins.html">proctrack plugin</a>
-designed for use with Slurm job steps.
-This is version 101 of the API.</p>
+designed for use with Slurm job steps.</p>
 
 <p>Slurm job container plugins are Slurm plugins that implement
 the Slurm job container API described herein.
@@ -28,10 +27,17 @@ Compute Node Clean Up (CNCU) the Cray infrastructure.</li>
 <li><b>none</b>&#151;Designed for all other systems.</li>
 </ul>
 
-<p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span> symbols required
-by the Slurm Plugin API require no specialization for process tracking.
-Note carefully, however, the versioning discussion below.</p>
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
 
 <p>The programmer is urged to study
 <span class="commandline">src/plugins/proctrack/job_container/job_container_cncu.c</span>
@@ -134,14 +140,8 @@ to indicate the reason for failure.</p>
 <p style="margin-left:.2in"><b>Description</b>: Note change in configuration,
 especially the value of the DebugFlags with respect to JobContainer.</p>
 
-<h2>Versioning</h2>
-<p> This document describes version 101 of the Slurm job container API.
-Future releases of Slurm may revise this API. A job container plugin
-conveys its ability to implement a particular API version using the
-mechanism outlined for Slurm plugins.</p>
-
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/job_launch.shtml b/doc/html/job_launch.shtml
index df7edbf81..71222d481 100644
--- a/doc/html/job_launch.shtml
+++ b/doc/html/job_launch.shtml
@@ -37,12 +37,12 @@ following:</p>
 some of which involve plugins. The process is as follows:</p>
 <ol>
 <li>Call <b>job_submit</b> plugins to modify the request as appropriate</li>
-<li>Validate the the options are valid for this user (e.g. valid partition
+<li>Validate that the options are valid for this user (e.g. valid partition
 name, valid limits, etc.)</li>
 <li>Determine if this job is the highest priority runnable job, if so then
 really try to allocate resources for it now, otherwise only validate that it
 could run if no other jobs existed</li>
-<li>Determine which nodes could be used for the job. This if the feature
+<li>Determine which nodes could be used for the job. If the feature
 specification uses an exclusive OR option, then multiple iterations of the
 selection process below will be required with disjoint sets of nodes</li>
 <li>Call the <b>select</b> plugin to select the best resources for the request</li>
diff --git a/doc/html/job_submit_plugins.shtml b/doc/html/job_submit_plugins.shtml
index 4e9bbb4b4..d355b4060 100644
--- a/doc/html/job_submit_plugins.shtml
+++ b/doc/html/job_submit_plugins.shtml
@@ -1,6 +1,6 @@
 <!--#include virtual="header.txt"-->
 
-<h1><a name="top">Slurm Job Submit Plugin API</a></h1>
+<h1><a name="top">Job Submit Plugin API</a></h1>
 
 <h2> Overview</h2>
 <p> This document describes Slurm job submit plugins and the API that
@@ -36,6 +36,16 @@ the same location as the Slurm configuration file, <i>slurm.conf</i>.</li>
 <li><b>partition</b>&#151;Sets a job's default partition based upon job
 submission parameters and available partitions.</li>
 </ul>
+
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loadeed by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
 <p>Slurm can be configured to use multiple job_submit plugins if desired,
 however the lua plugin will only execute one lua script named "job_submit.lua"
 located in the default script directory (typically the subdirectory "etc" of
@@ -117,7 +127,13 @@ be modified.<br>
 better ease of use. Sample Lua scripts can be found with the Slurm distribution
 in the directory <i>contribs/lua</i>. The default installation location of
 the Lua scripts is the same location as the Slurm configuration file,
-<i>slurm.conf</i>.</p>
+<i>slurm.conf</i>.
+Reading and writing of job environment variables using Lua is possible
+by referencing the environment variables as a data structure containing
+named elements. For example:<br>
+if (job_desc.environment.LANGUAGE == "en_US") then<br>
+....</p>
+
 
 <p class="commandline">
 int job_submit(struct job_descriptor *job_desc, List part_list, uint32_t submit_uid)
@@ -164,9 +180,6 @@ errno on failure. Slurm specific error numbers from <i>slurm/slurm_errno.h</i>
 may be used. On failure, the request will be rejected and the user will have an
 appropriate error message printed for that errno. 
 
-<h2>Versioning</h2>
-<p> This document describes version 110 of the Slurm Job Submission API. Future
-releases of Slurm may revise this API.
 <p class="footer"><a href="#top">top</a>
 
 <h2>Building</h2>
@@ -250,6 +263,6 @@ cp job_submit_mine.so file \
    /usr/local/lib/slurm/job_submit_mine.so
 </pre>
 
-<p style="text-align:center;">Last modified 25 February 2015</p>
+<p style="text-align:center;">Last modified 13 April 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/jobacct_gatherplugins.shtml b/doc/html/jobacct_gatherplugins.shtml
index c80fe623a..8e30c5ff1 100644
--- a/doc/html/jobacct_gatherplugins.shtml
+++ b/doc/html/jobacct_gatherplugins.shtml
@@ -32,8 +32,18 @@ information also gathered for each job. (Experimental, not to be used
 information to the standard rusage information also gathered for each job.
 <li><b>none</b>&#151;No information gathered.
 </ul>
-The <b>sacct</b> program can be used to display gathered data from regular
-accounting and from these plugins.
+
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
+<p>The <b>sacct</b> program can be used to display gathered data from regular
+accounting and from these plugins.</p>
 <p>The programmer is urged to study
 <span class="commandline">src/plugins/jobacct_gather/linux</span> and
 <span class="commandline">src/common/slurm_jobacct_gather.[c|h]</span>
@@ -304,14 +314,8 @@ plugin and the frequency at which to gather information about running jobs.</p>
 <dd>Time interval between pollings in seconds.
 </dl>
 
-<h2>Versioning</h2>
-<p> This document describes version 2 of the Slurm Job Accounting Gather API. Future
-releases of Slurm may revise this API. A job accounting gather plugin conveys its
-ability to implement a particular API version using the mechanism outlined
-for Slurm plugins.</p>
-
 <p class="footer"><a href="#top">top</a>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/jobcompplugins.shtml b/doc/html/jobcompplugins.shtml
index 9e444fcf7..f4e316d76 100644
--- a/doc/html/jobcompplugins.shtml
+++ b/doc/html/jobcompplugins.shtml
@@ -5,7 +5,7 @@
 <h2> Overview</h2>
 <p>This document describes Slurm job completion logging plugins and the API that defines
 them. It is intended as a resource to programmers wishing to write their own SLURM
-job completion logging plugins. This is version 100 of the API.</p>
+job completion logging plugins.</p>
 <p>Slurm job completion logging plugins are Slurm plugins that implement the SLURM
 API for logging job information upon their completion. This may be used to log job information
 to a text file, database, etc. The plugins must conform to the Slurm Plugin API with the following
@@ -19,13 +19,19 @@ abbreviation for the type of scheduler. We recommend, for example:</p>
 <li><b>mysql</b>&#151;Job completion is written to a mysql database.</li>
 <li><b>script</b>&#151;Execute a script passing in job information in environment variables.</li>
 </ul>
-The <b>sacct</b> program with option <b>-c</b> can be used to display
-gathered data from database and filetxt plugins.
-<p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span>
-symbols required by the Slurm Plugin API require no specialization for
-job completion logging support.
-Note carefully, however, the versioning discussion below.</p>
+
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
 <p>The programmer is urged to study
 <span class="commandline">src/plugins/jobcomp/filetxt/jobcomp_filetxt.c</span> and
 <span class="commandline">src/plugins/jobcomp/none/jobcomp_none.c</span>
@@ -145,13 +151,8 @@ void slurm_jobcomp_archive(List selected_parts, void *params)
 <p style="margin-left:.2in"><b>Returns</b>: None</p>
 <p class="footer"><a href="#top">top</a></p>
 
-
-<h2>Versioning</h2>
-<p> This document describes version 100 of the Slurm job completion API. Future
-releases of Slurm may revise this API. A job completion plugin conveys its ability
-to implement a particular API version using the mechanism outlined for Slurm plugins.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/launch_plugins.shtml b/doc/html/launch_plugins.shtml
index aa47d902c..4d95b9c9c 100644
--- a/doc/html/launch_plugins.shtml
+++ b/doc/html/launch_plugins.shtml
@@ -25,6 +25,16 @@ systems with ALPS installed.</li>
   BlueGene/Q systems.</li>
 <li><b>slurm</b>&#151;Use Slurm's default launching infrastructure</li>
 </ul>
+
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
 <p>The programmer is urged to study
 <span class="commandline">src/plugins/launch/slurm/launch_slurm.c</span>
 for a sample implementation of a Slurm launch plugin.
@@ -133,6 +143,6 @@ The system <span class="commandline">_init()</span> is called before the Slurm
 
 <p class="footer"><a href="#top">top</a>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/man_index.shtml b/doc/html/man_index.shtml
index 10fe44a47..bcac4167e 100644
--- a/doc/html/man_index.shtml
+++ b/doc/html/man_index.shtml
@@ -5,6 +5,7 @@
 <p><b>NOTE: This documentation is for Slurm version @SLURM_VERSION@.<br>
 Documentation for other versions of Slurm is distributed with the code</b></p>
 
+<h2>Commands</h2>
 <table border="1">
 <tr><td><a href="sacct.html">sacct</a></td><td>displays accounting data for all jobs and job steps in the Slurm job accounting log or Slurm database</td></tr>
 <tr><td><a href="sacctmgr.html">sacctmgr</a></td><td>Used to view and modify Slurm account information.</td></tr>
@@ -29,8 +30,13 @@ Documentation for other versions of Slurm is distributed with the code</b></p>
 <tr><td><a href="sstat.html">sstat</a></td><td>Display various status information of a running job/step.</td></tr>
 <tr><td><a href="strigger.html">strigger</a></td><td>Used set, get or clear Slurm trigger information.</td></tr>
 <tr><td><a href="sview.html">sview</a></td><td>graphical user interface to view and modif. Slurm state.</td></tr>
+</table>
+
+<h2>Configuration Files</h2>
+<table border="1">
 <tr><td><a href="acct_gather.conf.html">acct_gather.conf</a></td><td>Slurm configuration file for the acct_gather plugins</td></tr>
 <tr><td><a href="bluegene.conf.html">bluegene.conf</a></td><td>Slurm configuration file for BlueGene systems</td></tr>
+<tr><td><a href="burst_buffer.conf.html">burst_buffer.conf</a></td><td>Slurm burst buffer configuration</td></tr>
 <tr><td><a href="cgroup.conf.html">cgroup.conf</a></td><td>Slurm configuration file for the cgroup support</td></tr>
 <tr><td><a href="cray.conf.html">cray.conf</a></td><td>Slurm configuration file Cray systems.</td></tr>
 <tr><td><a href="ext_sensors.conf.html">ext_sensors.conf</a></td><td>Slurm configuration file for the external sensor support</td></tr>
@@ -40,6 +46,10 @@ Documentation for other versions of Slurm is distributed with the code</b></p>
 <tr><td><a href="slurmdbd.conf.html">slurmdbd.conf</a></td><td>Slurm Database Daemon (SlurmDBD) configuration file</td></tr>
 <tr><td><a href="topology.conf.html">topology.conf</a></td><td>Slurm configuration file for defining the network topology</td></tr>
 <tr><td><a href="wiki.conf.html">wiki.conf</a></td><td>Slurm configuration file for wiki and wiki2 scheduler plugins</td></tr>
+</table>
+
+<h2>Daemons and Other</h2>
+<table border="1">
 <tr><td><a href="slurmctld.html">slurmctld</a></td><td>The central management daemon of Slurm.</td></tr>
 <tr><td><a href="slurmd.html">slurmd</a></td><td>The compute node daemon for Slurm.</td></tr>
 <tr><td><a href="slurmdbd.html">slurmdbd</a></td><td>Slurm Database Daemon.</td></tr>
diff --git a/doc/html/mpiplugins.shtml b/doc/html/mpiplugins.shtml
index dbac6a92e..7e1304736 100644
--- a/doc/html/mpiplugins.shtml
+++ b/doc/html/mpiplugins.shtml
@@ -5,11 +5,11 @@
 <h2> Overview</h2>
 <p> This document describes Slurm MPI selection plugins and the API that defines
 them. It is intended as a resource to programmers wishing to write their own SLURM
-node selection plugins. This is version 0 of the API.</p>
+node selection plugins.</p>
 
-<p>Slurm mpi selection plugins are Slurm plugins that implement the which version of
+<p>Slurm MPI selection plugins are Slurm plugins that implement the which version of
 mpi is used during execution of the new Slurm job. API described herein. They are
-intended to provide a mechanism for both selecting mpi versions for pending jobs and
+intended to provide a mechanism for both selecting MPI versions for pending jobs and
 performing any mpi-specific tasks for job launch or termination. The plugins must
 conform to the Slurm Plugin API with the following specifications:</p>
 <p><span class="commandline">const char plugin_type[]</span><br>
@@ -26,10 +26,17 @@ abbreviation for the type of node selection algorithm. We recommend, for example
 <li><b>pmi2</b>&#151;For use with MPI2 and MVAPICH2.</li>
 <li><b>none</b>&#151;For use with most other versions of MPI.</li>
 </ul>
-<p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span>
-symbols required by the Slurm Plugin API require no specialization for node selection support.
-Note carefully, however, the versioning discussion below.</p>
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
 
 <p>A simplified flow of logic follows:
 <br>
@@ -137,16 +144,6 @@ the plugin should return SLURM_ERROR, causing slurmctld to exit.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
-<h2>Versioning</h2>
-<p> This document describes version 0 of the Slurm node selection API. Future
-releases of Slurm may revise this API. A node selection plugin conveys its ability
-to implement a particular API version using the mechanism outlined for Slurm plugins.
-In addition, the credential is transmitted along with the version number of the
-plugin that transmitted it. It is at the discretion of the plugin author whether
-to maintain data format compatibility across different versions of the plugin.</p>
-
-<p class="footer"><a href="#top">top</a></p>
-
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/plugins.shtml b/doc/html/plugins.shtml
index 415007eed..eaf0d0759 100644
--- a/doc/html/plugins.shtml
+++ b/doc/html/plugins.shtml
@@ -14,27 +14,25 @@ from other plugins that implement that same API, by such means as the intended
 platform or the internal algorithm. For example, a plugin to interface to the
 Maui scheduler would give its type as &quot;sched/maui.&quot; It would implement
 the Slurm Scheduler API.</p>
+
 <h2>Versioning</h2>
-<p>Slurm plugin version numbers comprise a major and minor revision number. As
-Slurm evolves, changes to the individual plugin APIs may be necessary to implement
-new features. The major number identifies the version of the applicable API that
-the plugin implements. Incrementing the major version number denotes that the
-API has changed significantly and possibly incompatibly over prior versions.</p>
-<p>Because plugins are separate code objects and perhaps under the control of
-third parties, version skew may occur in a Slurm installation. Slurm may support
-multiple versions of each API in a backward-compatible fashion to provide time
-for plugin authors to update their plugins. Conversely, the plugin may support
-multiple versions of the API in order to be transparently portable across different
-Slurm installations. The version of the API spoken in an installation will be
-the highest-numbered version which is common to both Slurm and the plugin. Each
-Slurm release will document which API versions it supports. From time to time
-ancient API versions will be deprecated.</p>
-<p>The minor version number is incremented at the discretion of the plugin author
-and denotes revisions or upgrades particular to that implementation. If two or
-more plugins of the same type are provided in an installation, the plugin with
-the highest minor revision will be selected.</p>
+<p>Slurm plugin version numbers comprise a major, minor and micro revision number.
+If the major and/or minor revision number changes, this indicates major changes
+to the Slurm functionality including changes to APIs, command options, and
+plugins.
+These plugin changes may include new functions and/or function arguments.
+If only the micro revision number changes, this is indicative of bug fixes
+and possibly minor enhancements which should not adversely impact users.
+In all cases, rebuilding and installing all Slurm plugins is recommended
+at upgrade time.
+Not all compute nodes in a cluster need be updated at the same time, but
+all Slurm APIs, commands, plugins, etc. on a compute node should represent
+the same version of Slurm.</p>
+
 <p class="footer"><a href="#top">top</a></p>
+
 <h2>Data Objects</h2>
+
 <p>A plugin must define and export the following symbols:</p>
 <ul>
 <li><span class="commandline">char plugin_type[]<br>
@@ -45,21 +43,17 @@ API as stubs) should have a minor type of &quot;none.&quot;</li>
 </span> a free-form string that identifies the plugin in human-readable terms,
 such as &quot;Kerberos authentication.&quot; Slurm will use this string to identify
 the plugin to end users.</li>
-<li><span class="commandline">const uint32_t plugin_version</span><br>
-a 32-bit unsigned integer giving the version of the plugin as described above.
-The major revision number is multiplied by 1,000 and added to the minor revision
-number to produce the integer value. Thus, a plugin with a major revision number
-of 2 and a minor revision number of 35 will have a <span class="commandline">plugin_version</span>
-value of 2035.</li>
 </ul>
 <p>A plugin may optionally define and export the following symbols:</p>
 <ul>
-<li>const uint32_t min_plug_version<br>
-a 32-bit unsigned integer formatted the same as <span class="commandline">plugin_version</span>
-giving the lowest API version number with which this plugin is compatible. If
-this symbol is omitted, its value is assumed to be equivalent to the <span class="commandline">plugin_version</span>
-rounded to the next lowest 1,000. Only the major version number of this symbol
-is significant.</li>
+<li>const uint32_t plugin_version<br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
 </ul>
 <p class="footer"><a href="#top">top</a></p>
 
@@ -112,6 +106,6 @@ utilize a thread for that functionality. This thread may be created by the
 for an example of how to do this.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 9 October 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/power_mgmt.shtml b/doc/html/power_mgmt.shtml
new file mode 100644
index 000000000..ded4c2c93
--- /dev/null
+++ b/doc/html/power_mgmt.shtml
@@ -0,0 +1,234 @@
+<!--#include virtual="header.txt"-->
+
+<h1>Slurm Power Management Guide</h1>
+
+<p>Slurm provides an integrated power management system for power capping.
+The mode of operation is to take the configured power cap for the system and
+distribute it across the compute nodes under Slurm control.
+Initially that power is distributed evenly across all compute nodes.
+Slurm then monitors actual power consumption and redistributes power as appropriate.
+Specifically, Slurm lowers the power caps on nodes using less than their cap
+and redistributes that power across the other nodes.
+The thresholds at which a node's power cap is raised or lowered are configurable,
+as are the rates of change of the power cap.
+In addition, starting a job on a node immediately triggers resetting the node's
+power cap to a higher level.
+Note this functionality is distinct from Slurm's ability to
+<a href="power_save.html">power down idle nodes</a>.</p>
+
+<h2>Configuration</h2>
+
+<p>The following configuration parameters are available:
+<ul>
+
+<li><b>DebugFlags=power</b>:
+Enable plugin-specific logging messages.</li>
+
+<li><b>PowerParameters</b>:
+Defines power management behavior.
+Changes to this value take effect when the Slurm daemons are reconfigured.
+Currently valid options are:
+<ul>
+<li><b>balance_interval=#</b> -
+  Specifies the time interval, in seconds, between attempts to balance power
+  caps across the nodes.
+  This also controls the frequency at which Slurm attempts to collect current
+  power consumption data (old data may be used until new data is available from
+  the underlying infrastructure and values below 10 seconds are not recommended
+  for Cray systems).
+  The default value is 30 seconds.
+  Supported by the power/cray plugin.</li>
+<li><b>capmc_path=/...</b> -
+  Specifies the absolute path of the <b>capmc</b> command.
+  The default value is "/opt/cray/capmc/default/bin/capmc".
+  Supported by the power/cray plugin.</li>
+<li><b>cap_watts=#[KW|MW]</b> -
+  Specifies the total power limit to be established across all compute nodes
+  managed by Slurm.
+  A value of 0 sets every compute node to have an unlimited cap.
+  The default value is 0.
+  Supported by the power/cray plugin.</li>
+<li><b>decrease_rate=#</b> -
+  Specifies the maximum rate of change in the power cap for a node where the
+  actual power usage is below the power cap by an amount greater than
+  lower_threshold (see below).
+  Value represents a percentage of the difference between a node's minimum and
+  maximum power consumption.
+  The default value is 50 percent.
+  Supported by the power/cray plugin.</li>
+<li><b>increase_rate=#</b> -
+  Specifies the maximum rate of change in the power cap for a node where the
+  actual power usage is within upper_threshold (see below) of the power cap.
+  Value represents a percentage of the difference between a node's minimum and
+  maximum power consumption.
+  The default value is 20 percent.
+  Supported by the power/cray plugin.</li>
+<li><b>job_level</b> -
+  All compute nodes associated with every job will be assigned the same power
+  cap.
+  Nodes shared by multiple jobs will have a power cap different from other
+  nodes allocated to the individual jobs.
+  By default, this is configurable by the user for each job.</li>
+<li><b>job_no_level</b> -
+  Power caps are established independently for each compute node.
+  This disables the "--power=level" option available in the job submission
+  commands.
+  By default, this is configurable by the user for each job.</li>
+<li><b>lower_threshold=#</b> -
+  Specify a lower power consumption threshold.
+  If a node's current power consumption is below this percentage of its current
+  cap, then its power cap will be reduced.
+  The default value is 90 percent.
+  Supported by the power/cray plugin.</li>
+<li><b>recent_job=#</b> -
+  If a job has started or resumed execution (from suspend) on a compute node
+  within this number of seconds from the current time, the node's power cap will
+  be increased to the maximum.
+  The default value is 300 seconds.
+  Supported by the power/cray plugin.</li>
+<li><b>set_watts=#</b> -
+  Specifies the power limit to be set on every compute node managed by Slurm.
+  Every node gets this same power cap and there is no variation through time
+  based upon actual power usage on the node.
+  Supported by the power/cray plugin.</li>
+<li><b>upper_threshold=#</b> -
+  Specify an upper power consumption threshold.
+  If a node's current power consumption is above this percentage of its current
+  cap, then its power cap will be increased to the extent possible.
+  A node's power cap will also be increased if a job is newly started on it.
+  The default value is 95 percent.
+  Supported by the power/cray plugin.</li>
+</ul></li>
+
+<li><b>PowerPlugin</b>:
+Identifies the plugin used to manage system power consumption.
+Changes to this value require restarting Slurm daemons to take effect.
+By default, no power plugin is loaded.
+Currently valid options are:
+<ul>
+<li><b>power/cray</b> -
+   Used for Cray systems with power monitoring and management
+   functionality included as part of System Management Workstation (SMW)
+   7.0.UP03.</li>
+<li><b>power/none</b> - No power management support.</li>
+</ul></li>
+</ul>
+
+<p><b>Note for Cray systems:</b> The JSON-C library must be installed in order
+to build Slurm's power/cray plugin, which must parse JSON format data.
+See Slurm's <a href="download.html#json">JSON installation information</a>
+for details.</p>
+
+<p><b>Note for Cray systems:</b> Power management is provided for native
+Slurm configurations (i.e. without the ALPS resource manager).</p>
+
+<p><b>Note for Cray systems:</b> Use of the capmc command requires either 
+specifying its absolute path ("/opt/cray/capmc/default/bin/capmc" by default)
+or loading the capmc module:</p>
+<pre>
+$ module load capmc
+</pre>
+
+<h2>User and System Administrator Commands</h2>
+
+<p>Equal power caps for all nodes allocated to a job can be requested at job
+submission time by using the "--power=level" option with the salloc, sbatch
+or srun command.
+The system administrator can override the user option with the PowerParameters
+configuration parameter and the job_level or job_no_level option.</p>
+
+<p>Specific minimum and maximum CPU frequency in addition to CPU governor may
+be requested at job submit time using the "--cpu-freq" option  with the salloc,
+sbatch or srun command. The frequency requested may be "low", "medium",
+"highm1" (second highest available frequency), "high" or a specific frequency
+(expressed as a KHz value). The governor specification may be "conservative",
+"ondemand", "performance" or "powersave". These values are user requests
+subject to system constraints. Some examples follow.</p>
+<pre>
+$ sbatch --cpu-freq=2400000-3000000 ...
+$ salloc --cpu-freq=powersave ...
+$ srun --cpu-freq=highm1 ...
+</pre>
+
+<p>The power consumption and power cap data are available for all compute nodes
+using either the "scontrol show node" or sview commands.
+Information available includes "CurrentWatts" and "CapWatts".</p>
+
+<h2>Example</h2>
+
+<h3>Initial State</h3>
+<p>In our example, assume the following configuration:
+10 compute node cluster, where each node has a minimum power consumption of 100 watts
+and maximum power consumption of 200 watts.
+The following values for PowerParameters:
+balance_interval=60,
+cap_watts=1800,
+decrease_rate=30, increase_rate=10,
+lower_threshold=90, upper_threshold=98.
+The initial state is simply based upon the cap_watts divided by the number of
+compute nodes: 1800 watts / 10 nodes = 180 watts per node.</p>
+
+<h3>State in 60 Seconds</h3>
+<p>The power consumption is then examined balance_interval (60) seconds later.
+Assume that one of those nodes is consuming 110 watts and the others are
+using 180 watts.
+First we identify which nodes are consuming less than their lower_threshold
+of the power cap: 90% x 180 watts = 162 watts.
+One node falls in this category with 110 watts of power consumption.
+Its power cap is reduced by either half of the difference between its current
+power cap and power consumption ((180 watts - 110 watts) / 2 = 35 watts) OR
+decrease_rate, which is a percentage of the difference between the node's
+maximum and minimum power consumption ((200 watts - 100 watts) x 30% = 30 watts).
+So that node's power cap is reduced from 180 watts to 150 watts.
+Ignoring the upper_threshold parameter for now, we now have 1650 watts available
+to distribute to the remaining 9 compute nodes, or 183 watts per node
+(1650 watts / 9 nodes = 183 watts per node).</p>
+
+<h3>State in 120 Seconds</h3>
+<p>The power consumption is then examined balance_interval (60) seconds later.
+Assume that one of those nodes is still consuming 110 watts, a second node is
+consuming 115 watts and the other eight are using 183 watts.
+First we identify which nodes are consuming less than their lower_threshold.
+Our node using 110 watts has its cap reduced by half of the difference between
+its current power cap and power consumption
+((150 watts - 110 watts) / 2 = 20 watts);
+so that node's power cap is reduced from 150 watts to 130 watts.
+The node consuming 115 watts has its power cap reduced by 30 watts based
+decrease_rate; so that node's power cap is reduced from 183 watts to 153 watts.
+That leaves 1517 watts (1800 watts - 130 watts - 153 watts = 1517 watts) to
+be distributed over 8 nodes or 189 watts per node.</p>
+
+<h3>State in 180 Seconds</h3>
+<p>The power consumption is then examined balance_interval (60) seconds later.
+Assume the node previously consuming 110 watts is now consuming 128 watts.
+Since that is over upper_threshold of its power cap
+(98% x 130 watts = 127 watts), its power cap is increased by increase_rate
+((200 watts - 100 watts) x 10% = 10 watts), so its power cap goes from
+130 watts to 140 watts.
+Assume the node previously consuming 115 watts has been allocated a new job.
+This triggers the node to be allocated the same power cap as nodes previously
+running at their power cap.
+Therefore we have 1660 watts available (1800 watts - 140 watts = 1660 watts)
+to be distributed over 9 nodes or 184 watts per node.</p>
+
+<h2>Notes</h2>
+<ul>
+<li>Slurm's power management plugin can be used in conjunction with the
+  <a href="power_save.html">power save mode</a>, where idle nodes are powered
+  down and then powered back up as needed. On a Cray system, set each node's
+  power cap to the minimum value before powering it down. Also set the default
+  power cap of each node to the minimum value as that will be used at power up
+  time.</li>
+<li>Cray permits independent power capping for accelerators (GPUs or MICs),
+  which is not currently used by Slurm.</li>
+<li>Current default values for configuration parameters should probably be
+  changed once we have a better understanding of the algorithm's behavior.</li>
+<li>No integration of this logic with gang scheduling currently exists.
+  It is not clear that configuration is practical to support as gang scheduling
+  time slices will typically be smaller than the power management
+  balance_interval and synchronizing changes may be difficult.</li>
+</ul>
+
+<p style="text-align:center;">Last modified 5 May 2015</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/power_plugins.shtml b/doc/html/power_plugins.shtml
new file mode 100644
index 000000000..e3407490e
--- /dev/null
+++ b/doc/html/power_plugins.shtml
@@ -0,0 +1,96 @@
+<!--#include virtual="header.txt"-->
+
+<h1><a name="top">Power Management Plugin Programmer Guide</a></h1>
+
+<h2> Overview</h2>
+<p> This document describes the Slurm power management plugins and the APIs that
+defines them. It is intended as a resource to programmers wishing to write
+their own Slurm power management plugin. This is version 100 of the API.
+
+<p>Slurm power management plugins must conform to the
+Slurm Plugin API with the following specifications:
+
+<p><span class="commandline">const char
+plugin_name[]="<i>full&nbsp;text&nbsp;name</i>"</span>
+<p style="margin-left:.2in">
+A free-formatted ASCII text string that identifies the plugin.
+
+<p><span class="commandline">const char
+plugin_type[]="<i>major/minor</i>"</span><br>
+<p style="margin-left:.2in">
+The major type must be &quot;power&quot;.
+The minor type can be any suitable name for the type of power management
+package.
+The following power management plugins are included in the Slurm distribution
+<ul>
+<li><b>cray</b>&#151;Use Cray APIs to provide power management.</li>
+<li><b>none</b>&#151;Can be configured to log calls to its functions, but
+otherwise does nothing.</li>
+</ul>
+
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
+<p>Slurm can be configured to use multiple power management plugins if desired.</p>
+
+<p class="footer"><a href="#top">top</a>
+
+<h2>API Functions</h2>
+<p>All of the following functions are required. Functions which are not
+implemented must be stubbed.
+
+<p class="commandline"> int init (void)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+  Called when the plugin is loaded, before any other functions are
+  called. Put global initialization here.
+<p style="margin-left:.2in"><b>Returns</b>: <br>
+  <span class="commandline">SLURM_SUCCESS</span> on success, or<br>
+  <span class="commandline">SLURM_ERROR</span> on failure.</p>
+
+<p class="commandline"> void fini (void)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+  Called when the plugin is removed. Clear any allocated storage here.
+<p style="margin-left:.2in"><b>Returns</b>: None.</p>
+
+<p><b>Note</b>: These init and fini functions are not the same as those
+described in the <span class="commandline">dlopen (3)</span> system library.
+The C run-time system co-opts those symbols for its own initialization.
+The system <span class="commandline">_init()</span> is called before the Slurm
+<span class="commandline">init()</span>, and the Slurm
+<span class="commandline">fini()</span> is called before the system's
+<span class="commandline">_fini()</span>.</p>
+
+<p class="commandline">
+void power_p_reconfig(void)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+This function is called when updated configuration information should be read.
+
+<p class="commandline">
+void power_p_job_resume(struct job_record *job_ptr)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Note that a previously suspended job is being resumed.
+Called by the slurmctld daemon.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">job_ptr</span>
+(input) pointer to the job record being resumed.
+
+<p class="commandline">
+void power_p_job_start(struct job_record *job_ptr)
+<p style="margin-left:.2in"><b>Description</b>:<br>
+Note that a job has been allocated resources and is about to begin execution.
+Called by the slurmctld daemon.
+<p style="margin-left:.2in"><b>Arguments</b>: <br>
+<span class="commandline">job_ptr</span>
+(input) pointer to the job record being started.
+
+<p class="footer"><a href="#top">top</a>
+
+<p style="text-align:center;">Last modified 27 March 2015</p>
+
+<!--#include virtual="footer.txt"-->
diff --git a/doc/html/power_save.shtml b/doc/html/power_save.shtml
index 67bcb6305..971094b90 100644
--- a/doc/html/power_save.shtml
+++ b/doc/html/power_save.shtml
@@ -1,8 +1,9 @@
 <!--#include virtual="header.txt"-->
 
-<h1>Power Saving Guide</h1>
+<h1>Slurm Power Saving Guide</h1>
 
-<p>Slurm provides an integrated power saving mechanism for idle nodes.
+<p>Slurm provides an integrated power saving mechanism for powering down
+idle nodes.
 Nodes that remain idle for a configurable period of time can be placed
 in a power saving mode, which can reduce power consumption or fully power down
 the node.
diff --git a/doc/html/preempt.shtml b/doc/html/preempt.shtml
index 3ec3554c3..e7a6a60fd 100644
--- a/doc/html/preempt.shtml
+++ b/doc/html/preempt.shtml
@@ -4,7 +4,7 @@
 
 <P>
 Slurm supports job preemption, the act of stopping one or more "low-priority"
-jobs to let a "high-priority" job run uninterrupted until it completes.
+jobs to let a "high-priority" job run.
 Job preemption is implemented as a variation of Slurm's
 <a href="gang_scheduling.html">Gang Scheduling</a> logic.
 When a high-priority job has been allocated resources that have already been
@@ -64,8 +64,10 @@ QOS using the <I>slurm.conf</I> file or database respectively. This option is
 only honored if <I>PreemptMode=CANCEL</I>. The <I>GraceTime</I> is specified in
 seconds and the default value is zero, which results in no preemption delay.
 Once a job has been selected for preemption, it's end time is set to the
-current time plus <I>GraceTime</I> and the mechanism used to terminate jobs
-upon reaching their time limit is used to cancel the job.
+current time plus <I>GraceTime</I>. The job is immediately sent SIGCONT and
+SIGTERM signals in order to provide notification of its imminent termination.
+This is followed by the SIGCONT, SIGTERM and SIGKILL signal sequence upon
+reaching its new end time.
 </LI>
 <LI>
 <B>JobAcctGatherType and JobAcctGatherFrequency</B>: The "maximum data segment
@@ -102,9 +104,7 @@ Checkpointed jobs are not automatically restarted.
 jobs. Requeued jobs are permitted to be restarted on different resources.</LI>
 <LI>A value of <I>SUSPEND</I> will suspend and automatically resume the low
 priority jobs. The <I>SUSPEND</I> option must be used with the <I>GANG</I>
-option (e.g. "PreemptMode=SUSPEND,GANG") and with
-<I>PreemptType=preempt/partition_prio</I> (the logic to suspend and resume
-jobs currently only has the data structures to support partitions).</LI>
+option (e.g. "PreemptMode=SUSPEND,GANG").</LI>
 <LI>A value of <I>GANG</I> may be used with any of the above values and will
 execute a module responsible for resuming jobs previously suspended for either
 gang scheduling or job preemption with suspension.</LI>
@@ -121,10 +121,10 @@ can preempt jobs from lower priority partitions.</LI>
 <LI><I>preempt/qos</I> indicates that jobs from one Quality Of Service (QOS)
 can preempt jobs from a lower QOS. These jobs can be in the same partition
 or different partitions. PreemptMode must be set to CANCEL, CHECKPOINT,
-or REQUEUE. This option requires the use of a database identifying
+REQUEUE or SUSPEND. This option requires the use of a database identifying
 available QOS and their preemption rules. This option is not compatible with
-PreemptMode=OFF or PreemptMode=SUSPEND (i.e. preempted jobs must be removed
-from the resources).</LI>
+PreemptMode=OFF and PreemptMode=SUSPEND is only supported by the
+select/cons_res plugin.</LI>
 </UL>
 </LI>
 <LI>
@@ -151,6 +151,12 @@ max_share value is 4. In order to preempt jobs (and not gang schedule them),
 always set max_share to 1. To allow up to 2 jobs from this partition to be
 allocated to a common resource (and gang scheduled), set
 <I>Shared=FORCE:2</I>.
+NOTE: <I>PreemptType=QOS</I> will permit one additional job to be run
+on the partition if started due to job preemption. For example, a configuration
+of <I>Shared=FORCE:1</I> will only permit one job per resources normally,
+but a second job can be started if done so through preemption based upon QOS.
+The use of <I>PreemptType=QOS</I> and <I>PreemptType=Suspend</I> only applies
+with <I>SelectType=cons_res</I>.
 </LI>
 </UL>
 <P>
@@ -361,6 +367,21 @@ $ squeue
      95       med      tmp      moe   R       0:24      1 linux
 </PRE>
 
+<H2>Another Example</H2>
+<P>
+In this example we have one partition on which we want to execute only one
+job per resource (e.g. core) at a time except when a job submitted to the
+partition from a high priority Quality Of Service (QOS) is submitted. In that
+case, we want that second high priority job to be started and be gang scheduled
+with the other jobs on overlapping resources.
+</P>
+<PRE>
+# Excerpt from slurm.conf
+PreemptMode=Suspend,Gang
+PreemptType=preempt/qos
+PartitionName=normal Nodes=linux Default=NO  Shared=FORCE:1
+</PRE>
+
 <H2><A NAME="future_work">Future Ideas</A></H2>
 
 <P>
@@ -408,6 +429,6 @@ order to support ideal placements such as this, which can quickly complicate
 the design. Any and all help is welcome here!
 </P>
 
-<p style="text-align:center;">Last modified 15 April 2015</p>
+<p style="text-align:center;">Last modified 20 July 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/preemption_plugins.shtml b/doc/html/preemption_plugins.shtml
index df2917b68..58bdbda1e 100644
--- a/doc/html/preemption_plugins.shtml
+++ b/doc/html/preemption_plugins.shtml
@@ -5,7 +5,7 @@
 <h2> Overview</h2>
 <p> This document describes Slurm preemption plugins and the API that defines
 them. It is intended as a resource to programmers wishing to write their own
-Slurm preemption plugins. This is version 100 of the API.</p>
+Slurm preemption plugins.</p>
 
 <p>Slurm preemption plugins are Slurm plugins that identify which jobs
 can be preempted by a pending job. They must conform to the Slurm Plugin
@@ -25,10 +25,17 @@ partition to preempt jobs from a lower priority partition.</li>
 upon their Quality Of Service values as defined in the Slurm database.</li>
 </ul>
 
-<p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span> symbols required by the Slurm
-Plugin API require no specialization for job preemption support.
-Note carefully, however, the versioning discussion below.</p>
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
 
 <p>The programmer is urged to study
 <span class="commandline">src/plugins/preempt/partition_prio/preempt_partition_prio.c</span>
@@ -93,14 +100,8 @@ preemption is enabled.</p>
 <p style="margin-left:.2in"><b>Returns</b>: true if running jobs may be
 preempted, otherwise false</p>
 
-<h2>Versioning</h2>
-<p> This document describes version 100 of the Slurm Preemption API. Future
-releases of Slurm may revise this API. A preemption plugin conveys its ability
-to implement a particular API version using the mechanism outlined for Slurm
-plugins.</p>
-
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/priority_multifactor.shtml b/doc/html/priority_multifactor.shtml
index dfac60ebd..5dcf281e6 100644
--- a/doc/html/priority_multifactor.shtml
+++ b/doc/html/priority_multifactor.shtml
@@ -11,6 +11,7 @@
 <LI> <a href=#jobsize>Job Size Factor</a>
 <LI> <a href=#partition>Partition Factor</a>
 <LI> <a href=#qos>Quality of Service (QOS) Factor</a>
+<LI> <a href=#tres>TRES Factors</a>
 <LI> <a href=#fairshare>Fair-share Factor</a>
 <LI> <a href=#sprio>The <i>sprio</i> utility</a>
 <LI> <a href=#config>Configuration</a>
@@ -31,7 +32,7 @@
 <a name=mfjppintro>
 <h2>Multi-factor 'Factors'</h2></a>
 
-<P> There are five factors in the Multi-factor Job Priority plugin that influence job priority:</P>
+<P> There are six factors in the Multi-factor Job Priority plugin that influence job priority:</P>
 
 <DL>
 <DT> Age
@@ -44,6 +45,9 @@
 <DD> a factor associated with each node partition
 <DT> QOS
 <DD> a factor associated with each Quality Of Service
+<DT> TRES
+<DD> each TRES Type has its own factor for a job which represents the number of
+requested/allocated TRES Type in a given partition
 </DL>
 
 <P> Additionally, a weight can be assigned to each of the above
@@ -64,7 +68,10 @@ Job_priority =
 	(PriorityWeightFairshare) * (fair-share_factor) +
 	(PriorityWeightJobSize) * (job_size_factor) +
 	(PriorityWeightPartition) * (partition_factor) +
-	(PriorityWeightQOS) * (QOS_factor)
+	(PriorityWeightQOS) * (QOS_factor) +
+	SUM(TRES_weight_cpu * TRES_factor_cpu,
+	    TRES_weight_&lt;type&gt; * TRES_factor_&lt;type&gt;,
+	    ...)
 </PRE>
 
 <P> All of the factors in this formula are floating point numbers that
@@ -131,6 +138,18 @@ number, the greater the job priority will be for jobs that request
 this QOS.  This priority value is then normalized to the highest
 priority of all the QOS's to become the QOS factor.</P>
 
+<!-------------------------------------------------------------------------->
+<a name=tres>
+<h2>TRES Factors</h2></a>
+
+<P>
+Each TRES Type has its own priority factor for a job which represents the amount
+of TRES Type requested/allocated in a given partition. For global TRES Types,
+such as GRES, Licenses and Burst Buffers, the factor represents the number of
+TRES Type requested/allocated in the whole system. The more a given TRES Type is
+requested/allocated on a job, the greater the job priority will be for that job.
+</P>
+
 <!-------------------------------------------------------------------------->
 <a name=fairshare>
 <h2>Fair-share Factor</h2></a>
@@ -146,7 +165,46 @@ computing resources described below.</P>
 
 <P> Slurm's fair-share factor is a floating point number between 0.0 and 1.0 that reflects the shares of a computing resource that a user has been allocated and the amount of computing resources the user's jobs have consumed.  The higher the value, the higher is the placement in the queue of jobs waiting to be scheduled.</P>
 
-<P> The computing resource is currently defined to be computing cycles delivered by a machine in the units of processor*seconds.  Future versions of the fair-share factor may additionally include a memory integral component.</P>
+<P> By default, the computing resource is the computing cycles delivered by a
+machine in the units of allocated_cpus*seconds. Other resources can be taken into
+account by configuring a partition's TRESBillingWeights option. The
+TRESBillingWeights option allows you to account for consumed resources other
+than just CPUs by assigning different billing weights to different Trackable
+Resources (TRES) such as CPUs, nodes, memory, licenses and generic resources
+(GRES). For example, when billing only for CPUs, if a job requests 1CPU and 64GB
+of memory on a 16CPU, 64GB node the job will only be billed for 1CPU when it
+really used the whole node.
+</P>
+
+<P> By default, when TRESBillingWeights is configured, a job is billed for each
+individual TRES used. The billable TRES is calculated as the sum of all TRES
+types multiplied by their corresponding billing weight.
+</P>
+
+<P> For example, the following jobs on a partition configured with
+TRESBillingWeights=CPU=1.0,Mem=0.25 and 16CPU, 64GB nodes would be billed as:
+<pre>
+      CPUs       Mem GB
+Job1: (1 *1.0) + (60*0.25) = (1 + 15) = 16
+Job2: (16*1.0) + (1 *0.25) = (16+.25) = 16.25
+Job3: (16*1.0) + (60*0.25) = (16+ 15) = 31
+</pre>
+</P>
+
+<P>
+Another method of calculating the billable TRES is by taking the MAX of the
+individual TRES' on a node (e.g. cpus, mem, gres) plus the SUM of the global
+TRES' (e.g. licenses). For example the above job's billable TRES would
+be calculated as:
+<pre>
+          CPUs      Mem GB
+Job1: MAX((1 *1.0), (60*0.25)) = 15
+Job2: MAX((16*1.0), (1 *0.25)) = 16
+Job3: MAX((16*1.0), (60*0.25)) = 16
+</pre>
+This method is turned on by defining the MAX_TRES priority flags in the
+slurm.conf.
+</P>
 
 <h3> Normalized Shares</h3>
 
@@ -441,7 +499,7 @@ Account A's effective usage is therefore equal to .45.  Account D's effective us
 <a name=sprio>
 <h2>The <i>sprio</i> utility</h2></a>
 
-<P> The <i>sprio</i> command provides a summary of the five factors
+<P> The <i>sprio</i> command provides a summary of the six factors
 that comprise each job's scheduling priority.  While <i>squeue</i> has
 format options (%p and %Q) that display a job's composite priority,
 sprio can be used to display a breakdown of the priority components
@@ -508,9 +566,12 @@ factor as it is currently configured.</P>
 <DD> An unsigned integer that scales the contribution of the partition factor.
 <DT> PriorityWeightQOS
 <DD> An unsigned integer that scales the contribution of the quality of service factor.
+<DT> PriorityWeightTRES
+<DD> A list of TRES Types and weights that scales the contribution of each TRES
+  Type's factor.
 </DL>
 
-<P> Note:  As stated above, the five priority factors range from 0.0 to 1.0.  As such, the PriorityWeight terms may need to be set to a high enough value (say, 1000) to resolve very tiny differences in priority factors.  This is especially true with the fair-share factor, where two jobs may differ in priority by as little as .001. (or even less!)</P>
+<P> Note:  As stated above, the six priority factors range from 0.0 to 1.0.  As such, the PriorityWeight terms may need to be set to a high enough value (say, 1000) to resolve very tiny differences in priority factors.  This is especially true with the fair-share factor, where two jobs may differ in priority by as little as .001. (or even less!)</P>
 
 <!-------------------------------------------------------------------------->
 <a name=configexample>
diff --git a/doc/html/priority_multifactor2.shtml b/doc/html/priority_multifactor2.shtml
deleted file mode 100644
index d53838947..000000000
--- a/doc/html/priority_multifactor2.shtml
+++ /dev/null
@@ -1,201 +0,0 @@
-<!--#include virtual="header.txt"-->
-
-<h1>Ticket-Based Multifactor Priority Plugin</h1>
-
-<h2>Contents</h2>
-<ul>
-<li><a href=#intro>Introduction</a></li>
-<li><a href=#fairshare>Fair-share Factor</a></li>
-<li><a href=#config>Configuration</a></li>
-</ul>
-
-<!-------------------------------------------------------------------------->
-<a name=intro>
-<h2>Introduction</h2></a>
-
-<p><b>Note:</b> This algorithm is deprecated and has been removed in 15.08. Please
-consider using the <a href="fair_tree.html">Fair Tree</a> algorithm.
-</p>
-
-<p>A ticket-based variant of the priority/multifactor plugin is available.
-The reader is assumed to be familiar with the priority/multifactor plugin
-and only the differences are documented here.</p>
-
-<!-------------------------------------------------------------------------->
-<a name=fairshare>
-<h2>Fair-share Factor</h2></a>
-
-<p><b>Note:</b> Computing the fair-share factor requires the installation
-and operation of the <a href="accounting.html">Slurm Accounting
-Database</a> to provide the assigned shares and the consumed,
-computing resources described below.</p>
-
-<p>In the ticket-based variant, the fair-share component of the job
-priority is calculated differently. The goal is to make sure that the
-priority strictly follows the account hierarchy, so that jobs under
-accounts with usage lower than their fair share will always have a
-higher priority than jobs belonging to accounts which are over their
-fair share.</p>
-
-<p>The algorithm is based on <i>ticket</i> scheduling, where at the
-root of the account hierarchy one starts with a number of tickets,
-which are then distributed per the fairshare policy to the child
-accounts and users.  Then, the job whose user has the highest number
-of tickets is assigned the fairshare priority of 1.0, and the other
-pending jobs are assigned priorities according to how many tickets
-their users have compared to the highest priority job.</p>
-
-<pre>
-Priority<sub>FS</sub> = Tickets<sub>user</sub> / Tickets<sub>max</sub>
-</pre>
-
-<p>The normalized share and normalized usage are calculated in the
-same way as for the multifactor plugin. However, the fair-share factor
-for an account/user is calculated as</p>
-
-<pre>
-F = S/U<sub>eff</sub>
-</pre>
-
-<p>where the effective usage U<sub>eff</sub> is defined as</p>
-
-<pre>
-U<sub>eff</sub> = max(U, 0.01 * s)
-</pre>
-
-<p>This prevents F from diverging as the usage U approaches zero. Another
-way of seeing it is that an account/user that has used less than 1%
-of its fair share will get the maximum factor (which has the value
-100). When the usage of an account/user is exactly proportional to its
-fair share, the fair-share factor will have the value 1.0.</p>
-
-<p>Compared to the fair-share factor formula in the multifactor
-plugin, this formula behaves better when one has users which are much
-above their fair share, which can easily happen e.g. if an account has
-many other users with very little usage.</p>
-
-
-<h3>Distributing tickets</h3>
-
-<p>Tickets are distributed to pending jobs as follows. At the root of
-the account tree, start with N tickets (the exact value doesn't
-matter, only the proportions). Those N tickets are distributed
-to <i>active</i> child nodes (accounts/users) proportional to the
-number of shares the node has multiplied by the fairshare factor (S *
-F). An active node is defined as one which has pending jobs, or where
-one of its child/grandchild/etc. nodes have pending jobs. Tickets are
-thus distributed to a node per the formula</p>
-
-<pre>
-T = T<sub>parent</sub> * S * F / SUM(S*F)<sub>active_siblings</sub>
-</pre>
-
-<h3>Example</h3>
-
-<p>Here the same example as in the multifactor plugin page is shown,
-calculated using the ticket-based algorithm.</p>
-
-<ul>
-<li>User 1 normalized share: 0.3</li>
-<li>User 2 normalized share: 0.05</li>
-<li>User 3 normalized share: 0.05</li>
-<li>User 4 normalized share: 0.25</li>
-<li>User 5 normalized share: 0.35</li>
-</ul>
-
-<p>The effective usage for all the accounts equals the normalized usage,
-except for account F:</p>
-
-<ul>
-<li>Account F effective usage: max(0, 0.01 * 0.35) = 0.0035
-</ul>
-
-<p>The effective usage for all the users:</p>
-
-<ul>
-<li>User 1 effective usage: max(0.2, 0.01 * 0.3) = 0.2</li>
-<li>User 2 effective usage: max(0.25, 0.01 * 0.05) = 0.25</li>
-<li>User 3 effective usage: max(0.0, 0.01 * 0.05) = 0.0005</li>
-<li>User 4 effective usage: max(0.25, 0.01 * 0.25) = 0.25</li>
-<li>User 5 effective usage: max(0.0, 0.01 * 0.35) = 0.0035</li>
-</ul>
-
-<p>The fair-share factor for each account, calculated per the formula</p>
-
-<pre>
-F = S/U<sub>eff</sub>
-</pre>
-
-<p>is thus</p>
-
-<ul>
-<li>Account A fair-share factor: 0.4 / 0.45 = 0.89</li>
-<li>Account B fair-share factor: 0.3 / 0.2 = 1.50</li>
-<li>Account C fair-share factor: 0.1 / 0.25 = 0.4</li>
-<li>Account D fair-share factor: 0.6 / 0.25 = 2.40</li>
-<li>Account E fair-share factor: 0.25 / 0.25 = 1</li>
-<li>Account F fair-share factor: 0.35 / 0.0035 = 100</li>
-</ul>
-
-<p>Similarly, the fair-share factor for each user is</p>
-
-<ul>
-<li>User 1 fair-share factor: 0.3 / 0.2 = 1.5</li>
-<li>User 2 fair-share factor: 0.05 / 0.25 = 0.2</li>
-<li>User 3 fair-share factor: 0.05 / 0.0005 = 100</li>
-<li>User 4 fair-share factor: 0.25 / 0.25 = 1</li>
-<li>User 5 fair-share factor: 0.35 / 0.0035 = 100</li>
-</ul>
-
-<p>Now that the fair-share factors for all nodes in the tree have been
-calculated, we can distribute the tickets to the active nodes. Assume
-that only user 2 and user 5 have pending jobs. Assume that we start
-with 1000 tickets at the root.</p>
-
-<p>Since both child accounts of the root account (A and D) are active,
-distribute tickets to both of them. Thus,</p>
-
-<ul>
-<li>Account A tickets: 1000 * 0.4 * 0.89 / (0.4 * 0.89 + 0.6 * 2.40) = 198
-<li>Account D tickets: 1000 * 0.6 * 2.40 / (0.4 * 0.89 + 0.6 * 2.40) = 802
-</ul>
-
-<p>For the children of account A, only account C is active, so all 198
-tickets are given to account C. Similarly, for the children of account D, only
-account F is active, so all 802 tickets are given to account F.</p>
-
-<p>Finally, user 2 then gets 198 tickets, and user 5 802 tickets. As user
-5 has the most tickets, the jobs belonging to user 5 in account F thus
-get the fair-share priority 1.0. The jobs of user 2 get a fair-share
-priority of</p>
-
-<pre>
-Priority<sub>FS</sub> = 198 / 802 = 0.25
-</pre>
-
-<!-------------------------------------------------------------------------->
-<a name=config>
-<h2>Configuration</h2></a>
-
-<p> The following slurm.conf (SLURM_CONFIG_FILE) parameters are used to
-configure the Multi-factor Job Priority 2 Plugin.  See slurm.conf(5) man page
-for more details.</p>
-
-<dl>
-<dt>PriorityFlags
-<dd>Set to "TICKET_BASED".
-<dt>PriorityType
-<dd>Set this value to "priority/multifactor".
-The default value for this variable is "priority/basic"
-which enables simple FIFO scheduling.
-</dl>
-
-<p>Note: As the ticket-based algorithm ensures that the highest
-priority pending job will have the fair-share factor 1.0, there is a
-need to rebalance the relative weights of the different factors
-compared to the priority/multifactor plugin.</p>
-
-<!-------------------------------------------------------------------------->
-<p style="text-align:center;">Last modified 14 January 2013</p>
-
-<!--#include virtual="footer.txt"-->
diff --git a/doc/html/priority_plugins.shtml b/doc/html/priority_plugins.shtml
index 4c24b7f5a..42fec5a98 100644
--- a/doc/html/priority_plugins.shtml
+++ b/doc/html/priority_plugins.shtml
@@ -5,7 +5,7 @@
 <h2> Overview</h2>
 <p> This document describes Slurm priority plugins and the API that defines
 them. It is intended as a resource to programmers wishing to write their own
-Slurm priority plugins. This is version 100 of the API.</p>
+Slurm priority plugins.</p>
 
 <p>Slurm priority plugins are Slurm plugins that implement the Slurm priority
 API described herein. They must conform to the Slurm Plugin API with the
@@ -23,10 +23,17 @@ job priority.</li>
 <li><b>multifactor</b>&#151;The multi-factor job priority plugin.</li>
 </ul>
 
-<p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span> symbols required by the Slurm
-Plugin API require no specialization for job priority support.
-Note carefully, however, the versioning discussion below.</p>
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
 
 <p>The programmer is urged to study
 <span class="commandline">src/plugins/priority/basic/priority_basic.c</span>
@@ -51,7 +58,7 @@ errno, prior to any error condition arising, should be SLURM_SUCCESS. </p>
 <p style="margin-left:.2in"><b>Description</b>: A slurmctld structure that
 contains details about a job.</p>
 
-<p class="commandline"> acct_association_rec_t</p>
+<p class="commandline"> acct_assoc_rec_t</p>
 <p style="margin-left:.2in"><b>Description</b>: A slurm_accounting_storage
 structure that contains details about an association.</p>
 
@@ -114,7 +121,7 @@ when Slurm is reconfigured, but false if an RPC is used to change only the
 debug level of debug flags.</p>
 <p style="margin-left:.2in"><b>Returns</b>: void</p>
 
-<p class="commandline">void priority_p_set_assoc_usage(acct_association_rec_t *assoc)</p>
+<p class="commandline">void priority_p_set_assoc_usage(acct_assoc_rec_t *assoc)</p>
 <p style="margin-left:.2in"><b>Description</b>: Set the normalized and
 effective usage for an association.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
@@ -137,15 +144,8 @@ containing the requested job priority factors</p>
 <span class="commandline">job_ptr</span> (input) pointer to the job record.</p>
 <p style="margin-left:.2in"><b>Returns</b>: void</p>
 
-
-<h2>Versioning</h2>
-<p> This document describes version 101 of the Slurm Priority API. Future
-releases of Slurm may revise this API. A priority plugin conveys its ability
-to implement a particular API version using the mechanism outlined for Slurm
-plugins.</p>
-
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/proctrack_plugins.shtml b/doc/html/proctrack_plugins.shtml
index b713bd761..3eb5068d1 100644
--- a/doc/html/proctrack_plugins.shtml
+++ b/doc/html/proctrack_plugins.shtml
@@ -9,8 +9,7 @@ It is intended as a resource to programmers wishing to write their
 own Slurm process tracking plugins.
 Note that process tracking plugin is designed for use with Slurm job steps.
 There is a <a href="job_container_plugins.html">job_container plugin</a>
-designed for use with Slurm jobs.
-This is version 91 of the API.</p>
+designed for use with Slurm jobs.</p>
 
 <p>Slurm process tracking plugins are Slurm plugins that implement
 the Slurm process tracking API described herein.
@@ -48,10 +47,18 @@ NOTE: This kernel module records every process creation
 and termination.</li>
 </ul>
 
-<p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span> symbols required
-by the Slurm Plugin API require no specialization for process tracking.
-Note carefully, however, the versioning discussion below.</p>
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
 
 <p>The programmer is urged to study
 <span class="commandline">src/plugins/proctrack/pgid/proctrack_pgid.c</span>
@@ -68,7 +75,7 @@ enumerated <b>errno</b> to allow Slurm to discover as practically as possible
 the reason for any failed API call.
 These values must not be used as return values in integer-valued functions
 in the API.
-The proper error return value from integer-valued functions is Slurm_ERROR.
+The proper error return value from integer-valued functions is SLURM_ERROR.
 The implementation should endeavor to provide useful and pertinent information
 by whatever means is practical.
 Successful API calls are not required to reset errno to a known value.</p>
@@ -109,8 +116,8 @@ variable <i>cont_id</i>.</p>
 <p style="margin-left:.2in"><b>Argument</b>:
 <span class="commandline"> job</span>&nbsp; &nbsp;&nbsp;(input/output)
 Pointer to a slurmd job structure.</p>
-<p style="margin-left:.2in"><b>Returns</b>: Slurm_SUCCESS if successful. On failure,
-the plugin should return Slurm_ERROR and set the errno to an appropriate value
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int proctrack_p_add (stepd_step_rec_t *job, pid_t pid);</p>
@@ -121,8 +128,8 @@ to a given job step's container.</p>
 Pointer to a slurmd job structure.<br>
 <span class="commandline"> pid</span>&nbsp; &nbsp;&nbsp;(input)
 The ID of the process to add to this job's container.</p>
-<p style="margin-left:.2in"><b>Returns</b>: Slurm_SUCCESS if successful. On failure,
-the plugin should return Slurm_ERROR and set the errno to an appropriate value
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">int proctrack_p_signal (uint64_t id, int signal);</p>
@@ -134,9 +141,9 @@ Job step container's ID.<br>
 <span class="commandline"> signal</span> &nbsp;&nbsp;(input)
 Signal to be sent to processes. Note that a signal of zero
 just tests for the existence of processes in a given job step container.</p>
-<p style="margin-left:.2in"><b>Returns</b>: Slurm_SUCCESS if the signal
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if the signal
 was sent.
-If the signal can not be sent, the function should return Slurm_ERROR and set
+If the signal can not be sent, the function should return SLURM_ERROR and set
 its errno to an appropriate value to indicate the reason for failure.</p>
 
 <p class="footer"><a href="#top">top</a></p>
@@ -149,8 +156,8 @@ needed.</p>
 <p style="margin-left:.2in"><b>Arguments</b>:
 <span class="commandline"> id</span> &nbsp;&nbsp; (input)
 Job step container's ID.</p>
-<p style="margin-left:.2in"><b>Returns</b>: Slurm_SUCCESS if successful. On failure,
-the plugin should return Slurm_ERROR and set the errno to an appropriate value
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if successful. On failure,
+the plugin should return SLURM_ERROR and set the errno to an appropriate value
 to indicate the reason for failure.</p>
 
 <p class="commandline">uint64_t proctrack_p_find (pid_t pid);</p>
@@ -172,17 +179,11 @@ A job step container ID.<br>
 Array of process IDs in the container.<br>
 <span class="commandline"> npids</span>&nbsp; &nbsp;&nbsp;(output)
 Count of process IDs in the container.</p>
-<p style="margin-left:.2in"><b>Returns</b>: Slurm_SUCCESS if
-  successful, Slurm_ERROR else.</p>
-
-<h2>Versioning</h2>
-<p> This document describes version 91 of the Slurm Process Tracking API.
-Future releases of Slurm may revise this API. A process tracking plugin
-conveys its ability to implement a particular API version using the
-mechanism outlined for Slurm plugins.</p>
+<p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS if
+  successful, SLURM_ERROR else.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/prolog_epilog.shtml b/doc/html/prolog_epilog.shtml
index c1ffbe64f..18790314d 100644
--- a/doc/html/prolog_epilog.shtml
+++ b/doc/html/prolog_epilog.shtml
@@ -345,7 +345,7 @@ Available in PrologSlurmctld and EpilogSlurmctld only.</li>
 
 <li><b>SLURM_JOB_CONSTRAINTS</b>
 Features required to run the job.
-Available in PrologSlurmctld and EpilogSlurmctld only.</li>
+Available in Prolog, PrologSlurmctld and EpilogSlurmctld only.</li>
 
 <li><b>SLURM_JOB_DERIVED_EC</b>
 The highest exit code of all of the job steps.
@@ -421,6 +421,6 @@ PrologSlurmctld fails.</p>
 
 <p>Based upon work by Jason Sollom, Cray Inc. and used by permission.</p>
 
-<p style="text-align:center;">Last modified 17 February 2015</p>
+<p style="text-align:center;">Last modified 18 February 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/qos.shtml b/doc/html/qos.shtml
index 86a7ce4c3..55c1780a7 100644
--- a/doc/html/qos.shtml
+++ b/doc/html/qos.shtml
@@ -10,6 +10,7 @@ the job in three ways:
 <li> <a href=#priority>Job Scheduling Priority</a>
 <li> <a href=#preemption>Job Preemption</a>
 <li> <a href=#limits>Job Limits</a>
+<li> <a href=#partition>Partition QOS</a>
 <li> <a href=#qos_other>Other QOS Options</a>
 </ul>
 
@@ -71,123 +72,22 @@ database and described in the <a href="resource_limits.html"> Resource
 Limits section</a>.  When limits for a QOS have been defined, they
 will take precedence over the association's limits.
 
-<P> Here are the limits that will be imposed on jobs running under a
-QOS</P>
+<!-------------------------------------------------------------------------->
+<a name=partition>
+<h2>Partition QOS</h2></a>
+<P> Beginning in 15.08 you can attach a QOS to a partition.  This
+means a partition will have all the same limits as a QOS.  This also
+gives the ability of a true 'floating' partition, meaning if you
+assign all the nodes to a partition and then in the Partition's QOS
+limit the number of GrpCPUs or GrpNodes the partition will have
+access to all the nodes, but only be able to run on the number of
+resources in it.</p>
+<p>The Partition QOS will override the job's QOS.  If the opposite is
+desired you need to have the job's QOS have the 'OverPartQOS' flag
+which will reverse the order of precedence.</p>
 
-<ul>
-<li><b>GrpCpus</b> The total count of cpus able to be used at any given
-  time from jobs running from this QOS.  If this limit is reached new
-  jobs will be queued but only allowed to run after resources have been
-  relinquished from this group.
-</li>
-
-<li><b>GrpCPUMins</b> A hard limit of cpu minutes to be used by jobs
-  running from this QOS.  If this limit is reached all jobs running in
-  this group will be killed, and no new jobs will be allowed to run.
-</li>
-
-<li><b>GrpCPURunMins</b> Maximum number of CPU minutes all jobs
-  running with this QOS can run at the same time.  This takes into
-  consideration time limit of running jobs.  If the limit is reached
-  no new jobs are started until other jobs finish to allow time to
-  free up.
-</li>
-
-<li><b>GrpJobs</b>
-The total number of jobs able to run at any given time from this QOS.
-If this limit is reached new jobs will be queued but only allowed to
-run after previous jobs complete from this group.
-</li>
-
-<li><b>GrpMemory</b>
-The total amount of memory (MB) able to be used at any given time from
-jobs running from QOS.  If this limit is reached new jobs will be queued but only
-allowed to run after resources have been relinquished from this group.
-</li>
-
-<li><b>GrpNodes</b>
-The total count of nodes able to be used at any given time from jobs
-running from this QOS.  If this limit is reached new jobs will be queued
-but only allowed to run after resources have been relinquished from this group.
-Each job's node allocation is counted separately
-(i.e. if a single node has resources allocated to two jobs, this is counted
-as two allocated nodes).
-</li>
-
-<li><b>GrpSubmitJobs</b>
-The total number of jobs able to be submitted to the system at any given time
- from this QOS. If this limit is reached new submission requests will be denied
-until previous jobs complete from this group.
-</li>
-
-<li><b>GrpWall</b>
-The maximum wall clock time any job submitted to this group can run for.
-If this limit is reached submission requests will be denied and the
-running jobs will be killed.
-</li>
-
-<li><b>MaxCpusPerJob</b>
-The maximum size in cpus any given job can have from this QOS. Jobs submitted
-requesting more CPUs than the QOS limit will pend until they conform
-(possibly indefinitely); to allow such jobs to run may require
-changing this limit with sacctmgr. See DenyOnLimits below to deny these jobs at
-submission.
-</li>
-
-<li><b>MaxCPUMinsPerJob</b>
-Maximum number of CPU*minutes any job with this QOS can run. Jobs submitted
-requesting more CPU Minutes than the QOS limit will pend until they conform
-(possibly indefinitely); to allow such jobs to run may require
-changing this limit with sacctmgr. See DenyOnLimits below to deny these jobs at
-submission.
-</li>
-
-<li><b>MaxNodesPerJob</b>
-The maximum size in nodes any given job can have from this association.
-Jobs submitted requesting more nodes than the QOS limit will pend
-until they conform (possibly indefinitely); to allow such jobs to run
-may require changing this limit with sacctmgr. See DenyOnLimits below
-to deny these jobs at submission.
-</li>
-
-<li><b>MaxWallDurationPerJob</b>
-The maximum wall clock time any job submitted to this QOS can run for.
-Jobs submitted requesting time larger than the QOS limit will pend
-until they conform (possibly indefinitely); to allow such jobs to run
-may require changing this limit with sacctmgr. See DenyOnLimits below
-to deny these jobs at submission.
-</li>
-
-<li><b>MaxCpusPerUser</b>
-Maximum number of CPU's any user with this QOS can be allocated.
-Jobs submitted requesting more CPU's than the QOS limit will pend
-until they conform (possibly indefinitely); to allow such jobs to run
-may require changing this limit with sacctmgr. See DenyOnLimits below
-to deny these jobs at submission.
-</li>
-
-<li><b>MaxJobsPerUser</b>
-Maximum number of jobs a user can run with this QOS.
-</li>
-
-<li><b>MaxNodesPerUser</b>
-Maximum number of nodes that can be allocated to any user with this QOS.
-Jobs submitted requesting more nodes than the QOS limit will pend
-until they conform (possibly indefinitely); to allow such jobs to run
-may require changing this limit with sacctmgr. See DenyOnLimits below
-to deny these jobs at submission.
-Each job's node allocation is counted separately
-(i.e. if a single node has resources allocated to two jobs, this is counted
-as two allocated nodes).
-</li>
-
-<li><b>MaxSubmitJobsPerUser</b>
-Maximum number of jobs (pending or running) any user with this QOS can have.
-As the name implies, if this limit is reached the job will be denied
-at submission until previous jobs complete from this user.
-</li>
-</ul>
 
+<!-------------------------------------------------------------------------->
 <a name=qos_other>
 <h2>Other QOS Options</h2></a>
 <ul>
@@ -217,6 +117,9 @@ override the requested partition's MaxNodes limit.
 <li><b>PartitionMinNodes</b> If set, jobs using this QOS will be able to
 override the requested partition's MinNodes limit.
 
+<li><b>OverPartQOS</b> If set, jobs using this QOS will be able to
+override any limits of the requested partition's QOS.
+
 <li><b>PartitionTimeLimit</b> If set, jobs using this QOS will be able to
 override the requested partition's TimeLimit.
 
diff --git a/doc/html/quickstart_admin.shtml b/doc/html/quickstart_admin.shtml
index 188ce3403..aed44e392 100644
--- a/doc/html/quickstart_admin.shtml
+++ b/doc/html/quickstart_admin.shtml
@@ -730,7 +730,16 @@ Thus version 14.11.x was initially released in November 2014.
 Changes in the RPCs (remote procedure calls) and state files will only be made
 if the major and/or minor release number changes, which typically happens
 about once every nine months or so.
-Slurm's MPI libraries may also change if the major and/or minor release number
+A list of recent major/minor Slurm releases is shown below.</p>
+<ul>
+<li>2.5.x    (Released November 2012)</li>
+<li>2.6.x    (Released June 2013)</li>
+<li>14.03.x  (Released March 2014)</li>
+<li>14.11.x  (Released November 2014)</li>
+<li>15.08.x  (Released August 2015)</li>
+</ul>
+
+<p>Slurm's MPI libraries may also change if the major and/or minor release number
 change, requiring applications be re-linked (behavior may vary depending upon
 the MPI implementation used and the specific Slurm changes between releases).
 Locally developed Slurm plugins may also require modification.
@@ -817,12 +826,12 @@ See the RELEASE_NOTES file for details.</p>
 <p>FreeBSD administrators can install the latest stable Slurm as a binary
 package using:</p>
 <pre>
-pkg install slurm-hpc
+pkg install slurm-wlm
 </pre>
 
 <p>Or, it can be built and installed from source using:</p>
 <pre>
-cd /usr/ports/sysutils/slurm-hpc && make install
+cd /usr/ports/sysutils/slurm-wlm && make install
 </pre>
 
 <p>The binary package installs a minimal Slurm configuration suitable for
@@ -831,6 +840,6 @@ options such as mysql and gui tools via a configuration menu.</p>
 
 </pre> <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 15 April 2015</p>
+<p style="text-align:center;">Last modified 16 June 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/reservations.shtml b/doc/html/reservations.shtml
index 30152f2f7..9d2215447 100644
--- a/doc/html/reservations.shtml
+++ b/doc/html/reservations.shtml
@@ -6,18 +6,20 @@
 being executed by select users and/or select bank accounts.
 A resource reservation identifies the resources in that reservation
 and a time period during which the reservation is available.
-The resources which can be reserved include cores, nodes and/or licenses.
+The resources which can be reserved include cores, nodes, licenses and/or
+burst buffers.
 Note that resource reservations are not compatible with Slurm's
 gang scheduler plugin since the termination time of running jobs
 cannot be accurately predicted.</p>
 
-<p>Note that reserved licenses are treated somewhat differently than reserved
-cores or nodes. When cores or nodes are reserved, then jobs using that
-reservation can use only those resources and no other jobs can use those
-resources. Reserved licenses can only
-be used by jobs associated with that reservation, but licenses not explicitly
-reserved are available to any job. This eliminates the need to explicitly
-put licenses into every advanced reservation created.</p>
+<p>Note that reserved burst buffers and licenses are treated somewhat
+differently than reserved cores or nodes.
+When cores or nodes are reserved, then jobs using that reservation can use only
+those resources and no other jobs can use those resources.
+Reserved burst buffers and licenses can only be used by jobs associated with
+that reservation, but licenses not explicitly reserved are available to any job.
+This eliminates the need to explicitly put licenses into every advanced
+reservation created.</p>
 
 <p>Reservations can be created, updated, or destroyed only by user root
 or the configured <i>SlurmUser</i> using the <i>scontrol</i> command.
@@ -57,6 +59,7 @@ ReservationName=root_3 StartTime=2009-02-06T16:00:00
    Nodes=ALL NodeCnt=20
    Features=(null) PartitionName=(null)
    Flags=MAINT,SPEC_NODES,IGNORE_JOBS Licenses=(null)
+   BurstBuffers=(null)
    Users=root Accounts=(null)
 </pre>
 
@@ -84,6 +87,7 @@ ReservationName=root_4 StartTime=2009-04-06T16:00:00
    Nodes= NodeCnt=0
    Features=(null) PartitionName=(null)
    Flags=LICENSE_ONLY Licenses=lustre*1000
+   BurstBuffers=(null)
    Users=root Accounts=(null)
 </pre>
 
@@ -103,6 +107,7 @@ ReservationName=root_5 StartTime=2009-02-04T16:22:57
    Nodes=sun000 NodeCnt=1
    Features=(null) PartitionName=(null)
    Flags=MAINT,SPEC_NODES Licenses=(null)
+   BurstBuffers=(null)
    Users=root Accounts=(null)
 </pre>
 
@@ -120,7 +125,24 @@ ReservationName=alan_6 StartTime=2009-02-05T12:00:00
    EndTime=2009-02-05T13:00:00 Duration=60
    Nodes=sun[000-003,007,010-013,017] NodeCnt=10
    Features=(null) PartitionName=pdebug
-   Flags=DAILY Licenses=(null)
+   Flags=DAILY Licenses=(null) BurstBuffers=(null)
+   Users=alan,brenda Accounts=(null)
+</pre>
+
+<p>Our next example is to reserve 100GB of burst buffer space
+starting at noon today and with a duration of 60 minutes.
+The reservation will be available only to users "alan" and "brenda".</p>
+<pre>
+$ scontrol create reservation user=alan,brenda \
+   starttime=noon duration=60 flags=any_nodes burstbuffer=100GB
+Reservation created: alan_7
+
+$ scontrol show res
+ReservationName=alan_7 StartTime=2009-02-05T12:00:00
+   EndTime=2009-02-05T13:00:00 Duration=60
+   Nodes= NodeCnt=0
+   Features=(null) PartitionName=(null)
+   Flags=ANY_NODES Licenses=(null) BurstBuffer=100GB
    Users=alan,brenda Accounts=(null)
 </pre>
 
@@ -143,7 +165,7 @@ ReservationName=alan_8 StartTime=2011-12-05T12:00:00
    EndTime=2011-12-05T13:00:00 Duration=60
    Nodes=bgp[000x011,210x311] NodeCnt=4096
    Features=(null) PartitionName=pdebug
-   Flags= Licenses=(null)
+   Flags= Licenses=(null) BurstBuffers=(null)
    Users=alan,brenda Accounts=(null)
 </pre>
 
@@ -189,7 +211,7 @@ ReservationName=alan_9 StartTime=2011-12-05T13:00:00
    EndTime=2011-12-05T14:00:00 Duration=60
    Nodes=bgp[000x011,210x311] NodeCnt=4096
    Features=(null) PartitionName=pdebug
-   Flags= Licenses=(null)
+   Flags= Licenses=(null) BurstBuffers=(null)
    Users=-alan Accounts=foo
 </pre>
 
@@ -229,7 +251,8 @@ bash-3.00$ scontrol show ReservationName=root_3
 ReservationName=root_3 StartTime=2009-02-06T16:00:00
    EndTime=2009-02-06T18:30:00 Duration=150
    Nodes=ALL NodeCnt=20 Features=(null)
-   PartitionName=(null) Flags=MAINT,SPEC_NODES Licenses=(null)
+   PartitionName=(null) Flags=MAINT,SPEC_NODES
+   Licenses=(null) BurstBuffers=(null)
    Users=admin Accounts=(null)
 </pre>
 
@@ -329,6 +352,51 @@ $ scontrol create reservation user=operator nodes=tux8 \
   starttime=now+60minutes duration=100 flags=time_float
 </pre>
 
+<h2>Reservations that Replace Allocated Resources</h2>
+
+<p>Slurm can create an advanced reservation for which nodes that are allocated
+to jobs are automatically replaced with new idle nodes.
+The effect of this is to always maintain a constant size pool of resources.
+This is accomplished by using a "replace" flag as shown in the example below.
+This option is not supported on IBM Bluegene systems.</p>
+<pre>
+$ scontrol create reservation starttime=now duration=60 \
+  users=foo nodecnt=2 flags=replace
+Reservation created: foo_82
+
+$ scontrol show res
+ReservationName=foo_82 StartTime=2014-11-20T16:21:11
+   EndTime=2014-11-20T17:21:11 Duration=01:00:00
+   Nodes=tux[0-1] NodeCnt=2 CoreCnt=12 Features=(null)
+   PartitionName=debug Flags=REPLACE
+   Users=foo Accounts=(null) Licenses=(null) State=ACTIVE
+
+$ sbatch -n4 --reservation=foo_82 tmp
+Submitted batch job 97
+
+$ scontrol show res
+ReservationName=foo_82 StartTime=2014-11-20T16:21:11
+   EndTime=2014-11-20T17:21:11 Duration=01:00:00
+   Nodes=tux[1-2] NodeCnt=2 CoreCnt=12 Features=(null)
+   PartitionName=debug Flags=REPLACE
+   Users=foo Accounts=(null) Licenses=(null) State=ACTIVE
+
+$ sbatch -n4 --reservation=foo_82 tmp
+Submitted batch job 98
+
+$ scontrol show res
+ReservationName=foo_82 StartTime=2014-11-20T16:21:11
+   EndTime=2014-11-20T17:21:11 Duration=01:00:00
+   Nodes=tux[2-3] NodeCnt=2 CoreCnt=12 Features=(null)
+   PartitionName=debug Flags=REPLACE
+   Users=foo Accounts=(null) Licenses=(null) State=ACTIVE
+
+$ squeue
+JOBID PARTITION  NAME  USER ST  TIME  NODES NODELIST(REASON)
+   97     debug   tmp   foo  R  0:09      1 tux0
+   98     debug   tmp   foo  R  0:07      1 tux1
+</pre>
+
 <h2>Reservation Accounting</h2>
 
 <p>Jobs executed within a reservation are accounted for using the appropriate
@@ -354,7 +422,7 @@ considering the initiation of jobs.
 This will prevent the initiation of some jobs which would complete execution
 before a reservation given fewer jobs to time-slice with.</p>
 
-<p style="text-align: center;">Last modified 15 April 2015</p>
+<p style="text-align: center;">Last modified 24 June 2015</p>
 
 <!--#include virtual="footer.txt"-->
 
diff --git a/doc/html/resource_limits.shtml b/doc/html/resource_limits.shtml
index d613194b8..627b310b2 100644
--- a/doc/html/resource_limits.shtml
+++ b/doc/html/resource_limits.shtml
@@ -11,9 +11,12 @@ but should use their own resource limits mechanisms.</p>
 
 <h2>Hierachy</h2>
 
-<p>Slurm's hierarchical limits are enforced in the following order:
+<p>Slurm's hierarchical limits are enforced in the following order
+  with Job QOS and Partition QOS order being reversible by using the QOS
+  flag 'OverPartQOS':
 <ol>
-	<li>QOS limit</li>
+	<li>Partition QOS limit</li>
+	<li>Job QOS limit</li>
 	<li>User association</li>
 	<li>Account association(s)</li>
 	<li>Root/Cluster association</li>
@@ -106,116 +109,141 @@ jobs which belong to that association are immediately canceled.
 When limits are lowered, running jobs will not be canceled to
 satisfy the new limits, but the new lower limits will be enforced.</p>
 
-<h2>Policies supported</h2>
-
-<p> A limited subset of scheduling policy options are currently
-supported.
-The available options are expected to increase as development
-continues.
-Most of these scheduling policy options are available not only
-for a user association, but also for each cluster and account.
+<h2>Limits in both Associations and QOS</h2>
+<p>When dealing with Associations, most of these limits are available
+not only for a user association, but also for each cluster and account.
 If a new association is created for some user and a scheduling
 policy option is not specified the default will be: the option
 for the cluster/account pair, and if both are not specified
 then the option for the cluster, and if that also is not
 specified then no limit will apply.</p>
 
-<p>Currently available scheduling policy options:</p>
 <ul>
-<li><b>Fairshare=</b> Integer value used for determining priority.
-  Essentially this is the amount of claim this association and it's
-  children have to the above system. Can also be the string "parent",
-  this means that the parent association is used for fairshare.
-</li>
-
-<li><b>GrpCPUMins=</b> A hard limit of cpu minutes to be used by jobs
-  running from this association and its children.  If this limit is
+<li><b>GrpTRESMins=</b> The total number of TRES minutes that can
+  possibly be used by past, present and future jobs
+  running from an association and its children or QOS.  If this limit is
   reached all jobs running in this group will be killed, and no new
-  jobs will be allowed to run.  The usage is decayed (at a rate of
+  jobs will be allowed to run.  This usage is decayed (at a rate of
   PriorityDecayHalfLife).  It can also be reset (according to
   PriorityUsageResetPeriod) in order to allow jobs to run against the
-  association tree again.  This limit only applies when using the Priority
-  Multifactor plugin.
+  association tree or QOS again.  This limit only applies when using
+  the Priority Multifactor plugin.
 </li>
 
-<li><b>GrpCPUs=</b> The total count of cpus able to be used at any given
-  time from jobs running from this association and its children.  If
+<li><b>GrpTRESRunMins=</b> Used to limit the combined total number of TRES
+  minutes used by all jobs running with an association and its
+  children or QOS.  This takes into consideration time limit of
+  running jobs and consumes it, if the limit is reached no new jobs
+  are started until other jobs finish to allow time to free up.
+</li>
+
+<li><b>GrpTRES=</b> The total count of TRES able to be used at any given
+  time from jobs running from an association and its children or QOS.  If
   this limit is reached new jobs will be queued but only allowed to
   run after resources have been relinquished from this group.
 </li>
 
 <li><b>GrpJobs=</b> The total number of jobs able to run at any given
-  time from this association and its children.  If
+  time from an association and its children or QOS.  If
   this limit is reached new jobs will be queued but only allowed to
   run after previous jobs complete from this group.
 </li>
 
-<li><b>GrpMemory=</b> The total amount of memory (MB) able to be used
-  at any given time from jobs running from this association and its
-  children.  If this limit is reached new jobs will be queued but only
-  allowed to run after resources have been relinquished from this group.
+<li><b>GrpSubmitJobs=</b> The total number of jobs able to be submitted
+  to the system at any given time from an association and its children
+  or QOS.  If this limit is reached new submission requests will be
+  denied until previous jobs complete from this group.
 </li>
 
-<li><b>GrpNodes=</b> The total count of nodes able to be used at any given
-  time from jobs running from this association and its children.  If
-  this limit is reached new jobs will be queued but only allowed to
-  run after resources have been relinquished from this group.
-  Each job's node allocation is counted separately
-  (i.e. if a single node has resources allocated to two jobs, this is counted
-  as two allocated nodes).
+<li><b>GrpWall=</b> The maximum wall clock time any job submitted to
+  this group can run for.  If this limit is reached submission requests
+  will be denied.
 </li>
 
-<li><b>GrpSubmitJobs=</b> The total number of jobs able to be submitted
-  to the system at any given time from this association and its children.  If
-  this limit is reached new submission requests will be denied until
-  previous jobs complete from this group.
+<li><b>MaxTRESMinsPerJob=</b> A limit of TRES minutes to be used by a job.
+  If this limit is reached the job will be killed if not running in
+  Safe mode, otherwise the job will pend until enough time is given to
+  complete the job.
 </li>
 
-<li><b>GrpWall=</b> The maximum wall clock time any job submitted to
-  this group can run for. If this limit is reached submission requests
-  will be denied and the running jobs will be killed.
+<li><b>MaxTRESPerJob=</b> The maximum size in TRES any given job can
+  have from the association/QOS.  If this limit is reached the job will
+  be denied at submission if the QOS running the job has the
+  DenyOnLimit flag set, otherwise the job will pend.
 </li>
 
-<li><b>MaxCPUsPerJob=</b> The maximum size in cpus any given job can
-  have from this association.  If this limit is reached the job will
-  be denied at submission.
+<li><b>MaxTRESPerNode=</b> The maximum size in TRES each node in a job
+  allocation can use.  If this limit is reached the job will
+  be denied at submission if the QOS running the job has the
+  DenyOnLimit flag set, otherwise the job will pend.
 </li>
 
 <li><b>MaxJobs=</b> The total number of jobs able to run at any given
-  time from this association.  If this limit is reached new jobs will
+  time for the given association/QOS.  If this limit is reached new jobs will
   be queued but only allowed to run after previous jobs complete from
-  this association.
-</li>
-
-<li><b>MaxNodesPerJob=</b> The maximum size in nodes any given job can
-  have from this association.  If this limit is reached the job will
-  be denied at submission.
+  the association/QOS.
 </li>
 
 <li><b>MaxSubmitJobs=</b> The maximum number of jobs able to be submitted
-  to the system at any given time from this association.  If
+  to the system at any given time from the given association/QOS.  If
   this limit is reached new submission requests will be denied until
   previous jobs complete from this association.
 </li>
 
 <li><b>MaxWallDurationPerJob=</b> The maximum wall clock time any job
-  submitted to this association can run for.  If this limit is reached
+  submitted can run for the given association/QOS.  If this limit is reached
   the job will be denied at submission.
 </li>
 
-<li><b>QOS=</b> comma separated list of QOS's this association is
-  able to run.
-</li>
-
 <!-- For future use
-<li><b>MaxCPUMinsPerJob=</b> A limit of cpu minutes to be used by jobs
-  running from this association.  If this limit is
+<li><b>MaxTRESMinsPerJob=</b> A limit of TRES minutes to be used by jobs
+  running from the given association/QOS.  If this limit is
   reached the job will be killed will be allowed to run.
 </li>
 -->
 
 </ul>
 
+<h2>Association specific scheduling policies supported</h2>
+
+<p> These represent the scheduling policies unique to associations.
+  Policies and limits shared with QOS are listed in the section above.</p>
+
+<ul>
+<li><b>Fairshare=</b> Integer value used for determining priority.
+  Essentially this is the amount of claim this association and its
+  children have to the above system. Can also be the string "parent",
+  when used on a user this means that the parent association is used
+  for fairshare.  If Fairshare=parent is set on an account, that
+  account's children will be effectively reparented for fairshare
+  calculations to the first parent of their parent that is not
+  Fairshare=parent.  Limits remain the same, only its fairshare value
+  is affected.
+</li>
+
+<li><b>QOS=</b> comma separated list of QOS's an association is
+  able to run.
+</li>
+</ul>
+
+<h2>QOS specific limits supported</h2>
+
+<ul>
+<li><b>MaxTRESPerUser=</b> The maximum number of limited TRES a user can
+  request (combo of all jobs running or pending).  If this limit is breached
+  the job will pend unless the QOS running the job has the
+  DenyOnLimit flag set, which will cause the job to be denied at submission.
+</li>
+
+<li><b>MinTRESPerJob=</b> The minimum size in TRES any given job can
+  have when using the requested QOS.  If this limit is breached
+  the job will pend unless the QOS running the job has the
+  DenyOnLimit flag set, which will cause the job to be denied at submission.
+</li>
+
+</ul>
+
+
 <p>The <b>MaxNodes</b> and <b>MaxWall</b> options already exist in
 Slurm's configuration on a per-partition basis, but the above options
 provide the ability to impose limits on a per-user basis.  The
diff --git a/doc/html/route_plugin.shtml b/doc/html/route_plugin.shtml
index fda891a05..30b14bc82 100644
--- a/doc/html/route_plugin.shtml
+++ b/doc/html/route_plugin.shtml
@@ -6,8 +6,7 @@
 <p> This document describes Slurm Route plugin and the API that
 defines them.
 It is intended as a resource to programmers wishing to write their own
-Slurm Route plugin.
-This is version 101 of the API.</p>
+Slurm Route plugin.</p>
 
 <p>Slurm Route plugins are Slurm plugins that redirect RPCs through
 intermediate forwarding nodes. The routing mechanism is similar
@@ -28,10 +27,17 @@ The minor type specifies the type of route mechanism.
 <li><b>topology</b>&#151;Route messages using topology.conf information.</li>
 </ul></p>
 
-<p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span>
-symbols required by the Slurm Plugin API require no specialization for
-route support.
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+<p><span class="commandline">uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
 
 <h2>Data Objects</h2>
 <p>The implementation must maintain (though not necessarily directly export) an
@@ -122,11 +128,8 @@ address of node to send messages to be aggregated when primary collector
 is down. <br>
 <span class="commandline">NULL</span> if not set.
 
-<h2>Versioning</h2>
-<p> This document describes version 101 of the Slurm Route API.
-Future releases of Slurm may revise this API.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 15 July 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/schedplugins.shtml b/doc/html/schedplugins.shtml
index cd3521f3b..4fcde0d04 100644
--- a/doc/html/schedplugins.shtml
+++ b/doc/html/schedplugins.shtml
@@ -5,7 +5,7 @@
 <h2> Overview</h2>
 <p> This document describe. Slurm scheduler plugins and the API that defines
 them. It is intended as a resource to programmers wishing to write their own SLURM
-scheduler plugins. This describes version 110 of the API.</p>
+scheduler plugins.</p>
 
 <p>It is noteworthy that two different models are used for job scheduling.
 The <b>backfill</b> scheduler let. Slurm establish the initial job priority
@@ -37,16 +37,21 @@ as an external entity to control Slurm job scheduling.</li>
 Moab Cluster Suite</a> as an external entity to control Slurm job scheduling.
 Note that wiki2 is an expanded version of the wiki plugin with additional
 functions supported specifically for Moab.</li>
-
 </ul>
-<p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span>
-symbols required by the Slurm Plugin API require no specialization for scheduler support.
-Note carefully, however, the versioning discussion below.</p>
-<p>The programmer is urged to study
-<span class="commandline">src/plugins/sched/backfill</span> and
-<span class="commandline">src/plugins/sched/builtin</span>
-for sample implementations of a Slurm scheduler plugin.</p>
+
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Data Objects</h2>
@@ -181,12 +186,6 @@ specific error code.</p>
 or NULL if no description found in this plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<h2>Versioning</h2>
-<p> This document describes version 110 of the Slurm Scheduler API. Future
-releases of Slurm may revise this API. A scheduler plugin conveys its ability
-to implement a particular API version using the mechanism outlined for Slurm plugins.</p>
-<p class="footer"><a href="#top">top</a></p>
-
-<p style="text-align:center;">Last modified 27 February 2015</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/selectplugins.shtml b/doc/html/selectplugins.shtml
index 06fa422e3..5449b19e8 100644
--- a/doc/html/selectplugins.shtml
+++ b/doc/html/selectplugins.shtml
@@ -5,7 +5,7 @@
 <h2>Overview</h2>
 <p>This document describe. Slurm resource selection plugins and the API that defines
 them. It is intended as a resource to programmers wishing to write their own SLURM
-node selection plugins. This is version 120 of the API.</p>
+node selection plugins.</p>
 
 <p>Slurm node selection plugins are Slurm plugins that implement the Slurm node selection
 API described herein. They are intended to provide a mechanism for both selecting
@@ -35,10 +35,18 @@ sets of nodes utilizing a best-fit algorithm. While supporting shared nodes,
 this plugin does not allocate individual processors, but can allocate memory to jobs.
 This plugin is recommended for systems without shared nodes.</li>
 </ul>
-<p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span>
-symbols required by the Slurm Plugin API require no specialization for node selection support.
-Note carefully, however, the versioning discussion below.</p>
+
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
 
 <p>A simplified flow of logic follows:
 <pre>
@@ -759,7 +767,8 @@ elements in each dimension of the system size.</p>
 <p class="commandline">bitstr_t *select_g_ba_cnodelist2bitmap(char *cnodelist);</p>
 <p style="margin-left:.2in"><b>Description</b>: Returns a bitmap
 representing the cnodelist input with the bits of the cnodelist in a
-midplane not set.</p>
+midplane not set.
+<p style="margin-left:.2in"><b>Arguments</b>:<br>
 <span class="commandline"> cnodelist</span>&nbsp; &nbsp;&nbsp;(input)
 cnodelist (e.g. on a BGQ it would look something like '[00000x11331]').</br>
 <p style="margin-left:.2in"><b>Returns</b>: A bitmap the size of the
@@ -767,17 +776,6 @@ cnodelist (e.g. on a BGQ it would look something like '[00000x11331]').</br>
 
 <p class="footer"><a href="#top">top</a></p>
 
-
-<h2>Versioning</h2>
-<p> This document describes version 120 of the Slurm node selection API. Future
-releases of Slurm may revise this API. A node selection plugin conveys its ability
-to implement a particular API version using the mechanism outlined for Slurm plugins.
-In addition, the credential is transmitted along with the version number of the
-plugin that transmitted it. It is at the discretion of the plugin author whether
-to maintain data format compatibility across different versions of the plugin.</p>
-
-<p class="footer"><a href="#top">top</a></p>
-
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/slurm_ug_agenda.shtml b/doc/html/slurm_ug_agenda.shtml
index 492298b97..9f17cc92f 100644
--- a/doc/html/slurm_ug_agenda.shtml
+++ b/doc/html/slurm_ug_agenda.shtml
@@ -62,9 +62,10 @@ are shown below.</p>
 <tr><td nowrap>16:30 - 17:00</td><td>Technical</td><td>Lu, Zhang, et. al.</td><td>Extending Slurm with Support for SR-IOV and IVShmem </td></tr>
  
 <tr>
-  <td bgcolor="#F0F1C9">19:00 </td>
+  <td bgcolor="#F0F1C9">19:00</td>
   <td bgcolor="#F0F1C9">Dinner</td>
-  <td colspan="2" bgcolor="#F0F1C9">TBD</td>
+  <td bgcolor="#F0F1C9">Old Ebbitt Grill (Partial Atrium 1)</td>
+  <td bgcolor="#F0F1C9">675 15th Street, NW, Washington, DC 20005</td>
 </tr>
 </table>
 
@@ -82,7 +83,7 @@ are shown below.</p>
 <tr><td nowrap>08:00 - 08:30</td><td>Technical</td><td>Silla</td><td>Increasing cluster throughput with Slurm and rCUDA </td></tr>
 <tr><td nowrap>08:30 - 09:00</td><td>Technical</td><td>Rajagopal, Glesser</td><td>Towards a multi-constraints resources selection within Slurm</td></tr>
 <tr><td nowrap>09:00 - 09:30</td><td>Technical</td><td>Glesser, Georgiou</td><td>Improving Job Scheduling by using Machine Learning</td></tr>
-<tr><td nowrap>09:30 - 10:00</td><td>Technical</td><td>Chakraborty</td><td>Enhancing Startup Performance of Parallel Applications in Slurm</td></tr>
+<tr><td nowrap>09:30 - 10:00</td><td>Technical</td><td>Chakraborty, et.al.</td><td>Enhancing Startup Performance of Parallel Applications in Slurm</td></tr>
 <tr><td nowrap bgcolor="#F0F1C9">10:00 - 10:15</td><td colspan="3" bgcolor="#F0F1C9">Break</td></tr>
 <tr><td nowrap>10:15 - 10:45</td><td>Technical</td><td>Haymore</td><td>Profile-driven testbed</td></tr>
 <tr><td nowrap>10:45 - 11:15</td><td>Technical</td><td>Benini, Trofinoff </td><td>Workload Simulator</td></tr>
@@ -109,7 +110,7 @@ are shown below.</p>
 <h2>15 September 2015</h2>
 
 <h3>Keynote: TBD</h3>
-<p>William Putnam (NASA Center for Climate Simulation, NCCS)</p>
+<p>William Putman (NASA Center for Climate Simulation, NCCS)</p>
 
 <h3>Overview of Slurm Version 15.08</h3>
 <p>Morris Jette and Danny Auble (SchedMD)<br>Yiannis Georgiou (Bull)</p>
@@ -180,7 +181,7 @@ specific information about its configuration and use.</p>
 <p>A partition can now have an associated Quality Of Service (QOS).  This will
 allow a partition to have all the limits available to a QOS.  If a limit is set
 in both,  the partition QOS will take precedence over the job&rsquo;s QOS unless the
-job&rsquo;s QOS has the &rsquo;PartitionQOS&rsquo; flag set.  This also allows for truly floating
+job&rsquo;s QOS has the &rsquo;OverPartQOS&rsquo; flag set.  This also allows for truly floating
 partitions where a partition can have access to all the nodes in the system you
 can set a GrpCPU limit in the Partition QOS making it so only so many CPUs can be
 used at once it just doesn&rsquo;t matter which ones.  This can improve utilization as
@@ -269,7 +270,7 @@ systems,  the framework  goal is  to ease  the management  of multiple objective
 <p>More and more data are produced within Slurm by monitoring the system and the jobs. The methods studied in the field of big data, including Machine Learning, could be used to improve the scheduling. This talk will investigate the following question: to what extent Machine Learning techniques can be used to improve job scheduling? We will focus on two main approaches. The first one, based on an online supervised learning algorithm, we try to predict the execution time of jobs in order to improve backfilling. In the second approach a particular &rsquo;Learning2Rank&rsquo; algorithm is implemented within Slurm as a priority plugin to sort jobs in order to optimize a given objective.</p>
 
 <h3>Enhancing Startup Performance of Parallel Applications in Slurm</h3>
-<p>Sourav Chakraborty (Ohio State University)</p>
+<p>Sourav Chakraborty, Hari Subramoni, Jonathan Perkins, Adam Moody and Dhabaleswar K. Panda (Ohio State University)</p>
 <p>As system sizes continue to grow, time taken to launch a parallel application on large number of cores becomes an important factor affecting the overall system performance.  Slurm is a popular choice to launch parallel applications written in Message Passing Interface (MPI), Partitioned Global Address Space (PGAS) and other programming models.  Most of the libraries use the Process Management Interface (PMI) to communicate with the process manager and bootstrap themselves. The current PMI protocol suffers from several bottlenecks due to its design and implementation, and adversely affects the performance and scalability of launching parallel applications at large scale.</p>
 <p>In our earlier work, we identified several of these bottlenecks and evaluated different designs to address them. We also showed how the proposed designs can improve performance and scalability of the startup mechanism of MPI and hybrid MPI+PGAS applications. Some of these designs are already available as part of the MVAPICH2 MPI library and pre-release version of Slurm. In this work we present these designs to the Slurm community. We also present some newer designs and how they can accelerate startup of large scale MPI and PGAS applications.</p>
 
@@ -373,4 +374,6 @@ experiences performed in the SC3UIS platforms, mainly in GUANE-1 supercomputing
 <p>Tim Wickberg (The George Washington University)</p>
 <p>The George Washington University is proud to host the 2015 user group meeting in Washington DC. We present a brief overview of our user of Slurm on Colonial One, our University-wide shared HPC cluster. We present both a detailed overview of our use and configuration of the "fairshare" priority model to assign resources across disparate participating schools, colleges, and research centers, as well as some novel uses of the scheduler for non-traditional tasks such as file system backups.</p>
 
+<p style="text-align:center;">Last modified 17 August 2015</p>
+
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/slurmctld_plugstack.shtml b/doc/html/slurmctld_plugstack.shtml
index fb6d84cbf..18c946a25 100644
--- a/doc/html/slurmctld_plugstack.shtml
+++ b/doc/html/slurmctld_plugstack.shtml
@@ -23,6 +23,15 @@ The major type must be &quot;slurmctld_plugstack.&quot;
 The minor type can be any suitable name for the type of slurmctld package.
 Slurm can be configured to use multiple slurmctld_plugstack plugins if desired.</p>
 
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
 <h2>API Functions</h2>
 
 <p class="commandline"> int init (void)
@@ -53,11 +62,7 @@ In the case of the backup slurmctld daemon, the init and fini functions may
 be called multiple times (when it assumes control functions and then when it
 reliquishes them to the primary slurmctld daemon).</p>
 
-<h2>Versioning</h2>
-<p> This document describes version 100 of the Slurm Job Submission API. Future
-releases of Slurm may revise this API.
 <p class="footer"><a href="#top">top</a>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
-
+<p style="text-align:center;">Last modified 27 March 2015</p>
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/switchplugins.shtml b/doc/html/switchplugins.shtml
index 243564edf..b814ca6e5 100644
--- a/doc/html/switchplugins.shtml
+++ b/doc/html/switchplugins.shtml
@@ -5,7 +5,7 @@
 <h2> Overview</h2>
 <p> This document describe. Slurm switch (interconnect) plugins and the API that defines
 them. It is intended as a resource to programmers wishing to write their own SLURM
-switch plugins. This is version 110 of the API.
+switch plugins.
 Note that many of the API functions are used only by one of the daemons. For
 example the slurmctld daemon builds a job step's switch credential
 (<span class="commandline">switch_p_build_jobinfo</span>) while the
@@ -24,14 +24,19 @@ abbreviation for the type of switch. We recommend, for example:</p>
 switch service. This is the case for Ethernet and Myrinet interconnects.</li>
 <li><b>nrt</b>&#151;IBM Network Resource Table API.</li>
 </ul>
-<p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span>
-symbols required by the Slurm Plugin API require no specialization for switch support.
-Note carefully, however, the versioning discussion below.</p>
-<p>The programmer is urged to study
-<span class="commandline">src/plugins/switch/switch_nrt.c</span> and
-<span class="commandline">src/plugins/switch/switch_none.c</span>
-for sample implementations of a Slurm switch plugin.</p>
+
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
 <p class="footer"><a href="#top">top</a></p>
 
 <h2>Data Objects</h2>
@@ -623,17 +628,9 @@ specific error code.</p>
 specific error code.</p>
 <p style="margin-left:.2in"><b>Returns</b>: Pointer to string describing the error
 or NULL if no description found in this plugin.</p>
-<p class="footer"><a href="#top">top</a></p>
 
-<h2>Versioning</h2>
-<p> This document describes version 0 of the Slurm Switch API. Future
-releases of Slurm may revise this API. A switch plugin conveys its ability
-to implement a particular API version using the mechanism outlined for Slurm plugins.
-In addition, the credential is transmitted along with the version number of the
-plugin that transmitted it. It is at the discretion of the plugin author whether
-to maintain data format compatibility across different versions of the plugin.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/taskplugins.shtml b/doc/html/taskplugins.shtml
index 12b56370c..ba8624af0 100644
--- a/doc/html/taskplugins.shtml
+++ b/doc/html/taskplugins.shtml
@@ -5,7 +5,7 @@
 <h2> Overview</h2>
 <p> This document describes Slurm task management plugins and the API
 that defines them. It is intended as a resource to programmers wishing
-to write their own Slurm scheduler plugins. This is version 2 of the API.</p>
+to write their own Slurm scheduler plugins.</p>
 
 <p>Slurm task management plugins are Slurm plugins that implement the
 Slurm task management API described herein. They would typically be
@@ -26,11 +26,17 @@ and the value of the <b>TaskPluginParam</b> as defined in the <b>slurm.conf</b>
 services. This is the default behavior and provides no task binding.</li>
 </ul>
 
-<p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span>
-symbols required by the Slurm Plugin API require no specialization
-for task support.
-Note carefully, however, the versioning discussion below.</p>
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
 
 <p class="footer"><a href="#top">top</a></p>
 
@@ -229,11 +235,8 @@ data structure definition.</p>
 On failure, the plugin should return SLURM_ERROR and set the errno to an
 appropriate value to indicate the reason for failure.</p>
 
-<h2>Versioning</h2>
-<p> This document describes version 2 of the Slurm Task Plugin API.
-Future releases of Slurm may revise this API.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/team.shtml b/doc/html/team.shtml
index 5ed6d7818..a370ca537 100644
--- a/doc/html/team.shtml
+++ b/doc/html/team.shtml
@@ -2,16 +2,17 @@
 
 <h1>Slurm Team</h1>
 <p>Slurm development has been a joint effort of many companies and
-organizations. Over 130 individuals have contributed to the project.
+organizations around the world.
+Over 170 individuals have contributed to the project.
 Lead Slurm developers are:
 <ul>
-<li>Danny Auble (SchedMD, formerly with Lawrence Livermore National Laboratory)</li>
-<li>Morris Jette (SchedMD, formerly with Lawrence Livermore National Laboratory)</li>
+<li>Danny Auble (SchedMD)</li>
+<li>Morris Jette (SchedMD)</li>
 </ul>
 
 <p>Slurm contributors include:</p>
 <ul>
-<!-- FUNDING ORGANIZATIONS, PLEASE KEEP IN ALPHABETICAL ORDER -->
+<!-- MAJOR CONTRIBUTING ORGANIZATIONS, PLEASE KEEP IN ALPHABETICAL ORDER -->
 <li><a href="http://www.bsc.es">Barcelona Supercomputing Center</a></li>
 <li><a href="http://www.bull.com">Bull</a></li>
 <li><a href="http://www.cea.fr">CEA</a></li>
@@ -25,6 +26,7 @@ Lead Slurm developers are:
 <li><a href="http://www.nvidia.com">NVIDIA</a></li>
 <li><a href="http://www.ornl.gov">Oak Ridge National Laboratory</a></li>
 <li><a href="http://www.schedmd.com">SchedMD</a></li>
+<li><a href="http://www.sgi.com">SGI</a></li>
 <li><a href="http://www.cscs.ch">Swiss National Supercomputing Centre</a></li>
 <br><!-- INDIVIDUALS, PLEASE KEEP IN ALPHABETICAL ORDER --><br>
 <li>Daniel Ahlin (KTH, Sweden)</li>
@@ -45,6 +47,7 @@ Lead Slurm developers are:
 <li>David Bigagli (SchedMD)</li>
 <li>Nicolas Bigaouette</li>
 <li>Anton Blanchard (Samba)</li>
+<li>Yoann Blein (Bull)</li>
 <li>Janne Blomqvist (Aalto University, Finland)</li>
 <li>David Bremer (Lawrence Livermore National Laboratory)</li>
 <li>Jon Bringhurst (Los Alamos National Laboratory)</li>
@@ -57,6 +60,7 @@ Lead Slurm developers are:
 <li>Thomas Cadeau (Bull)</li>
 <li>Hongjia Cao (National University of Defense Technology, China)</li>
 <li>Jimmy Cao (Greenplum/EMC)</li>
+<li>Nate Coraor (Penn State University)</li>
 <li>Ralph Castain (Intel, Greenplum/EMC, Los Alamos National Laboratory)</li>
 <li>Sourav Chakraborty (The Ohio State University)</li>
 <li>Fran&ccedil;ois Chevallier (CEA)</li>
@@ -144,6 +148,7 @@ Lead Slurm developers are:
 <li>L. Shawn Matott (University at Buffalo)</li>
 <li>Steven McDougall (SiCortex)</li>
 <li>Donna Mecozzi (Lawrence Livermore National Laboratory)</li>
+<li>Sergey Meirovich</li>
 <li>Bj&oslash;rn-Helge Mevik (University of Oslo, Norway)</li>
 <li>Stuart Midgley (Down Under GeoSolutions)</li>
 <li>Levi Morrison (Brigham Young University)</li>
@@ -169,11 +174,13 @@ Lead Slurm developers are:
 <li>Martin Perry (Bull)</li>
 <li>Dan Phung (Lawrence Livermore National Laboratory/Columbia University)</li>
 <li>Ashley Pittman (Quadrics, UK)</li>
+<li>Josko Plazonic (Princeton University)</li>
 <li>Artem Polyakov (ISP SB RAS, Russia)</li>
 <li>Ludovic Prevost (NEC, France)</li>
 <br>
 <li>Vijay Ramasubramanian (University of Maryland)</li>
 <li>Krishnakumar Ravi[KK] (HP)</li>
+<li>Michael Raymond (SGI)</li>
 <li>Chris Read</li>
 <li>Petter Reinholdtsen (University of Oslo, Norway)</li>
 <li>Gerrit Renker (Swiss National Supercomputing Centre)</li>
@@ -210,10 +217,12 @@ Lead Slurm developers are:
 <br>
 <li>Garrison Vaughan</li>
 <br>
+<li>Pythagoras Watson (Lawrence Livermore National Laboratory)</li>
 <li>Daniel M. Weeks (Rensselaer Polytechnic Institute)</li>
 <li>Nathan Weeks (Iowa State University)</li>
 <li>Andy Wettstein (University of Chicago)</li>
 <li>Tim Wickberg (George Washington University)</li>
+<li>Chandler Wilkerson (Rice University)</li>
 <li>Ramiro Brito Willmersdorf (Universidade Federal de Pemambuco, Brazil)</li>
 <li>Jay Windley (Linux NetworX)</li>
 <li>Eric Winter</li>
@@ -224,6 +233,6 @@ Lead Slurm developers are:
 <!-- INDIVIDUALS, PLEASE KEEP IN ALPHABETICAL ORDER -->
 </ul>
 
-<p style="text-align:center;">Last modified 9 October 2014</p>
+<p style="text-align:center;">Last modified 26 August 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/topology.shtml b/doc/html/topology.shtml
index 98d1e0662..b19746d36 100644
--- a/doc/html/topology.shtml
+++ b/doc/html/topology.shtml
@@ -4,9 +4,11 @@
 
 <p>Slurm can be configured to support topology-aware resource
 allocation to optimize job performance.
-There are two primary modes of operation, one to optimize performance on
+Slurm supports several modes of operation, one to optimize performance on
 systems with a three-dimensional torus interconnect and another for
-a hierarchical interconnect.</p>
+a hierarchical interconnect.
+The hierarchical mode of operation supports both fat-tree and dragonfly networks,
+using slightly different algorithms.</p>
 
 <p>Slurm's native mode of resource selection is to consider the nodes
 as a one-dimensional array.
@@ -142,6 +144,12 @@ This configuration can be useful if one wants to schedule multiple phyisical
 clusters as a single logical cluster under the control of a single slurmctld
 daemon.</p>
 
+<p>For systems with a dragonfly network, configure Slurm with
+<i>TopologyPlugin=topology/tree</i> plus <i>TopologyParam=dragonfly</i>.
+If a single job can not be entirely placed within a single network leaf
+switch, the job will be spread across as many leaf switches as possible
+in order to optimize the job's network bandwidth.</p>
+
 <h2>User Options</h2>
 
 <p>For use with the topology/tree plugin, user can also specify the maximum
@@ -171,6 +179,6 @@ The value will be set component types listed in SLURM_TOPOLOGY_ADDR.
 Each component will be identified as either "switch" or "node".
 A period is used to separate each hardware component type.</p>
 
-<p style="text-align:center;">Last modified 5 February 2014</p>
+<p style="text-align:center;">Last modified 7 April 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/html/topology_plugin.shtml b/doc/html/topology_plugin.shtml
index dac88f729..0823e6820 100644
--- a/doc/html/topology_plugin.shtml
+++ b/doc/html/topology_plugin.shtml
@@ -1,13 +1,12 @@
 <!--#include virtual="header.txt"-->
 
-<h1><a name="top">Slurm Topology Plugin Programmer Guide</a></h1>
+<h1><a name="top">Topology Plugin Programmer Guide</a></h1>
 
 <h2> Overview</h2>
 <p> This document describes Slurm topology plugin and the API that
 defines them.
 It is intended as a resource to programmers wishing to write their own
-Slurm topology plugin.
-This is version 101 of the API.</p>
+Slurm topology plugin.</p>
 
 <p>Slurm topology plugins are Slurm plugins that implement
 convey system topology information so that Slurm is able to
@@ -26,11 +25,19 @@ We recommend, for example:</p>
 switches.</li>
 </ul></p>
 
-<p>The <span class="commandline">plugin_name</span> and
-<span class="commandline">plugin_version</span>
-symbols required by the Slurm Plugin API require no specialization for
-topology support.
-The actions performed by these plugins vary widely.
+<p><span class="commandline">const char plugin_name[]</span><br>
+Some descriptive name for the plugin.
+There is no requirement with respect to its format.</p>
+<p><span class="commandline">const uint32_t plugin_version</span><br>
+If specified, identifies the version of Slurm used to build this plugin and
+any attempt to load the plugin from a different version of Slurm will result
+in an error.
+If not specified, then the plugin may be loaded by Slurm commands and
+daemons from any version, however this may result in difficult to diagnose
+failures due to changes in the arguments to plugin functions or changes
+in other Slurm functions used by the plugin.</p>
+
+<p>The actions performed by these plugins vary widely.
 In the case of <b>3d_torus</b>, the nodes in configuration file
 are re-ordered so that nodes which are nearby in the one-dimensional
 table are also nearby in logical three-dimensional space.
@@ -106,11 +113,8 @@ in the hierarchy is separated by a period. The final element will always be
 <p style="margin-left:.2in"><b>Returns</b>: SLURM_SUCCESS or
 SLURM_ERROR on failure.</p>
 
-<h2>Versioning</h2>
-<p> This document describes version 101 of the Slurm topology API.
-Future releases of Slurm may revise this API.</p>
 <p class="footer"><a href="#top">top</a></p>
 
-<p style="text-align:center;">Last modified 8 May 2014</p>
+<p style="text-align:center;">Last modified 27 March 2015</p>
 
 <!--#include virtual="footer.txt"-->
diff --git a/doc/man/Makefile.in b/doc/man/Makefile.in
index 29294a90c..b9ea6dd88 100644
--- a/doc/man/Makefile.in
+++ b/doc/man/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -249,6 +252,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -298,8 +303,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -318,6 +327,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -361,6 +373,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -384,6 +397,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/doc/man/man1/Makefile.in b/doc/man/man1/Makefile.in
index 1e6ae7d8a..5e8ca7193 100644
--- a/doc/man/man1/Makefile.in
+++ b/doc/man/man1/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -223,6 +226,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -272,8 +277,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -292,6 +301,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -335,6 +347,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -358,6 +371,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/doc/man/man1/sacct.1 b/doc/man/man1/sacct.1
index 0987cbeb9..5dfa2502f 100644
--- a/doc/man/man1/sacct.1
+++ b/doc/man/man1/sacct.1
@@ -95,6 +95,13 @@ Use job completion instead of job accounting.  The \f3JobCompType\fP
 parameter in the slurm.conf file must be defined to a non-none option.
 .IP
 
+.TP
+\f3\-\-delimiter\f3=\fP\f2characters\fP
+ASCII characters used to separate the fields when specifying
+the \f3\-p\fP or \f3\-P\fP options. The default delimiter
+is a '|'. This option is ignored if \f3\-p\fP or \f3\-P\fP options
+are not specified.
+
 .TP
 \f3\-D\fP\f3,\fP \f3\-\-duplicates\fP
 If Slurm job ids are reset, some job numbers will probably appear more
@@ -119,24 +126,25 @@ Print a list of fields that can be specified with the \f3\-\-format\fP option.
 .ft 3
 Fields available:
 
-AllocCPUS        Account         AssocID      AveCPU
-AveCPUFreq       AveDiskRead     AveDiskWrite AvePages
-AveRSS           AveVMSize       BlockID      Cluster
-Comment          ConsumedEnergy  CPUTime      CPUTimeRAW
-DerivedExitCode  Elapsed         Eligible     End
-ExitCode         GID             Group        JobID
-JobIDRaw         JobName         Layout       MaxDiskRead
-MaxDiskReadNode  MaxDiskReadTask MaxDiskWrite MaxDiskWriteNode
-MaxDiskWriteTask MaxPages        MaxPagesNode MaxPagesTask
-MaxRSS           MaxRSSNode      MaxRSSTask   MaxVMSize
-MaxVMSizeNode    MaxVMSizeTask   MinCPU       MinCPUNode
-MinCPUTask       NCPUS           NNodes       NodeList
-NTasks           Priority        Partition    QOSRAW
-ReqCPUFreq       ReqCPUs         ReqMem       Reservation
-ReservationId    Reserved        ResvCPU      ResvCPURAW
-Start            State           Submit       Suspended
-SystemCPU        Timelimit       TotalCPU     UID
-User             UserCPU         WCKey        WCKeyID
+AllocCPUS        Account         AssocID        AveCPU
+AveCPUFreq       AveDiskRead     AveDiskWrite   AvePages
+AveRSS           AveVMSize       BlockID        Cluster
+Comment          ConsumedEnergy  CPUTime        CPUTimeRAW
+DerivedExitCode  Elapsed         Eligible       End
+ExitCode         GID             Group          JobID
+JobIDRaw         JobName         Layout         MaxDiskRead
+MaxDiskReadNode  MaxDiskReadTask MaxDiskWrite   MaxDiskWriteNode
+MaxDiskWriteTask MaxPages        MaxPagesNode   MaxPagesTask
+MaxRSS           MaxRSSNode      MaxRSSTask     MaxVMSize
+MaxVMSizeNode    MaxVMSizeTask   MinCPU         MinCPUNode
+MinCPUTask       NCPUS           NNodes         NodeList
+NTasks           Priority        Partition      QOSRAW
+ReqCPUFreqMin    ReqCPUFreqMax   ReqCPUFreqGov  ReqCPUs
+ReqMem           Reservation     ReservationId  Reserved
+ResvCPU          ResvCPURAW      Start          State
+Submit           Suspended       SystemCPU      Timelimit
+TotalCPU         Tres            UID            User
+UserCPU          WCKey           WCKeyID
 
 .ft 1
 .fi
@@ -178,6 +186,10 @@ Default is no restrictions.\&.
 \f3\-h\fP\f3,\fP \f3\-\-help\fP
 Displays a general help message.
 
+.TP
+\f3\-i\fP\f3,\fP \f3\-\-nnodes\fP\f3=\fP\f2N\fP
+Return jobs which ran on this many nodes (N = min[-max])
+
 .TP
 \f3\-j \fP\f2job(.step)\fP \f3,\fP  \f3\-\-jobs\fP\f3=\fP\f2job(.step)\fP
 Displays information about the specified job(.step) or list of job(.step)s.
@@ -214,7 +226,7 @@ avevmsize,maxrss,maxrssnode,maxrsstask,averss,maxpages,maxpagesnode,
 maxpagestask,avepages,mincpu,mincpunode,mincputask,avecpu,ntasks,
 alloccpus,elapsed,state,exitcode,maxdiskread,maxdiskreadnode,maxdiskreadtask,
 avediskread,maxdiskwrite,maxdiskwritenode,maxdiskwritetask,avediskwrite,
-allocgres,reqgres
+allocgres,reqgres,avecpufreq,reqcpufreqmin,reqcpufreqmax,reqcpufreqgov
 .ad
 
 .TP
@@ -236,6 +248,12 @@ No heading will be added to the output. The default action is to
 display a header.
 .IP
 
+.TP
+\f3\-\-noconvert\fP
+Don't convert units from their original type (e.g. 2048M won't be converted to
+2G).
+.IP
+
 .TP
 \f3\-N \fP\f2node_list\fP\f3, \-\-nodelist=\fP\f2node_list\fP
 Display jobs that ran on any of these node(s).  \f2node_list\fP can be
@@ -773,6 +791,11 @@ identical to that of the \f3Elapsed\fP field.
 NOTE: TotalCPU provides a measure of the task's parent process and
 does not include CPU time of child processes.
 
+.TP
+\f3Tres\fP
+Trackable resources. These are the resources specified by the
+job at submission time. For more details see AccountingStorageTRES in slurm.conf.
+
 .TP
 \f3UID\fP
 The user identifier of the user who ran the job.
diff --git a/doc/man/man1/sacctmgr.1 b/doc/man/man1/sacctmgr.1
index 9bca6b771..ed80a9952 100644
--- a/doc/man/man1/sacctmgr.1
+++ b/doc/man/man1/sacctmgr.1
@@ -234,7 +234,7 @@ The login name. Only lowercase usernames are supported.
 Workload  Characterization  Key. An arbitrary  string  for  grouping orthogonal accounts.
 
 .SH "GENERAL SPECIFICATIONS FOR ASSOCIATION BASED ENTITIES"
-\fBNOTE:\fR The group limits (GrpJobs, GrpNodes, etc.) are tested when a job is
+\fBNOTE:\fR The group limits (GrpJobs, GrpTRES, etc.) are tested when a job is
 being considered for being allocated resources.
 If starting a job would cause any of its group limit to be exceeded,
 that job will not be considered for scheduling even if that job might preempt
@@ -268,8 +268,8 @@ this QOS.
 NOTE: This value is only meaningful for QOS PreemptMode=CANCEL)
 
 .TP
-\fIGrpCPUMins\fP=<max cpu minutes>
-The total number of cpu minutes that can possibly be used by past,
+\fIGrpTRESMins\fP=<TRES=max TRES minutes,...>
+The total number of TRES minutes that can possibly be used by past,
 present and future jobs running from this association and its children.
 To clear a previously set value use the modify command with a new
 value of \-1.
@@ -286,16 +286,16 @@ jobs submitted with associations in the group will be delayed until
 they are able to run inside the limit.
 
 .TP
-\fIGrpCPURunMins\fP=<max cpu run minutes>
-Used to limit the combined total number of CPU minutes used by all
+\fIGrpTRESRunMins\fP=<TRES=max TRES run minutes,...>
+Used to limit the combined total number of TRES minutes used by all
 jobs running with this association and its children.  This takes into
 consideration time limit of running jobs and consumes it, if the limit
 is reached no new jobs are started until other jobs finish to allow
 time to free up.
 
 .TP
-\fIGrpCPUs\fP=<max cpus>
-Maximum number of CPUs running jobs are able to be allocated in aggregate for
+\fIGrpTRES\fP=<TRES=max TRES,...>
+Maximum number of TRES running jobs are able to be allocated in aggregate for
 this association and all associations which are children of this association.
 To clear a previously set value use the modify command with a new
 value of \-1.
@@ -309,26 +309,6 @@ Maximum number of running jobs in aggregate for
 this association and all associations which are children of this association.
 To clear a previously set value use the modify command with a new value of \-1.
 
-.TP
-\fIGrpMemory\fP=<max memory (MB) >
-Maximum amount of memory running jobs are able to be allocated in aggregate for
-this association and all associations which are children of this association.
-To clear a previously set value use the modify command with a new
-value of \-1.
-.P
-NOTE: This limit only applies fully when using the Select Consumable
-Resource plugin.
-
-.TP
-\fIGrpNodes\fP=<max nodes>
-Maximum number of nodes running jobs are able to be allocated in aggregate for
-this association and all associations which are children of this association.
-To clear a previously set value use the modify command with a new value of \-1.
-.P
-NOTE: Each job's node allocation is counted separately (i.e. if a
-single node has resources allocated to two jobs, this is counted as
-two allocated nodes).
-
 .TP
 \fIGrpSubmitJobs\fP=<max jobs>
 Maximum number of jobs which can be in a pending or running state at any time
@@ -354,16 +334,16 @@ jobs submitted with associations in the group will be delayed until
 they are able to run inside the limit.
 
 .TP
-\fIMaxCPUMins\fP=<max cpu minutes>
-Maximum number of CPU minutes each job is able to use in this association.
+\fIMaxTRESMins\fP=<max TRES minutes>
+Maximum number of TRES minutes each job is able to use in this association.
 This is overridden if set directly on a user.
 Default is the cluster's limit.
 To clear a previously set value use the modify command with a new
 value of \-1.
 
 .TP
-\fIMaxCPUs\fP=<max cpus>
-Maximum number of CPUs each job is able to use in this association.
+\fIMaxTRES\fP=<max TRES>
+Maximum number of TRES each job is able to use in this association.
 This is overridden if set directly on a user.
 Default is the cluster's limit.
 To clear a previously set value use the modify command with a new
@@ -380,14 +360,6 @@ This is overridden if set directly on a user.
 Default is the cluster's limit.
 To clear a previously set value use the modify command with a new value of \-1.
 
-.TP
-\fIMaxNodes\fP=<max nodes>
-Maximum number of nodes each job is able to use in this association.
-This is overridden if set directly on a user.
-Default is the cluster's limit.
-To clear a previously set value use the modify command with a new value of \-1.
-This is a c\-node limit on BlueGene systems.
-
 .TP
 \fIMaxSubmitJobs\fP=<max jobs>
 Maximum number of jobs which can this association can have in a
@@ -598,21 +570,21 @@ of their parent that is not Fairshare=parent.  Limits remain the same,
 only it's fairshare value is affected.
 
 .TP
-\fIGrpCPUMins\fP
-The total number of cpu minutes that can possibly be used by past,
+\fIGrpTRESMins\fP
+The total number of TRES minutes that can possibly be used by past,
 present and future jobs running from this association and its children.
 
 .TP
-\fIGrpCPURunMins\fP
-Used to limit the combined total number of CPU minutes used by all
+\fIGrpTRESRunMins\fP
+Used to limit the combined total number of TRES minutes used by all
 jobs running with this association and its children.  This takes into
 consideration time limit of running jobs and consumes it, if the limit
 is reached no new jobs are started until other jobs finish to allow
 time to free up.
 
 .TP
-\fIGrpCPUs\fP
-Maximum number of CPUs running jobs are able to be allocated in aggregate for
+\fIGrpTRES\fP
+Maximum number of TRES running jobs are able to be allocated in aggregate for
 this association and all associations which are children of this association.
 
 .TP
@@ -620,15 +592,6 @@ this association and all associations which are children of this association.
 Maximum number of running jobs in aggregate for
 this association and all associations which are children of this association.
 
-.TP
-\fIGrpNodes\fP
-Maximum number of nodes running jobs are able to be allocated in aggregate for
-this association and all associations which are children of this association.
-.P
-NOTE: Each job's node allocation is counted separately (i.e. if a
-single node has resources allocated to two jobs, this is counted as
-two allocated nodes).
-
 .TP
 \fIGrpSubmitJobs\fP
 Maximum number of jobs which can be in a pending or running state at any time
@@ -652,21 +615,17 @@ associations with a LFT inside this LFT and before the RGT are
 children of this association.
 
 .TP
-\fIMaxCPUMins\fP
-Maximum number of CPU minutes each job is able to use.
+\fIMaxTRESMins\fP
+Maximum number of TRES minutes each job is able to use.
 
 .TP
-\fIMaxCPUs\fP
-Maximum number of CPUs each job is able to use.
+\fIMaxTRES\fP
+Maximum number of TRES each job is able to use.
 
 .TP
 \fIMaxJobs\fP
 Maximum number of jobs each user is allowed to run at one time.
 
-.TP
-\fIMaxNodes\fP
-Maximum number of nodes each job is able to use.
-
 .TP
 \fIMaxSubmitJobs\fP
 Maximum number of jobs in a pending or running state at any time.
@@ -716,7 +675,7 @@ Type of machine, current classifications are capability and capacity.
 \fIFlags\fP=<flag list>
 Comma separated list of Attributes for a particular cluster.  Current
 Flags include AIX, BGL, BGP, BGQ, Bluegene, CrayXT, FrontEnd, MultipleSlurmd,
-SunConstellation, and XCPU
+and SunConstellation
 
 .TP
 \fIName\fP=<name>
@@ -758,8 +717,9 @@ When a slurmctld registers with the database the port the controller
 is listening on is placed here.
 
 .TP
-\fICPUCount\fP
-The current count of cpus on the cluster.
+\fITRES\fP
+Trackable RESources (BB (Burst buffer), CPU, Energy, GRES, License, Memory, and
+Node) this cluster is accounting for.
 
 .TP
 \fIFlags\fP
@@ -835,12 +795,12 @@ Specific events to look for, valid options are Cluster or Node,
 default is both.
 
 .TP
-\fIMaxCPUs\fP=<OPT>
-Max number of cpus affected by an event.
+\fIMaxTRES\fP=<OPT>
+Max number of TRES affected by an event.
 
 .TP
-\fIMinCPUs\fP=<OPT>
-Min number of cpus affected by an event.
+\fIMinTRES\fP=<OPT>
+Min number of TRES affected by an event.
 
 .TP
 \fINodes\fP=<comma separated list of node names>
@@ -889,8 +849,8 @@ The name of the cluster event happened on.
 The hostlist of nodes on a cluster in a cluster event.
 
 .TP
-\fICPUs\fP
-Number of cpus involved with the event.
+\fITRES\fP
+Number of TRES involved with the event.
 
 .TP
 \fIDuration\fP
@@ -999,6 +959,10 @@ override the requested partition's MaxNodes limit.
 If set jobs using this QOS will be able to
 override the requested partition's MinNodes limit.
 .TP
+\fIOverPartQOS\fP
+If set jobs using this QOS will be able to
+override any limits used by the requested partition's QOS.
+.TP
 \fIPartitionTimeLimit\fP
 If set jobs using this QOS will be able to
 override the requested partition's TimeLimit.
@@ -1016,35 +980,26 @@ Preemption grace time to be extended to a job which has been
 selected for preemption.
 
 .TP
-\fIGrpCPUMins\fP
-The total number of cpu minutes that can possibly be used by past,
+\fIGrpTRESMins\fP
+The total number of TRES minutes that can possibly be used by past,
 present and future jobs running from this QOS.
 
 .TP
-\fIGrpCPURunMins\fP Used to limit the combined total number of CPU
+\fIGrpTRESRunMins\fP Used to limit the combined total number of TRES
 minutes used by all jobs running with this QOS.  This takes into
 consideration time limit of running jobs and consumes it, if the limit
 is reached no new jobs are started until other jobs finish to allow
 time to free up.
 
 .TP
-\fIGrpCPUs\fP
-Maximum number of CPUs running jobs are able to be allocated in aggregate for
+\fIGrpTRES\fP
+Maximum number of TRES running jobs are able to be allocated in aggregate for
 this QOS.
 
 .TP
 \fIGrpJobs\fP
 Maximum number of running jobs in aggregate for this QOS.
 
-.TP
-\fIGrpNodes\fP
-Maximum number of nodes running jobs are able to be allocated in aggregate for
-this QOS.
-.P
-NOTE: Each job's node allocation is counted separately (i.e. if a
-single node has resources allocated to two jobs, this is counted as
-two allocated nodes).
-
 .TP
 \fIGrpSubmitJobs\fP
 Maximum number of jobs which can be in a pending or running state at any time
@@ -1060,38 +1015,30 @@ running jobs will be killed.
 The id of the QOS.
 
 .TP
-\fIMaxCPUMins\fP
-Maximum number of CPU minutes each job is able to use.
+\fIMaxTRESMins\fP
+Maximum number of TRES minutes each job is able to use.
 
 .TP
-\fIMaxCPUs\fP
-Maximum number of CPUs each job is able to use.
+\fIMaxTRESPerJob\fP
+Maximum number of TRES each job is able to use.
 
 .TP
-\fIMaxCpusPerUser\fP
-Maximum number of CPUs each user is able to use.
+\fIMaxTRESPerNode\fP
+Maximum number of TRES each node in a job allocation can use.
 
 .TP
-\fIMaxJobs\fP
-Maximum number of jobs each user is allowed to run at one time.
+\fIMaxTRESPerUser\fP
+Maximum number of TRES each user is able to use.
 
 .TP
-\fIMaxNodes\fP
-Maximum number of nodes each job is able to use.
+\fIMaxJobs\fP
+Maximum number of jobs each user is allowed to run at one time.
 
 .TP
-\fIMinCPUs\fP
-Minimum number of CPUs each job running under this QOS must request.
+\fIMinTRESPerJob\fP
+Minimum number of TRES each job running under this QOS must request.
 Otherwise the job will pend until modified.
 
-.TP
-\fIMaxNodesPerUser\fP
-Maximum number of nodes each user is able to use.
-.P
-NOTE: Each job's node allocation is counted separately (i.e. if a
-single node has resources allocated to two jobs, this is counted as
-two allocated nodes).
-
 .TP
 \fIMaxSubmitJobs\fP
 Maximum number of jobs in a pending or running state at any time per user.
@@ -1160,8 +1107,8 @@ value is zero, no preemption grace time is allowed on this partition.
 NOTE: This value is only meaningful for QOS PreemptMode=CANCEL.
 
 .TP
-\fIGrpCPUMins\fP
-The total number of cpu minutes that can possibly be used by past,
+\fIGrpTRESMins\fP
+The total number of TRES minutes that can possibly be used by past,
 present and future jobs running from this QOS.
 To clear a previously set value use the modify command with a new
 value of \-1.
@@ -1173,29 +1120,17 @@ submitted with this QOS will be delayed until they are able to run
 inside the limit.
 
 .TP
-\fIGrpCPUs\fP
-Maximum number of CPUs running jobs are able to be allocated in aggregate for
+\fIGrpTRES\fP
+Maximum number of TRES running jobs are able to be allocated in aggregate for
 this QOS.
 To clear a previously set value use the modify command with a new
-value of \-1.  (NOTE: This limit is not currently enforced in Slurm.
-You can still set this, but have to wait for future versions of Slurm
-before it is enforced.)
+value of \-1.
 
 .TP
 \fIGrpJobs\fP
 Maximum number of running jobs in aggregate for this QOS.
 To clear a previously set value use the modify command with a new value of \-1.
 
-.TP
-\fIGrpNodes\fP
-Maximum number of nodes running jobs are able to be allocated in aggregate for
-this QOS.
-To clear a previously set value use the modify command with a new value of \-1.
-.P
-NOTE: Each job's node allocation is counted separately (i.e. if a
-single node has resources allocated to two jobs, this is counted as
-two allocated nodes).
-
 .TP
 \fIGrpSubmitJobs\fP
 Maximum number of jobs which can be in a pending or running state at any time
@@ -1215,42 +1150,31 @@ submitted with this QOS will be delayed until they are able to run
 inside the limit.
 
 .TP
-\fIMaxCPUMins\fP
-Maximum number of CPU minutes each job is able to use.
+\fIMaxTRESMins\fP
+Maximum number of TRES minutes each job is able to use.
 To clear a previously set value use the modify command with a new
 value of \-1.
 
 .TP
-\fIMaxCPUs\fP
-Maximum number of CPUs each job is able to use.
+\fIMaxTRESPerJob\fP
+Maximum number of TRES each job is able to use.
 To clear a previously set value use the modify command with a new
-value of \-1.  (NOTE: This limit is not currently enforced in Slurm.
-You can still set this, but have to wait for future versions of Slurm
-before it is enforced.)
-
-.TP
-\fIMaxCpusPerUser\fP
-Maximum number of CPUs each user is able to use.
-To clear a previously set value use the modify command with a new value of \-1.
+value of \-1.
 
 .TP
-\fIMaxJobs\fP
-Maximum number of jobs each user is allowed to run at one time.
+\fIMaxTRESPerNode\fP
+Maximum number of TRES each node in a job allocation can use.
 To clear a previously set value use the modify command with a new value of \-1.
 
 .TP
-\fIMaxNodes\fP
-Maximum number of nodes each job is able to use.
+\fIMaxTRESPerUser\fP
+Maximum number of TRES each user is able to use.
 To clear a previously set value use the modify command with a new value of \-1.
 
 .TP
-\fIMaxNodesPerUser\fP
-Maximum number of nodes each user is able to use.
+\fIMaxJobs\fP
+Maximum number of jobs each user is allowed to run at one time.
 To clear a previously set value use the modify command with a new value of \-1.
-.P
-NOTE: Each job's node allocation is counted separately (i.e. if a
-single node has resources allocated to two jobs, this is counted as
-two allocated nodes).
 
 .TP
 \fIMaxSubmitJobs\fP
@@ -1266,8 +1190,8 @@ The value is recorded in minutes with rounding as needed.
 To clear a previously set value use the modify command with a new value of \-1.
 
 .TP
-\fIMinCPUs\fP
-Minimum number of CPUs each job running under this QOS must request.
+\fIMinTRES\fP
+Minimum number of TRES each job running under this QOS must request.
 Otherwise the job will pend until modified.
 To clear a previously set value use the modify command with a new
 value of \-1.
@@ -1303,11 +1227,11 @@ To clear a previously set value use the modify command with a new value of \-1.
 \fIUsageFactor\fP
 Usage factor when running with this QOS.  This is a float that is factored into
 the priority time calculations of running jobs.  e.g. if the usagefactor of a
-QOS was 2 for every cpu second a job ran it would count for 2.  Also if the
-usagefactor was .5, every second would only count for half of the time.  Setting
-this value to 0 will make it so that running jobs will not add time to fairshare
-or association/qos limits.  To clear a previously set value use the modify
-command with a new value of \-1.
+QOS was 2 for every TRESBillingUnit second a job ran it would count for 2.
+Also if the usagefactor was .5, every second would only count for half
+of the time.  Setting this value to 0 will make it so that running
+jobs will not add time to fairshare or association/qos limits.  To
+clear a previously set value use the modify command with a new value of \-1.
 
 
 .SH "SPECIFICATIONS FOR RESOURCE"
@@ -1573,6 +1497,39 @@ about the various associations the user may have on all the
 clusters in the system.  The Association format fields are described
 in the \fILIST/SHOW ASSOCIATION FORMAT OPTIONS\fP section.
 
+.SH "LIST/SHOW TRES"
+
+.TP
+\fIName\fP
+The name of the trackable resource.  This option is required for
+TRES types BB (Burst buffer), GRES, and License.  Types CPU, Energy,
+Memory, and Node do not have Names.  For example if GRES is the
+type then name is the denomination of the GRES itself e.g. GPU.
+
+.TP
+\fIID\fP
+The identification number of the trackable resource as it appears
+in the database.
+
+.TP
+\fIType\fP
+The type of the trackable resource. Current types are BB (Burst
+buffer), CPU, Energy, GRES, License, Memory, and Node.
+
+.SH "TRES information"
+Trackable RESources (TRES) are used in many QOS or Association limits.
+When setting the limits they are specified as a comma separated list.
+Each TRES has a different limit, i.e. GrpTRESMins=cpu=10,mem=20 would
+make 2 different limits: 1 for 10 cpu minutes and 1 for 20 MB memory
+minutes.  This is the case for each limit that deals with TRES.  To
+remove the limit \-1 is used, i.e. GrpTRESMins=cpu=\-1 would remove
+only the cpu TRES limit.
+
+NOTE: For GrpTRES limits dealing with nodes as a TRES, each job's node
+allocation is counted separately (i.e. if a single node has resources
+allocated to two jobs, this is counted as two allocated nodes).
+
+NOTE: When dealing with Memory as a TRES all limits are in MB.
 
 .SH "GLOBAL FORMAT OPTION"
 When using the format option for listing various fields you can put a
@@ -1625,19 +1582,19 @@ To edit/create a file start with a cluster line for the new cluster
 Anything included on this line will be the defaults for all
 associations on this cluster.  These options are as follows...
 .TP
-\fIGrpCPUMins=\fP
-The total number of cpu minutes that can possibly be used by past,
+\fIGrpTRESMins=\fP
+The total number of TRES minutes that can possibly be used by past,
 present and future jobs running from this association and its children.
 .TP
-\fIGrpCPURunMins=\fP
-Used to limit the combined total number of CPU minutes used by all
+\fIGrpTRESRunMins=\fP
+Used to limit the combined total number of TRES minutes used by all
 jobs running with this association and its children.  This takes into
 consideration time limit of running jobs and consumes it, if the limit
 is reached no new jobs are started until other jobs finish to allow
 time to free up.
 .TP
-\fIGrpCPUs=\fP
-Maximum number of CPUs running jobs are able to be
+\fIGrpTRES=\fP
+Maximum number of TRES running jobs are able to be
 allocated in aggregate for this association and all associations which
 are children of this association.
 .TP
@@ -1702,19 +1659,19 @@ All account options are
 \fIDescription=\fP
 A brief description of the account.
 .TP
-\fIGrpCPUMins=\fP
-Maximum number of CPU hours running jobs are able to
+\fIGrpTRESMins=\fP
+Maximum number of TRES hours running jobs are able to
 be allocated in aggregate for this association and all associations
 which are children of this association.
-\fIGrpCPURunMins=\fP
-Used to limit the combined total number of CPU minutes used by all
+\fIGrpTRESRunMins=\fP
+Used to limit the combined total number of TRES minutes used by all
 jobs running with this association and its children.  This takes into
 consideration time limit of running jobs and consumes it, if the limit
 is reached no new jobs are started until other jobs finish to allow
 time to free up.
 .TP
-\fIGrpCPUs=\fP
-Maximum number of CPUs running jobs are able to be
+\fIGrpTRES=\fP
+Maximum number of TRES running jobs are able to be
 allocated in aggregate for this association and all associations which
 are children of this association.
 .TP
@@ -2083,13 +2040,38 @@ QOS expedite and it will not have access to QOS normal.
   zebra         g1       expedite,normal
 .br
 
+An example of listing trackable resources
+.br
+
+.br
+->sacctmgr show tres
+.br
+      Type              Name      ID
+.br
+---------- ----------------- --------
+.br
+       cpu                          1
+.br
+       mem                          2
+.br
+    energy                          3
+.br
+      node                          4
+.br
+      gres         gpu:tesla     1001
+.br
+   license               vcs     1002
+.br
+        bb              cray     1003
+.br
+
 .ec
 
 .SH "COPYING"
 Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
-Copyright (C) 2010\-2013 SchedMD LLC.
+Copyright (C) 2010\-2015 SchedMD LLC.
 .LP
 This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
diff --git a/doc/man/man1/salloc.1 b/doc/man/man1/salloc.1
index c1a1f311e..98e2d4451 100644
--- a/doc/man/man1/salloc.1
+++ b/doc/man/man1/salloc.1
@@ -121,6 +121,10 @@ This option is not supported on BlueGene systems (select/bluegene plugin
 is configured).
 If not specified, the scontrol show job will display 'ReqS:C:T=*:*:*'.
 
+.TP
+\fB\-\-bb\fR=<\fIspec\fR>
+Burst buffer specification. The form of the specification is system dependent.
+
 .TP
 \fB\-\-begin\fR=<\fItime\fR>
 Submit the batch script to the Slurm controller immediately, like normal, but
@@ -239,6 +243,107 @@ Restrict node selection to nodes with at least the specified number of
 cores per socket.  See additional information under \fB\-B\fR option
 above when task/affinity plugin is enabled.
 
+.TP
+\fB\-\-cpu\-freq\fR =<\fIp1\fR[\-\fIp2\fR[:\fIp3\fR]]>
+
+Request that job steps initiated by srun commands inside this allocation
+be run at some requested frequency if possible, on the CPUs selected
+for the step on the compute node(s).
+
+\fBp1\fR can be  [#### | low | medium | high | highm1] which will set the
+frequency scaling_speed to the corresponding value, and set the frequency
+scaling_governor to UserSpace. See below for definition of the values.
+
+\fBp1\fR can be [Conservative | OnDemand | Performance | PowerSave] which
+will set the scaling_governor to the corresponding value. The governor has to be
+in the list set by the slurm.conf option CpuFreqGovernors.
+
+When \fBp2\fR is present, p1 will be the minimum scaling frequency and
+p2 will be the maximum scaling frequency.
+
+\fBp2\fR can be [#### | medium | high | highm1]. p2 must be greater than p1.
+
+\fBp3\fR can be [Conservative | OnDemand | Performance | PowerSave | UserSpace]
+which will set the governor to the corresponding value.
+
+If \fBp3\fR is UserSpace, the frequency scaling_speed will be set by a power
+or energy aware scheduling strategy to a value between p1 and p2 that lets the
+job run within the site's power goal. The job may be delayed if p1 is higher
+than a frequency that allows the job to run within the goal.
+
+If the current frequency is < min, it will be set to min. Likewise,
+if the current frequency is > max, it will be set to max.
+
+Acceptable values at present include:
+.RS
+.TP 14
+\fB####\fR
+frequency in kilohertz
+.TP
+\fBLow\fR
+the lowest available frequency
+.TP
+\fBHigh\fR
+the highest available frequency
+.TP
+\fBHighM1\fR
+(high minus one) will select the next highest available frequency
+.TP
+\fBMedium\fR
+attempts to set a frequency in the middle of the available range
+.TP
+\fBConservative\fR
+attempts to use the Conservative CPU governor
+.TP
+\fBOnDemand\fR
+attempts to use the OnDemand CPU governor (the default value)
+.TP
+\fBPerformance\fR
+attempts to use the Performance CPU governor
+.TP
+\fBPowerSave\fR
+attempts to use the PowerSave CPU governor
+.TP
+\fBUserSpace\fR
+attempts to use the UserSpace CPU governor
+.TP
+.RE
+
+The following informational environment variable is set in the job
+step when \fB\-\-cpu\-freq\fR option is requested.
+.nf
+        SLURM_CPU_FREQ_REQ
+.fi
+
+This environment variable can also be used to supply the value for the
+CPU frequency request if it is set when the 'srun' command is issued.
+The \fB\-\-cpu\-freq\fR on the command line will override the
+environment variable value.  The form on the environment variable is
+the same as the command line.
+See the \fBENVIRONMENT VARIABLES\fR
+section for a description of the SLURM_CPU_FREQ_REQ variable.
+
+\fBNOTE\fR: This parameter is treated as a request, not a requirement.
+If the job step's node does not support setting the CPU frequency, or
+the requested value is outside the bounds of the legal frequencies, an
+error is logged, but the job step is allowed to continue.
+
+\fBNOTE\fR: Setting the frequency for just the CPUs of the job step
+implies that the tasks are confined to those CPUs.  If task
+confinement (i.e., TaskPlugin=task/affinity or
+TaskPlugin=task/cgroup with the "ConstrainCores" option) is not
+configured, this parameter is ignored.
+
+\fBNOTE\fR: When the step completes, the frequency and governor of each
+selected CPU is reset to the configured \fBCpuFreqDef\fR value with a
+default value of the OnDemand CPU governor.
+
+\fBNOTE\fR: Submitting jobs with the \fB\-\-cpu\-freq\fR option
+with linuxproc as the ProctrackType can cause jobs to run too quickly before
+Accounting is able to poll for job information. As a result not all of the
+accounting information will be present.
+.RE
+
 .TP
 \fB\-c\fR, \fB\-\-cpus\-per\-task\fR=<\fIncpus\fR>
 Advise the Slurm controller that ensuing job steps will require \fIncpus\fR
@@ -258,7 +363,10 @@ of 4 nodes, one for each of the 4 tasks.
 Defer the start of this job until the specified dependencies have been
 satisfied completed.
 <\fIdependency_list\fR> is of the form
-<\fItype:job_id[:job_id][,type:job_id[:job_id]]\fR>.
+<\fItype:job_id[:job_id][,type:job_id[:job_id]]\fR> or
+<\fItype:job_id[:job_id][?type:job_id[:job_id]]\fR>.
+All dependencies must be satisfied if the "," separator is used.
+Any dependency may be satisfied if the "?" separator is used.
 Many jobs can share the same dependency and these jobs may even belong to
 different  users. The  value may be changed after job submission using the
 scontrol command.
@@ -297,8 +405,9 @@ can be specified as full path or relative path to the directory where
 the command is executed.
 
 .TP
-\fB\-\-exclusive\fR
-The job allocation can not share nodes with other running jobs.
+\fB\-\-exclusive[=user]\fR
+The job allocation can not share nodes with other running jobs (or just other
+users with the "=user" option).
 The default shared/exclusive behavior depends on system configuration and the
 partition's \fBShared\fR option takes precedence over the job's option.
 
@@ -546,8 +655,9 @@ fashion across sockets.
 \fB\-\-mail\-type\fR=<\fItype\fR>
 Notify user by email when certain event types occur.
 Valid \fItype\fR values are BEGIN, END, FAIL, REQUEUE, ALL (equivalent to
-BEGIN, END, FAIL and REQUEUE), TIME_LIMIT, TIME_LIMIT_90 (reached 90 percent of
-time limit), TIME_LIMIT_80 (reached 80 percent of time limit), and TIME_LIMIT_50
+BEGIN, END, FAIL, REQUEUE, and STAGE_OUT), STAGE_OUT (burst buffer stage out
+completed), TIME_LIMIT, TIME_LIMIT_90 (reached 90 percent of time limit),
+TIME_LIMIT_80 (reached 80 percent of time limit), and TIME_LIMIT_50
 (reached 50 percent of time limit).
 Multiple \fItype\fR values may be specified in a comma separated list.
 The user to be notified is indicated with \fB\-\-mail\-user\fR.
@@ -926,6 +1036,13 @@ permitted to execute per node.  NOTE: \fBMAX_TASKS_PER_NODE\fR is
 defined in the file \fIslurm.h\fR and is not a variable, it is set at
 Slurm build time.
 
+.TP
+\fB\-\-power\fR=<\fIflags\fR>
+Comma separated list of power management plugin options.
+Currently available flags include:
+level (all nodes allocated to the job should have identical power caps,
+may be disabled by the Slurm configuration option PowerParameters=job_no_level).
+
 .TP
 \fB\-\-priority\fR=<value>
 Request a specific job priority.
@@ -1017,6 +1134,12 @@ Default value is dependent upon the node's configured CoreSpecCount value.
 If a value of zero is designated and the Slurm configuration option
 AllowSpecResourcesUsage is enabled, the job will be allowed to override
 CoreSpecCount and use the specialized resources on nodes it is allocated.
+This option can not be used with the \fB\-\-thread\-spec\fR option.
+
+.TP
+\fB\-\-sicp\fR
+Identify a job as one which jobs submitted to other clusters can be dependent
+upon.
 
 .TP
 \fB\-\-signal\fR=<\fIsig_num\fR>[@<\fIsig_time\fR>]
@@ -1065,6 +1188,13 @@ limit be imposed.  Acceptable time formats include "minutes",
 "minutes:seconds", "hours:minutes:seconds", "days\-hours",
 "days\-hours:minutes" and "days\-hours:minutes:seconds".
 
+.TP
+\fB\-\-thread\-spec\fR=<\fInum\fR>
+Count of specialized threads per node reserved by the job for system operations
+and not used by the application. The application will not use these threads,
+but will be charged for their allocation.
+This option can not be used with the \fB\-\-core\-spec\fR option.
+
 .TP
 \fB\-\-threads\-per\-core\fR=<\fIthreads\fR>
 Restrict node selection to nodes with at least the specified number of
@@ -1115,10 +1245,6 @@ Increase the verbosity of salloc's informational messages.  Multiple
 \fB\-v\fR's will further increase salloc's verbosity.  By default only
 errors will be displayed.
 
-.TP
-\fB\-W\fR, \fB\-\-wait\fR=<\fIseconds\fR>
-This option has been replaced by \fB\-\-immediate\fR=<\fIseconds\fR>.
-
 .TP
 \fB\-w\fR, \fB\-\-nodelist\fR=<\fInode name list\fR>
 Request a specific list of hosts.
@@ -1238,6 +1364,9 @@ Same as \fB\-\-acctg\-freq\fR
 \fBSALLOC_BELL\fR
 Same as \fB\-\-bell\fR
 .TP
+\fBSALLOC_BURST_BUFFER\fR
+Same as \fB\-\-bb\fR
+.TP
 \fBSALLOC_CONN_TYPE\fR
 Same as \fB\-\-conn\-type\fR
 .TP
@@ -1283,6 +1412,9 @@ Same as \fB\-O, \-\-overcommit\fR
 \fBSALLOC_PARTITION\fR
 Same as \fB\-p, \-\-partition\fR
 .TP
+\fBSALLOC_POWER\fR
+Same as \fB\-\-power\fR
+.TP
 \fBSALLOC_PROFILE\fR
 Same as \fB\-\-profile\fR
 .TP
@@ -1297,15 +1429,18 @@ for that number of switches. See \fB\-\-switches\fR.
 \fBSALLOC_RESERVATION\fR
 Same as \fB\-\-reservation\fR
 .TP
+\fBSALLOC_SICP\fR
+Same as \fB\-\-sicp\fR
+.TP
 \fBSALLOC_SIGNAL\fR
 Same as \fB\-\-signal\fR
 .TP
+\fBSALLOC_THREAD_SPEC\fR
+Same as \fB\-\-thread\-spec\fR
+.TP
 \fBSALLOC_TIMELIMIT\fR
 Same as \fB\-t, \-\-time\fR
 .TP
-\fBSALLOC_WAIT\fR
-Same as \fB\-W, \-\-wait\fR
-.TP
 \fBSALLOC_WAIT_ALL_NODES\fR
 Same as \fB\-\-wait\-all\-nodes\fR
 .TP
@@ -1448,7 +1583,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 .br
-Copyright (C) 2010\-2013 SchedMD LLC.
+Copyright (C) 2010\-2015 SchedMD LLC.
 .LP
 This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
diff --git a/doc/man/man1/sbatch.1 b/doc/man/man1/sbatch.1
index 0cdce3378..54332bd3b 100644
--- a/doc/man/man1/sbatch.1
+++ b/doc/man/man1/sbatch.1
@@ -137,6 +137,11 @@ This option is not supported on BlueGene systems (select/bluegene plugin
 is configured).
 If not specified, the scontrol show job will display 'ReqS:C:T=*:*:*'.
 
+.TP
+\fB\-\-bb\fR=<\fIspec\fR>
+Burst buffer specification.
+The form of the specification is system dependent.
+
 .TP
 \fB\-\-begin\fR=<\fItime\fR>
 Submit the batch script to the Slurm controller immediately, like normal, but
@@ -265,6 +270,107 @@ Restrict node selection to nodes with at least the specified number of
 cores per socket.  See additional information under \fB\-B\fR option
 above when task/affinity plugin is enabled.
 
+.TP
+\fB\-\-cpu\-freq\fR =<\fIp1\fR[\-\fIp2\fR[:\fIp3\fR]]>
+
+Request that job steps initiated by srun commands inside this sbatch script
+be run at some requested frequency if possible, on the CPUs selected
+for the step on the compute node(s).
+
+\fBp1\fR can be  [#### | low | medium | high | highm1] which will set the
+frequency scaling_speed to the corresponding value, and set the frequency
+scaling_governor to UserSpace. See below for definition of the values.
+
+\fBp1\fR can be [Conservative | OnDemand | Performance | PowerSave] which
+will set the scaling_governor to the corresponding value. The governor has to be
+in the list set by the slurm.conf option CpuFreqGovernors.
+
+When \fBp2\fR is present, p1 will be the minimum scaling frequency and
+p2 will be the maximum scaling frequency.
+
+\fBp2\fR can be [#### | medium | high | highm1]. p2 must be greater than p1.
+
+\fBp3\fR can be [Conservative | OnDemand | Performance | PowerSave | UserSpace]
+which will set the governor to the corresponding value.
+
+If \fBp3\fR is UserSpace, the frequency scaling_speed will be set by a power
+or energy aware scheduling strategy to a value between p1 and p2 that lets the
+job run within the site's power goal. The job may be delayed if p1 is higher
+than a frequency that allows the job to run within the goal.
+
+If the current frequency is < min, it will be set to min. Likewise,
+if the current frequency is > max, it will be set to max.
+
+Acceptable values at present include:
+.RS
+.TP 14
+\fB####\fR
+frequency in kilohertz
+.TP
+\fBLow\fR
+the lowest available frequency
+.TP
+\fBHigh\fR
+the highest available frequency
+.TP
+\fBHighM1\fR
+(high minus one) will select the next highest available frequency
+.TP
+\fBMedium\fR
+attempts to set a frequency in the middle of the available range
+.TP
+\fBConservative\fR
+attempts to use the Conservative CPU governor
+.TP
+\fBOnDemand\fR
+attempts to use the OnDemand CPU governor (the default value)
+.TP
+\fBPerformance\fR
+attempts to use the Performance CPU governor
+.TP
+\fBPowerSave\fR
+attempts to use the PowerSave CPU governor
+.TP
+\fBUserSpace\fR
+attempts to use the UserSpace CPU governor
+.TP
+.RE
+
+The following informational environment variable is set in the job
+step when \fB\-\-cpu\-freq\fR option is requested.
+.nf
+        SLURM_CPU_FREQ_REQ
+.fi
+
+This environment variable can also be used to supply the value for the
+CPU frequency request if it is set when the 'srun' command is issued.
+The \fB\-\-cpu\-freq\fR on the command line will override the
+environment variable value.  The form on the environment variable is
+the same as the command line.
+See the \fBENVIRONMENT VARIABLES\fR
+section for a description of the SLURM_CPU_FREQ_REQ variable.
+
+\fBNOTE\fR: This parameter is treated as a request, not a requirement.
+If the job step's node does not support setting the CPU frequency, or
+the requested value is outside the bounds of the legal frequencies, an
+error is logged, but the job step is allowed to continue.
+
+\fBNOTE\fR: Setting the frequency for just the CPUs of the job step
+implies that the tasks are confined to those CPUs.  If task
+confinement (i.e., TaskPlugin=task/affinity or
+TaskPlugin=task/cgroup with the "ConstrainCores" option) is not
+configured, this parameter is ignored.
+
+\fBNOTE\fR: When the step completes, the frequency and governor of each
+selected CPU is reset to the configured \fBCpuFreqDef\fR value with a
+default value of the OnDemand CPU governor.
+
+\fBNOTE\fR: Submitting jobs with the \fB\-\-cpu\-freq\fR option
+with linuxproc as the ProctrackType can cause jobs to run too quickly before
+Accounting is able to poll for job information. As a result not all of the
+accounting information will be present.
+.RE
+
 .TP
 \fB\-c\fR, \fB\-\-cpus\-per\-task\fR=<\fIncpus\fR>
 Advise the Slurm controller that ensuing job steps will require \fIncpus\fR
@@ -284,7 +390,10 @@ of 4 nodes, one for each of the 4 tasks.
 Defer the start of this job until the specified dependencies have been
 satisfied completed.
 <\fIdependency_list\fR> is of the form
-<\fItype:job_id[:job_id][,type:job_id[:job_id]]\fR>.
+<\fItype:job_id[:job_id][,type:job_id[:job_id]]\fR> or
+<\fItype:job_id[:job_id][?type:job_id[:job_id]]\fR>.
+All dependencies must be satisfied if the "," separator is used.
+Any dependency may be satisfied if the "?" separator is used.
 Many jobs can share the same dependency and these jobs may even belong to
 different  users. The  value may be changed after job submission using the
 scontrol command.
@@ -334,8 +443,9 @@ replaced by the job ID.
 See the \fB\-\-input\fR option for filename specification options.
 
 .TP
-\fB\-\-exclusive\fR
-The job allocation can not share nodes with other running jobs.
+\fB\-\-exclusive[=user]\fR
+The job allocation can not share nodes with other running jobs (or just other
+users with the "=user" option).
 The default shared/exclusive behavior depends on system configuration and the
 partition's \fBShared\fR option takes precedence over the job's option.
 
@@ -516,6 +626,14 @@ new job steps on the remaining nodes in their allocation.
 By default Slurm terminates the entire job allocation if any node fails in its
 range of allocated nodes.
 
+.TP
+\fB\-\-kill-on-invalid-dep\fR=<\fIyes|no\fR>
+If a job has an invalid dependency and it can never run, this parameter tells
+Slurm whether or not to terminate it. A terminated job's state will be
+JOB_CANCELLED. If this option is not specified the system wide behavior applies.
+By default the job stays pending with reason DependencyNeverSatisfied or, if
+kill_invalid_depend is specified in slurm.conf, the job is terminated.
+
 .TP
 \fB\-L\fR, \fB\-\-licenses\fR=<\fBlicense\fR>
 Specification of licenses (or other resources available on all
@@ -638,11 +756,14 @@ fashion across sockets.
 \fB\-\-mail\-type\fR=<\fItype\fR>
 Notify user by email when certain event types occur.
 Valid \fItype\fR values are BEGIN, END, FAIL, REQUEUE, ALL (equivalent to
-BEGIN, END, FAIL and REQUEUE), TIME_LIMIT, TIME_LIMIT_90 (reached 90 percent of
-time limit), TIME_LIMIT_80 (reached 80 percent of time limit), and TIME_LIMIT_50
+BEGIN, END, FAIL, REQUEUE, and STAGE_OUT), STAGE_OUT (burst buffer stage out
+completed), TIME_LIMIT, TIME_LIMIT_90 (reached 90 percent of time limit),
+TIME_LIMIT_80 (reached 80 percent of time limit), and TIME_LIMIT_50
 (reached 50 percent of time limit).
 Multiple \fItype\fR values may be specified in a comma separated list.
 The user to be notified is indicated with \fB\-\-mail\-user\fR.
+Mail notifications on job BEGIN, END and FAIL apply to a job array as a whole
+rather than generating individual email messages for each task in the job array.
 
 .TP
 \fB\-\-mail\-user\fR=<\fIuser\fR>
@@ -1026,6 +1147,11 @@ Open the output and error files using append or truncate mode as specified.
 The default value is specified by the system configuration parameter
 \fIJobFileAppend\fR.
 
+.TP
+\fB\-\-parsable\fR
+Outputs only the job id number and the cluster name if present.
+The values are separated by a semicolon. Errors will still be displayed.
+
 .TP
 \fB\-p\fR, \fB\-\-partition\fR=<\fIpartition_names\fR>
 Request a specific partition for the resource allocation.  If not specified,
@@ -1037,6 +1163,13 @@ name ordering (although higher priority partitions will be considered first).
 When the job is initiated, the name of the partition used will be placed first
 in the job record partition string.
 
+.TP
+\fB\-\-power\fR=<\fIflags\fR>
+Comma separated list of power management plugin options.
+Currently available flags include:
+level (all nodes allocated to the job should have identical power caps,
+may be disabled by the Slurm configuration option PowerParameters=job_no_level).
+
 .TP
 \fB\-\-priority\fR=<value>
 Request a specific job priority.
@@ -1075,11 +1208,6 @@ Lustre data is collected.
 Network (InfiniBand) data is collected.
 .RE
 
-.TP
-\fB\-\-parsable\fR
-Outputs only the job id number and the cluster name if present.
-The values are separated by a semicolon. Errors will still be displayed.
-
 .TP
 \fB\-\-propagate\fR[=\fIrlimitfR]
 Allows users to specify which of the modifiable (soft) resource limits
@@ -1177,6 +1305,12 @@ Default value is dependent upon the node's configured CoreSpecCount value.
 If a value of zero is designated and the Slurm configuration option
 AllowSpecResourcesUsage is enabled, the job will be allowed to override
 CoreSpecCount and use the specialized resources on nodes it is allocated.
+This option can not be used with the \fB\-\-thread\-spec\fR option.
+
+.TP
+\fB\-\-sicp\fR
+Identify a job as one which jobs submitted to other clusters can be dependent
+upon.
 
 .TP
 \fB\-\-signal\fR=[B:]<\fIsig_num\fR>[@<\fIsig_time\fR>]
@@ -1239,6 +1373,13 @@ Validate the batch script and return an estimate of when a job would be
 scheduled to run given the current job queue and all the other arguments
 specifying the job requirements. No job is actually submitted.
 
+.TP
+\fB\-\-thread\-spec\fR=<\fInum\fR>
+Count of specialized threads per node reserved by the job for system operations
+and not used by the application. The application will not use these threads,
+but will be charged for their allocation.
+This option can not be used with the \fB\-\-core\-spec\fR option.
+
 .TP
 \fB\-\-threads\-per\-core\fR=<\fIthreads\fR>
 Restrict node selection to nodes with at least the specified number of
@@ -1418,6 +1559,9 @@ Same as \fB\-a, \-\-array\fR
 \fBSBATCH_BLRTS_IMAGE\fR
 Same as \fB\-\-blrts\-image\fR
 .TP
+\fBSBATCH_BURST_BUFFER\fR
+Same as \fB\-\-bb\fR
+.TP
 \fBSBATCH_CHECKPOINT\fR
 Same as \fB\-\-checkpoint\fR
 .TP
@@ -1436,7 +1580,6 @@ Same as \fB\-\-conn\-type\fR
 \fBSBATCH_CORE_SPEC\fR
 Same as \fB\-\-core\-spec\fR
 .TP
-.TP
 \fBSBATCH_DEBUG\fR
 Same as \fB\-v, \-\-verbose\fR
 .TP
@@ -1446,12 +1589,6 @@ Same as \fB\-m, \-\-distribution\fR
 \fBSBATCH_EXCLUSIVE\fR
 Same as \fB\-\-exclusive\fR
 .TP
-\fBSLURM_EXIT_ERROR\fR
-Specifies the exit code generated when a Slurm error occurs
-(e.g. invalid options).
-This can be used by a script to distinguish application exit codes from
-various Slurm error conditions.
-.TP
 \fBSBATCH_EXPORT\fR
 Same as \fB\-\-export\fR
 .TP
@@ -1506,6 +1643,9 @@ Same as \fB\-O, \-\-overcommit\fR
 \fBSBATCH_PARTITION\fR
 Same as \fB\-p, \-\-partition\fR
 .TP
+\fBSBATCH_POWER\fR
+Same as \fB\-\-power\fR
+.TP
 \fBSBATCH_PROFILE\fR
 Same as \fB\-\-profile\fR
 .TP
@@ -1526,9 +1666,15 @@ for that number of switches. See \fB\-\-switches\fR
 \fBSBATCH_REQUEUE\fR
 Same as \fB\-\-requeue\fR
 .TP
+\fBSBATCH_SICP\fR
+Same as \fB\-\-sicp\fR
+.TP
 \fBSBATCH_SIGNAL\fR
 Same as \fB\-\-signal\fR
 .TP
+\fBSBATCH_THREAD_SPEC\fR
+Same as \fB\-\-thread\-spec\fR
+.TP
 \fBSBATCH_TIMELIMIT\fR
 Same as \fB\-t, \-\-time\fR
 .TP
@@ -1544,6 +1690,12 @@ Same as \fB\-\-wckey\fR
 \fBSLURM_CONF\fR
 The location of the Slurm configuration file.
 .TP
+\fBSLURM_EXIT_ERROR\fR
+Specifies the exit code generated when a Slurm error occurs
+(e.g. invalid options).
+This can be used by a script to distinguish application exit codes from
+various Slurm error conditions.
+.TP
 \fBSLURM_STEP_KILLED_MSG_NODE_ID\fR=ID
 If set, only the specified node will log when the job or step are killed
 by a signal.
@@ -1568,6 +1720,15 @@ The block name on Blue Gene systems only.
 \fBSLURM_ARRAY_TASK_ID\fR
 Job array ID (index) number.
 .TP
+\fBSLURM_ARRAY_TASK_MAX\fR
+Job array's maximum ID (index) number.
+.TP
+\fBSLURM_ARRAY_TASK_MIN\fR
+Job array's minimum ID (index) number.
+.TP
+\fBSLURM_ARRAY_TASK_STEP\fR
+Job array's index step size.
+.TP
 \fBSLURM_ARRAY_JOB_ID\fR
 Job array's master job ID number.
 .TP
@@ -1759,7 +1920,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 .br
-Copyright (C) 2010\-2013 SchedMD LLC.
+Copyright (C) 2010\-2015 SchedMD LLC.
 .LP
 This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
diff --git a/doc/man/man1/sbcast.1 b/doc/man/man1/sbcast.1
index 3f9d3cd7b..b329760ef 100644
--- a/doc/man/man1/sbcast.1
+++ b/doc/man/man1/sbcast.1
@@ -32,9 +32,9 @@ If the destination file already exists, replace it.
 Specify the fanout of messages used for file transfer.
 Maximum value is currently eight.
 .TP
-\fB\-j\fR \fInumber\fR, \fB\-\-jobid\fR=\fInumber\fR
-Specify the jobid to use.  If ran inside an allocation this is
-unneeded as the jobid will read from the environment.
+\fB\-j\fR \fIjobID[.stepID]\fR, \fB\-\-jobid\fR=\fIjobID[.stepID]\fR
+Specify the job ID to use with optional step ID.  If run inside an allocation
+this is unneeded as the job ID will be read from the environment.
 .TP
 \fB\-p\fR, \fB\-\-preserve\fR
 Preserves modification times, access times, and modes from the
diff --git a/doc/man/man1/scontrol.1 b/doc/man/man1/scontrol.1
index e350a8f1b..253c453a5 100644
--- a/doc/man/man1/scontrol.1
+++ b/doc/man/man1/scontrol.1
@@ -1,4 +1,4 @@
-.TH scontrol "1" "Slurm Commands" "April 2015" "Slurm Commands"
+.TH scontrol "1" "Slurm Commands" "June 2015" "Slurm Commands"
 
 .SH "NAME"
 scontrol \- Used view and modify Slurm configuration and state.
@@ -344,8 +344,8 @@ contents of the slurm.conf configuration file.
 .TP
 \fBshow\fP \fIENTITY\fP \fIID\fP
 Display the state of the specified entity with the specified identification.
-\fIENTITY\fP may be \fIaliases\fP, \fIconfig\fP, \fIdaemons\fP, \fIfrontend\fP,
-\fIjob\fP, \fInode\fP, \fIpartition\fP, \fIreservation\fP, \fIslurmd\fP,
+\fIENTITY\fP may be \fIaliases\fP, \fIcache\fP, \fIconfig\fP, \fIdaemons\fP, \fIfrontend\fP,
+\fIjob\fP, \fInode\fP, \fIpartition\fP, \fIpowercap\fP, \fIreservation\fP, \fIslurmd\fP,
 \fIstep\fP, \fItopology\fP, \fIhostlist\fP, \fIhostlistsorted\fP or
 \fIhostnames\fP
 (also \fIblock\fP or \fIsubmp\fP on BlueGene systems).
@@ -361,6 +361,8 @@ named nodes will be shown.
 \fINodeHostname\fP (useful to get the list of virtual nodes associated with a
 real node in a configuration where multiple slurmd daemons execute on a single
 compute node).
+\fIcache\fP displays the current contents of the slurmctld's internal cache
+for users and associations.
 \fIconfig\fP displays parameter names from the configuration files in mixed
 case (e.g. SlurmdPort=7003) while derived parameters names are in upper case
 only (e.g. SLURM_VERSION).
@@ -429,12 +431,12 @@ system administrator (also see the \fBhold\fP command).
 
 .TP
 \fBupdate\fP \fISPECIFICATION\fP
-Update job, step, node, partition, or reservation configuration per the
-supplied specification. \fISPECIFICATION\fP is in the same format as the Slurm
-configuration file and the output of the \fIshow\fP command described above. It
-may be desirable to execute the \fIshow\fP command (described above) on the
-specific entity you which to update, then use cut\-and\-paste tools to enter
-updated configuration values to the \fIupdate\fP. Note that while most
+Update job, step, node, partition, powercapping or reservation configuration per
+the supplied specification. \fISPECIFICATION\fP is in the same format as the
+Slurm configuration file and the output of the \fIshow\fP command described 
+above. It may be desirable to execute the \fIshow\fP command (described above)
+on the specific entity you wish to update, then use cut\-and\-paste tools to
+enter updated configuration values to the \fIupdate\fP. Note that while most
 configuration values can be changed using this command, not all can be changed
 using this mechanism. In particular, the hardware configuration of a node or
 the physical addition or removal of nodes from the cluster may only be
@@ -452,7 +454,7 @@ Display the version number of scontrol being executed.
 
 .TP
 \fBwait_job\fP \fIjob_id\fP
-Wait until a job andall of its nodes are ready for use or the job has entered
+Wait until a job and all of its nodes are ready for use or the job has entered
 some termination state. This option is particularly useful in the Slurm Prolog
 or in the batch script itself if nodes are powered down and restarted
 automatically as needed.
@@ -474,6 +476,11 @@ Repeat the last command executed.
 Account name to be changed for this job's resource use.
 Value may be cleared with blank data value, "Account=".
 .TP
+\fIBurstBuffer\fP=<spec>
+Burst buffer specification to be changed for this job's resource use.
+Value may be cleared with blank data value, "BurstBuffer=".
+Format is burst buffer plugin specific.
+.TP
 \fIConn\-Type\fP=<type>
 Reset the node connection type. Supported only on IBM BlueGene systems.
 Possible values on are "MESH", "TORUS" and "NAV" (mesh else torus).
@@ -1203,6 +1210,17 @@ and jobs already queued may not be allocated nodes and run.
 See also the "Alternate" partition specification.
 .RE
 
+.TP
+\fBSPECIFICATIONS FOR UPDATE COMMAND, POWERCAP\fR
+
+.TP
+\fIPowerCap\fP=<count>
+Set the amount of watts the cluster is limited to.
+Specify a number, "INFINITE" to enable the power capping logic without
+power restriction or "0" to disable the power capping logic.
+Update slurm.conf with any changes meant to be persistent across normal
+restarts of slurmctld or the execution of \fBscontrol reconfig\fR.
+
 .TP
 \fBSPECIFICATIONS FOR CREATE, UPDATE, AND DELETE COMMANDS, RESERVATIONS\fR
 .TP
@@ -1232,6 +1250,26 @@ If accounts are denied access to a reservation (account name preceded by a '\-')
 then all other accounts are implicitly allowed to use the reservation and it is
 not possible to also explicitly specify allowed accounts.
 
+.TP
+\fIBurstBuffer\fP=<buffer_spec>[,<buffer_spec>,...]
+Specification of burst buffer resources which are to be reserved.
+"buffer_spec" consists of four elements:
+[plugin:][type:]#[units]
+"plugin" is the burst buffer plugin name, currently either "cray" or "generic".
+If no plugin is specified, the reservation applies to all configured burst
+buffer plugins.
+"type" specifies a Cray generic burst buffer resource, for example "nodes".
+If "type" is not specified, the number is a measure of storage space.
+The "units" may be "N" (nodes), "GB" (gigabytes), "TB" (terabytes),
+"PB" (petabytes), etc. with the default units being gigabytes for reservations
+of storage space.
+For example "BurstBuffer=cray:2TB" (reserve 2TB of storage
+from the Cray plugin) or
+"BurstBuffer=100GB" (reserve 100 GB of storage from all configured burst buffer
+plugins).
+Jobs using this reservation are not restricted to these burst buffer resources,
+but may use these reserved resources plus any which are generally available.
+
 .TP
 \fICoreCnt\fP=<num>
 This option is only supported when SelectType=select/cons_res. Identify number of
@@ -1250,6 +1288,8 @@ A new reservation must specify one or more resource to be included: NodeCnt,
 Nodes and/or Licenses.
 If a reservation includes Licenses, but no NodeCnt or Nodes, then the option
 \fIFlags=LICENSE_ONLY\fP must also be specified.
+Jobs using this reservation are not restricted to these licenses, but may
+use these reserved licenses plus any which are generally available.
 
 .TP
 \fINodeCnt\fP=<num>[,num,...]
@@ -1322,6 +1362,14 @@ Flags\-=DAILY (NOTE: this shortcut is not supported for all flags).
 Currently supported flags include:
 .RS
 .TP 14
+\fIANY_NODES\fR
+This is a reservation for burst buffers and/or licenses only and not compute
+nodes.
+If this flag is set, a job using this reservation may use the associated
+burst buffers and/or licenses plus any compute nodes.
+If this flag is not set, a job using this reservation may use only the nodes
+and licenses associated with the reservation.
+.TP
 \fIDAILY\fR
 Repeat the reservation at the same time every day
 .TP
@@ -1334,11 +1382,7 @@ This can be especially useful when reserving all nodes in the system
 for maintenance.
 .TP
 \fILICENSE_ONLY\fR
-This is a reservation for licenses only and not compute nodes.
-If this flag is set, a job using this reservation may use the associated
-licenses and any compute nodes.
-If this flag is not set, a job using this reservation may use only the nodes
-and licenses associated with the reservation.
+See \fIANY_NODES\fR.
 .TP
 \fIMAINT\fR
 Maintenance mode, receives special accounting treatment.
@@ -1354,6 +1398,14 @@ This flag can be used to reserve all nodes within the specified
 partition.  PartitionName and Nodes=ALL must be specified or
 this option is ignored.
 .TP
+\fIREPLACE\fR
+Resources allocated to jobs are automatically replenished using idle resources.
+This option can be used to maintain a constant number of idle resources
+available for pending jobs (subject to availability of idle resources).
+This should be used with the \fINodeCnt\fP reservation option; do not identify
+specific nodes to be included in the reservation.
+This option is not supported on IBM BlueGene systems.
+.TP
 \fISPEC_NODES\fR
 Reservation is for specific nodes (output only)
 .TP
@@ -1436,6 +1488,42 @@ specification is required.
 NOTE: Even on BGQ where node names are given in bg0000[00000] format
 this option takes an ionode name bg0000[0].
 
+.TP
+\fBSPECIFICATIONS FOR UPDATE COMMAND, LAYOUTS \fR
+.TP
+\fILayout\fP=<name>
+Identify the layout to be updated. This specification is required.
+.TP
+\fIEntity\fP=<entity list>
+Identify the entities to be updated. This specification is required.
+.TP
+\fIKey\fP=<value>
+Keys/Values to update for the entities. The format must respect the layout.d
+configuration files. Key=Type cannot be updated. At least one Key/Value is
+required, several can be set.
+
+.TP
+\fBSPECIFICATIONS FOR SHOW COMMAND, LAYOUTS\fR
+.PP
+Without options, lists all configured layouts. With a layout specified,
+shows entities with following options:
+
+.TP
+\fIKey\fP=<value>
+Keys/Values to update for the entities. The format must respect the layout.d
+configuration files. Key=Type cannot be updated. One Key/Value is required,
+several can be set.
+.TP
+\fIEntity\fP=<value>
+Entities to show, default is not used. Can be set to "*".
+.TP
+\fIType\fP=<value>
+Type of entities to show, default is not used.
+.TP
+\fInolayout\fP
+If not used, only entities defining the tree are shown.
+With the option, only leaves are shown.
+
 .TP
 \fBDESCRIPTION FOR SHOW COMMAND, NODES\fR
 .TP
@@ -1534,6 +1622,11 @@ For other years it returns a date month and year without a time (e.g.
 A valid strftime() format can also be specified. For example, a value of
 "%a %T" will report the day of the week and a time stamp (e.g. "Mon 12:34:56").
 
+.TP
+\fBSLURM_TOPO_LEN\fR
+Specify the maximum size of the line when printing Topology. If not set, the
+default value "512" will be used.
+
 .SH "AUTHORIZATION"
 
 When using the Slurm db, users who have AdminLevel's defined (Operator
@@ -1671,7 +1764,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 .br
-Copyright (C) 2010-2014 SchedMD LLC.
+Copyright (C) 2010-2015 SchedMD LLC.
 .LP
 This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
diff --git a/doc/man/man1/sinfo.1 b/doc/man/man1/sinfo.1
index 36697eeb5..59cc2adb4 100644
--- a/doc/man/man1/sinfo.1
+++ b/doc/man/man1/sinfo.1
@@ -72,6 +72,11 @@ indicate eight nodes, "linux00" through "linux07."
 Performance of the command can be measurably improved for systems with large
 numbers of nodes when a single node name is specified.
 
+.TP
+\fB\-\-noconvert\fR
+Don't convert units from their original type (e.g. 2048M won't be converted to
+2G).
+
 .TP
 \fB\-N\fR, \fB\-\-Node\fR
 Print information in a node\-oriented format.
@@ -197,6 +202,9 @@ List of node communication addresses
 \fB%O\fR
 CPU load of a node
 .TP
+\fB%e\fR
+Free memory of a node
+.TP
 \fB%p\fR
 Partition scheduling priority
 .TP
@@ -252,6 +260,163 @@ right justification of the field
 size of field
 .RE
 
+.TP
+\fB\-O <output_format>\fR, \fB\-\-Format=<output_format>\fR
+Specify the information to be displayed.
+Also see the \fB\-o <output_format>\fR, \fB\-\-format=<output_format>\fR
+option described below (which supports greater flexibility in formatting, but
+does not support access to all fields because we ran out of letters).
+Requests a comma separated list of job information to be displayed.
+
+.IP
+The format of each field is "type[:[.]size]"
+.RS
+.TP 8
+\fIsize\fR
+is the minimum field size.
+If no size is specified, 20 characters will be allocated to print the information.
+.TP
+\fI .\fR
+indicates the output should be right justified and size must be specified.
+By default, output is left justified.
+.RE
+
+.IP
+Valid \fItype\fR specifications include:
+.RS
+.TP 6
+\fBall\fR
+Print all fields available in the \-o format for this data type with a
+vertical bar separating each field.
+.TP
+\fBallocmem\fR
+Prints the amount of allocated memory on a node.
+.TP
+\fBallocnodes\fR
+Allowed allocating nodes.
+.TP
+\fBavailable\fR
+State/availability of a partition.
+.TP
+\fBcpus\fR
+Number of CPUs per node.
+.TP
+\fBcpusload\fR
+CPU load of a node.
+.TP
+\fBfreemem\fR
+Free memory of a node.
+.TP
+\fBcpusstate\fR
+Number of CPUs by state in the format
+"allocated/idle/other/total". Do not use this with a node
+state option ("%t" or "%T") or the different node states will
+be placed on separate lines.
+.TP
+\fBcores\fR
+Number of cores per socket.
+.TP
+\fBdefaulttime\fR
+Default time for any job in the format "days\-hours:minutes:seconds".
+\fBdisk\fR
+Size of temporary disk space per node in megabytes.
+.TP
+\fBfeatures\fR
+Features associated with the nodes.
+.TP
+\fBgroups\fR
+Groups which may use the nodes.
+.TP
+\fBgres\fR
+Generic resources (gres) associated with the nodes.
+.TP
+\fBmaxcpuspernode\fR
+The max number of CPUs per node available to jobs in the partition.
+.TP
+\fBmemory\fR
+Size of memory per node in megabytes.
+.TP
+\fBnodes\fR
+Number of nodes.
+.TP
+\fBnodeaddr\fR
+List of node communication addresses.
+.TP
+\fBnodeai\fR
+Number of nodes by state in the format "allocated/idle".
+Do not use this with a node state option ("%t" or "%T") or
+the different node states will be placed on separate lines.
+.TP
+\fBnodeaiot\fR
+Number of nodes by state in the format
+"allocated/idle/other/total".  Do not use this with a node
+state option ("%t" or "%T") or the different node states will
+be placed on separate lines.
+.TP
+\fBnodehost\fR
+List of node hostnames.
+.TP
+\fBnodelist\fR
+List of node names.
+.TP
+\fBpartition\fR
+Partition name followed by "*" for the default partition, also see \fB%R\fR.
+.TP
+\fBpartitionname\fR
+Partition name, also see \fB%P\fR.
+.TP
+\fBpreemptmode\fR
+PreemptionMode.
+.TP
+\fBpriority\fR
+Partition scheduling priority.
+.TP
+\fBreason\fR
+The reason a node is unavailable (down, drained, or draining states).
+.TP
+\fBroot\fR
+Only user root may initiate jobs, "yes" or "no".
+.TP
+\fBshare\fR
+Jobs may share nodes, "yes", "no", or "force".
+.TP
+\fBsize\fR
+Maximum job size in nodes.
+.TP
+\fBstatecompact\fR
+State of nodes, compact form.
+.TP
+\fBstatelong\fR
+State of nodes, extended form.
+.TP
+\fBsockets\fR
+Number of sockets per node.
+.TP
+\fBsocketcorethread\fR
+Extended processor information: number of sockets, cores, threads (S:C:T) per node.
+.TP
+\fBtime\fR
+Maximum time for any job in the format "days\-hours:minutes:seconds".
+.TP
+\fBtimestamp\fR
+Print the timestamp of the reason a node is unavailable.
+.TP
+\fBthreads\fR
+Number of threads per core.
+.TP
+\fBuser\fR
+Print the user name of who set the reason a node is unavailable.
+.TP
+\fBuserlong\fR
+Print the user name and uid of who set the reason a node is unavailable.
+.TP
+\fBversion\fR
+Print the version of the running slurmd daemon.
+.TP
+\fBweight\fR
+Scheduling weight of the nodes.
+.RE
+
 .TP
 \fB\-p <partition>\fR, \fB\-\-partition=<partition>\fR
 Print information only about the specified partition(s). Multiple partitions
diff --git a/doc/man/man1/smap.1 b/doc/man/man1/smap.1
index 9970c48bd..1fab89ef1 100644
--- a/doc/man/man1/smap.1
+++ b/doc/man/man1/smap.1
@@ -493,8 +493,18 @@ Job terminated due to preemption.
 \fBR   RUNNING\fR
 Job currently has an allocation.
 .TP
+\fBSE  SPECIAL_EXIT\fR
+The job was requeued in a special state. This state can be set by
+users, typically in EpilogSlurmctld, if the job has terminated with
+a particular exit value.
+.TP
+\fBST  STOPPED\fR
+Job has an allocation, but execution has been stopped with SIGSTOP signal.
+CPUS have been retained by this job.
+.TP
 \fBS   SUSPENDED\fR
-Job has an allocation, but execution has been suspended.
+Job has an allocation, but execution has been suspended and CPUs have been
+released for other jobs.
 .TP
 \fBTO  TIMEOUT\fR
 Job terminated upon reaching its time limit.
diff --git a/doc/man/man1/sprio.1 b/doc/man/man1/sprio.1
index fe45b37a8..d25e42f71 100644
--- a/doc/man/man1/sprio.1
+++ b/doc/man/man1/sprio.1
@@ -53,10 +53,10 @@ assigned non-zero weights are
 .RS
 .TP 15
 \fIdefault\fR
-"%.15i %.10Y %.10A %.10F %.10J %.10P %.10Q"
+"%.15i %.10Y %.10A %.10F %.10J %.10P %.10Q %20T"
 .TP
 \fI\-l, \-\-long\fR
-"%.15i %.8u %.10Y %.10A %.10F %.10J %.10P %.10Q %.6N"
+"%.15i %.8u %.10Y %.10A %.10F %.10J %.10P %.10Q %.6N %20T"
 .RE
 
 .IP
@@ -113,6 +113,12 @@ Normalized quality of service priority
 \fB%Q\fR
 Weighted quality of service priority
 .TP
+\fB%t\fR
+Normalized TRES priorities
+.TP
+\fB%T\fR
+Weighted TRES priorities
+.TP
 \fB%u\fR
 User name for a job
 .TP
diff --git a/doc/man/man1/squeue.1 b/doc/man/man1/squeue.1
index f7a3be4ad..265921fe0 100644
--- a/doc/man/man1/squeue.1
+++ b/doc/man/man1/squeue.1
@@ -87,6 +87,11 @@ A value of of '\fIall\fR' will query to run on all clusters.
 Request jobs or job steps having one of the specified names.  The
 list consists of a comma separated list of job names.
 
+.TP
+\fB\-\-noconvert\fR
+Don't convert units from their original type (e.g. 2048M won't be converted to
+2G).
+
 .TP
 \fB\-o <output_format>\fR, \fB\-\-format=<output_format>\fR
 Specify the information to be displayed, its size and position
@@ -482,6 +487,10 @@ script.
 Prints the number of boards per node allocated to the job.
 (Valid for jobs only)
 .TP
+\fBburstbuffer\fR
+Burst Buffer specification
+(Valid for jobs only)
+.TP
 \fBchptdir\fR
 Prints the directory where the job checkpoint will be written to.
 (Valid for job steps only)
@@ -775,8 +784,8 @@ Actual or expected start time of the job or job step.
 .TP
 \fBstate\fR
 Job state, extended form:
-PENDING, RUNNING, SUSPENDED, CANCELLED, COMPLETING, COMPLETED, CONFIGURING,
-FAILED, TIMEOUT, PREEMPTED, NODE_FAIL and SPECIAL_EXIT.
+PENDING, RUNNING, STOPPED, SUSPENDED, CANCELLED, COMPLETING, COMPLETED,
+CONFIGURING, FAILED, TIMEOUT, PREEMPTED, NODE_FAIL and SPECIAL_EXIT.
 See the \fBJOB STATE CODES\fR section below for more information.
 (Valid for jobs only)
 .TP
@@ -844,6 +853,9 @@ Clock skew between nodes in the cluster will cause the time to be inaccurate.
 If the time is obviously wrong (e.g. negative), it displays as "INVALID".
 (Valid for jobs and job steps)
 .TP
+\fBtres\fR
+Print the trackable resources allocated to the job.
+.TP
 \fBuserid\fR
 User ID for a job or job step.
 (Valid for jobs and job steps)
@@ -932,7 +944,7 @@ Specify the states of jobs to view.  Accepts a comma separated list of
 state names or "all". If "all" is specified then jobs of all states will be
 reported. If no state is specified then pending, running, and completing
 jobs are reported. Valid states (in both extended and compact form) include:
-PENDING (PD), RUNNING (R), SUSPENDED (S),
+PENDING (PD), RUNNING (R), SUSPENDED (S), STOPPED (ST),
 COMPLETING (CG), COMPLETED (CD), CONFIGURING (CF), CANCELLED (CA),
 FAILED (F), TIMEOUT (TO), PREEMPTED (PR), BOOT_FAIL (BF) , NODE_FAIL (NF)
 and SPECIAL_EXIT (SE).
@@ -1125,16 +1137,21 @@ Job terminated due to preemption.
 \fBR   RUNNING\fR
 Job currently has an allocation.
 .TP
+\fBSE  SPECIAL_EXIT\fR
+The job was requeued in a special state. This state can be set by
+users, typically in EpilogSlurmctld, if the job has terminated with
+a particular exit value.
+.TP
+\fBST  STOPPED\fR
+Job has an allocation, but execution has been stopped with SIGSTOP signal.
+CPUS have been retained by this job.
+.TP
 \fBS   SUSPENDED\fR
-Job has an allocation, but execution has been suspended.
+Job has an allocation, but execution has been suspended and CPUs have been
+released for other jobs.
 .TP
 \fBTO  TIMEOUT\fR
 Job terminated upon reaching its time limit.
-.TP
-\fBSE SPECIAL_EXIT\fR
-The job was requeued in a special state. This state can be set by
-users, typically in EpilogSlurmctld, if the job has terminated with
-a particular exit value.
 
 
 .SH "ENVIRONMENT VARIABLES"
diff --git a/doc/man/man1/sreport.1 b/doc/man/man1/sreport.1
index 021ddc7d4..fcd485284 100644
--- a/doc/man/man1/sreport.1
+++ b/doc/man/man1/sreport.1
@@ -1,4 +1,4 @@
-.TH sreport "1" "Slurm Commands" "April 2015" "Slurm Commands"
+.TH sreport "1" "Slurm Commands" "June 2015" "Slurm Commands"
 
 .SH "NAME"
 sreport \- Generate reports from the slurm accounting data.
@@ -38,6 +38,12 @@ insensitive and may be abbreviated. The default format is Minutes.
 Supported time format options are listed in the \fBtime\fP command
 section below.
 .TP
+\fB\-T,  \-\-tres <tres_names>\fR
+Trackable resource (TRES) to report values for.
+By default CPU resource use is reported.
+Multiple TRES names may be separated using a comma separated list for all
+reports except the job reports, which can only support a single TRES name.
+.TP
 \fB\-v\fR, \fB\-\-verbose\fR
 Print detailed event logging.
 .TP
@@ -130,6 +136,8 @@ Valid report types are:
 
 \fBjob\fP \<REPORT\> \<OPTIONS\>
 
+\fBreservation\fP \<REPORT\> \<OPTIONS\>
+
 \fBuser\fP \<REPORT\> \<OPTIONS\>
 
 .TP
@@ -360,19 +368,19 @@ AccountUtilizationByUser:
 .br
 UserUtilizationByAccount:
 .in 14
-Accounts, Cluster, CPUCount, Login, Proper, Used
+Accounts, Cluster, Login, Proper, TresCount, Used
 
 .in 10
 UserUtilizationByWckey:
 .br
 WCKeyUtilizationByUser:
 .in 14
-Cluster, CPUCount, Login, Proper, Used, Wckey
+Cluster, Login, Proper, TresCount, Used, Wckey
 
 .in 10
 Utilization:
 .in 14
-Allocated, Cluster, CPUCount, Down, Idle, Overcommited, PlannedDown, Reported, Reserved
+Allocated, Cluster, Down, Idle, Overcommited, PlannedDown, Reported, Reserved, TresCount, TresName
 
 .TP
 \fBFORMAT OPTIONS FOR JOB REPORTS\fP
@@ -391,7 +399,7 @@ Wckey, Cluster
 .in 10
 Utilization:
 .in 14
-Allocated, Associations, Cluster, CPUCount, CPUTime, End, Flags, Idle, Name, Nodes, ReservationId, Start, TotalTime
+Allocated, Associations, Cluster, End, Flags, Idle, Name, Nodes, ReservationId, Start, TotalTime, TresCount, TresName, TresTime
 
 .TP
 \fBFORMAT OPTIONS FOR USER REPORTS\fP
@@ -408,7 +416,10 @@ All commands and options are case-insensitive.
 Some \fBsreport\fR options may be set via environment variables. These
 environment variables, along with their corresponding options, are listed below.
 (Note: commandline options will always override these settings)
-.TP 20
+.TP 14
+\fBSREPORT_TRES\fR
+Same as \fB\-T, \-\-tres\fR
+.TP
 \fBSLURM_CONF\fR
 The location of the Slurm configuration file.
 
@@ -424,7 +435,7 @@ The location of the Slurm configuration file.
 \fBsreport job sizesbyaccount All_Clusters users=gore1 account=environ PrintJobCount\fP
 Report number of jobs by user gore1 within the environ account
 .TP
-\fBsreport cluster AccountUtilizationByUser cluster=zeus user=gore1 start=2/23/08 end=2/24/09 format=Accounts,Cluster,CPUCount,Login,Proper,Used\fP
+\fBsreport cluster AccountUtilizationByUser cluster=zeus user=gore1 start=2/23/08 end=2/24/09 format=Accounts,Cluster,TresCount,Login,Proper,Used\fP
 Report cluster account utilization with the specified fields during
 the specified 24 hour day of February 23, 2009, by user gore1
 .TP
@@ -439,7 +450,7 @@ Report top usage in percent of the lc account during the specified week
 Copyright (C) 2009\-2010 Lawrence Livermore National Security.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
-Copyright (C) 2010\-2013 SchedMD LLC.
+Copyright (C) 2010\-2015 SchedMD LLC.
 .LP
 This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
diff --git a/doc/man/man1/srun.1 b/doc/man/man1/srun.1
index e2ee9775d..4e3abd683 100644
--- a/doc/man/man1/srun.1
+++ b/doc/man/man1/srun.1
@@ -18,6 +18,25 @@ http://slurm.schedmd.com/cpu_management.html
 .SH "OPTIONS"
 .LP
 
+.TP
+\fB\-\-accel\-bind\fR=<\fIoptions\fR>
+Control how tasks are bound to generic resources of type gpu, mic and nic.
+Multiple options may be specified. Supported options include:
+.RS
+.TP
+\fBg\fR
+Bind each task to GPUs which are closest to the allocated CPUs.
+.TP
+\fBm\fR
+Bind each task to MICs which are closest to the allocated CPUs.
+.TP
+\fBn\fR
+Bind each task to NICs which are closest to the allocated CPUs.
+.TP
+\fBv\fR
+Verbose mode. Log how tasks are bound to GPU and NIC devices.
+.RE
+
 .TP
 \fB\-A\fR, \fB\-\-account\fR=<\fIaccount\fR>
 Charge resources used by this job to specified account.
@@ -109,6 +128,17 @@ This option is not supported on BlueGene systems (select/bluegene plugin
 is configured).
 If not specified, the scontrol show job will display 'ReqS:C:T=*:*:*'.
 
+.TP
+\fB\-\-bb\fR=<\fIspec\fR>
+Burst buffer specification. The form of the specification is system dependent.
+Also see \fB\-\-bbf\fR.
+
+.TP
+\fB\-\-bbf\fR=<\fIfile_name\fR>
+Path of file containing burst buffer specification.
+The form of the specification is system dependent.
+Also see \fB\-\-bb\fR.
+
 .TP
 \fB\-\-begin\fR=<\fItime\fR>
 Defer initiation of this job until the specified time.
@@ -220,6 +250,7 @@ For example: "\fB\-\-constraint=[rack1*2&rack2*4]"\fR might
 be used to specify that two nodes must be allocated from nodes with the feature
 of "rack1" and four nodes must be allocated from nodes with the feature
 "rack2".
+.TP
 .RE
 
 \fBWARNING\fR: When srun is executed from within salloc or sbatch,
@@ -294,7 +325,7 @@ allocation with a number of
 sockets, cores, or threads equal to the number of tasks times cpus\-per\-task,
 then the tasks will by default be bound to the appropriate resources (auto
 binding). Disable this mode of operation by explicitly setting
-"-\-cpu_bind=none". Use TaskPluginParam=autobind=threads to set
+"-\-cpu_bind=none". Use TaskPluginParam=autobind=[threads|cores|sockets] to set
 a default cpu binding in case "auto binding" doesn't find a match.
 .RE
 .RS
@@ -388,14 +419,42 @@ Show help message for cpu_bind
 .RE
 
 .TP
-\fB\-\-cpu\-freq\fR =<\fIrequested frequency in kilohertz\fR>
+\fB\-\-cpu\-freq\fR =<\fIp1\fR[\-\fIp2\fR[:\fIp3\fR]]>
 
-Request that the job step initiated by this srun command be run at the
+Request that the job step initiated by this srun command be run at some
 requested frequency if possible, on the CPUs selected for the step on
 the compute node(s).
+
+\fBp1\fR can be  [#### | low | medium | high | highm1] which will set the
+frequency scaling_speed to the corresponding value, and set the frequency
+scaling_governor to UserSpace. See below for definition of the values.
+
+\fBp1\fR can be [Conservative | OnDemand | Performance | PowerSave] which
+will set the scaling_governor to the corresponding value. The governor has to be
+in the list set by the slurm.conf option CpuFreqGovernors.
+
+When \fBp2\fR is present, p1 will be the minimum scaling frequency and
+p2 will be the maximum scaling frequency.
+
+\fBp2\fR can be  [#### | medium | high | highm1] p2 must be greater than p1.
+
+\fBp3\fR can be [Conservative | OnDemand | Performance | PowerSave | UserSpace]
+which will set the governor to the corresponding value.
+
+If \fBp3\fR is UserSpace, the frequency scaling_speed will be set by a power
+or energy aware scheduling strategy to a value between p1 and p2 that lets the
+job run within the site's power goal. The job may be delayed if p1 is higher
+than a frequency that allows the job to run within the goal.
+
+If the current frequency is < min, it will be set to min. Likewise,
+if the current frequency is > max, it will be set to max.
+
 Acceptable values at present include:
 .RS
 .TP 14
+\fB####\fR
+frequency in kilohertz
+.TP
 \fBLow\fR
 the lowest available frequency
 .TP
@@ -419,6 +478,10 @@ attempts to use the Performance CPU governor
 .TP
 \fBPowerSave\fR
 attempts to use the PowerSave CPU governor
+.TP
+\fBUserSpace\fR
+attempts to use the UserSpace CPU governor
+.TP
 .RE
 
 The following informational environment variable is set in the job
@@ -430,7 +493,9 @@ step when \fB\-\-cpu\-freq\fR option is requested.
 This environment variable can also be used to supply the value for the
 CPU frequency request if it is set when the 'srun' command is issued.
 The \fB\-\-cpu\-freq\fR on the command line will override the
-environment variable value.  See the \fBENVIRONMENT VARIABLES\fR
+environment variable value.  The form of the environment variable is
+the same as the command line.
+See the \fBENVIRONMENT VARIABLES\fR
 section for a description of the SLURM_CPU_FREQ_REQ variable.
 
 \fBNOTE\fR: This parameter is treated as a request, not a requirement.
@@ -453,6 +518,8 @@ with linuxproc as the ProctrackType can cause jobs to run too quickly before
 Accounting is able to poll for job information. As a result not all of
 accounting information will be present.
 
+.RE
+
 .TP
 \fB\-c\fR, \fB\-\-cpus\-per\-task\fR=<\fIncpus\fR>
 Request that \fIncpus\fR be allocated \fBper process\fR. This may be
@@ -483,9 +550,13 @@ allocations when \-c has a value greater than \-c on salloc or sbatch.
 .TP
 \fB\-d\fR, \fB\-\-dependency\fR=<\fIdependency_list\fR>
 Defer the start of this job until the specified dependencies have been
-satisfied completed.
+satisfied. This option does not apply to job steps (executions of
+srun within an existing salloc or sbatch allocation) only to job allocations.
 <\fIdependency_list\fR> is of the form
-<\fItype:job_id[:job_id][,type:job_id[:job_id]]\fR>.
+<\fItype:job_id[:job_id][,type:job_id[:job_id]]\fR> or
+<\fItype:job_id[:job_id][?type:job_id[:job_id]]\fR>.
+All dependencies must be satisfied if the "," separator is used.
+Any dependency may be satisfied if the "?" separator is used.
 Many jobs can share the same dependency and these jobs may even belong to
 different  users. The  value may be changed after job submission using the
 scontrol command.
@@ -551,11 +622,11 @@ the Epilog parameter in slurm.conf.
 
 
 .TP
-\fB\-\-exclusive\fR
+\fB\-\-exclusive[=user]\fR
 This option has two slightly different meanings for job and job step
 allocations.
 When used to initiate a job, the job allocation cannot share nodes with
-other running jobs.
+other running jobs  (or just other users with the "=user" option).
 The default shared/exclusive behavior depends on system configuration and the
 partition's \fBShared\fR option takes precedence over the job's option.
 
@@ -743,23 +814,34 @@ Multiple license names should be comma separated (e.g.
 
 .TP
 \fB\-m\fR, \fB\-\-distribution\fR=
-\fIarbitrary\fR|<\fIblock\fR|\fIcyclic\fR|\fIplane=<options>\fR[:\fIblock\fR|\fIcyclic\fR|\fIfcyclic\fR]>
+\fI*\fR|\fIblock\fR|\fIcyclic\fR|\fIarbitrary\fR|\fIplane=<options>
+\fR[:\fI*\fR|\fIblock\fR|\fIcyclic\fR|\fIfcyclic\fR[:\fI*\fR|\fIblock\fR|
+\fIcyclic\fR|\fIfcyclic\fR]][,\fIPack\fR|\fINoPack\fR]
 
 Specify alternate distribution methods for remote processes.
-This option controls the assignment of tasks to the nodes on which
+This option controls the distribution of tasks to the nodes on which
 resources have been allocated, and the distribution of those resources
 to tasks for binding (task affinity). The first distribution
-method (before the ":") controls the distribution of resources across
-nodes. The optional second distribution method (after the ":")
-controls the distribution of resources across sockets within a node.
-
-Note that with select/cons_res, the number of cpus allocated on each
+method (before the first ":") controls the distribution of tasks to nodes. 
+The second distribution method (after the first ":")
+controls the distribution of allocated CPUs across sockets for binding
+to tasks. The third distribution method (after the second ":") controls
+the distribution of allocated CPUs across cores for binding to tasks.
+The second and third distributions apply only if task affinity is enabled.
+The third distribution is supported only if the task/cgroup plugin is
+configured. The default value for each distribution type is specified by *.
+
+Note that with select/cons_res, the number of CPUs allocated on each
 socket and node may be different. Refer to
 http://slurm.schedmd.com/mc_support.html
-for more information on resource allocation, assignment of tasks to
+for more information on resource allocation, distribution of tasks to
 nodes, and binding of tasks to CPUs.
 .RS
-First distribution method:
+First distribution method (distribution of tasks across nodes):
+
+.TP
+.B *
+Use the default method for distributing tasks to nodes (block).
 .TP
 .B block
 The block distribution method will distribute tasks to a node such
@@ -815,33 +897,75 @@ CPUs on those nodes. This option is meant primarily to control a job step's
 task layout in an existing job allocation for the srun command.
 
 .TP
-Second distribution method:
+Second distribution method (distribution of CPUs across sockets for binding):
+
+.TP
+.B *
+Use the default method for distributing CPUs across sockets (cyclic).
 .TP
 .B block
-The block distribution method will distribute tasks to sockets such
-that consecutive tasks share a socket.
+The block distribution method will distribute allocated CPUs 
+consecutively from the same socket for binding to tasks, before using
+the next consecutive socket.
 .TP
 .B cyclic
-The cyclic distribution method will distribute tasks to sockets such
-that consecutive tasks are distributed over consecutive sockets (in a
-round\-robin fashion).
-Tasks requiring more than one CPU will have all of those CPUs allocated on a
-single socket if possible.
+The cyclic distribution method will distribute allocated CPUs for
+binding to a given task consecutively from the same socket, and
+from the next consecutive socket for the next task, in a 
+round\-robin fashion across sockets. 
 .TP
 .B fcyclic
-The fcyclic distribution method will distribute tasks to sockets such
-that consecutive tasks are distributed over consecutive sockets (in a
-round\-robin fashion).
-Tasks requiring more than one CPU will have each CPUs allocated in a cyclic
-fashion across sockets.
+The fcyclic distribution method will distribute allocated CPUs 
+for binding to tasks from consecutive sockets in a
+round\-robin fashion across the sockets.
+
+.TP
+Third distribution method (distribution of CPUs across cores for binding):
+
+.TP
+.B *
+Use the default method for distributing CPUs across cores
+(inherited from second distribution method).
+.TP
+.B block
+The block distribution method will distribute allocated CPUs 
+consecutively from the same core for binding to tasks, before using
+the next consecutive core.
+.TP
+.B cyclic
+The cyclic distribution method will distribute allocated CPUs for
+binding to a given task consecutively from the same core, and
+from the next consecutive core for the next task, in a 
+round\-robin fashion across cores. 
+.TP
+.B fcyclic
+The fcyclic distribution method will distribute allocated CPUs 
+for binding to tasks from consecutive cores in a
+round\-robin fashion across the cores.
+
+
+.TP
+Optional control for task distribution over nodes:
+
+.TP
+.B Pack
+Rather than distributing a job step's tasks evenly across its allocated
+nodes, pack them as tightly as possible on the nodes.
+.TP
+.B NoPack
+Rather than packing a job step's tasks as tightly as possible on the nodes,
+distribute them evenly.
+This user option will supersede the SelectTypeParameters CR_Pack_Nodes
+configuration parameter.
 .RE
 
 .TP
 \fB\-\-mail\-type\fR=<\fItype\fR>
 Notify user by email when certain event types occur.
 Valid \fItype\fR values are BEGIN, END, FAIL, REQUEUE, ALL (equivalent to
-BEGIN, END, FAIL and REQUEUE), TIME_LIMIT, TIME_LIMIT_90 (reached 90 percent of
-time limit), TIME_LIMIT_80 (reached 80 percent of time limit), and TIME_LIMIT_50
+BEGIN, END, FAIL, REQUEUE, and STAGE_OUT), STAGE_OUT (burst buffer stage out
+completed), TIME_LIMIT, TIME_LIMIT_90 (reached 90 percent of time limit),
+TIME_LIMIT_80 (reached 80 percent of time limit), and TIME_LIMIT_50
 (reached 50 percent of time limit).
 Multiple \fItype\fR values may be specified in a comma separated list.
 The user to be notified is indicated with \fB\-\-mail\-user\fR.
@@ -1115,7 +1239,7 @@ order of preferences is IPONLY (which is not considered in User Space mode),
 HFI, IB, HPCE, and KMUX.
 .TP
 \fBCAU\fR=<\fIcount\fR>
-Number of Collecitve Acceleration Units (CAU) required.
+Number of Collective Acceleration Units (CAU) required.
 Applies only to IBM Power7-IH processors.
 Default value is zero.
 Independent CAU will be allocated for each programming interface (MPI, LAPI, etc.)
@@ -1300,6 +1424,13 @@ name ordering (although higher priority partitions will be considered first).
 When the job is initiated, the name of the partition used will be placed first
 in the job record partition string.
 
+.TP
+\fB\-\-power\fR=<\fIflags\fR>
+Comma separated list of power management plugin options.
+Currently available flags include:
+level (all nodes allocated to the job should have identical power caps,
+may be disabled by the Slurm configuration option PowerParameters=job_no_level).
+
 .TP
 \fB\-\-priority\fR=<value>
 Request a specific job priority.
@@ -1444,7 +1575,11 @@ silently ignored.
 
 .TP
 \fB\-\-resv\-ports\fR
-Reserve communication ports for this job.
+Reserve communication ports for this job. Users can specify the number
+of ports they want to reserve. The parameter MpiParams=ports=12000-12999
+must be specified in \fIslurm.conf\fR. If not specified, the default is to
+reserve a number of ports equal to the number of tasks. If the number of
+reserved ports is zero, no ports are reserved.
 Used for OpenMPI.
 
 .TP
@@ -1477,6 +1612,12 @@ Default value is dependent upon the node's configured CoreSpecCount value.
 If a value of zero is designated and the Slurm configuration option
 AllowSpecResourcesUsage is enabled, the job will be allowed to override
 CoreSpecCount and use the specialized resources on nodes it is allocated.
+This option can not be used with the \fB\-\-thread\-spec\fR option.
+
+.TP
+\fB\-\-sicp\fR
+Identify a job as one which jobs submitted to other clusters can be dependent
+upon.
 
 .TP
 \fB\-\-signal\fR=<\fIsig_num\fR>[@<\fIsig_time\fR>]
@@ -1597,6 +1738,13 @@ EXCEPTION: On Bluegene/Q systems on when running within an existing job
 allocation, this disables the use of "runjob" to launch tasks. The program
 will be executed directly by the slurmd daemon.
 
+.TP
+\fB\-\-thread\-spec\fR=<\fInum\fR>
+Count of specialized threads per node reserved by the job for system operations
+and not used by the application. The application will not use these threads,
+but will be charged for their allocation.
+This option can not be used with the \fB\-\-core\-spec\fR option.
+
 .TP
 \fB\-\-threads\-per\-core\fR=<\fIthreads\fR>
 Restrict node selection to nodes with at least the specified number of
@@ -1978,6 +2126,9 @@ Same as \fB\-\-acctg\-freq\fR
 \fBSLURM_BLRTS_IMAGE\fR
 Same as \fB\-\-blrts\-image\fR
 .TP
+\fBSLURM_BURST_BUFFER\fR
+Same as \fB\-\-bb\fR
+.TP
 \fBSLURM_CHECKPOINT\fR
 Same as \fB\-\-checkpoint\fR
 .TP
@@ -2127,6 +2278,9 @@ keys so PMI can skip the check for duplicate keys.
 This is the case for MPICH2 and reduces overhead in testing for duplicates
 for improved performance
 .TP
+\fBSLURM_POWER\fR
+Same as \fB\-\-power\fR
+.TP
 \fBSLURM_PROFILE\fR
 Same as \fB\-\-profile\fR
 .TP
@@ -2156,6 +2310,9 @@ Same as \fB\-\-restart\-dir\fR
 \fBSLURM_RESV_PORTS\fR
 Same as \fB\-\-resv\-ports\fR
 .TP
+\fBSLURM_SICP\fR
+Same as \fB\-\-sicp\fR
+.TP
 \fBSLURM_SIGNAL\fR
 Same as \fB\-\-signal\fR
 .TP
@@ -2186,6 +2343,13 @@ Same as \fB\-\-task\-epilog\fR
 \fBSLURM_TASK_PROLOG\fR
 Same as \fB\-\-task\-prolog
 .TP
+\fBSLURM_TEST_EXEC\fR
+if defined, then verify existence of the executable program on the local
+computer before attempting to launch it on compute nodes.
+.TP
+\fBSLURM_THREAD_SPEC\fR
+Same as \fB\-\-thread\-spec\fR
+.TP
 \fBSLURM_THREADS\fR
 Same as \fB\-T, \-\-threads\fR
 .TP
@@ -2643,7 +2807,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 .br
-Copyright (C) 2010\-2013 SchedMD LLC.
+Copyright (C) 2010\-2015 SchedMD LLC.
 .LP
 This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
diff --git a/doc/man/man1/sshare.1 b/doc/man/man1/sshare.1
index 51c7bca09..dcd35297d 100644
--- a/doc/man/man1/sshare.1
+++ b/doc/man/man1/sshare.1
@@ -14,8 +14,8 @@ being provided by \fBslurmdbd\fR (Slurm Database daemon) which is
 read in from the slurmctld and used to process the shares available
 to a given association.  sshare provides Slurm share information of
 Account, User, Raw Shares, Normalized Shares, Raw Usage, Normalized
-Usage, Effective Usage, the Fair-share factor, the GrpCPUMins limit
-and accumulated currently running CPU-minutes for each association.
+Usage, Effective Usage, the Fair-share factor, the GrpCPUMins limit,
+Partitions and accumulated currently running CPU-minutes for each association.
 
 
 .SH "OPTIONS"
@@ -40,6 +40,11 @@ Long listing - includes the normalized usage information.
 \fB\-M\fR, \fB\-\-clusters\fR=<\fIstring\fR>
 Clusters to issue commands to.
 
+.TP
+\fB\-m\fR, \fB\-\-partition\fR
+If there are association based partitions in the system,
+print their names.
+
 .TP
 \fB\-p\fR, \fB\-\-parsable\fR
 Output will be '|' delimited with a '|' at the end.
diff --git a/doc/man/man1/sstat.1 b/doc/man/man1/sstat.1
index 9d69ade14..bfd144524 100644
--- a/doc/man/man1/sstat.1
+++ b/doc/man/man1/sstat.1
@@ -72,6 +72,11 @@ NOTE: A step id of 'batch' will display the information about the batch step.
 No heading will be added to the output. The default action is to
 display a header.
 
+.TP
+\f3\-\-noconvert\fP
+Don't convert units from their original type (e.g. 2048M won't be converted to
+2G).
+
 .TP
 \f3\-o\fP\f3,\fP \f3\-\-format\fP,\fP \f3\-\-fields\fP
 Comma separated list of fields.
diff --git a/doc/man/man3/Makefile.am b/doc/man/man3/Makefile.am
index 992bb4a39..4a7e77771 100644
--- a/doc/man/man3/Makefile.am
+++ b/doc/man/man3/Makefile.am
@@ -63,6 +63,7 @@ man3_MANS = slurm_hostlist_create.3 \
 	slurm_job_step_layout_get.3 \
 	slurm_job_step_layout_free.3 \
 	slurm_job_will_run.3 \
+	slurm_job_will_run2.3 \
 	slurm_jobinfo_ctx_get.3 \
 	slurm_kill_job.3 \
 	slurm_kill_job_step.3 \
diff --git a/doc/man/man3/Makefile.in b/doc/man/man3/Makefile.in
index 1bb7b71ac..597470b14 100644
--- a/doc/man/man3/Makefile.in
+++ b/doc/man/man3/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -220,6 +223,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -269,8 +274,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -289,6 +298,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -332,6 +344,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -355,6 +368,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -481,6 +495,7 @@ man3_MANS = slurm_hostlist_create.3 \
 	slurm_job_step_layout_get.3 \
 	slurm_job_step_layout_free.3 \
 	slurm_job_will_run.3 \
+	slurm_job_will_run2.3 \
 	slurm_jobinfo_ctx_get.3 \
 	slurm_kill_job.3 \
 	slurm_kill_job_step.3 \
diff --git a/doc/man/man3/slurm_allocate_resources.3 b/doc/man/man3/slurm_allocate_resources.3
index 264f81311..36262e535 100644
--- a/doc/man/man3/slurm_allocate_resources.3
+++ b/doc/man/man3/slurm_allocate_resources.3
@@ -6,7 +6,8 @@ slurm_allocation_msg_thr_create, slurm_allocation_msg_thr_destroy,
 slurm_allocation_lookup, slurm_allocation_lookup_lite,
 slurm_confirm_allocation,
 slurm_free_submit_response_response_msg, slurm_init_job_desc_msg,
-slurm_job_will_run, slurm_read_hostfile, slurm_submit_batch_job
+slurm_job_will_run, slurm_job_will_run2,
+slurm_read_hostfile, slurm_submit_batch_job
 \- Slurm job initiation functions
 .SH "SYNTAX"
 .LP
@@ -85,8 +86,16 @@ void \fBslurm_init_job_desc_msg\fR (
 );
 .LP
 int \fBslurm_job_will_run\fR (
+.br
+	job_desc_msg_t *\fIjob_desc_msg_ptr\fP
+.br
+);
+.LP
+int \fBslurm_job_will_run2\fR (
 .br
 	job_desc_msg_t *\fIjob_desc_msg_ptr\fP,
+.br
+	will_run_response_msg_t **\fIwill_run_resp\fP
 .br
 );
 .LP
@@ -129,7 +138,8 @@ Specifies the pointer to the structure to be created and filled in by the functi
 \fIslurm_allocate_resources\fP,
 \fIslurm_allocate_resources_blocking\fP,
 \fIslurm_allocation_lookup\fP, \fIslurm_allocation_lookup_lite\fP,
-\fIslurm_confirm_allocation\fP or \fIslurm_job_will_run\fP.
+\fIslurm_confirm_allocation\fP, \fIslurm_job_will_run\fP or
+\fIslurm_job_will_run2\fP.
 .TP
 \fIslurm_alloc_msg_thr_ptr\fP
 Specifies the pointer to the structure created and returned by the
@@ -143,6 +153,9 @@ data structure's contents.
 .TP
 \fIslurm_submit_msg_ptr\fP
 Specifies the pointer to the structure to be created and filled in by the function \fIslurm_submit_batch_job\fP.
+.TP
+\fIwill_run_resp\fP
+Specifies when and where the specified job descriptor could be started.
 .SH "DESCRIPTION"
 .LP
 \fBslurm_allocate_resources\fR Request a resource allocation for a job. If
@@ -197,6 +210,8 @@ Execute this function before issuing a request to submit or modify a job.
 .LP
 \fBslurm_job_will_run\fR Determine if the supplied job description could be executed immediately.
 .LP
+\fBslurm_job_will_run2\fR Determine when and where the supplied job description can be executed.
+.LP
 \fBslurm_read_hostfile\fR Read a Slurm hostfile specified by
 "filename".  "filename" must contain a list of Slurm NodeNames, one
 per line.  Reads up to "n" number of hostnames from the file. Returns
@@ -421,6 +436,7 @@ which must be linked to your process for use
 (e.g. "cc \-lslurm myprog.c").
 
 .SH "COPYING"
+Copyright (C) 2010\-2014 SchedMD LLC.
 Copyright (C) 2002\-2006 The Regents of the University of California.
 Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 CODE\-OCEC\-09\-009. All rights reserved.
diff --git a/doc/man/man3/slurm_free_job_info_msg.3 b/doc/man/man3/slurm_free_job_info_msg.3
index c6ab172bf..384181aef 100644
--- a/doc/man/man3/slurm_free_job_info_msg.3
+++ b/doc/man/man3/slurm_free_job_info_msg.3
@@ -3,6 +3,8 @@
 .SH "NAME"
 slurm_free_job_alloc_info_response_msg, slurm_free_job_info_msg,
 slurm_get_end_time, slurm_get_rem_time, slurm_get_select_jobinfo,
+slurm_job_cpus_allocated_on_node, slurm_job_cpus_allocated_on_node_id,
+slurm_job_cpus_allocated_str_on_node, slurm_job_cpus_allocated_str_on_node_id,
 slurm_load_jobs, slurm_load_job_user, slurm_pid2jobid,
 slurm_print_job_info, slurm_print_job_info_msg
 \- Slurm job information reporting functions
@@ -136,6 +138,30 @@ int \fBslurm_job_cpus_allocated_on_node\fR (
 	const char *\fInode_name\fP
 .br
 );
+.LP
+int \fBslurm_job_cpus_allocated_str_on_node_id\fR (
+.br
+	char *\fIcpus\fP,
+.br
+	size_t \fIcpus_len\fP,
+.br
+	job_resources_t *\fIjob_resrcs_ptr\fP,
+.br
+	int \fInode_id\fP
+.br
+);
+.LP
+int \fBslurm_job_cpus_allocated_str_on_node\fR (
+.br
+	char *\fIcpus\fP,
+.br
+	size_t \fIcpus_len\fP,
+.br
+	job_resources_t *\fIjob_resrcs_ptr\fP,
+.br
+	const char *\fInode_name\fP
+.br
+);
 
 .SH "FORTRAN EXTENSION"
 .LP
@@ -153,6 +179,13 @@ reaches the end of it's allocated time.
 
 .SH "ARGUMENTS"
 .TP
+\fIcpus\fP
+Specifies a pointer to allocated memory into which the string representing the
+list of allocated CPUs on the node is placed.
+.TP
+\fIcpus_len\fP
+The size in bytes of the allocated memory space pointed to by \fIcpus\fP.
+.TP
 \fIdata_type\fP
 Identifies the type of data to retrieve \fIjobinfo\fP. Note that different types of
 data are associated with different computer types and different configurations.
@@ -255,9 +288,13 @@ expected termination time of a specified Slurm job id. The time corresponds
 to the exhaustion of the job\'s or partition\'s time limit. NOTE: The data is
 cached locally and only retrieved from the Slurm controller once per minute.
 .LP
-\fBslurm_job_cpus_allocated_on_node\fR and 
-\fBslurm_job_cpus_allocated_on_node_id\fR return the number of CPUs allocated
-to a job on a specific node allocated to a job.
+\fBslurm_job_cpus_allocated_on_node\fR and
+\fBslurm_job_cpus_allocated_on_node_id\fR
+return the number of CPUs allocated to a job on a specific node allocated to a job.
+.LP
+\fBslurm_job_cpus_allocated_str_on_node\fR and
+\fBslurm_job_cpus_allocated_str_on_node_id\fR return a string representing the
+list of CPUs allocated to a job on a specific node allocated to a job.
 .LP
 \fBslurm_load_job\fR Returns a job_info_msg_t that contains an update time,
 record count, and array of job_table records for some specific job ID.
@@ -272,7 +309,7 @@ with a specific user ID.
 \fBslurm_load_job_user\fR issues RPC to get slurm information about all jobs to
 be run as the specified user.
 .LP
-\fBslurm_notify_job\fR Sends the specified message to standard output of 
+\fBslurm_notify_job\fR Sends the specified message to standard output of
 the specified job ID.
 .LP
 \fBslurm_pid2jobid\fR Returns a Slurm job id corresponding to the supplied
diff --git a/doc/man/man3/slurm_job_cpus_allocated_str_on_node.3 b/doc/man/man3/slurm_job_cpus_allocated_str_on_node.3
new file mode 100644
index 000000000..836ffa79b
--- /dev/null
+++ b/doc/man/man3/slurm_job_cpus_allocated_str_on_node.3
@@ -0,0 +1 @@
+.so man3/slurm_free_job_info_msg.3
diff --git a/doc/man/man3/slurm_job_cpus_allocated_str_on_node_id.3 b/doc/man/man3/slurm_job_cpus_allocated_str_on_node_id.3
new file mode 100644
index 000000000..836ffa79b
--- /dev/null
+++ b/doc/man/man3/slurm_job_cpus_allocated_str_on_node_id.3
@@ -0,0 +1 @@
+.so man3/slurm_free_job_info_msg.3
diff --git a/doc/man/man3/slurm_job_will_run2.3 b/doc/man/man3/slurm_job_will_run2.3
new file mode 100644
index 000000000..6534eeb96
--- /dev/null
+++ b/doc/man/man3/slurm_job_will_run2.3
@@ -0,0 +1 @@
+.so man3/slurm_allocate_resources.3
diff --git a/doc/man/man5/Makefile.am b/doc/man/man5/Makefile.am
index 414875b65..5a2200b6c 100644
--- a/doc/man/man5/Makefile.am
+++ b/doc/man/man5/Makefile.am
@@ -3,6 +3,7 @@ htmldir = ${datadir}/doc/${PACKAGE}-${SLURM_VERSION_STRING}/html
 man5_MANS = \
 	acct_gather.conf.5 \
 	bluegene.conf.5 \
+	burst_buffer.conf.5 \
 	cgroup.conf.5 \
 	cray.conf.5 \
 	ext_sensors.conf.5 \
@@ -20,6 +21,7 @@ if HAVE_MAN2HTML
 html_DATA = \
 	acct_gather.conf.html \
 	bluegene.conf.html \
+	burst_buffer.conf.html \
 	cgroup.conf.html \
 	cray.conf.html \
 	ext_sensors.conf.html \
diff --git a/doc/man/man5/Makefile.in b/doc/man/man5/Makefile.in
index aafda3e01..6219f2397 100644
--- a/doc/man/man5/Makefile.in
+++ b/doc/man/man5/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -223,6 +226,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -272,8 +277,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -292,6 +301,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -335,6 +347,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -358,6 +371,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -422,6 +436,7 @@ top_srcdir = @top_srcdir@
 man5_MANS = \
 	acct_gather.conf.5 \
 	bluegene.conf.5 \
+	burst_buffer.conf.5 \
 	cgroup.conf.5 \
 	cray.conf.5 \
 	ext_sensors.conf.5 \
@@ -436,6 +451,7 @@ EXTRA_DIST = $(man5_MANS) $(am__append_1)
 @HAVE_MAN2HTML_TRUE@html_DATA = \
 @HAVE_MAN2HTML_TRUE@	acct_gather.conf.html \
 @HAVE_MAN2HTML_TRUE@	bluegene.conf.html \
+@HAVE_MAN2HTML_TRUE@	burst_buffer.conf.html \
 @HAVE_MAN2HTML_TRUE@	cgroup.conf.html \
 @HAVE_MAN2HTML_TRUE@	cray.conf.html \
 @HAVE_MAN2HTML_TRUE@	ext_sensors.conf.html \
diff --git a/doc/man/man5/acct_gather.conf.5 b/doc/man/man5/acct_gather.conf.5
index 1da92cf20..71fe8c584 100644
--- a/doc/man/man5/acct_gather.conf.5
+++ b/doc/man/man5/acct_gather.conf.5
@@ -50,12 +50,26 @@ accumulated, only the first and last adjustments are used to calculated the
 consumption. The default is "no".
 
 .TP
-\fBEnergyIPMIPowerSensor\fR=<number>
-This parameter is optional.  If the parameter is included, the plugin searches
-the node for a "watt" sensor with the number specified by <number>. If a
-matching sensor is found, that sensor is used for power data. If no matching
-sensor is found, a value of zero is returned for power data. If the parameter
-is omitted, the plugin will use the first "watt" sensor it finds.
+\fBEnergyIPMIPowerSensors\fR=<key=values>\fR
+Optionally specify the ids of the sensors to be used.
+Multiple <key=values> can be set with ";" separators.
+The key "Node" is mandatory and is used to know the consumed energy for nodes
+(scontrol show node) and jobs (sacct).
+Other keys are optional and are named by administrator.
+These keys are useful only when profile is activated for energy to store power
+(in watt) of each key.
+<values> are integers, multiple values can be set with "," separators.
+The sum of the listed sensors is used for each key.
+EnergyIPMIPowerSensors is optional, default value is "Node=number" where
+"number" is the id of the first power sensor returned by ipmi-sensors.
+.br
+i.e.
+.br
+EnergyIPMIPowerSensors=Node=16,19,23,26;Socket0=16,23;Socket1=19,26;SSUP=23,26;KNC=16,19
+.br
+EnergyIPMIPowerSensors=Node=29,32;SSUP0=29;SSUP1=32
+.br
+EnergyIPMIPowerSensors=Node=1280
 
 .LP
 The following acct_gather.conf parameters are defined to control the
diff --git a/doc/man/man5/burst_buffer.conf.5 b/doc/man/man5/burst_buffer.conf.5
new file mode 100644
index 000000000..75bf2a5ab
--- /dev/null
+++ b/doc/man/man5/burst_buffer.conf.5
@@ -0,0 +1,216 @@
+.TH "burst_buffer.conf" "5" "August 2015" "burst_buffer.conf 15.08" "Slurm configuration file"
+.SH "NAME"
+burst_buffer.conf \- Slurm configuration file for burst buffer management.
+
+.SH "DESCRIPTION"
+\fBburst_buffer.conf\fP is an ASCII file which describes the configuration
+of burst buffer resource management.
+This file is only required on the head node(s), where the slurmctld daemon
+executes.
+The file location can be modified at system build time using the
+DEFAULT_SLURM_CONF parameter or at execution time by setting the SLURM_CONF
+environment variable.
+The file will always be located in the same directory as the \fBslurm.conf\fP
+file.
+In order to support multiple configuration files for multiple burst buffer
+plugins, the configuration file may alternately be given a name containing
+the plugin name.
+For example, if "burst_buffer.conf" is not found, the burst_buffer/generic
+configuration could be read from a file named "burst_buffer_generic.conf".
+.LP
+Parameter names are case insensitive.
+Any text following a "#" in the configuration file is treated
+as a comment through the end of that line.
+Changes to the configuration file take effect upon restart of
+Slurm daemons, daemon receipt of the SIGHUP signal, or execution
+of the command "scontrol reconfigure".
+.LP
+The configuration parameters available include:
+
+.TP
+\fBAllowUsers\fR
+Colon delimited list of user names and/or IDs permitted to use burst buffers.
+The options \fBAllowUsers\fR and \fBDenyUsers\fR can not both be specified.
+By default all users are permitted to use burst buffers.
+
+.TP
+\fBCreateBuffer\fR
+Fully qualified path name of a program which will create both persistent
+and per\-job burst buffers.
+This option is not used by the burst_buffer/cray plugin.
+
+.TP
+\fBDefaultPool\fR
+Name of the pool used by default for resource allocations.
+The default value is the first pool reported by the burst buffer infrastructure.
+This option is only used by the burst_buffer/cray plugin.
+
+.TP
+\fBDenyUsers\fR
+Colon delimited list of user names and/or IDs prevented from using burst buffers.
+The options \fBAllowUsers\fR and \fBDenyUsers\fR can not both be specified.
+By default all users are permitted to use burst buffers.
+
+.TP
+\fBDestroyBuffer\fR
+Fully qualified path name of a program which will destroy both persistent
+and per\-job burst buffers.
+This option is not used by the burst_buffer/cray plugin.
+
+.TP
+\fBFlags\fR
+String used to control various functions.
+Multiple options may be comma separated.
+Supported options include:
+.RS
+.TP
+\fBDisablePersistent\fR
+Prevents regular users from being able to create and destroy persistent burst buffers.
+This is the default behaviour, only privileged users can create or destroy persistent burst buffers.
+.TP
+\fBEmulateCray\fR
+Emulating a Cray DataWarp system using the dw_wlm_cli script in the burst_buffer/cray plugin.
+.TP
+\fBEnablePersistent\fR
+Enables regular users to create and destroy persistent burst buffers.
+By default, only privileged users can create or destroy persistent burst buffers.
+.RE
+
+.TP
+\fBGetSysState\fR
+Fully qualified path name of a program which will return the current burst
+buffer state.
+See the src/plugins/burst_buffer/generic/bb_get_state.example in the
+Slurm distribution for an example.
+For the Cray plugin, this should be the path of the \fIdw_wlm_cli\fR command
+and its default value is /opt/cray/dw_wlm/default/bin/dw_wlm_cli.
+
+.TP
+\fBGranularity\fR
+Granularity of job space allocations in units of bytes.
+The numeric value may have a suffix of "m" (megabytes), "g" (gigabytes),
+"t" (terabytes), "p" (petabytes), or "n" (nodes).
+Bytes is assumed if no suffix is supplied.
+This option is not used by the burst_buffer/cray plugin.
+
+.\ Possible future enhancement
+.\ .TP
+.\ \fBGres\fR
+.\ Generic resources associated with burst buffers.
+.\ This is a completely separate name space from the Gres defined in the slurm.conf
+.\ file.
+.\ The Gres value consists of a comma separated list of generic resources,
+.\ each of which includes a name separated by a colon and a numeric value.
+.\ The numeric value can include a suffix of "k", "m" or "g", which multiplies
+.\ the numeric value by 1,024, 1,048,576, or 1,073,741,824 respectively.
+.\ The numeric value is a 32-bit value.
+.\ See the example below.
+
+.TP
+\fBPrivateData\fR
+If set to "true" then users will only be able to view burst buffers they can
+use.
+Slurm administrators will still be able to view all burst buffers.
+By default, users can view all burst buffers.
+
+.TP
+\fBStageInTimeout\fR
+If the stage in of files for a job takes more than this number of seconds,
+the burst buffer will be released and the job will be placed in a held state.
+A Slurm administrator will be required to release the job.
+By default there is no timeout of the stage in process.
+
+.TP
+\fBStageOutTimeout\fR
+If the stage out of files for a job takes more than this number of seconds,
+the burst buffer will be released and the job will be purged.
+By default there is no timeout of the stage out process.
+
+.TP
+\fBStartStageIn\fR
+Fully qualified path name of a program which will stage files in for a job.
+See the src/plugins/burst_buffer/generic/bb_start_stage_in.example in the
+Slurm distribution for an example.
+This option is not used by the burst_buffer/cray plugin.
+
+.TP
+\fBStartStageOut\fR
+Fully qualified path name of a program which will stage files out for a job.
+See the src/plugins/burst_buffer/generic/bb_start_stage_out.example in the
+Slurm distribution for an example.
+This option is not used by the burst_buffer/cray plugin.
+
+.TP
+\fBStopStageIn\fR
+Fully qualified path name of a program which will stop staging files in for a job.
+See the src/plugins/burst_buffer/generic/bb_stop_stage_out.example in the
+Slurm distribution for an example.
+This option is not used by the burst_buffer/cray plugin.
+
+.TP
+\fBStopStageOut\fR
+Fully qualified path name of a program which will stop staging files in for a job.
+See the src/plugins/burst_buffer/generic/bb_stop_stage_out.example in the
+Slurm distribution for an example.
+This option is not used by the burst_buffer/cray plugin.
+
+.SH "EXAMPLE"
+.LP
+.br
+##################################################################
+.br
+# Slurm's burst buffer configuration file (burst_buffer.conf)
+.br
+##################################################################
+.br
+AllowUsers=alan,brenda
+.br
+PrivateData=true
+.\ .br
+.\ Gres=nodes:10,other:20
+.br
+#
+.br
+Granularity=1G
+.br
+#
+.br
+StageInTimeout=30    # Seconds
+.br
+StageOutTimeout=30   # Seconds
+.br
+#
+.br
+CreateBuffer=/usr/local/slurm/15.08/sbin/CB
+.br
+DestroyBuffer=/usr/local/slurm/15.08/sbin/DB
+.br
+GetSysState=/usr/local/slurm/15.08/sbin/GSS
+.br
+StartStageIn=/usr/local/slurm/15.08/sbin/SSI
+.br
+StartStageOut=/usr/local/slurm/15.08/sbin/SSO
+.br
+StopStageIn=/usr/local/slurm/15.08/sbin/PSI
+.br
+StopStageOut=/usr/local/slurm/15.08/sbin/PSO
+
+.SH "COPYING"
+Copyright (C) 2014-2015 SchedMD LLC.
+.LP
+This file is part of Slurm, a resource management program.
+For details, see <http://slurm.schedmd.com/>.
+.LP
+Slurm is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 2 of the License, or (at your option)
+any later version.
+.LP
+Slurm is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+details.
+
+.SH "SEE ALSO"
+.LP
+\fBslurm.conf\fR(5)
diff --git a/doc/man/man5/gres.conf.5 b/doc/man/man5/gres.conf.5
index 39a103aa7..95b8774db 100644
--- a/doc/man/man5/gres.conf.5
+++ b/doc/man/man5/gres.conf.5
@@ -26,10 +26,9 @@ The overall configuration parameters available include:
 \fBCount\fR
 Number of resources of this type available on this node.
 The default value is set to the number of \fBFile\fR values specified (if any),
-otherwise the default value is one. A suffix of "K", "M" or "G" may be used
-to multiply the number by 1024, 1048576 or 1073741824 respectively.
-Note that Count is a 32\-bit field and the maximum value is 4,294,967,295.
- 
+otherwise the default value is one. A suffix of "K", "M", "G", "T" or "P" may be
+used to multiply the number by 1024, 1048576, 1073741824, etc. respectively.
+
 .TP
 \fBCPUs\fR
 Specify the CPU index numbers for the specific CPUs which can
diff --git a/doc/man/man5/slurm.conf.5 b/doc/man/man5/slurm.conf.5
index 2943519f5..b4331aff1 100644
--- a/doc/man/man5/slurm.conf.5
+++ b/doc/man/man5/slurm.conf.5
@@ -119,6 +119,19 @@ The listening port of the accounting storage database server.
 Only used for database type storage plugins, ignored otherwise.
 Also see \fBDefaultStoragePort\fR.
 
+.TP
+\fBAccountingStorageTRES\fR
+Comma separated list of resources you wish to track on the cluster.
+These are the resources requested by the sbatch/srun job when it
+is submitted.
+Currently this consists of any GRES, or license along with CPU, Memory,
+and Energy.  By default CPU, Energy, Memory, and Node are tracked.
+AccountingStorageTRES=cpu,energy,mem,node,gres/craynetwork,license/iop1
+will track cpu, energy, memory, nodes along with a gres called craynetwork
+as well as a license called iop1.  Whenever these resources are used on the
+cluster they are recorded. The TRES are automatically set up in the database
+on the start of the slurmctld.
+
 .TP
 \fBAccountingStorageType\fR
 The accounting storage mechanism type.  Acceptable values at
@@ -267,12 +280,28 @@ Additional information to be used for authentication of communications
 between the Slurm daemons (slurmctld and slurmd) and the Slurm
 clients.  The interpretation of this option is specific to the
 configured \fBAuthType\fR.
-In the case of \fIauth/munge\fR and \fIcrypto/munge\fR, the value
-of this parameter can specify the socket of a MUNGE daemon other than
-the default MUNGE daemon (e.g. "socket=/var/run/munge/munge.socket.2") or
-the credential time to live in seconds (e.g. "ttl=300").
 Multiple options may be specified in a comma delimited list.
 If not specified, the default authentication information will be used.
+.RS
+.TP 14
+\fBcred_expire\fR
+Default job step credential lifetime, in seconds (e.g. "cred_expire=1200").
+It must be sufficiently long to load the user environment, run prolog,
+deal with the slurmd getting paged out of memory, etc.
+This also controls how long a requeued job must wait before starting again.
+The default value is 120 seconds.
+.TP
+\fBsocket\fR
+Path name to a MUNGE daemon socket to use
+(e.g. "socket=/var/run/munge/munge.socket.2").
+The default value is "/var/run/munge/munge.socket.2".
+Used by \fIauth/munge\fR and \fIcrypto/munge\fR.
+.TP
+\fBttl\fR
+Credential lifetime, in seconds (e.g. "ttl=300").
+The default value is dependent upon the Munge installation, but is typically
+300 seconds.
+.RE
 
 .TP
 \fBAuthType\fR
@@ -338,6 +367,12 @@ Therefore a job will not necessarily be terminated if its start time exceeds
 This configuration parameter is also applied to launch tasks and avoid aborting
 \fBsrun\fR commands due to long running \fBProlog\fR scripts.
 
+.TP
+\fBBurstBufferType\fR
+The plugin used to manage burst buffers.
+Acceptable values at present include "burst_buffer/none".
+More information later...
+
 .TP
 \fBCacheGroups\fR
 If set to 1, the slurmd daemon will cache /etc/groups entries.
@@ -483,6 +518,29 @@ attempts to use the Performance CPU governor
 attempts to use the PowerSave CPU governor
 .RE
 
+.TP
+\fBCpuFreqGovernors\fR
+List of CPU frequency governors allowed to be set with the salloc, sbatch, or
+srun option  \-\-cpu\-freq.
+Acceptable values at present include:
+.RS
+.TP 14
+\fBConservative\fR
+attempts to use the Conservative CPU governor
+.TP
+\fBOnDemand\fR
+attempts to use the OnDemand CPU governor (the default value)
+.TP
+\fBPerformance\fR
+attempts to use the Performance CPU governor
+.TP
+\fBPowerSave\fR
+attempts to use the PowerSave CPU governor
+.TP
+\fBUserSpace\fR
+attempts to use the UserSpace CPU governor
+.RE
+The default is OnDemand.
 .TP
 \fBCryptoType\fR
 The cryptographic signature tool to be used in the creation of
@@ -523,9 +581,15 @@ BlueGene block selection for jobs
 \fBBGBlockWires\fR
 BlueGene block wiring (switch state details)
 .TP
+\fBBurstBuffer\fR
+Burst Buffer plugin
+.TP
 \fBCPU_Bind\fR
 CPU binding details for jobs and steps
 .TP
+\fBCpuFrequency\fR
+Cpu frequency details for jobs and steps using the \-\-cpu\-freq option.
+.TP
 \fBDB_ASSOC\fR
 SQL statements/queries when dealing with associations in the database.
 .TP
@@ -582,6 +646,9 @@ License management details
 \fBNO_CONF_HASH\fR
 Do not log when the slurm.conf files differs between Slurm daemons
 .TP
+\fBPower\fR
+Power management plugin
+.TP
 \fBPriority\fR
 Job prioritization
 .TP
@@ -594,6 +661,9 @@ Advanced reservations
 \fBSelectType\fR
 Resource selection plugin
 .TP
+\fBSICP\fR
+Inter\-cluster job details
+.TP
 \fBSteps\fR
 Slurmctld resource allocation for job steps
 .TP
@@ -693,9 +763,11 @@ The default value is "NO", meaning user root will be able to execute jobs.
 \fBDisableRootJobs\fR may also be set by partition.
 
 .TP
-\fBDynallocPort\fR
-Socket port used for MapReduce dynamic allocation communications.
-Used only by the slurmctld/dynalloc plugin.
+\fBEioTimeout\fR
+The number of seconds srun waits for slurmstepd to close the TCP/IP
+connection used to relay data between the user application and srun
+when the user application terminates. The default value is 60 seconds.
+May not exceed 65533.
 
 .TP
 \fBEnforcePartLimits\fR
@@ -1006,6 +1078,16 @@ Acceptable values at present include:
 .TP 20
 \fBNoShared\fR
 Exclude shared memory from accounting.
+.TP
+\fBUsePss\fR
+Use PSS value instead of RSS to calculate real usage of memory.
+The PSS value will be saved as RSS.
+.TP
+\fBNoOverMemoryKill\fR
+Do not kill processes that use more than requested memory.
+This parameter should be used with caution since a job exceeding
+its memory allocation may affect other processes and/or machine
+health.
 .RE
 
 .TP
@@ -1159,19 +1241,36 @@ it will be forcibly terminated.
 The default value is 30 seconds.
 The value may not exceed 65533.
 
+.TP
+\fBLaunchParameters\fR
+Identifies options to the job launch plugin.
+Acceptable values include:
+.RS
+.TP 12
+\fBtest_exec\fR
+Validate the executable command's existence prior to attempting launch on
+the compute nodes
+.RE
+
 .TP
 \fBLaunchType\fR
 Identifies the mechanism to be used to launch application tasks.
-Acceptable values include
-"launch/aprun" for use with Cray systems with ALPS,
-"launch/poe" for use with IBM Parallel Environment (PE),
-"launch/runjob" for use with IBM BlueGene/Q systems, and
-"launch/slurm" for all other systems.
-The default value is
-"launch/aprun" for Cray systems,
-"launch/poe" for systems with the IBM NRT library installed,
-"launch/runjob" for IBM BlueGene/Q systems, and
-"launch/slurm" for all other systems.
+Acceptable values include:
+.RS
+.TP 15
+\fBlaunch/aprun\fR
+For use with Cray systems with ALPS and the default value for those systems
+.TP
+\fBlaunch/poe\fR
+For use with IBM Parallel Environment (PE) and the default value for systems
+with the IBM NRT library installed.
+.TP
+\fBlaunch/runjob\fR
+For use with IBM BlueGene/Q systems and the default value for those systems
+.TP
+\fBlaunch/slurm\fR
+For all other systems and the default value for those systems
+.RE
 
 .TP
 \fBLicenses\fR
@@ -1235,11 +1334,11 @@ It only takes effect upon restart of the slurmctld daemon.
 .TP
 \fBMaxJobId\fR
 The maximum job id to be used for jobs submitted to Slurm without a
-specific requested value. Job id values generated will incremented by 1
-for each subsequent job. This may be used to provide a meta\-scheduler
-with a job id space which is disjoint from the interactive jobs.
+specific requested value EXCEPT for jobs visible between clusters.
+Job id values generated will be incremented by 1 for each subsequent job.
 Once \fBMaxJobId\fR is reached, the next job will be assigned \fBFirstJobId\fR.
-The default value is 4294901760 (0xffff0000).
+The default value is 2,147,418,112 (0x7fff0000).
+Jobs visible across clusters will always have a job ID of 2,147,483,648 or higher.
 Also see \fBFirstJobId\fR.
 
 .TP
@@ -1306,7 +1405,6 @@ its memory or other resources. The default value is 300 seconds.
 A value of zero prevents any job record purging.
 In order to eliminate some possible race conditions, the minimum non\-zero
 value for \fBMinJobAge\fR recommended is 2.
-May not exceed 65533.
 
 .TP
 \fBMpiDefault\fR
@@ -1331,6 +1429,60 @@ MPI parameters.
 Used to identify ports used by OpenMPI only and the input format is
 "ports=12000\-12999" to identify a range of communication ports to be used.
 
+.TP
+\fBMsgAggregationParams\fR
+Message aggregation parameters. Message aggregation
+is an optional feature that may improve system performance by reducing
+the number of separate messages passed between nodes. The feature
+works by routing messages through one or more message collector
+nodes between their source and destination nodes. At each
+collector node, messages with the same destination received
+during a defined message collection window are packaged into a single
+composite message. When the window expires, the composite message
+is sent to the next collector node on
+the route to its destination. The route between each source
+and destination node is provided by the Route plugin. When a
+composite message is received at its destination node, the
+original messages are extracted and processed as if they
+had been sent directly.
+.br
+.br
+Currently, the only message types supported by message
+aggregation are the node registration, batch script completion,
+step completion, and epilog complete messages.
+.br
+.br
+.RE
+The format for this parameter is as follows:
+.br
+.RS
+.TP 12
+\fBMsgAggregationParams=\fR\fI<option>\fR\fB=\fR\fI<value>\fR
+where \fI<option>\fR=\fI<value>\fR specify a particular control
+variable. Multiple, comma-separated \fI<option>\fR=\fI<value>\fR
+pairs may be specified. Supported options are as follows:
+.br
+.RS
+.TP
+\fBWindowMsgs=\fI<number>\fR
+where \fI<number>\fR is the maximum number of messages
+in each message collection window.
+.TP
+\fBWindowTime=\fI<time>\fR
+where \fI<time>\fR is the maximum elapsed time in milliseconds of
+each message collection window.
+.br
+.br
+.TP
+.RE
+.RE
+A window expires when either \fBWindowMsgs\fR or \fBWindowTime\fR is
+reached. By default, message aggregation is disabled. To enable
+the feature, set \fBWindowMsgs\fR to a value greater than 1. The
+default value for \fBWindowTime\fR is 100 milliseconds.
+.RE
+.RE
+
 .TP
 \fBOverTimeLimit\fR
 Number of minutes by which a job can exceed its time limit before
@@ -1361,6 +1513,105 @@ part of a user's job step.  Default location is "plugstack.conf"
 in the same directory as the system slurm.conf. For more information
 on SPANK plugins, see the \fBspank\fR(8) manual.
 
+.TP
+\fBPowerParameters\fR
+System power management parameters.
+The supported parameters are specific to the \fBPowerPlugin\fR.
+Changes to this value take effect when the Slurm daemons are reconfigured.
+More information about system power management is available here
+<http://slurm.schedmd.com/power_mgmt.html>.
+Options currently supported by any plugins are listed below.
+.RS
+.TP
+\fBbalance_interval=#\fR
+Specifies the time interval, in seconds, between attempts to rebalance power
+caps across the nodes.
+This also controls the frequency at which Slurm attempts to collect current
+power consumption data (old data may be used until new data is available from
+the underlying infrastructure and values below 10 seconds are not recommended
+for Cray systems).
+The default value is 30 seconds.
+Supported by the power/cray plugin.
+.TP
+\fBcapmc_path=\fR
+Specifies the absolute path of the capmc command.
+The default value is "/opt/cray/capmc/default/bin/capmc".
+Supported by the power/cray plugin.
+.TP
+\fBcap_watts=#\fR
+Specifies the total power limit to be established across all compute nodes
+managed by Slurm.
+A value of 0 sets every compute node to have an unlimited cap.
+The default value is 0.
+Supported by the power/cray plugin.
+.TP
+\fBdecrease_rate=#\fR
+Specifies the maximum rate of change in the power cap for a node where the
+actual power usage is below the power cap by an amount greater than
+\fBlower_threshold\fR (see below).
+Value represents a percentage of the difference between a node's minimum and
+maximum power consumption.
+The default value is 50 percent.
+Supported by the power/cray plugin.
+.TP
+\fBincrease_rate=#\fR
+Specifies the maximum rate of change in the power cap for a node where the
+actual power usage is within \fBupper_threshold\fR (see below) of the power cap.
+Value represents a percentage of the difference between a node's minimum and
+maximum power consumption.
+The default value is 20 percent.
+Supported by the power/cray plugin.
+.TP
+\fBjob_level\fR
+All nodes associated with every job will have the same power cap, to the extent
+possible.
+Also see the \-\-power=level option on the job submission commands.
+.TP
+\fBjob_no_level\fR
+Disable the user's ability to set every node associated with a job to the same
+power cap.
+Each node will have its power cap set independently.
+This disables the \-\-power=level option on the job submission commands.
+.TP
+\fBlower_threshold=#\fR
+Specify a lower power consumption threshold.
+If a node's current power consumption is below this percentage of its current
+cap, then its power cap will be reduced.
+The default value is 90 percent.
+Supported by the power/cray plugin.
+.TP
+\fBrecent_job=#\fR
+If a job has started or resumed execution (from suspend) on a compute node
+within this number of seconds from the current time, the node's power cap will
+be increased to the maximum.
+The default value is 300 seconds.
+Supported by the power/cray plugin.
+.TP
+\fBset_watts=#\fR
+Specifies the power limit to be set on every compute node managed by Slurm.
+Every node gets this same power cap and there is no variation through time
+based upon actual power usage on the node.
+Supported by the power/cray plugin.
+.TP
+\fBupper_threshold=#\fR
+Specify an upper power consumption threshold.
+If a node's current power consumption is above this percentage of its current
+cap, then its power cap will be increased to the extent possible.
+The default value is 95 percent.
+Supported by the power/cray plugin.
+.RE
+
+.TP
+\fBPowerPlugin\fR
+Identifies the plugin used for system power management.
+Currently supported plugins include:
+\fBcray\fR and
+\fBnone\fR.
+Changes to this value require restarting Slurm daemons to take effect.
+More information about system power management is available here
+<http://slurm.schedmd.com/power_mgmt.html>.
+By default, no power plugin is loaded.
+
 .TP
 \fBPreemptMode\fR
 Enables gang scheduling and/or controls the mechanism used to preempt
@@ -1404,10 +1655,7 @@ preempts jobs by suspending them.
 A suspended job will resume execution once the high priority job
 preempting it completes.
 The \fBSUSPEND\fR may only be used with the \fBGANG\fR option
-(the gang scheduler module performs the job resume operation)
-and with \fBPreemptType=preempt/partition_prio\fR (the logic to
-suspend and resume jobs current only has the data structures to
-support partitions).
+(the gang scheduler module performs the job resume operation).
 .RE
 
 .TP
@@ -1429,8 +1677,9 @@ This is not compatible with \fBPreemptMode=OFF\fR.
 \fBpreempt/qos\fR
 Job preemption rules are specified by Quality Of Service (QOS) specifications
 in the Slurm database.
-This is not compatible with \fBPreemptMode=OFF\fR or \fBPreemptMode=SUSPEND\fR
-(i.e. preempted jobs must be removed from the resources).
+This option is not compatible with \fBPreemptMode=OFF\fR.
+A configuration of \fBPreemptMode=SUSPEND\fR is only supported by the
+\fBselect/cons_res\fR plugin.
 .RE
 
 .TP
@@ -1488,9 +1737,6 @@ their priority.
 \fBSMALL_RELATIVE_TO_TIME\fR
 If set, the job's size component will be based upon not the job size alone, but
 the job's size divided by it's time limit.
-.TP
-\fBTICKET_BASED\fR
-If set, priority will be calculated based on the ticket system.
 .RE
 
 .TP
@@ -1602,6 +1848,20 @@ component contributes to the job's priority.
 Applicable only if PriorityType=priority/multifactor.
 The default value is 0.
 
+.TP
+\fBPriorityWeightTRES\fR
+A comma separated list of TRES Types and weights that sets the degree that each
+TRES Type contributes to the job's priority.
+
+.nf
+e.g.
+PriorityWeightTRES=CPU=1000,Mem=2000,GRES/gpu=3000
+.fi
+
+Applicable only if PriorityType=priority/multifactor and if
+AccountingStorageTRES is configured with each TRES Type.
+The default values are 0.
+
 .TP
 \fBPrivateData\fR
 This controls what type of information is hidden from regular users.
@@ -1708,12 +1968,21 @@ If the prolog fails (returns a non\-zero exit code), this will result in the
 node being set to a DRAIN state and the job being requeued in a held state.
 See \fBProlog and Epilog Scripts\fR for more information.
 
+.TP
+\fBPrologEpilogTimeout\fR
+The interval in seconds Slurm waits for Prolog and Epilog before terminating
+them. The default behavior is to wait indefinitely. This interval applies to
+the Prolog and Epilog run by slurmd daemon before and after the job, the
+PrologSlurmctld and EpilogSlurmctld run by slurmctld daemon, and the SPANK
+plugins run by the slurmstepd daemon.
+
 .TP
 \fBPrologFlags\fR
 Flags to control the Prolog behavior. By default no flags are set.
-Currently the options are:
+Multiple flags may be specified in a comma\-separated list.
+Currently supported options are:
 .RS
-.TP 6
+.TP 8
 \fBAlloc\fR
 If set, the Prolog script will be executed at job allocation. By default,
 Prolog is executed just before the task is launched. Therefore, when salloc
@@ -1721,13 +1990,20 @@ is started, no Prolog is executed. \fBAlloc\fR is useful for preparing things
 before a user starts to use any allocated resources.
 In particular, this flag is needed on a Cray system when cluster compatibility
 mode is enabled.
-.TP 6
+.TP
+\fBContain\fR
+At job allocation time, use the ProcTrack plugin to create a job container
+on all allocated compute nodes.
+This container may be used for user processes not launched under Slurm control,
+for example the PAM module may place processes launch through a direct user
+login into this container.
+.TP
 \fBNoHold\fR
-If set, Alloc is also set.  This will allow for salloc to not block until the
-prolog is finished on each node.  The blocking will happen when steps reach
-the slurmd and before any execution has happened in the step. This is a much
-faster way to work and if using srun to launch your tasks you should use this
-flag.
+If set, the Alloc flag should also be set.  This will allow for salloc to not
+block until the prolog is finished on each node.  The blocking will happen when
+steps reach the slurmd and before any execution has happened in the step.
+This is a much faster way to work and if using srun to launch your tasks you
+should use this flag.
 .RE
 
 .TP
@@ -2009,7 +2285,7 @@ etc. are equal to or greater than the values configured in slurm.conf.
 .TP
 \fBRoutePlugin\fR
 Identifies the plugin to be used for defining which nodes will be used
-to forward a message.
+for message forwarding and message aggregation.
 .RS
 .TP
 \fBroute/default\fR
@@ -2045,6 +2321,11 @@ consume any of the CPU or memory resources, configure it as a pseudo\-terminal,
 and preserve all of the job's environment variables (i.e. and not over\-write
 them with the job step's allocation information).
 
+For Cray systems, add \-\-gres=craynetwork:0. e.g.:
+.nf
+    SallocDefaultCommand = "srun \-n1 \-N1 \-\-mem\-per\-cpu=0 \-\-gres=craynetwork:0 \-\-pty \-\-preserve\-env \-\-mpi=none $SHELL"
+.fi
+
 .TP
 \fBSchedulerParameters\fR
 The interpretation of this parameter varies by \fBSchedulerType\fR.
@@ -2058,6 +2339,15 @@ submitted at a very high rate (i.e. using the sbatch command) and one wishes
 to reduce the overhead of attempting to schedule each job at submit time.
 The default value is 3 seconds.
 .TP
+\fBbf_busy_nodes\fR
+When selecting resources for pending jobs to reserve for future execution
+(i.e. the job can not be started immediately), then preferentially select
+nodes that are in use.
+This will tend to leave currently idle resources available for backfilling
+longer running jobs, but may result in allocations having less than optimal
+network topology.
+This option is currently only supported by the select/cons_res plugin.
+.TP
 \fBbf_continue\fR
 The backfill scheduler periodically releases locks in order to permit other
 operations to proceed rather than blocking all activity for what could be an
@@ -2250,6 +2540,13 @@ For example if MessageTimeout=10, the time limit will be 2 seconds
 Maximum number of seconds that a job can delay execution waiting for the
 specified desired switch count. The default value is 300 seconds.
 .TP
+\fBno_backup_scheduling\fR
+If used, the backup controller will not schedule jobs when it takes over. The
+backup controller will allow jobs to be submitted, modified and cancelled but
+won't schedule new jobs. This is useful in Cray environments when the backup
+controller resides on an external Cray node.  A restart is required to alter
+this option. This is explicitly set on a Cray/ALPS system.
+.TP
 \fBpack_serial_at_end\fR
 If used with the select/cons_res plugin then put serial jobs at the end of
 the available nodes rather than using a best fit algorithm.
@@ -2446,13 +2743,6 @@ after each allocation.  This also sets NHC_NO_STEPS, so the NHC will never run.
 The following options are supported for \fBSelectType=select/cons_res\fR:
 .RS
 .TP
-\fBCR_ALLOCATE_FULL_SOCKET\fR
-Jobs are allocated whole sockets rather than individual cores.
-Must be used with \fBCR_Socket\fR or \fBCR_Socket_Memory\fR option.
-NOTE: This is also needed for more accurate accounting.  Without it the
-number of CPUs allocated the job will not be rounded up to include CPUs
-not used by the allocation even though they are allocated.
-.TP
 \fBCR_CPU\fR
 CPUs are consumable resources.
 Configure the number of \fBCPUs\fR on each node, which may be equal to the
@@ -2515,7 +2805,7 @@ use the least loaded nodes in selected partitions.
 .TP
 \fBCR_Pack_Nodes\fR
 If a job allocation contains more resources than will be used for launching
-tasks (e.g. if whole nodes are allocated to a job), then rather than evenly
+tasks (e.g. if whole nodes are allocated to a job), then rather than 
 distributing a job's tasks evenly across it's allocated nodes, pack them as
 tightly as possible on thsee nodes.
 For example, consider a job allocation containing two \fBentire\fR nodes with
@@ -2530,10 +2820,6 @@ Sockets are consumable resources.
 On nodes with multiple cores, each core or thread is counted as a CPU
 to satisfy a job's resource requirement, but multiple jobs are not
 allocated resources on the same socket.
-NOTE: With this option even though the entire socket will be allocated
-to the job the count of CPUs allocated to a job will not round up to
-account for every CPU on an allocated socket without the
-\fBCR_ALLOCATE_FULL_SOCKET\fR option.
 .TP
 \fBCR_Socket_Memory\fR
 Memory and sockets are consumable resources.
@@ -2541,10 +2827,6 @@ On nodes with multiple cores, each core or thread is counted as a CPU
 to satisfy a job's resource requirement, but multiple jobs are not
 allocated resources on the same socket.
 Setting a value for \fBDefMemPerCPU\fR is strongly recommended.
-NOTE: With this option even though the entire socket will be allocated
-to the job the count of CPUs allocated to a job will not round up to
-account for every CPU on an allocated socket without the
-\fBCR_ALLOCATE_FULL_SOCKET\fR option.
 .TP
 \fBCR_Memory\fR
 Memory is a consumable resource.
@@ -2863,7 +3145,7 @@ Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR,
 \fBSuspendRate\fR
 The rate at which nodes are place into power save mode by \fBSuspendProgram\fR.
 The value is number of nodes per minute and it can be used to prevent
-a large drop in power power consumption (e.g. after a large job completes).
+a large drop in power consumption (e.g. after a large job completes).
 A value of zero results in no limits being imposed.
 The default value is 60 nodes per minute.
 Related configuration options include \fBResumeTimeout\fR, \fBResumeProgram\fR,
@@ -2992,7 +3274,7 @@ Overrides user options.
 .TP
 \fBAutobind\fR
 Set a default binding in the event that "auto binding" doesn't find a match.
-Currently only supports threads. (E.g. TaskPluginParam=autobind=threads).
+Set to Threads, Cores or Sockets (E.g. TaskPluginParam=autobind=threads).
 .RE
 
 .TP
@@ -3055,6 +3337,13 @@ temporary storage. This parameter is used in establishing a node's \fBTmpDisk\fR
 space.
 The default value is "/tmp".
 
+.TP
+\fBTopologyParam\fR
+Comma separated options identifying network topology options.
+The value of "dragonfly" is valid when TopologyPlugin=topology/tree.
+The value of "NoInAddrAny" is used to directly bind to the address of what
+the node resolves to instead of binding messages to any address on the node.
+
 .TP
 \fBTopologyPlugin\fR
 Identifies the plugin to be used for determining the network topology
@@ -3377,17 +3666,19 @@ resource.
 A generic resource can also be specified as non\-consumable (i.e. multiple
 jobs can use the same generic resource) with the optional field ":no_consume".
 The final field must specify a generic resources count.
-A suffix of "K", "M" or "G" may be used to multiply the count by 1024,
-1048576 or 1073741824 respectively.
+A suffix of "K", "M", "G", "T" or "P" may be used to multiply the number by
+1024, 1048576, 1073741824, etc. respectively.
 (e.g."Gres=gpu:tesla:1,gpu:kepler:1,bandwidth:lustre:no_consume:4G").
 By default a node has no generic resources and its maximum count is
-4,294,967,295.
+that of an unsigned 64-bit integer.
 Also see \fBFeature\fR.
 
 .TP
 \fBMemSpecLimit\fR
 Limit on combined real memory allocation for compute node daemons
-(slurmd, slurmstepd), in megabytes.
+(slurmd, slurmstepd), in megabytes. This memory is not available to job
+allocations. The daemons won't be killed when they exhaust the memory allocation
+(i.e. the OOM Killer is disabled for the daemon's memory cgroup).
 
 .TP
 \fBPort\fR
@@ -3772,6 +4063,38 @@ Also refer to DenyQos.
 Partition name of alternate partition to be used if the state of this partition
 is "DRAIN" or "INACTIVE."
 
+.TP
+\fBTRESBillingWeights\fR
+TRESBillingWeights is used to define the billing weights of each TRES type that
+will be used in calculating the usage of a job.
+
+Billing weights are specified as a comma-separated list of
+\fI<TRES Type>\fR=\fI<TRES Billing Weight>\fR pairs.
+
+Any TRES Type is available for billing. Note that the memory is weighted per
+gigabyte.
+
+By default the billing of TRES is calculated as the sum of all TRES types
+multiplied by their corresponding billing weight.  For example, when a job is
+allocated 1 CPU and 8 GB of memory on a partition configured with
+TRESBillingWeights="CPU=1.0,Mem=0.25,GRES/gpu=2.0", the billable TRES will be:
+(1*1.0) + (8*0.25) + (0*2.0) = 3.0.
+
+If PriorityFlags=MAX_TRES is configured, the billable TRES is calculated as the
+MAX of individual TRES' on a node (e.g. cpus, mem, gres) plus the sum of all
+global TRES' (e.g. licenses). Using the same example above the billable TRES
+will be MAX(1*1.0, 8*0.25) + (0*2.0) = 2.0.
+
+If TRESBillingWeights is not defined then the job is billed against the total
+number of allocated CPUs.
+
+\fBNOTE:\fR TRESBillingWeights is only used when calculating fairshare and
+doesn't affect job priority directly as it is currently not used for the size of
+the job. If you want TRES' to play a role in the job's priority then refer to
+the PriorityWeightTRES option.
+
+.RE
+
 .TP
 \fBDefault\fR
 If this keyword is set, jobs submitted without a partition
@@ -3834,12 +4157,25 @@ The default value will be the value of \fBDisableRootJobs\fR set
 outside of a partition specification (which is "NO", allowing user
 root to execute jobs).
 
+.TP
+\fBExclusiveUser\fR
+If set to "YES" then nodes will be exclusively allocated to users.
+Multiple jobs may be run for the same user, but only one user can be active
+at a time.
+This capability is also available on a per-job basis by using the
+\fB\-\-exclusive=user\fR option.
+
 .TP
 \fBGraceTime\fR
 Specifies, in units of seconds, the preemption grace time
 to be extended to a job which has been selected for preemption.
 The default value is zero, no preemption grace time is allowed on
 this partition.
+Once a job has been selected for preemption, its end time is set to the
+current time plus GraceTime. The job is immediately sent SIGCONT and SIGTERM
+signals in order to provide notification of its imminent termination.
+This is followed by the SIGCONT, SIGTERM and SIGKILL signal sequence upon
+reaching its new end time.
 (Meaningful only for PreemptMode=CANCEL)
 
 .TP
@@ -3930,6 +4266,7 @@ Node names may be specified using the node range expression syntax
 described above. A blank list of nodes
 (i.e. "Nodes= ") can be used if one wants a partition to exist,
 but have no resources (possibly on a temporary basis).
+A value of "ALL" is mapped to all nodes configured in the cluster.
 
 .TP
 \fBPartitionName\fR
@@ -3964,6 +4301,16 @@ Note that a partition's priority takes precedence over a job's
 priority.
 The value may not exceed 65533.
 
+.TP
+\fBQOS\fR
+Used to extend the limits available to a QOS on a partition.  Jobs will not be
+associated to this QOS outside of being associated to the partition.  They
+will still be associated to their requested QOS.
+By default, no QOS is used.
+\fBNOTE:\fR If a limit is set in both the Partition's QOS and the Job's QOS
+the Partition QOS will be honored unless the Job's QOS has the
+\fBOverPartQOS\fR flag set, in which case the Job's QOS will have priority.
+
 .TP
 \fBReqResv\fR
 Specifies users of this partition are required to designate a reservation
@@ -3988,8 +4335,7 @@ The default value is "NO".
 \fBSelectTypeParameters\fR
 Partition\-specific resource allocation type.
 Supported values are \fBCR_Core\fR and \fBCR_Socket\fR.
-Use requires the system\-wide \fBSelectTypeParameters\fR value be set plus
-\fBCR_ALLOCATE_FULL_SOCKET\fR.
+Use requires the system\-wide \fBSelectTypeParameters\fR value be set.
 
 .TP
 \fBShared\fR
@@ -4036,6 +4382,12 @@ core to execute up to four jobs at once.
 Recommended only for BlueGene systems configured with
 small blocks or for systems running
 with gang scheduling (\fBSchedulerType=sched/gang\fR).
+NOTE: \fIPreemptType=QOS\fR will permit one additional job to be run
+on the partition if started due to job preemption. For example, a configuration
+of \fIShared=FORCE:1\fR will only permit one job per resources normally,
+but a second job can be started if done so through preemption based upon QOS.
+The use of \fIPreemptType=QOS\fR and \fIPreemptType=Suspend\fR only applies
+with \fISelectType=cons_res\fR.
 .TP
 \fBYES\fR
 Makes all resources in the partition available for sharing upon request by
@@ -4153,7 +4505,7 @@ Available in \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR only.
 .TP
 \fBSLURM_JOB_CONSTRAINTS\fR
 Features required to run the job.
-Available in \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR only.
+Available in \fBProlog\fR, \fBPrologSlurmctld\fR and \fBEpilogSlurmctld\fR only.
 .TP
 \fBSLURM_JOB_DERIVED_EC\fR
 The highest exit code of all of the job steps.
@@ -4584,7 +4936,7 @@ Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 .br
 Copyright (C) 2008\-2010 Lawrence Livermore National Security.
 .br
-Copyright (C) 2010-2013 SchedMD LLC.
+Copyright (C) 2010-2015 SchedMD LLC.
 .LP
 This file is part of Slurm, a resource management program.
 For details, see <http://slurm.schedmd.com/>.
diff --git a/doc/man/man5/slurmdbd.conf.5 b/doc/man/man5/slurmdbd.conf.5
index 565fc5837..0f98bd180 100644
--- a/doc/man/man5/slurmdbd.conf.5
+++ b/doc/man/man5/slurmdbd.conf.5
@@ -172,6 +172,9 @@ and could impact performance.
 Valid subsystems available today (with more to come) include:
 .RS
 .TP 17
+\fBDB_ARCHIVE\fR
+SQL statements/queries when dealing with archiving and purging the database.
+.TP
 \fBDB_ASSOC\fR
 SQL statements/queries when dealing with associations in the database.
 .TP
diff --git a/doc/man/man8/Makefile.in b/doc/man/man8/Makefile.in
index 15fa1cba2..4d1d501c8 100644
--- a/doc/man/man8/Makefile.in
+++ b/doc/man/man8/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -223,6 +226,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -272,8 +277,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -292,6 +301,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -335,6 +347,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -358,6 +371,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/etc/init.d.slurm.in b/etc/init.d.slurm.in
index ecbaa7c1d..5cff4640b 100644
--- a/etc/init.d.slurm.in
+++ b/etc/init.d.slurm.in
@@ -238,9 +238,9 @@ slurmstop() {
        done
     done
 
-    # slurmstatus return 1 in case of stopped daemon
+    # slurmstatus return 1 or 3 in case of stopped daemon
     # and that is what we are looking for here
-    if [[ ${RETVAL} == "1" ]]
+    if [[ ${RETVAL} =~ 1|3 ]]
     then
         RETVAL=0
     else
diff --git a/etc/layouts.d.power.conf.example b/etc/layouts.d.power.conf.example
new file mode 100644
index 000000000..d0d5a8e13
--- /dev/null
+++ b/etc/layouts.d.power.conf.example
@@ -0,0 +1,7 @@
+Priority=10
+Root=Cluster
+
+Entity=Cluster Type=Center CurrentSumPower=0 IdleSumWatts=0 MaxSumWatts=0 Enclosed=nodes[0-199]
+
+Entity=nodes[0-199] Type=Node CurrentPower=0 IdleWatts=103 MaxWatts=308 DownWatts=103 PowerSaveWatts=12
+
diff --git a/etc/layouts.d.power_cpufreq.conf.example b/etc/layouts.d.power_cpufreq.conf.example
new file mode 100644
index 000000000..5630f2e93
--- /dev/null
+++ b/etc/layouts.d.power_cpufreq.conf.example
@@ -0,0 +1,15 @@
+Priority=10
+Root=Cluster
+
+Entity=Cluster Type=Center CurrentSumPower=0 IdleSumWatts=0 MaxSumWatts=0 Enclosed=virtual[0-3]
+
+Entity=virtual0 Type=Node CurrentPower=0 IdleWatts=0 MaxWatts=0 DownWatts=2 PowerSaveWatts=6 CoresCount=0 LastCore=15 Cpufreq1=1200000 Cpufreq2=1400000 Cpufreq3=1600000 Cpufreq4=1800000 Cpufreq5=2000000 Cpufreq6=2200000 Cpufreq7=2400000 Cpufreq8=2600000 NumFreqChoices=8 Enclosed=virtualcore[0-15] 
+
+Entity=virtual1 Type=Node CurrentPower=0 IdleWatts=0 MaxWatts=0 DownWatts=2 PowerSaveWatts=6 CoresCount=0 LastCore=15 Cpufreq1=1200000 Cpufreq2=1400000 Cpufreq3=1600000 Cpufreq4=1800000 Cpufreq5=2000000 Cpufreq6=2200000 Cpufreq7=2400000 Cpufreq8=2600000 NumFreqChoices=8 Enclosed=virtualcore[16-31]
+
+Entity=virtual2 Type=Node CurrentPower=0 IdleWatts=0 MaxWatts=0 DownWatts=2 PowerSaveWatts=6 CoresCount=0 LastCore=15 Cpufreq1=1200000 Cpufreq2=1400000 Cpufreq3=1600000 Cpufreq4=1800000 Cpufreq5=2000000 Cpufreq6=2200000 Cpufreq7=2400000 Cpufreq8=2600000 NumFreqChoices=8 Enclosed=virtualcore[32-47]
+
+Entity=virtual3 Type=Node CurrentPower=0 IdleWatts=0 MaxWatts=0 DownWatts=2 PowerSaveWatts=6 CoresCount=0 LastCore=15 Cpufreq1=1200000 Cpufreq2=1400000 Cpufreq3=1600000 Cpufreq4=1800000 Cpufreq5=2000000 Cpufreq6=2200000 Cpufreq7=2400000 Cpufreq8=2600000 NumFreqChoices=8 Enclosed=virtualcore[48-63]
+
+Entity=virtualcore[0-63] Type=Core CurrentCorePower=0 IdleCoreWatts=7 MaxCoreWatts=20 CurrentCoreFreq=0 Cpufreq1Watts=8 Cpufreq2Watts=10 Cpufreq3Watts=12 Cpufreq4Watts=14 Cpufreq5Watts=15 Cpufreq6Watts=16 Cpufreq7Watts=17 Cpufreq8Watts=19 
+
diff --git a/etc/slurmctld.service.in b/etc/slurmctld.service.in
index 84cdd49da..2fba94343 100644
--- a/etc/slurmctld.service.in
+++ b/etc/slurmctld.service.in
@@ -5,9 +5,8 @@ ConditionPathExists=@sysconfdir@/slurm.conf
 
 [Service]
 Type=forking
-EnvironmentFile=/etc/default/slurmctld
-ExecStart=@sbindir@/slurmctld $SLURMCTLD_OPTIONS
-PIDFile=/var/run/slurm/slurmctld.pid
+ExecStart=@sbindir@/slurmctld
+PIDFile=/var/run/slurmctld.pid
 
 [Install]
 WantedBy=multi-user.target
diff --git a/etc/slurmd.service.in b/etc/slurmd.service.in
index 207bff96b..3b1061f65 100644
--- a/etc/slurmd.service.in
+++ b/etc/slurmd.service.in
@@ -5,9 +5,8 @@ ConditionPathExists=@sysconfdir@/slurm.conf
 
 [Service]
 Type=forking
-EnvironmentFile=/etc/default/slurmd
-ExecStart=@sbindir@/slurmd $SLURMD_OPTIONS
-PIDFile=/var/run/slurm/slurmd.pid
+ExecStart=@sbindir@/slurmd
+PIDFile=/var/run/slurmd.pid
 
 [Install]
 WantedBy=multi-user.target
diff --git a/etc/slurmdbd.service.in b/etc/slurmdbd.service.in
index 1ea04cbff..bd8a1741c 100644
--- a/etc/slurmdbd.service.in
+++ b/etc/slurmdbd.service.in
@@ -5,9 +5,8 @@ ConditionPathExists=@sysconfdir@/slurmdbd.conf
 
 [Service]
 Type=forking
-EnvironmentFile=/etc/default/slurmdbd
-ExecStart=@sbindir@/slurmdbd $SLURMDBD_OPTIONS
-PIDFile=/var/run/slurm/slurmdbd.pid
+ExecStart=@sbindir@/slurmdbd
+PIDFile=/var/run/slurmdbd.pid
 
 [Install]
 WantedBy=multi-user.target
diff --git a/slurm.spec b/slurm.spec
index dd522d863..30d23bff5 100644
--- a/slurm.spec
+++ b/slurm.spec
@@ -142,7 +142,6 @@ BuildRequires: mysql-devel
 BuildRequires: cray-libalpscomm_cn-devel
 BuildRequires: cray-libalpscomm_sn-devel
 BuildRequires: libnuma-devel
-BuildConflicts: cray-libnuma1
 BuildRequires: libhwloc-devel
 BuildRequires: cray-libjob-devel
 BuildRequires: gtk2-devel
@@ -390,7 +389,7 @@ utilites will provide more information and greater depth of understanding
 %package pam_slurm
 Summary: PAM module for restricting access to compute nodes via Slurm
 Group: System Environment/Base
-Requires: slurm slurm-devel
+Requires: slurm
 BuildRequires: pam-devel
 Obsoletes: pam_slurm
 %description pam_slurm
@@ -424,7 +423,6 @@ Gives the ability for Slurm to use Berkeley Lab Checkpoint/Restart
 	%{?with_proctrack:--with-proctrack=%{?with_proctrack}}\
 	%{?with_cpusetdir:--with-cpusetdir=%{?with_cpusetdir}} \
 	%{?with_apbasildir:--with-apbasildir=%{?with_apbasildir}} \
-	%{?with_xcpu:--with-xcpu=%{?with_xcpu}} \
 	%{?with_mysql_config:--with-mysql_config=%{?with_mysql_config}} \
 	%{?with_pg_config:--with-pg_config=%{?with_pg_config}} \
 	%{?with_ssl:--with-ssl=%{?with_ssl}} \
@@ -437,12 +435,12 @@ Gives the ability for Slurm to use Berkeley Lab Checkpoint/Restart
 	%{?slurm_with_multiple_slurmd:--enable-multiple-slurmd} \
 	%{?with_cflags}
 
-make %{?_smp_mflags}
+%__make %{?_smp_mflags}
 
 %install
 rm -rf "$RPM_BUILD_ROOT"
-DESTDIR="$RPM_BUILD_ROOT" make install
-DESTDIR="$RPM_BUILD_ROOT" make install-contrib
+DESTDIR="$RPM_BUILD_ROOT" %__make install
+DESTDIR="$RPM_BUILD_ROOT" %__make install-contrib
 
 %ifos aix5.3
    mv ${RPM_BUILD_ROOT}%{_bindir}/srun ${RPM_BUILD_ROOT}%{_sbindir}
@@ -455,9 +453,9 @@ DESTDIR="$RPM_BUILD_ROOT" make install-contrib
       ln -s ../../etc/init.d/slurmdbd $RPM_BUILD_ROOT/usr/sbin/rcslurmdbd
    fi
    if [ -d /usr/lib/systemd/system ]; then
-      install -D -m755 etc/slurmctld.service $RPM_BUILD_ROOT/usr/lib/systemd/system/slurmctld.service
-      install -D -m755 etc/slurmd.service    $RPM_BUILD_ROOT/usr/lib/systemd/system/slurmd.service
-      install -D -m755 etc/slurmdbd.service  $RPM_BUILD_ROOT/usr/lib/systemd/system/slurmdbd.service
+      install -D -m644 etc/slurmctld.service $RPM_BUILD_ROOT/usr/lib/systemd/system/slurmctld.service
+      install -D -m644 etc/slurmd.service    $RPM_BUILD_ROOT/usr/lib/systemd/system/slurmd.service
+      install -D -m644 etc/slurmdbd.service  $RPM_BUILD_ROOT/usr/lib/systemd/system/slurmdbd.service
    fi
 %endif
 
@@ -478,15 +476,17 @@ DESTDIR="$RPM_BUILD_ROOT" make install-contrib
    rm -f $RPM_BUILD_ROOT/%{_sbindir}/slurmconfgen.py
 %endif
 
-install -D -m644 etc/slurm.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/slurm.conf.example
 install -D -m644 etc/cgroup.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/cgroup.conf.example
 install -D -m644 etc/cgroup_allowed_devices_file.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/cgroup_allowed_devices_file.conf.example
 install -D -m755 etc/cgroup.release_common.example ${RPM_BUILD_ROOT}%{_sysconfdir}/cgroup.release_common.example
 install -D -m755 etc/cgroup.release_common.example ${RPM_BUILD_ROOT}%{_sysconfdir}/cgroup/release_freezer
 install -D -m755 etc/cgroup.release_common.example ${RPM_BUILD_ROOT}%{_sysconfdir}/cgroup/release_cpuset
 install -D -m755 etc/cgroup.release_common.example ${RPM_BUILD_ROOT}%{_sysconfdir}/cgroup/release_memory
-install -D -m644 etc/slurmdbd.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/slurmdbd.conf.example
+install -D -m644 etc/layouts.d.power.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/layouts.d.power.conf.example
+install -D -m644 etc/layouts.d.power_cpufreq.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/layouts.d.power_cpufreq.conf.example
+install -D -m644 etc/slurm.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/slurm.conf.example
 install -D -m755 etc/slurm.epilog.clean ${RPM_BUILD_ROOT}%{_sysconfdir}/slurm.epilog.clean
+install -D -m644 etc/slurmdbd.conf.example ${RPM_BUILD_ROOT}%{_sysconfdir}/slurmdbd.conf.example
 install -D -m755 contribs/sgather/sgather ${RPM_BUILD_ROOT}%{_bindir}/sgather
 install -D -m755 contribs/sjstat ${RPM_BUILD_ROOT}%{_bindir}/sjstat
 
@@ -519,6 +519,8 @@ rm -f $RPM_BUILD_ROOT/%{_libdir}/security/*.la
 %if %{?with_pam_dir}0
 rm -f $RPM_BUILD_ROOT/%{with_pam_dir}/pam_slurm.a
 rm -f $RPM_BUILD_ROOT/%{with_pam_dir}/pam_slurm.la
+rm -f $RPM_BUILD_ROOT/%{with_pam_dir}/pam_slurm_adopt.a
+rm -f $RPM_BUILD_ROOT/%{with_pam_dir}/pam_slurm_adopt.la
 %endif
 rm -f $RPM_BUILD_ROOT/lib/security/pam_slurm.a
 rm -f $RPM_BUILD_ROOT/lib/security/pam_slurm.la
@@ -526,6 +528,12 @@ rm -f $RPM_BUILD_ROOT/lib32/security/pam_slurm.a
 rm -f $RPM_BUILD_ROOT/lib32/security/pam_slurm.la
 rm -f $RPM_BUILD_ROOT/lib64/security/pam_slurm.a
 rm -f $RPM_BUILD_ROOT/lib64/security/pam_slurm.la
+rm -f $RPM_BUILD_ROOT/lib/security/pam_slurm_adopt.a
+rm -f $RPM_BUILD_ROOT/lib/security/pam_slurm_adopt.la
+rm -f $RPM_BUILD_ROOT/lib32/security/pam_slurm_adopt.a
+rm -f $RPM_BUILD_ROOT/lib32/security/pam_slurm_adopt.la
+rm -f $RPM_BUILD_ROOT/lib64/security/pam_slurm_adopt.a
+rm -f $RPM_BUILD_ROOT/lib64/security/pam_slurm_adopt.la
 %if ! %{slurm_with auth_none}
 rm -f $RPM_BUILD_ROOT/%{_libdir}/slurm/auth_none.so
 %endif
@@ -588,6 +596,8 @@ test -f $RPM_BUILD_ROOT/usr/lib/systemd/system/slurmctld.service	&&
   echo /usr/lib/systemd/system/slurmctld.service		>> $LIST
 test -f $RPM_BUILD_ROOT/usr/lib/systemd/system/slurmd.service	&&
   echo /usr/lib/systemd/system/slurmd.service		>> $LIST
+test -f $RPM_BUILD_ROOT/%{_bindir}/netloc_to_topology		&&
+  echo %{_bindir}/netloc_to_topology			>> $LIST
 
 test -f $RPM_BUILD_ROOT/opt/modulefiles/slurm/%{version}-%{release} &&
   echo /opt/modulefiles/slurm/%{version}-%{release} >> $LIST
@@ -678,6 +688,8 @@ test -f $RPM_BUILD_ROOT/%{_perldir}/auto/Slurmdb/Slurmdb.bs    &&
 
 LIST=./plugins.files
 touch $LIST
+test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/acct_gather_energy_cray.so  &&
+   echo %{_libdir}/slurm/acct_gather_energy_cray.so  >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/acct_gather_energy_ipmi.so  &&
    echo %{_libdir}/slurm/acct_gather_energy_ipmi.so  >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/acct_gather_energy_rapl.so  &&
@@ -686,18 +698,22 @@ test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/acct_gather_infiniband_ofed.so &&
    echo %{_libdir}/slurm/acct_gather_infiniband_ofed.so >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/acct_gather_profile_hdf5.so &&
    echo %{_libdir}/slurm/acct_gather_profile_hdf5.so >> $LIST
+test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/burst_buffer_cray.so        &&
+   echo %{_libdir}/slurm/burst_buffer_cray.so        >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/crypto_openssl.so           &&
    echo %{_libdir}/slurm/crypto_openssl.so           >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/ext_sensors_rrd.so          &&
    echo %{_libdir}/slurm/ext_sensors_rrd.so          >> $LIST
+test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/jobcomp_elasticsearch.so    &&
+   echo %{_libdir}/slurm/jobcomp_elasticsearch.so    >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/launch_slurm.so             &&
    echo %{_libdir}/slurm/launch_slurm.so             >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/launch_aprun.so             &&
    echo %{_libdir}/slurm/launch_aprun.so             >> $LIST
+test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/power_cray.so               &&
+   echo %{_libdir}/slurm/power_cray.so               >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/select_bluegene.so          &&
    echo %{_libdir}/slurm/select_bluegene.so          >> $LIST
-test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/slurmctld_dynalloc.so       &&
-   echo %{_libdir}/slurm/slurmctld_dynalloc.so       >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/task_affinity.so            &&
    echo %{_libdir}/slurm/task_affinity.so            >> $LIST
 test -f $RPM_BUILD_ROOT/%{_libdir}/slurm/task_cgroup.so              &&
@@ -718,13 +734,21 @@ touch $LIST
 %if %{?with_pam_dir}0
     test -f $RPM_BUILD_ROOT/%{with_pam_dir}/pam_slurm.so	&&
 	echo %{with_pam_dir}/pam_slurm.so	>>$LIST
+    test -f $RPM_BUILD_ROOT/%{with_pam_dir}/pam_slurm_adopt.so	&&
+	echo %{with_pam_dir}/pam_slurm_adopt.so	>>$LIST
 %else
-    test -f $RPM_BUILD_ROOT/lib/security/pam_slurm.so		&&
+    test -f $RPM_BUILD_ROOT/lib/security/pam_slurm.so	&&
 	echo /lib/security/pam_slurm.so		>>$LIST
-    test -f $RPM_BUILD_ROOT/lib32/security/pam_slurm.so		&&
+    test -f $RPM_BUILD_ROOT/lib32/security/pam_slurm.so	&&
 	echo /lib32/security/pam_slurm.so	>>$LIST
-    test -f $RPM_BUILD_ROOT/lib64/security/pam_slurm.so		&&
+    test -f $RPM_BUILD_ROOT/lib64/security/pam_slurm.so	&&
 	echo /lib64/security/pam_slurm.so	>>$LIST
+    test -f $RPM_BUILD_ROOT/lib/security/pam_slurm_adopt.so		&&
+	echo /lib/security/pam_slurm_adopt.so		>>$LIST
+    test -f $RPM_BUILD_ROOT/lib32/security/pam_slurm_adopt.so		&&
+	echo /lib32/security/pam_slurm_adopt.so		>>$LIST
+    test -f $RPM_BUILD_ROOT/lib64/security/pam_slurm_adopt.so		&&
+	echo /lib64/security/pam_slurm_adopt.so		>>$LIST
 %endif
 #############################################################################
 
@@ -748,6 +772,7 @@ rm -rf $RPM_BUILD_ROOT
 %{_libdir}/slurm/src/*
 %{_mandir}/man1/*
 %{_mandir}/man5/acct_gather.*
+%{_mandir}/man5/burst_buffer.*
 %{_mandir}/man5/ext_sensors.*
 %{_mandir}/man5/cgroup.*
 %{_mandir}/man5/cray.*
@@ -772,13 +797,15 @@ rm -rf $RPM_BUILD_ROOT
 %config %{_sysconfdir}/slurm.conf.template
 %{_sbindir}/slurmconfgen.py
 %endif
-%config %{_sysconfdir}/slurm.conf.example
 %config %{_sysconfdir}/cgroup.conf.example
 %config %{_sysconfdir}/cgroup_allowed_devices_file.conf.example
 %config %{_sysconfdir}/cgroup.release_common.example
 %config %{_sysconfdir}/cgroup/release_freezer
 %config %{_sysconfdir}/cgroup/release_cpuset
 %config %{_sysconfdir}/cgroup/release_memory
+%config %{_sysconfdir}/layouts.d.power.conf.example
+%config %{_sysconfdir}/layouts.d.power_cpufreq.conf.example
+%config %{_sysconfdir}/slurm.conf.example
 %config %{_sysconfdir}/slurm.epilog.clean
 %exclude %{_mandir}/man1/sjobexit*
 %exclude %{_mandir}/man1/sjstat*
@@ -874,6 +901,7 @@ rm -rf $RPM_BUILD_ROOT
 %{_libdir}/slurm/acct_gather_infiniband_none.so
 %{_libdir}/slurm/acct_gather_energy_none.so
 %{_libdir}/slurm/acct_gather_profile_none.so
+%{_libdir}/slurm/burst_buffer_generic.so
 %{_libdir}/slurm/checkpoint_none.so
 %{_libdir}/slurm/checkpoint_ompi.so
 %{_libdir}/slurm/core_spec_cray.so
@@ -893,6 +921,9 @@ rm -rf $RPM_BUILD_ROOT
 %{_libdir}/slurm/jobcomp_filetxt.so
 %{_libdir}/slurm/jobcomp_none.so
 %{_libdir}/slurm/jobcomp_script.so
+%{_libdir}/slurm/layouts_power_cpufreq.so
+%{_libdir}/slurm/layouts_power_default.so
+%{_libdir}/slurm/layouts_unit_default.so
 %if ! %{slurm_with bluegene}
 %{_libdir}/slurm/mpi_lam.so
 %{_libdir}/slurm/mpi_mpich1_p4.so
@@ -904,6 +935,7 @@ rm -rf $RPM_BUILD_ROOT
 %{_libdir}/slurm/mpi_pmi2.so
 %endif
 %{_libdir}/slurm/mpi_none.so
+%{_libdir}/slurm/power_none.so
 %{_libdir}/slurm/preempt_job_prio.so
 %{_libdir}/slurm/preempt_none.so
 %{_libdir}/slurm/preempt_partition_prio.so
@@ -930,6 +962,7 @@ rm -rf $RPM_BUILD_ROOT
 %{_libdir}/slurm/switch_none.so
 %{_libdir}/slurm/task_none.so
 %{_libdir}/slurm/topology_3d_torus.so
+%{_libdir}/slurm/topology_hypercube.so
 %{_libdir}/slurm/topology_node_rank.so
 %{_libdir}/slurm/topology_none.so
 %{_libdir}/slurm/topology_tree.so
@@ -1042,7 +1075,7 @@ fi
 %endif
 
 %preun
-if [ "$1" = 0 ]; then
+if [ "$1" -eq 0 ]; then
     if [ -x /etc/init.d/slurm ]; then
 	[ -x /sbin/chkconfig ] && /sbin/chkconfig --del slurm
 	if /etc/init.d/slurm status | grep -q running; then
@@ -1052,7 +1085,7 @@ if [ "$1" = 0 ]; then
 fi
 
 %preun slurmdbd
-if [ "$1" = 0 ]; then
+if [ "$1" -eq 0 ]; then
     if [ -x /etc/init.d/slurmdbd ]; then
 	[ -x /sbin/chkconfig ] && /sbin/chkconfig --del slurmdbd
 	if /etc/init.d/slurmdbd status | grep -q running; then
@@ -1062,7 +1095,9 @@ if [ "$1" = 0 ]; then
 fi
 
 %postun
-if [ "$1" = 0 ]; then
+if [ "$1" -gt 1 ]; then
+    /etc/init.d/slurm condrestart
+elif [ "$1" -eq 0 ]; then
     if [ -x /sbin/ldconfig ]; then
 	/sbin/ldconfig %{_libdir}
     fi
@@ -1071,6 +1106,11 @@ fi
 %insserv_cleanup
 %endif
 
+%postun slurmdbd
+if [ "$1" -gt 1 ]; then
+    /etc/init.d/slurmdbd condrestart
+fi
+
 #############################################################################
 
 
diff --git a/slurm/slurm.h.in b/slurm/slurm.h.in
index bec89e6c2..5ccc90d50 100644
--- a/slurm/slurm.h.in
+++ b/slurm/slurm.h.in
@@ -3,9 +3,9 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
- *  Portions Copyright (C) 2010-2013 SchedMD <http://www.schedmd.com>.
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Copyright 2013 Cray Inc. All Rights Reserved.
+ *  Portions Copyright (C) 2010-2015 SchedMD LLC <http://www.schedmd.com>.
+ *  Portions Copyright (C) 2012-2013 Los Alamos National Security, LLC.
+ *  Portions Copyright 2013 Cray Inc. All Rights Reserved.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>, et. al.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -220,12 +220,21 @@ typedef struct sbcast_cred sbcast_cred_t;		/* opaque data type */
 
 /* INFINITE is used to identify unlimited configurations,  */
 /* eg. the maximum count of nodes any job may use in some partition */
-#define	INFINITE (0xffffffff)
-#define NO_VAL	 (0xfffffffe)
+#define	INFINITE   (0xffffffff)
+#define	INFINITE64 (0xffffffffffffffff)
+#define NO_VAL	   (0xfffffffe)
+#define NO_VAL64   (0xfffffffffffffffe)
 #define MAX_TASKS_PER_NODE 128
 
 /* Job step ID of batch scripts */
 #define SLURM_BATCH_SCRIPT (0xfffffffe)
+/* Job step ID of external process container */
+#define SLURM_EXTERN_CONT  (0xffffffff)
+
+/* How many seconds to wait after eio_signal_shutdown() is called before
+ * terminating the job and abandoning any I/O remaining to be processed.
+ */
+#define DEFAULT_EIO_SHUTDOWN_WAIT 60
 
 #define SLURM_ID_HASH_NUM 10000000000
 /*
@@ -267,30 +276,37 @@ enum job_states {
 	JOB_BOOT_FAIL,		/* terminated due to node boot failure */
 	JOB_END			/* not a real state, last entry in table */
 };
-#define	JOB_STATE_BASE	0x00ff	/* Used for job_states above */
-#define	JOB_STATE_FLAGS	0xff00	/* Used for state flags below */
-#define	JOB_COMPLETING	0x8000	/* Waiting for epilog completion */
-#define	JOB_CONFIGURING	0x4000	/* Allocated nodes booting */
-#define	JOB_RESIZING	0x2000	/* Size of job about to change, flag set
-				 * before calling accounting functions
-				 * immediately before job changes size */
-#define JOB_SPECIAL_EXIT 0x1000 /* Requeue an exit job in hold */
-#define JOB_REQUEUE_HOLD 0x0800 /* Requeue any job in hold */
-#define JOB_REQUEUE      0x0400 /* Requeue job in completing state */
+#define	JOB_STATE_BASE	  0x000000ff	/* Used for job_states above */
+#define	JOB_STATE_FLAGS	  0xffffff00	/* Used for state flags below */
+
+#define JOB_LAUNCH_FAILED 0x00000100
+#define JOB_UPDATE_DB     0x00000200 /* Send job start to database again */
+#define JOB_REQUEUE       0x00000400 /* Requeue job in completing state */
+#define JOB_REQUEUE_HOLD  0x00000800 /* Requeue any job in hold */
+#define JOB_SPECIAL_EXIT  0x00001000 /* Requeue an exit job in hold */
+#define	JOB_RESIZING	  0x00002000 /* Size of job about to change, flag set
+					before calling accounting functions
+					immediately before job changes size */
+#define	JOB_CONFIGURING	  0x00004000 /* Allocated nodes booting */
+#define	JOB_COMPLETING	  0x00008000 /* Waiting for epilog completion */
+#define JOB_STOPPED       0x00010000 /* Job is stopped state (holding resources,
+					but sent SIGSTOP */
 
 #define READY_JOB_FATAL	   -2	/* fatal error */
 #define READY_JOB_ERROR    -1	/* ordinary error */
 #define READY_NODE_STATE 0x01	/* node is ready */
 #define READY_JOB_STATE  0x02	/* job is ready to execute */
 
-#define MAIL_JOB_BEGIN    0x0001	/* notify when job begins */
-#define MAIL_JOB_END      0x0002	/* notify when job ends */
-#define MAIL_JOB_FAIL     0x0004	/* notify if job fails */
-#define MAIL_JOB_REQUEUE  0x0008	/* notify if job requeued */
-#define MAIL_JOB_TIME100  0x0010	/* notify on reaching 100% of time limit */
-#define MAIL_JOB_TIME90   0x0020	/* notify on reaching 90% of time limit */
-#define MAIL_JOB_TIME80   0x0040	/* notify on reaching 80% of time limit */
-#define MAIL_JOB_TIME50   0x0080	/* notify on reaching 50% of time limit */
+#define MAIL_JOB_BEGIN     0x0001	/* Notify when job begins */
+#define MAIL_JOB_END       0x0002	/* Notify when job ends */
+#define MAIL_JOB_FAIL      0x0004	/* Notify if job fails */
+#define MAIL_JOB_REQUEUE   0x0008	/* Notify if job requeued */
+#define MAIL_JOB_TIME100   0x0010	/* Notify on reaching 100% of time limit */
+#define MAIL_JOB_TIME90    0x0020	/* Notify on reaching 90% of time limit */
+#define MAIL_JOB_TIME80    0x0040	/* Notify on reaching 80% of time limit */
+#define MAIL_JOB_TIME50    0x0080	/* Notify on reaching 50% of time limit */
+#define MAIL_JOB_STAGE_OUT 0x0100	/* Notify on completion of burst buffer
+					 * stage out */
 
 #define NICE_OFFSET 10000	/* offset for job's nice value */
 
@@ -343,41 +359,183 @@ enum job_state_reason {
 	WAIT_DEP_INVALID,        /* Dependency condition invalid or never
 				  * satisfied
 				  */
-	WAIT_QOS_GRP_CPU,            /* QOS GrpCpus exceeded */
-	WAIT_QOS_GRP_CPU_MIN,        /* QOS GrpCPUMins exceeded */
-	WAIT_QOS_GRP_CPU_RUN_MIN,    /* QOS GrpCPURunMins exceeded */
+	WAIT_QOS_GRP_CPU,            /* QOS GrpTRES exceeded (CPU) */
+	WAIT_QOS_GRP_CPU_MIN,        /* QOS GrpTRESMins exceeded (CPU) */
+	WAIT_QOS_GRP_CPU_RUN_MIN,    /* QOS GrpTRESRunMins exceeded (CPU) */
 	WAIT_QOS_GRP_JOB,            /* QOS GrpJobs exceeded */
-	WAIT_QOS_GRP_MEMORY,         /* QOS GrpMemory exceeded */
-	WAIT_QOS_GRP_NODES,          /* QOS GrpNodes exceeded */
+	WAIT_QOS_GRP_MEM,            /* QOS GrpTRES exceeded (Memory) */
+	WAIT_QOS_GRP_NODE,           /* QOS GrpTRES exceeded (Node) */
 	WAIT_QOS_GRP_SUB_JOB,        /* QOS GrpSubmitJobs exceeded */
 	WAIT_QOS_GRP_WALL,           /* QOS GrpWall exceeded */
-	WAIT_QOS_MAX_CPUS_PER_JOB,   /* QOS MaxCpusPerJob exceeded */
-	WAIT_QOS_MAX_CPU_MINS_PER_JOB,/* QOS MaxCpusMinsPerJob exceeded */
-	WAIT_QOS_MAX_NODE_PER_JOB,   /* QOS MaxNodesPerJob exceeded */
+	WAIT_QOS_MAX_CPU_PER_JOB,    /* QOS MaxTRESPerJob exceeded (CPU) */
+	WAIT_QOS_MAX_CPU_MINS_PER_JOB,/* QOS MaxTRESMinsPerJob exceeded (CPU) */
+	WAIT_QOS_MAX_NODE_PER_JOB,   /* QOS MaxTRESPerJob exceeded (Node) */
 	WAIT_QOS_MAX_WALL_PER_JOB,   /* QOS MaxWallDurationPerJob exceeded */
-	WAIT_QOS_MAX_CPU_PER_USER,   /* QOS MaxCpusPerUser exceeded */
+	WAIT_QOS_MAX_CPU_PER_USER,   /* QOS MaxTRESPerUser exceeded (CPU) */
 	WAIT_QOS_MAX_JOB_PER_USER,   /* QOS MaxJobsPerUser exceeded */
-	WAIT_QOS_MAX_NODE_PER_USER,  /* QOS MaxNodesPerUser exceeded */
+	WAIT_QOS_MAX_NODE_PER_USER,  /* QOS MaxTRESPerUser exceeded (Node) */
 	WAIT_QOS_MAX_SUB_JOB,        /* QOS MaxSubmitJobsPerUser exceeded */
-	WAIT_QOS_MIN_CPUS,           /* QOS MinCPUsPerJob not reached */
-	WAIT_ASSOC_GRP_CPU,          /* ASSOC GrpCpus exceeded */
-	WAIT_ASSOC_GRP_CPU_MIN,      /* ASSOC GrpCPUMins exceeded */
-	WAIT_ASSOC_GRP_CPU_RUN_MIN,  /* ASSOC GrpCPURunMins exceeded */
+	WAIT_QOS_MIN_CPU,            /* QOS MinTRESPerJob not reached (CPU) */
+	WAIT_ASSOC_GRP_CPU,          /* ASSOC GrpTRES exceeded (CPU) */
+	WAIT_ASSOC_GRP_CPU_MIN,      /* ASSOC GrpTRESMins exceeded (CPU) */
+	WAIT_ASSOC_GRP_CPU_RUN_MIN,  /* ASSOC GrpTRESRunMins exceeded (CPU) */
 	WAIT_ASSOC_GRP_JOB,          /* ASSOC GrpJobs exceeded */
-	WAIT_ASSOC_GRP_MEMORY,       /* ASSOC GrpMemory exceeded */
-	WAIT_ASSOC_GRP_NODES,        /* ASSOC GrpNodes exceeded */
+	WAIT_ASSOC_GRP_MEM,          /* ASSOC GrpTRES exceeded (Memory) */
+	WAIT_ASSOC_GRP_NODE,         /* ASSOC GrpTRES exceeded (Node) */
 	WAIT_ASSOC_GRP_SUB_JOB,      /* ASSOC GrpSubmitJobs exceeded */
 	WAIT_ASSOC_GRP_WALL,         /* ASSOC GrpWall exceeded */
 	WAIT_ASSOC_MAX_JOBS,         /* ASSOC MaxJobs exceeded */
-	WAIT_ASSOC_MAX_CPUS_PER_JOB, /* ASSOC MaxCpusPerJob exceeded */
-	WAIT_ASSOC_MAX_CPU_MINS_PER_JOB,/* ASSOC MaxCpusMinsPerJob exceeded */
-	WAIT_ASSOC_MAX_NODE_PER_JOB, /* ASSOC MaxNodesPerJob exceeded */
+	WAIT_ASSOC_MAX_CPU_PER_JOB,  /* ASSOC MaxTRESPerJob exceeded (CPU) */
+	WAIT_ASSOC_MAX_CPU_MINS_PER_JOB,/* ASSOC MaxTRESMinsPerJob
+					 * exceeded (CPU) */
+	WAIT_ASSOC_MAX_NODE_PER_JOB, /* ASSOC MaxTRESPerJob exceeded (NODE) */
 	WAIT_ASSOC_MAX_WALL_PER_JOB, /* ASSOC MaxWallDurationPerJob
 				      * exceeded */
 	WAIT_ASSOC_MAX_SUB_JOB,      /* ASSOC MaxSubmitJobsPerUser exceeded */
 
 	WAIT_MAX_REQUEUE,            /* MAX_BATCH_REQUEUE reached */
-	WAIT_ARRAY_TASK_LIMIT        /* job array running task limit */
+	WAIT_ARRAY_TASK_LIMIT,       /* job array running task limit */
+	WAIT_BURST_BUFFER_RESOURCE,  /* Burst buffer resources */
+	WAIT_BURST_BUFFER_STAGING,   /* Burst buffer file stage-in */
+	FAIL_BURST_BUFFER_OP,	     /* Burst buffer operation failure */
+	WAIT_POWER_NOT_AVAIL,        /* not enough power available */
+	WAIT_POWER_RESERVED,         /* job is waiting for available power
+				      * because of power reservations */
+	WAIT_ASSOC_GRP_UNK,          /* ASSOC GrpTRES exceeded
+				      * (Unknown) */
+	WAIT_ASSOC_GRP_UNK_MIN,      /* ASSOC GrpTRESMins exceeded
+				      * (Unknown) */
+	WAIT_ASSOC_GRP_UNK_RUN_MIN,  /* ASSOC GrpTRESRunMins exceeded
+				      * (Unknown) */
+	WAIT_ASSOC_MAX_UNK_PER_JOB,  /* ASSOC MaxTRESPerJob exceeded
+				      * (Unknown) */
+	WAIT_ASSOC_MAX_UNK_PER_NODE,  /* ASSOC MaxTRESPerNode exceeded
+				       * (Unknown) */
+	WAIT_ASSOC_MAX_UNK_MINS_PER_JOB,/* ASSOC MaxTRESMinsPerJob
+					 * exceeded (Unknown) */
+	WAIT_ASSOC_MAX_CPU_PER_NODE,  /* ASSOC MaxTRESPerNode exceeded (CPU) */
+	WAIT_ASSOC_GRP_MEM_MIN,      /* ASSOC GrpTRESMins exceeded
+				      * (Memory) */
+	WAIT_ASSOC_GRP_MEM_RUN_MIN,  /* ASSOC GrpTRESRunMins exceeded
+				      * (Memory) */
+	WAIT_ASSOC_MAX_MEM_PER_JOB,  /* ASSOC MaxTRESPerJob exceeded (Memory) */
+	WAIT_ASSOC_MAX_MEM_PER_NODE,  /* ASSOC MaxTRESPerNode exceeded (Memory) */
+	WAIT_ASSOC_MAX_MEM_MINS_PER_JOB,/* ASSOC MaxTRESMinsPerJob
+					 * exceeded (Memory) */
+	WAIT_ASSOC_GRP_NODE_MIN,     /* ASSOC GrpTRESMins exceeded (Node) */
+	WAIT_ASSOC_GRP_NODE_RUN_MIN, /* ASSOC GrpTRESRunMins exceeded (Node) */
+	WAIT_ASSOC_MAX_NODE_MINS_PER_JOB,/* ASSOC MaxTRESMinsPerJob
+					  * exceeded (Node) */
+	WAIT_ASSOC_GRP_ENERGY,           /* ASSOC GrpTRES exceeded
+					  * (Energy) */
+	WAIT_ASSOC_GRP_ENERGY_MIN,       /* ASSOC GrpTRESMins exceeded
+					  * (Energy) */
+	WAIT_ASSOC_GRP_ENERGY_RUN_MIN,   /* ASSOC GrpTRESRunMins exceeded
+					  * (Energy) */
+	WAIT_ASSOC_MAX_ENERGY_PER_JOB,   /* ASSOC MaxTRESPerJob exceeded
+					  * (Energy) */
+	WAIT_ASSOC_MAX_ENERGY_PER_NODE,  /* ASSOC MaxTRESPerNode
+					  * exceeded (Energy) */
+	WAIT_ASSOC_MAX_ENERGY_MINS_PER_JOB,/* ASSOC MaxTRESMinsPerJob
+					    * exceeded (Energy) */
+	WAIT_ASSOC_GRP_GRES,          /* ASSOC GrpTRES exceeded (GRES) */
+	WAIT_ASSOC_GRP_GRES_MIN,      /* ASSOC GrpTRESMins exceeded (GRES) */
+	WAIT_ASSOC_GRP_GRES_RUN_MIN,  /* ASSOC GrpTRESRunMins exceeded (GRES) */
+	WAIT_ASSOC_MAX_GRES_PER_JOB,  /* ASSOC MaxTRESPerJob exceeded (GRES) */
+	WAIT_ASSOC_MAX_GRES_PER_NODE, /* ASSOC MaxTRESPerNode exceeded (GRES) */
+	WAIT_ASSOC_MAX_GRES_MINS_PER_JOB,/* ASSOC MaxTRESMinsPerJob
+					  * exceeded (GRES) */
+	WAIT_ASSOC_GRP_LIC,          /* ASSOC GrpTRES exceeded
+				      * (license) */
+	WAIT_ASSOC_GRP_LIC_MIN,      /* ASSOC GrpTRESMins exceeded
+				      * (license) */
+	WAIT_ASSOC_GRP_LIC_RUN_MIN,  /* ASSOC GrpTRESRunMins exceeded
+				      * (license) */
+	WAIT_ASSOC_MAX_LIC_PER_JOB,  /* ASSOC MaxTRESPerJob exceeded
+				      * (license) */
+	WAIT_ASSOC_MAX_LIC_MINS_PER_JOB,/* ASSOC MaxTRESMinsPerJob exceeded
+					 * (license) */
+	WAIT_ASSOC_GRP_BB,          /* ASSOC GrpTRES exceeded
+				     * (burst buffer) */
+	WAIT_ASSOC_GRP_BB_MIN,      /* ASSOC GrpTRESMins exceeded
+				     * (burst buffer) */
+	WAIT_ASSOC_GRP_BB_RUN_MIN,  /* ASSOC GrpTRESRunMins exceeded
+				     * (burst buffer) */
+	WAIT_ASSOC_MAX_BB_PER_JOB,  /* ASSOC MaxTRESPerJob exceeded
+				     * (burst buffer) */
+	WAIT_ASSOC_MAX_BB_PER_NODE, /* ASSOC MaxTRESPerNode exceeded
+				     * (burst buffer) */
+	WAIT_ASSOC_MAX_BB_MINS_PER_JOB,/* ASSOC MaxTRESMinsPerJob exceeded
+					* (burst buffer) */
+	WAIT_QOS_GRP_UNK,           /* QOS GrpTRES exceeded (Unknown) */
+	WAIT_QOS_GRP_UNK_MIN,       /* QOS GrpTRESMins exceeded (Unknown) */
+	WAIT_QOS_GRP_UNK_RUN_MIN,   /* QOS GrpTRESRunMins exceeded (Unknown) */
+	WAIT_QOS_MAX_UNK_PER_JOB,   /* QOS MaxTRESPerJob exceeded (Unknown) */
+	WAIT_QOS_MAX_UNK_PER_NODE,  /* QOS MaxTRESPerNode exceeded (Unknown) */
+	WAIT_QOS_MAX_UNK_PER_USER,  /* QOS MaxTRESPerUser exceeded (Unknown) */
+	WAIT_QOS_MAX_UNK_MINS_PER_JOB,/* QOS MaxTRESMinsPerJob
+				       * exceeded (Unknown) */
+	WAIT_QOS_MIN_UNK,           /* QOS MinTRESPerJob exceeded (Unknown) */
+	WAIT_QOS_MAX_CPU_PER_NODE,  /* QOS MaxTRESPerNode exceeded (CPU) */
+	WAIT_QOS_GRP_MEM_MIN,       /* QOS GrpTRESMins exceeded
+				     * (Memory) */
+	WAIT_QOS_GRP_MEM_RUN_MIN,   /* QOS GrpTRESRunMins exceeded
+				     * (Memory) */
+	WAIT_QOS_MAX_MEM_MINS_PER_JOB,/* QOS MaxTRESMinsPerJob
+				       * exceeded (Memory) */
+	WAIT_QOS_MAX_MEM_PER_JOB,   /* QOS MaxTRESPerJob exceeded (Memory) */
+	WAIT_QOS_MAX_MEM_PER_NODE,  /* QOS MaxTRESPerNode exceeded (MEM) */
+	WAIT_QOS_MAX_MEM_PER_USER,  /* QOS MaxTRESPerUser exceeded (Memory) */
+	WAIT_QOS_MIN_MEM,           /* QOS MinTRESPerJob not reached (Memory) */
+	WAIT_QOS_GRP_ENERGY,        /* QOS GrpTRES exceeded (Energy) */
+	WAIT_QOS_GRP_ENERGY_MIN,    /* QOS GrpTRESMins exceeded (Energy) */
+	WAIT_QOS_GRP_ENERGY_RUN_MIN, /* QOS GrpTRESRunMins exceeded (Energy) */
+	WAIT_QOS_MAX_ENERGY_PER_JOB, /* QOS MaxTRESPerJob exceeded (Energy) */
+	WAIT_QOS_MAX_ENERGY_PER_NODE,/* QOS MaxTRESPerNode exceeded (Energy) */
+	WAIT_QOS_MAX_ENERGY_PER_USER,/* QOS MaxTRESPerUser exceeded (Energy) */
+	WAIT_QOS_MAX_ENERGY_MINS_PER_JOB,/* QOS MaxTRESMinsPerJob
+					  * exceeded (Energy) */
+	WAIT_QOS_MIN_ENERGY,        /* QOS MinTRESPerJob not reached (Energy) */
+	WAIT_QOS_GRP_NODE_MIN,     /* QOS GrpTRESMins exceeded (Node) */
+	WAIT_QOS_GRP_NODE_RUN_MIN, /* QOS GrpTRESRunMins exceeded (Node) */
+	WAIT_QOS_MAX_NODE_MINS_PER_JOB,  /* QOS MaxTRESMinsPerJob
+					  * exceeded (Node) */
+	WAIT_QOS_MIN_NODE,          /* QOS MinTRESPerJob not reached (Node) */
+	WAIT_QOS_GRP_GRES,          /* QOS GrpTRES exceeded (GRES) */
+	WAIT_QOS_GRP_GRES_MIN,      /* QOS GrpTRESMins exceeded (GRES) */
+	WAIT_QOS_GRP_GRES_RUN_MIN,  /* QOS GrpTRESRunMins exceeded (GRES) */
+	WAIT_QOS_MAX_GRES_PER_JOB,  /* QOS MaxTRESPerJob exceeded (GRES) */
+	WAIT_QOS_MAX_GRES_PER_NODE, /* QOS MaxTRESPerNode exceeded (GRES) */
+	WAIT_QOS_MAX_GRES_PER_USER, /* QOS MaxTRESPerUser exceeded
+				     * (GRES) */
+	WAIT_QOS_MAX_GRES_MINS_PER_JOB,/* QOS MaxTRESMinsPerJob
+					* exceeded (GRES) */
+	WAIT_QOS_MIN_GRES,          /* QOS MinTRESPerJob not reached (GRES) */
+	WAIT_QOS_GRP_LIC,           /* QOS GrpTRES exceeded (license) */
+	WAIT_QOS_GRP_LIC_MIN,       /* QOS GrpTRESMins exceeded (license) */
+	WAIT_QOS_GRP_LIC_RUN_MIN,   /* QOS GrpTRESRunMins exceeded (license) */
+	WAIT_QOS_MAX_LIC_PER_JOB,   /* QOS MaxTRESPerJob exceeded (license) */
+	WAIT_QOS_MAX_LIC_PER_USER,  /* QOS MaxTRESPerUser exceeded (license) */
+	WAIT_QOS_MAX_LIC_MINS_PER_JOB,/* QOS MaxTRESMinsPerJob exceeded
+				       * (license) */
+	WAIT_QOS_MIN_LIC,           /* QOS MinTRESPerJob not reached
+				     * (license) */
+	WAIT_QOS_GRP_BB,            /* QOS GrpTRES exceeded
+				     * (burst buffer) */
+	WAIT_QOS_GRP_BB_MIN,        /* QOS GrpTRESMins exceeded
+				     * (burst buffer) */
+	WAIT_QOS_GRP_BB_RUN_MIN,    /* QOS GrpTRESRunMins exceeded
+				     * (burst buffer) */
+	WAIT_QOS_MAX_BB_PER_JOB,   /* QOS MaxTRESPerJob exceeded
+				    * (burst buffer) */
+	WAIT_QOS_MAX_BB_PER_NODE,  /* QOS MaxTRESPerNode exceeded
+				    * (burst buffer) */
+	WAIT_QOS_MAX_BB_PER_USER,  /* QOS MaxTRESPerUser exceeded
+				    * (burst buffer) */
+	WAIT_QOS_MAX_BB_MINS_PER_JOB,/* QOS MaxTRESMinsPerJob exceeded
+				      * (burst buffer) */
+	WAIT_QOS_MIN_BB,           /* QOS MinTRESPerJob not reached
+				    * (burst buffer) */
 };
 
 enum job_acct_types {
@@ -526,9 +684,9 @@ enum jobacct_data_type {
 	JOBACCT_DATA_TOT_PAGES,	/* data-> uint64_t psize */
 	JOBACCT_DATA_MIN_CPU,	/* data-> uint32_t psize */
 	JOBACCT_DATA_MIN_CPU_ID,	/* data-> jobacct_id_t psize */
-	JOBACCT_DATA_TOT_CPU,	/* data-> uint32_t psize */
+	JOBACCT_DATA_TOT_CPU,	/* data-> double psize */
 	JOBACCT_DATA_ACT_CPUFREQ, /* data-> uint32_t psize hb*/
-	JOBACCT_DATA_CONSUMED_ENERGY, /* data-> uint32_t psize hb*/
+	JOBACCT_DATA_CONSUMED_ENERGY, /* data-> uint64_t psize hb*/
 	JOBACCT_DATA_MAX_DISK_READ, /* data->double psize */
 	JOBACCT_DATA_MAX_DISK_READ_ID, /* data->jobacct_id_t psize */
 	JOBACCT_DATA_TOT_DISK_READ, /* data->double psize */
@@ -543,35 +701,81 @@ enum acct_energy_type {
 	ENERGY_DATA_RECONFIG,
 	ENERGY_DATA_PROFILE,
 	ENERGY_DATA_LAST_POLL,
+	ENERGY_DATA_SENSOR_CNT,
+	ENERGY_DATA_NODE_ENERGY,
 };
 
-/* Possible task distributions across the nodes */
+/*
+ * Task distribution states/methods
+ *
+ * Symbol format is SLURM_DIST_<node>_<socket>_<core>
+ *
+ * <node>   = Method for distributing tasks to nodes.
+ *            This determines the order in which task ids are 
+ *            distributed to the nodes selected for the job/step.
+ * <socket> = Method for distributing allocated lllps across sockets.
+ *            This determines the order in which allocated lllps are
+ *            distributed across sockets for binding to tasks.
+ * <core>   = Method for distributing allocated lllps across cores.
+ *            This determines the order in which allocated lllps are
+ *            distributed across cores for binding to tasks.
+ *
+ * Note that the socket and core distributions apply only to task affinity.
+ */
 typedef enum task_dist_states {
 	/* NOTE: start SLURM_DIST_CYCLIC at 1 for HP MPI */
-	SLURM_DIST_CYCLIC = 1,	/* distribute tasks 1 per node, round robin */
-	SLURM_DIST_BLOCK,	/* distribute tasks filling node by node */
-	SLURM_DIST_ARBITRARY,	/* arbitrary task distribution  */
-	SLURM_DIST_PLANE,	/* distribute tasks by filling up
-				   planes of lllp first and then by
-				   going across the nodes See
-				   documentation for more
-				   information */
-	SLURM_DIST_CYCLIC_CYCLIC,/* distribute tasks 1 per node,
-				    round robin, same for lowest
-				    level of logical processor (lllp) */
-	SLURM_DIST_CYCLIC_BLOCK, /* cyclic for node and block for lllp  */
-	SLURM_DIST_BLOCK_CYCLIC, /* block for node and cyclic for lllp  */
-	SLURM_DIST_BLOCK_BLOCK,	/* block for node and block for lllp  */
-	SLURM_NO_LLLP_DIST,	/* No distribution specified for lllp */
-	SLURM_DIST_UNKNOWN,	/* unknown dist */
-	SLURM_DIST_CYCLIC_CFULL, /* Same as cyclic:cyclic except for
-				    multi-cpu tasks cyclically
-				    bind cpus */
-	SLURM_DIST_BLOCK_CFULL, /* Same as block:cyclic except for
-				   multi-cpu tasks cyclically
-				   bind cpus  */
+	SLURM_DIST_CYCLIC               = 0x0001,
+	SLURM_DIST_BLOCK                = 0x0002,
+	SLURM_DIST_ARBITRARY            = 0x0003,
+	SLURM_DIST_PLANE                = 0x0004,
+	SLURM_DIST_CYCLIC_CYCLIC        = 0x0011,
+	SLURM_DIST_CYCLIC_BLOCK         = 0x0021,
+	SLURM_DIST_CYCLIC_CFULL         = 0x0031,
+	SLURM_DIST_BLOCK_CYCLIC         = 0x0012,
+	SLURM_DIST_BLOCK_BLOCK          = 0x0022,
+	SLURM_DIST_BLOCK_CFULL          = 0x0032,
+	SLURM_DIST_CYCLIC_CYCLIC_CYCLIC = 0x0111,
+	SLURM_DIST_CYCLIC_CYCLIC_BLOCK  = 0x0211,
+	SLURM_DIST_CYCLIC_CYCLIC_CFULL  = 0x0311,
+	SLURM_DIST_CYCLIC_BLOCK_CYCLIC  = 0x0121,
+	SLURM_DIST_CYCLIC_BLOCK_BLOCK   = 0x0221,
+	SLURM_DIST_CYCLIC_BLOCK_CFULL   = 0x0321,
+	SLURM_DIST_CYCLIC_CFULL_CYCLIC  = 0x0131,
+	SLURM_DIST_CYCLIC_CFULL_BLOCK   = 0x0231,
+	SLURM_DIST_CYCLIC_CFULL_CFULL   = 0x0331,
+	SLURM_DIST_BLOCK_CYCLIC_CYCLIC  = 0x0112,
+	SLURM_DIST_BLOCK_CYCLIC_BLOCK   = 0x0212,
+	SLURM_DIST_BLOCK_CYCLIC_CFULL   = 0x0312,
+	SLURM_DIST_BLOCK_BLOCK_CYCLIC   = 0x0122,
+	SLURM_DIST_BLOCK_BLOCK_BLOCK    = 0x0222,
+	SLURM_DIST_BLOCK_BLOCK_CFULL    = 0x0322,
+	SLURM_DIST_BLOCK_CFULL_CYCLIC   = 0x0132,
+	SLURM_DIST_BLOCK_CFULL_BLOCK    = 0x0232,
+	SLURM_DIST_BLOCK_CFULL_CFULL    = 0x0332,
+	
+	SLURM_DIST_NODECYCLIC           = 0x0001,
+	SLURM_DIST_NODEBLOCK            = 0x0002,	
+	SLURM_DIST_SOCKCYCLIC           = 0x0010,
+	SLURM_DIST_SOCKBLOCK            = 0x0020,
+	SLURM_DIST_SOCKCFULL            = 0x0030,
+	SLURM_DIST_CORECYCLIC           = 0x0100,
+	SLURM_DIST_COREBLOCK            = 0x0200,
+	SLURM_DIST_CORECFULL            = 0x0300,
+
+	SLURM_DIST_NO_LLLP              = 0x1000,
+	SLURM_DIST_UNKNOWN              = 0x2000,
 } task_dist_states_t;
 
+#define SLURM_DIST_STATE_BASE		0x00FFFF
+#define SLURM_DIST_STATE_FLAGS		0xFF0000
+#define SLURM_DIST_PACK_NODES		0x800000
+#define SLURM_DIST_NO_PACK_NODES	0x400000
+
+#define SLURM_DIST_NODEMASK               0xF00F
+#define SLURM_DIST_SOCKMASK               0xF0F0
+#define SLURM_DIST_COREMASK               0xFF00
+#define SLURM_DIST_NODESOCKMASK           0xF0FF
+
 /* Open stdout/err file mode, 0 for system default (JobFileAppend) */
 #define OPEN_MODE_APPEND	1
 #define OPEN_MODE_TRUNCATE	2
@@ -605,21 +809,30 @@ typedef enum cpu_bind_type {	/* cpu binding type from --cpu_bind=... */
 	CPU_BIND_CPUSETS   = 0x8000,
 
 	/* default binding if auto binding doesn't match. */
-	CPU_AUTO_BIND_TO_THREADS = 0x04000
+	CPU_AUTO_BIND_TO_THREADS = 0x04000,
+	CPU_AUTO_BIND_TO_CORES   = 0x10000,
+	CPU_AUTO_BIND_TO_SOCKETS = 0x20000
 } cpu_bind_type_t;
 
 /* Flag to indicate that cpu_freq is a range: low,medium,high,high-1
  * instead of an integer value in kilohertz */
-#define CPU_FREQ_RANGE_FLAG	0x80000000
-#define CPU_FREQ_LOW		0x80000001
-#define CPU_FREQ_MEDIUM		0x80000002
-#define CPU_FREQ_HIGH		0x80000003
-#define CPU_FREQ_HIGHM1		0x80000004
-#define CPU_FREQ_PERFORMANCE	0x80000005
-#define CPU_FREQ_POWERSAVE	0x80000006
-#define CPU_FREQ_USERSPACE	0x80000007
-#define CPU_FREQ_ONDEMAND	0x80000008
-#define CPU_FREQ_CONSERVATIVE	0x80000009
+#define CPU_FREQ_RANGE_FLAG		0x80000000
+#define CPU_FREQ_LOW			0x80000001
+#define CPU_FREQ_MEDIUM			0x80000002
+#define CPU_FREQ_HIGH			0x80000003
+#define CPU_FREQ_HIGHM1			0x80000004
+#define CPU_FREQ_CONSERVATIVE		0x88000000
+#define CPU_FREQ_ONDEMAND		0x84000000
+#define CPU_FREQ_PERFORMANCE		0x82000000
+#define CPU_FREQ_POWERSAVE		0x81000000
+#define CPU_FREQ_USERSPACE		0x80800000
+#define CPU_FREQ_GOV_MASK   		0x8ff00000
+/* Vestigial values for transition from v14.11 systems */
+#define CPU_FREQ_PERFORMANCE_OLD	0x80000005
+#define CPU_FREQ_POWERSAVE_OLD		0x80000006
+#define CPU_FREQ_USERSPACE_OLD		0x80000007
+#define CPU_FREQ_ONDEMAND_OLD		0x80000008
+#define CPU_FREQ_CONSERVATIVE_OLD	0x80000009
 
 typedef enum mem_bind_type {    /* memory binding type from --mem_bind=... */
 	/* verbose can be set with any other flag */
@@ -633,6 +846,13 @@ typedef enum mem_bind_type {    /* memory binding type from --mem_bind=... */
 	MEM_BIND_LOCAL  = 0x20	/* =local */
 } mem_bind_type_t;
 
+typedef enum accel_bind_type {    /* accelerator binding from --accel_bind= */
+	ACCEL_BIND_VERBOSE         = 0x01, /* 'v' verbose */
+	ACCEL_BIND_CLOSEST_GPU     = 0x02, /* 'g' Use closest GPU to the CPU */
+	ACCEL_BIND_CLOSEST_MIC     = 0x04, /* 'm' Use closest MIC to CPU */
+	ACCEL_BIND_CLOSEST_NIC     = 0x08, /* 'n' Use closest NIC to CPU */
+} accel_bind_type_t;
+
 /* The last entry in node_states must be STATE_END, keep in sync with
  * node_state_string. values may be ORed with NODE_STATE_FLAGS below.
  * Node states typically alternate between NODE_STATE_IDLE and
@@ -738,12 +958,8 @@ enum ctx_keys {
 /* By default, distribute cores using a block approach inside the nodes */
 #define CR_CORE_DEFAULT_DIST_BLOCK 0x1000
 
-/* Allocate full sockets to jobs rather than individual cores */
-#define CR_ALLOCATE_FULL_SOCKET 0x2000
-
 #define CR_LLN		0x4000  /* Select nodes by "least loaded." */
 
-
 #define MEM_PER_CPU  0x80000000
 #define SHARED_FORCE 0x8000
 
@@ -770,12 +986,12 @@ enum ctx_keys {
 					 * value that is higher than slurmd */
 
 #define PRIORITY_FLAGS_ACCRUE_ALWAYS	0x0001	/* Flag to always accrue age
-						 * priority to pending jobs ignoring
-						 * dependencies or holds */
-#define PRIORITY_FLAGS_TICKET_BASED     0x0002 /* Enable the ticket based multifactor
-						* plugin. The default is usage based
-						* multifactor plugin.
-						*/
+						 * priority to pending jobs
+						 * ignoring dependencies or
+						 * holds */
+#define PRIORITY_FLAGS_MAX_TRES 	0x0002  /* Calculate billed_tres as the
+						 * MAX of TRES on a node rather
+						 * than the sum of TRES. */
 #define PRIORITY_FLAGS_SIZE_RELATIVE	0x0004	/* Enable job size measurement
 						 * relative to its time limit */
 #define PRIORITY_FLAGS_DEPTH_OBLIVIOUS	0x0008	/* Flag to use depth oblivious
@@ -785,7 +1001,14 @@ enum ctx_keys {
 						 * running jobs, not only the
 						 * pending jobs. */
 #define PRIORITY_FLAGS_FAIR_TREE	0x0020	/* Prioritize by level in
-+						 * account hierarchy. */
+						 * account hierarchy. */
+/* These bits are set in job_desc_msg_t and are used by the
+ * slurmctld. The first two bits indicate if job should be canceled
+ * if it has invalid dependency or should stay pending indefinitely.
+ */
+#define KILL_INV_DEP       0x01
+#define NO_KILL_INV_DEP    0x02
+
 /*****************************************************************************\
  *      SLURM HOSTLIST FUNCTIONS
 \*****************************************************************************/
@@ -1086,6 +1309,8 @@ typedef bitstr_t bitoff_t;
 #define ALLOC_SID_ADMIN_HOLD	0x00000001	/* admin job hold */
 #define ALLOC_SID_USER_HOLD	0x00000002	/* user job hold */
 
+#define SLURM_POWER_FLAGS_LEVEL 0x0001		/* Equal power cap on all nodes */
+
 /*****************************************************************************\
  *	PROTOCOL DATA STRUCTURE DEFINITIONS
 \*****************************************************************************/
@@ -1095,21 +1320,36 @@ typedef struct dynamic_plugin_data {
 } dynamic_plugin_data_t;
 
 typedef struct acct_gather_energy {
-	uint32_t base_consumed_energy;
+	uint64_t base_consumed_energy;
 	uint32_t base_watts;	  /* lowest power consump of node, in watts */
-	uint32_t consumed_energy; /* total energy consumed by node, in joules */
+	uint64_t consumed_energy; /* total energy consumed by node, in joules */
 	uint32_t current_watts;	  /* current power consump of node, in watts */
-	uint32_t previous_consumed_energy;
+	uint64_t previous_consumed_energy;
 	time_t poll_time;         /* When information was last retrieved */
 } acct_gather_energy_t;
 
 typedef struct ext_sensors_data {
-	uint32_t consumed_energy;    /* total energy consumed, in joules */
+	uint64_t consumed_energy;    /* total energy consumed, in joules */
 	uint32_t temperature;	     /* temperature, in celsius */
 	time_t   energy_update_time; /* last update time for consumed_energy */
 	uint32_t current_watts;      /* current power consumption, in watts */
 } ext_sensors_data_t;
 
+typedef struct power_mgmt_data {
+	uint32_t cap_watts;	/* power consumption limit of node, in watts */
+	uint32_t current_watts;	/* current power consumption, in watts */
+	uint64_t joule_counter;	/* total energy consumption by node, in joules */
+	uint32_t new_cap_watts;	/* new power consumption limit of node, in watts */
+	uint32_t max_watts;	/* maximum power consumption by node, in watts */
+	uint32_t min_watts;	/* minimum power consumption by node, in watts */
+	time_t new_job_time;	/* set when a new job has been scheduled on the
+				 * node, used to trigger higher cap */
+	uint16_t state;		/* Power state information */
+	uint64_t time_usec;	/* Data timestamp in microseconds since start
+				 * of the day */
+} power_mgmt_data_t;
+
+#define CORE_SPEC_THREAD 0x8000	/* If set, this is a thread count not core count */
 typedef struct job_descriptor {	/* For submit, allocate, and update requests */
 	char *account;		/* charge to specified account */
 	char *acctg_freq;	/* accounting polling intervals (seconds) */
@@ -1127,12 +1367,16 @@ typedef struct job_descriptor {	/* For submit, allocate, and update requests */
 	char *array_inx;	/* job array index values */
 	void *array_bitmap;	/* NOTE: Set by slurmctld */
 	time_t begin_time;	/* delay initiation until this time */
+	uint32_t bitflags;      /* bitflags */
+	char *burst_buffer;	/* burst buffer specifications */
 	uint16_t ckpt_interval;	/* periodically checkpoint this job */
 	char *ckpt_dir;	 	/* directory to store checkpoint images */
+	char *clusters;		/* cluster names used for multi-cluster jobs */
 	char *comment;		/* arbitrary comment (used by Moab scheduler) */
 	uint16_t contiguous;	/* 1 if job requires contiguous nodes,
 				 * 0 otherwise,default=0 */
-	uint16_t core_spec;	/* specialized core count */
+	uint16_t core_spec;	/* specialized core/thread count,
+				 * see CORE_SPEC_THREAD */
 	char *cpu_bind;		/* binding map for map/mask_cpu - This
 				 * currently does not matter to the
 				 * job allocation, setting this does
@@ -1141,6 +1385,9 @@ typedef struct job_descriptor {	/* For submit, allocate, and update requests */
 				 * currently does not matter to the
 				 * job allocation, setting this does
 				 * not do anything for steps. */
+	uint32_t cpu_freq_min;  /* Minimum cpu frequency  */
+	uint32_t cpu_freq_max;  /* Maximum cpu frequency  */
+	uint32_t cpu_freq_gov;  /* cpu frequency governor */
 	char *dependency;	/* synchronize job execution with other jobs */
 	time_t end_time;	/* time by which job must complete, used for
 				 * job update only now, possible deadline
@@ -1180,6 +1427,8 @@ typedef struct job_descriptor {	/* For submit, allocate, and update requests */
 				 * default in SLURM config */
 	uint16_t plane_size;	/* plane size when task_dist =
 				   SLURM_DIST_PLANE */
+	uint8_t power_flags;	/* power management flags,
+				 * see SLURM_POWER_FLAGS_ */
 	uint32_t priority;	/* relative priority of the job,
 				 * explicitly set only for user root,
 				 * 0 == held (don't initiate) */
@@ -1192,14 +1441,17 @@ typedef struct job_descriptor {	/* For submit, allocate, and update requests */
 	uint16_t requeue;	/* enable or disable job requeue option */
 	char *reservation;	/* name of reservation to use */
 	char *script;		/* the actual job script, default NONE */
-	uint16_t shared;	/* 1 if job can share nodes with other jobs,
+	uint16_t shared;	/* 2 if the job can only share nodes with other
+				 *   jobs owned by that user,
+				 * 1 if job can share nodes with other jobs,
 				 * 0 if job needs exclusive access to the node,
 				 * or NO_VAL to accept the system default.
 				 * SHARED_FORCE to eliminate user control. */
+	uint8_t sicp_mode;	/* set for inter-cluster jobs */
 	char **spank_job_env;	/* environment variables for job prolog/epilog
 				 * scripts as set by SPANK plugins */
 	uint32_t spank_job_env_size; /* element count in spank_env */
-	uint16_t task_dist;	/* see enum task_dist_state */
+	uint32_t task_dist;	/* see enum task_dist_state */
 	uint32_t time_limit;	/* maximum run time in minutes, default is
 				 * partition limit */
 	uint32_t time_min;	/* minimum run time in minutes, default is
@@ -1265,6 +1517,8 @@ typedef struct job_descriptor {	/* For submit, allocate, and update requests */
 	char *std_err;		/* pathname of stderr */
 	char *std_in;		/* pathname of stdin */
 	char *std_out;		/* pathname of stdout */
+	uint64_t *tres_req_cnt; /* used internally in the slurmctld,
+				   DON'T PACK */
 	uint32_t wait4switch;   /* Maximum time to wait for minimum switches */
 	char *wckey;            /* wckey for job */
 } job_desc_msg_t;
@@ -1282,15 +1536,21 @@ typedef struct job_info {
 	uint16_t batch_flag;	/* 1 if batch: queued job with script */
 	char *batch_host;	/* name of host running batch script */
 	char *batch_script;	/* contents of batch script */
+	uint32_t bitflags;      /* Various job flags */
 	uint16_t boards_per_node;  /* boards per node required by job   */
+	char *burst_buffer;	/* burst buffer specifications */
 	char *command;		/* command to be executed, built from submitted
 				 * job's argv and NULL for salloc command */
 	char *comment;		/* arbitrary comment (used by Moab scheduler) */
 	uint16_t contiguous;	/* 1 if job requires contiguous nodes */
 	uint16_t core_spec;	/* specialized core count */
 	uint16_t cores_per_socket; /* cores per socket required by job  */
+	double billable_tres;	/* billable TRES cache. updated upon resize */
 	uint16_t cpus_per_task;	/* number of processors required for
 				 * each task */
+	uint32_t cpu_freq_min;  /* Minimum cpu frequency  */
+	uint32_t cpu_freq_max;  /* Maximum cpu frequency  */
+	uint32_t cpu_freq_gov;  /* cpu frequency governor */
 	char *dependency;	/* synchronize job execution with other jobs */
 	uint32_t derived_ec;	/* highest exit code of all job steps */
 	time_t eligible_time;	/* time job is eligible for running */
@@ -1305,7 +1565,7 @@ typedef struct job_info {
 	uint32_t group_id;	/* group job submitted as */
 	uint32_t job_id;	/* job ID */
 	job_resources_t *job_resrcs; /* opaque data type, job resources */
-	uint16_t job_state;	/* state of the job, see enum job_states */
+	uint32_t job_state;	/* state of the job, see enum job_states */
 	char *licenses;		/* licenses required by the job */
 	uint32_t max_cpus;	/* maximum number of cpus usable by job */
 	uint32_t max_nodes;	/* maximum number of nodes usable by job */
@@ -1327,6 +1587,8 @@ typedef struct job_info {
 	uint32_t pn_min_memory; /* minimum real memory per node, default=0 */
 	uint16_t pn_min_cpus;   /* minimum # CPUs per node, default=0 */
 	uint32_t pn_min_tmp_disk; /* minimum tmp disk per node, default=0 */
+	uint8_t power_flags;	/* power management flags,
+				 * see SLURM_POWER_FLAGS_ */
 	time_t preempt_time;	/* preemption signal time */
 	time_t pre_sus_time;	/* time job ran prior to last suspend */
 	uint32_t priority;	/* relative priority of the job,
@@ -1350,6 +1612,7 @@ typedef struct job_info {
 						*/
 	uint16_t shared;	/* 1 if job can share nodes with other jobs */
 	uint16_t show_flags;	/* conveys level of details requested */
+	uint8_t sicp_mode;	/* set for inter-cluster jobs */
 	uint16_t sockets_per_board;/* sockets per board required by job */
 	uint16_t sockets_per_node; /* sockets per node required by job  */
 	time_t start_time;	/* time execution begins, actual or expected */
@@ -1364,6 +1627,8 @@ typedef struct job_info {
 	uint32_t time_limit;	/* maximum run time in minutes or INFINITE */
 	uint32_t time_min;	/* minimum run time in minutes or INFINITE */
 	uint16_t threads_per_core; /* threads per core required by job  */
+	char *tres_req_str;	/* tres requested in the job */
+	char *tres_alloc_str;   /* tres used in the job */
 	uint32_t user_id;	/* user the job runs as */
 	uint32_t wait4switch;   /* Maximum time to wait for minimum switches */
 	char *wckey;            /* wckey for job */
@@ -1388,6 +1653,16 @@ typedef struct job_info_msg {
 	slurm_job_info_t *job_array;	/* the job records */
 } job_info_msg_t;
 
+typedef struct sicp_info {
+	uint32_t job_id;
+	uint32_t job_state;
+} sicp_info_t;
+
+typedef struct sicp_info_msg {
+	uint32_t record_count;		/* number of records */
+	sicp_info_t *sicp_array;	/* inter-cluster job records */
+} sicp_info_msg_t;
+
 typedef struct step_update_request_msg {
 	time_t end_time;	/* step end time */
 	uint32_t exit_code;	/* exit code for job (status from wait call) */
@@ -1411,7 +1686,7 @@ typedef struct slurm_step_layout {
 	 * is the number of tasks assigned to the corresponding node */
 	uint16_t *tasks;
 	uint32_t task_cnt;	/* total number of tasks in the step */
-	uint16_t task_dist;	/* see enum task_dist_state */
+	uint32_t task_dist;	/* see enum task_dist_state */
 	/* Array (of length "node_cnt") of task ID arrays.  The length
 	 * of each subarray is designated by the corresponding value in
 	 * the tasks array. */
@@ -1495,7 +1770,9 @@ typedef struct suspend_msg {
 typedef struct {
 	uint16_t ckpt_interval;	/* checkpoint interval in minutes */
 	uint32_t cpu_count;	/* number of required processors */
-	uint32_t cpu_freq;	/* requested cpu frequency */
+	uint32_t cpu_freq_min;	/* Minimum cpu frequency  */
+	uint32_t cpu_freq_max;	/* Maximum cpu frequency  */
+	uint32_t cpu_freq_gov;	/* cpu frequency governor */
 	uint16_t exclusive;	/* 1 if CPUs not shared with other steps */
 	char *features;		/* required node features, default NONE */
 	uint16_t immediate;	/* 1 if allocate to run or fail immediately,
@@ -1524,7 +1801,7 @@ typedef struct {
 	uint16_t relative;	/* first node to use of job's allocation */
 	uint16_t resv_port_cnt;	/* reserve ports if set */
 	uint32_t task_count;	/* number of tasks required */
-	uint16_t task_dist;	/* see enum task_dist_state, default
+	uint32_t task_dist;	/* see enum task_dist_state, default
 				 * is SLURM_DIST_CYCLIC */
 	uint32_t time_limit;	/* step time limit */
 	uid_t uid;		/* user ID */
@@ -1560,15 +1837,18 @@ typedef struct {
 	char *task_epilog;
 	uint16_t cpu_bind_type;	/* use cpu_bind_type_t */
 	char *cpu_bind;
-	uint32_t cpu_freq;      /* requested cpu frequency */
+	uint32_t cpu_freq_min;	/* Minimum cpu frequency  */
+	uint32_t cpu_freq_max;	/* Maximum cpu frequency  */
+	uint32_t cpu_freq_gov;	/* cpu frequency governor */
 	uint16_t mem_bind_type;	/* use mem_bind_type_t */
 	char *mem_bind;
+	uint16_t accel_bind_type; /* --accel-bind= */
 
 	uint16_t max_sockets;
 	uint16_t max_cores;
 	uint16_t max_threads;
 	uint16_t cpus_per_task;
-	uint16_t task_dist;
+	uint32_t task_dist;
 	char *partition;
 	bool preserve_env;
 
@@ -1622,7 +1902,9 @@ typedef struct {
 				 * start_range_1, end_range_1,
 				 * start_range_2, .., -1  */
 	uint32_t num_cpus;	/* how many cpus are being used by step */
-	uint32_t cpu_freq;	/* requested cpu frequency */
+	uint32_t cpu_freq_min;	/* Minimum cpu frequency  */
+	uint32_t cpu_freq_max;	/* Maximum cpu frequency  */
+	uint32_t cpu_freq_gov;	/* cpu frequency governor */
 	uint32_t num_tasks;	/* number of tasks */
 	char *partition;	/* name of assigned partition */
 	char *resv_ports;	/* ports allocated for MPI */
@@ -1632,9 +1914,11 @@ typedef struct {
 						* slurm_get_select_jobinfo()
 						*/
 	time_t start_time;	/* step start time */
-	uint16_t state;		/* state of the step, see enum job_states */
+	uint32_t state;		/* state of the step, see enum job_states */
 	uint32_t step_id;	/* step ID */
+	uint32_t task_dist;	/* see enum task_dist_state */
 	uint32_t time_limit;	/* step time limit */
+	char *tres_alloc_str;   /* tres used in the job */
 	uint32_t user_id;	/* user the job runs as */
 } job_step_info_t;
 
@@ -1676,11 +1960,13 @@ typedef struct node_info {
 	uint16_t cores;         /* number of cores per socket       */
 	uint16_t core_spec_cnt; /* number of specialized cores on node */
 	uint32_t cpu_load;	/* CPU load * 100 */
+	uint32_t free_mem;	/* free memory in MiB */
 	uint16_t cpus;		/* configured count of cpus running on
 				 * the node */
 	char *cpu_spec_list;	/* node's specialized cpus */
 	acct_gather_energy_t *energy;	 /* energy data */
 	ext_sensors_data_t *ext_sensors; /* external sensor data */
+	power_mgmt_data_t *power;        /* power management data */
 	char *features;		/* list of a node's features */
 	char *gres;		/* list of a node's generic resources */
 	char *gres_drain;	/* list of drained GRES */
@@ -1691,6 +1977,7 @@ typedef struct node_info {
 	char *node_hostname;	/* node's hostname (optional) */
 	uint32_t node_state;	/* see enum node_states */
 	char *os;		/* operating system currently running */
+	uint32_t owner;		/* User allowed to use this node or NO_VAL */
 	uint32_t real_memory;	/* configured MB of real memory on the node */
 	char *reason;		/* reason for node being DOWN or DRAINING */
 	time_t reason_time;	/* Time stamp when reason was set, ignore if
@@ -1706,6 +1993,7 @@ typedef struct node_info {
 	uint16_t threads;       /* number of threads per core */
 	uint32_t tmp_disk;	/* configured MB of total disk in TMP_FS */
 	uint32_t weight;	/* arbitrary priority of node for scheduling */
+	char *tres_fmt_str;	/* str representing configured TRES on node */
 	char *version;		 /* Slurm version number */
 } node_info_t;
 
@@ -1763,9 +2051,39 @@ typedef struct job_alloc_info_msg {
 	uint32_t job_id;	/* job ID */
 } job_alloc_info_msg_t;
 
+typedef struct layout_info_msg {
+	uint32_t record_count;	/* number of char* */
+	char** records;		/* layout or list of layouts */
+} layout_info_msg_t;
+
+typedef struct update_layout_msg {
+	char* layout;
+	char* arg;
+} update_layout_msg_t;
+
+typedef struct step_alloc_info_msg {
+	uint32_t job_id;	/* job ID */
+	uint32_t step_id;	/* step ID */
+} step_alloc_info_msg_t;
+
+typedef struct powercap_info_msg {
+	uint32_t power_cap;	/* power cap value in watts */
+	uint32_t power_floor;	/* power floor value in watts */
+	uint32_t power_change;	/* power change rate limit in watts/minute */
+	uint32_t min_watts;	/* min consumption of the cluster in watts */
+	uint32_t cur_max_watts;	/* current max consumption of the cluster in
+				 * watts */
+	uint32_t adj_max_watts;	/* adjusted (removing DownNodes) max consumption
+				 * of the cluster in watts */
+	uint32_t max_watts;	/* max consumption of the cluster in watts */
+} powercap_info_msg_t;
+
+typedef struct powercap_info_msg update_powercap_msg_t;
+
 typedef struct acct_gather_node_resp_msg {
-	char *node_name;	  /* node name */
 	acct_gather_energy_t *energy;
+	char *node_name;	  /* node name */
+	uint16_t sensor_cnt;
 } acct_gather_node_resp_msg_t;
 
 typedef struct acct_gather_energy_req_msg {
@@ -1781,6 +2099,8 @@ typedef struct acct_gather_energy_req_msg {
 #define PART_FLAG_REQ_RESV	0x0010	/* Set if reservation is required */
 #define PART_FLAG_LLN		0x0020	/* Set if least loaded node selection
 					 * is desired */
+#define PART_FLAG_EXCLUSIVE_USER 0x0040	/* Set if nodes allocated exclusively
+					 * by user */
 /* Used with slurm_update_partition() to clear flags associated with existing
  * partitions. For example, if a partition is currently hidden and you want
  * to make it visible then set flags to PART_FLAG_HIDDEN_CLR and call
@@ -1791,6 +2111,7 @@ typedef struct acct_gather_energy_req_msg {
 #define PART_FLAG_ROOT_ONLY_CLR	0x0800	/* Clear ROOT_ONLY partition flag */
 #define PART_FLAG_REQ_RESV_CLR	0x1000	/* Clear RES_REQ partition flag */
 #define PART_FLAG_LLN_CLR	0x2000	/* Clear LLN partition flag */
+#define PART_FLAG_EXC_USER_CLR	0x4000	/* Clear EXCLUSIVE_USER flag */
 
 typedef struct partition_info {
 	char *allow_alloc_nodes;/* list names of allowed allocating
@@ -1802,6 +2123,7 @@ typedef struct partition_info {
 	char *allow_qos;	/* comma delimited list of qos,
 				 * null indicates all */
 	char *alternate; 	/* name of alternate partition */
+	char *billing_weights_str;/* per TRES billing weights string */
 	uint16_t cr_type;	/* see CR_* values */
 	uint32_t def_mem_per_cpu; /* default MB memory per allocated CPU */
 	uint32_t default_time;	/* minutes, NO_VAL or INFINITE */
@@ -1822,9 +2144,11 @@ typedef struct partition_info {
 	char *nodes;		/* list names of nodes in partition */
 	uint16_t preempt_mode;	/* See PREEMPT_MODE_* in slurm/slurm.h */
 	uint16_t priority;	/* scheduling priority for jobs */
+	char *qos_char;	        /* The partition QOS name */
 	uint16_t state_up;	/* see PARTITION_ states above */
 	uint32_t total_cpus;	/* total number of cpus in the partition */
 	uint32_t total_nodes;	/* total number of nodes in the partition */
+	char    *tres_fmt_str;	/* str of configured TRES in partition */
 } partition_info_t;
 
 typedef struct delete_partition_msg {
@@ -1832,10 +2156,17 @@ typedef struct delete_partition_msg {
 } delete_part_msg_t;
 
 typedef struct resource_allocation_response_msg {
+	char *account;          /* allocation account */
 	uint32_t job_id;	/* assigned job id */
 	char *alias_list;	/* node name/address/hostnamne aliases */
+	uint32_t cpu_freq_min;  /* Minimum cpu frequency  */
+	uint32_t cpu_freq_max;  /* Maximum cpu frequency  */
+	uint32_t cpu_freq_gov;  /* cpu frequency governor */
 	uint16_t *cpus_per_node;/* cpus per node */
 	uint32_t *cpu_count_reps;/* how many nodes have same cpu count */
+	uint32_t env_size;	/* element count in environment */
+	char **environment;	/* environment variables to set for job,
+				 *  name=value pairs, one per line */
 	uint32_t error_code;	/* error code for warning message */
 	uint32_t node_cnt;	/* count of nodes */
 	char *node_list;	/* assigned list of nodes */
@@ -1844,6 +2175,8 @@ typedef struct resource_allocation_response_msg {
 	uint32_t pn_min_memory;  /* minimum real memory per node OR
 				  * real memory per CPU | MEM_PER_CPU,
 				  * default=0 (no limit) */
+	char *qos;               /* allocation qos */
+	char *resv_name;         /* allocation reservation */
 	dynamic_plugin_data_t *select_jobinfo;	/* opaque data structure,
 						 * use
 						 * slurm_get_select_jobinfo()
@@ -1871,6 +2204,14 @@ typedef struct partition_info_msg {
 	partition_info_t *partition_array; /* the partition records */
 } partition_info_msg_t;
 
+typedef struct will_run_response_msg {
+	uint32_t job_id;	/* ID of job to start */
+	char *node_list;	/* nodes where job will start */
+	List preemptee_job_id;	/* jobs preempted to start this job */
+	uint32_t proc_cnt;	/* CPUs allocated to job at start */
+	time_t start_time;	/* time when job will start */
+} will_run_response_msg_t;
+
 
 /* BLUEGENE specific information */
 
@@ -1993,10 +2334,10 @@ void slurm_init_update_block_msg PARAMS((update_block_msg_t *update_block_msg));
 #define RESERVE_FLAG_IGN_JOBS		0x00000040	/* Ignore running jobs */
 #define RESERVE_FLAG_NO_IGN_JOB		0x00000080	/* Clear ignore running
 							 * jobs flag */
-#define RESERVE_FLAG_LIC_ONLY		0x00000100	/* Reserve licenses
-							 * only */
-#define RESERVE_FLAG_NO_LIC_ONLY	0x00000200	/* Clear reserve
-							 * licenses only flag */
+#define RESERVE_FLAG_ANY_NODES		0x00000100	/* Use any compute
+							 * nodes */
+#define RESERVE_FLAG_NO_ANY_NODES	0x00000200	/* Clear any compute
+							 * node flag */
 #define RESERVE_FLAG_STATIC     	0x00000400	/* Static node allocation */
 #define RESERVE_FLAG_NO_STATIC  	0x00000800	/* Clear static node
 							 * allocation */
@@ -2012,22 +2353,27 @@ void slurm_init_update_block_msg PARAMS((update_block_msg_t *update_block_msg));
 							 * on each node */
 #define RESERVE_FLAG_TIME_FLOAT		0x00020000	/* Time offset is
 							 * relative */
+#define RESERVE_FLAG_REPLACE		0x00040000	/* Replace resources
+							 * as assigned to jobs */
 
 typedef struct reserve_info {
 	char *accounts;		/* names of accounts permitted to use */
+	char *burst_buffer;	/* burst buffer resources to be included */
+	uint32_t core_cnt;	/* count of cores required */
 	time_t end_time;	/* end time of reservation */
 	char *features;		/* required node features */
 	uint32_t flags;		/* see RESERVE_FLAG_* above */
 	char *licenses;		/* names of licenses to be reserved */
 	char *name;		/* name of reservation */
 	uint32_t node_cnt;	/* count of nodes required */
-	uint32_t core_cnt;	/* count of cores required */
 	int32_t *node_inx;	/* list index pairs into node_table for *nodes:
 				 * start_range_1, end_range_1,
 				 * start_range_2, .., -1  */
 	char *node_list;	/* list of reserved nodes or ALL */
 	char *partition;	/* name of partition to be used */
 	time_t start_time;	/* start time of reservation */
+	uint32_t resv_watts;    /* amount of power to reserve  */
+	char *tres_str;         /* list of TRES's used by reservation */
 	char *users;		/* names of users permitted to use */
 } reserve_info_t;
 
@@ -2039,13 +2385,14 @@ typedef struct reserve_info_msg {
 
 typedef struct resv_desc_msg {
 	char *accounts;		/* names of accounts permitted to use */
-	uint32_t duration;	/* duration of reservation in seconds */
+	char *burst_buffer;	/* burst buffer resources to be included */
+	uint32_t *core_cnt;	/* Count of cores required */
+	uint32_t duration;	/* duration of reservation in minutes */
 	time_t end_time;	/* end time of reservation */
 	char *features;		/* required node features */
 	uint32_t flags;		/* see RESERVE_FLAG_* above */
 	char *licenses;		/* names of licenses to be reserved */
 	char *name;		/* name of reservation (optional on create) */
-	uint32_t *core_cnt;	/* Count of cores required */
 	uint32_t *node_cnt;	/* Count of nodes required. Specify set of job
 				 * sizes with trailing zero to optimize layout
 				 * for those jobs just specify their total size
@@ -2054,6 +2401,8 @@ typedef struct resv_desc_msg {
 	char *node_list;	/* list of reserved nodes or ALL */
 	char *partition;	/* name of partition to be used */
 	time_t start_time;	/* start time of reservation */
+	uint32_t resv_watts;    /* amount of power to reserve  */
+	char *tres_str;         /* list of TRES's used by reservation */
 	char *users;		/* names of users permitted to use */
 } resv_desc_msg_t;
 
@@ -2122,8 +2471,12 @@ typedef struct reservation_name_msg {
 #define DEBUG_FLAG_DB_STEP      0x0000002000000000 /* Database step debug */
 #define DEBUG_FLAG_DB_USAGE     0x0000004000000000 /* Usage/Rollup debug */
 #define DEBUG_FLAG_DB_WCKEY     0x0000008000000000 /* Database WCKey debug */
-
-
+#define DEBUG_FLAG_BURST_BUF    0x0000010000000000 /* Burst buffer plugin */
+#define DEBUG_FLAG_CPU_FREQ     0x0000020000000000 /* --cpu_freq debug */
+#define DEBUG_FLAG_POWER        0x0000040000000000 /* Power plugin debug */
+#define DEBUG_FLAG_SICP		0x0000080000000000 /* Slurm Inter-Cluster */
+#define DEBUG_FLAG_DB_ARCHIVE	0x0000100000000000 /* DBD Archiving/Purging */
+#define DEBUG_FLAG_DB_TRES      0x0000200000000000 /* Database TRES debug */
 
 #define GROUP_FORCE		0x8000	/* if set, update group membership
 					 * info even if no updates to
@@ -2150,10 +2503,12 @@ typedef struct reservation_name_msg {
 #define HEALTH_CHECK_CYCLE	0x8000	/* cycle through nodes node */
 #define HEALTH_CHECK_NODE_ANY	0x000f	/* execute on all node states */
 
-#define PROLOG_FLAG_ALLOC  0x0001 /* execute prolog upon allocation */
-#define PROLOG_FLAG_NOHOLD 0x0002 /* don't block salloc/srun until
-				   * slurmctld knows the prolog has
-				   * run on each node in the allocation */
+#define PROLOG_FLAG_ALLOC	0x0001 /* execute prolog upon allocation */
+#define PROLOG_FLAG_NOHOLD	0x0002 /* don't block salloc/srun until
+					* slurmctld knows the prolog has
+					* run on each node in the allocation */
+#define PROLOG_FLAG_CONTAIN 	0x0004 /* Use proctrack plugin to create a
+					* container upon allocation */
 
 #define LOG_FMT_ISO8601_MS      0
 #define LOG_FMT_ISO8601         1
@@ -2165,6 +2520,7 @@ typedef struct reservation_name_msg {
 
 typedef struct slurm_ctl_conf {
 	time_t last_update;	/* last update time of the build parameters */
+	char *accounting_storage_tres; /* list of tres */
 	uint16_t accounting_storage_enforce; /* job requires valid association:
 					      * user/account/partition/cluster */
 	char *accounting_storage_backup_host;	/* accounting storage
@@ -2189,6 +2545,7 @@ typedef struct slurm_ctl_conf {
 	char *backup_addr;	/* comm path of slurmctld secondary server */
 	char *backup_controller;/* name of slurmctld secondary server */
 	uint16_t batch_start_timeout;	/* max secs for batch job to start */
+	char *bb_type;		/* burst buffer plugin type */
 	time_t boot_time;	/* time slurmctld last booted */
 	char *checkpoint_type;	/* checkpoint plugin type */
 	char *chos_loc;		/* Chroot OS path */
@@ -2199,11 +2556,12 @@ typedef struct slurm_ctl_conf {
 	char *control_addr;	/* comm path of slurmctld primary server */
 	char *control_machine;	/* name of slurmctld primary server */
 	uint32_t cpu_freq_def;	/* default cpu frequency / governor */
+	uint32_t cpu_freq_govs;	/* cpu freq governors allowed */
 	char *crypto_type;	/* cryptographic signature plugin */
 	uint64_t debug_flags;	/* see DEBUG_FLAG_* above for values */
 	uint32_t def_mem_per_cpu; /* default MB memory per allocated CPU */
 	uint16_t disable_root_jobs; /* if set then user root can't run jobs */
-	uint16_t dynalloc_port;  /* port for dynamic allocation connection */
+	uint16_t eio_timeout;     /* timeout for the eio thread */
 	uint16_t enforce_part_limits;	/* if set, reject job exceeding
 					 * partition size and/or time limits */
 	char *epilog;		/* pathname of job epilog */
@@ -2251,6 +2609,7 @@ typedef struct slurm_ctl_conf {
 				    * the processes is aborted or crashed */
 	uint16_t kill_wait;	/* seconds between SIGXCPU to SIGKILL
 				 * on job termination */
+	char *launch_params;	/* step launcher plugin options */
 	char *launch_type;	/* type of step launcher to use */
 	char *layouts;		/* comma separted list of layouts plugins */
 	char *licenses;		/* licenses available on this cluster */
@@ -2264,10 +2623,11 @@ typedef struct slurm_ctl_conf {
 	uint32_t max_step_cnt;	/* maximum number of steps per job */
 	uint16_t max_tasks_per_node; /* maximum tasks per node */
 	uint16_t mem_limit_enforce; /* Enforce mem limit at runtime y|n */
-	uint16_t min_job_age;	/* COMPLETED jobs over this age (secs)
+	uint32_t min_job_age;	/* COMPLETED jobs over this age (secs)
 				 * purged from in memory records */
 	char *mpi_default;	/* Default version of MPI in use */
 	char *mpi_params;	/* MPI parameters */
+	char *msg_aggr_params;  /* Message aggregation parameters */
 	uint16_t msg_timeout;	/* message timeout */
 	uint32_t next_job_id;	/* next slurm generated job_id to assign */
 	char *node_prefix;      /* prefix of nodes in partition, only set in
@@ -2276,6 +2636,8 @@ typedef struct slurm_ctl_conf {
 				   * number of minutes before cancellation */
 	char *plugindir;	/* pathname to plugins */
 	char *plugstack;        /* pathname to plugin stack config file */
+	char *power_parameters;	/* power management parameters */
+	char *power_plugin;	/* power management plugin type */
 	uint16_t preempt_mode;	/* See PREEMPT_MODE_* in slurm/slurm.h */
 	char *preempt_type;	/* job preemption selection plugin */
 	uint32_t priority_decay_hl; /* priority decay half life in
@@ -2296,10 +2658,12 @@ typedef struct slurm_ctl_conf {
 	uint32_t priority_weight_js; /* weight for Job Size factor */
 	uint32_t priority_weight_part; /* weight for Partition factor */
 	uint32_t priority_weight_qos; /* weight for QOS factor */
+	char    *priority_weight_tres; /* weights (str) for different TRES' */
 	uint16_t private_data;	/* block viewing of information,
 				 * see PRIVATE_DATA_* */
 	char *proctrack_type;	/* process tracking plugin type */
 	char *prolog;		/* pathname of job prolog run by slurmd */
+	uint16_t prolog_epilog_timeout; /* prolog/epilog timeout */
 	char *prolog_slurmctld;	/* pathname of job prolog run by slurmctld */
 	uint16_t propagate_prio_process; /* process priority propagation,
 					  * see PROP_PRIO_* */
@@ -2373,9 +2737,10 @@ typedef struct slurm_ctl_conf {
 	char *switch_type;	/* switch or interconnect type */
 	char *task_epilog;	/* pathname of task launch epilog */
 	char *task_plugin;	/* task launch plugin */
-	uint16_t task_plugin_param;	/* see CPU_BIND_* */
+	uint32_t task_plugin_param;	/* see CPU_BIND_* */
 	char *task_prolog;	/* pathname of task launch prolog */
 	char *tmp_fs;		/* pathname of temporary file system */
+	char *topology_param;	/* network topology parameters */
 	char *topology_plugin;	/* network topology plugin */
 	uint16_t track_wckey;    /* see if we are using wckey or not */
 	uint16_t tree_width;    /* number of threads per node to span */
@@ -2484,7 +2849,7 @@ typedef struct stats_info_response_msg {
 	uint32_t bf_backfilled_jobs;
 	uint32_t bf_last_backfilled_jobs;
 	uint32_t bf_cycle_counter;
-	uint32_t bf_cycle_sum;
+	uint64_t bf_cycle_sum;
 	uint32_t bf_cycle_last;
 	uint32_t bf_cycle_max;
 	uint32_t bf_last_depth;
@@ -2580,6 +2945,34 @@ typedef struct {
 	uint32_t *error_code;
 } job_array_resp_msg_t;
 
+/* Association manager state running in the slurmctld */
+typedef struct {
+	List assoc_list; /* list of slurmdb_assoc_rec_t with usage packed */
+	List qos_list;   /* list of slurmdb_qos_rec_t with usage packed */
+	uint32_t tres_cnt;
+	char **tres_names;
+	List user_list;  /* list of slurmdb_user_rec_t */
+} assoc_mgr_info_msg_t;
+
+#define ASSOC_MGR_INFO_FLAG_ASSOC 0x00000001
+#define ASSOC_MGR_INFO_FLAG_USERS 0x00000002
+#define ASSOC_MGR_INFO_FLAG_QOS   0x00000004
+
+typedef struct {
+	List acct_list; /* char * list of account names */
+	uint32_t flags; /* flags determining what is returned */
+	List qos_list;  /* char * list of qos names */
+	List user_list; /* char * list of user names */
+} assoc_mgr_info_request_msg_t;
+
+typedef struct network_callerid_msg {
+	unsigned char ip_src[16];
+	unsigned char ip_dst[16];
+	uint32_t port_src;
+	uint32_t port_dst;
+	int32_t af;	// NOTE: un/packed as uint32_t
+} network_callerid_msg_t;
+
 /*****************************************************************************\
  *	RESOURCE ALLOCATION FUNCTIONS
 \*****************************************************************************/
@@ -2733,15 +3126,27 @@ extern void slurm_free_submit_response_response_msg PARAMS(
  */
 extern int slurm_job_will_run PARAMS((job_desc_msg_t * job_desc_msg));
 
+/*
+ * slurm_job_will_run2 - determine if a job would execute immediately if
+ *      submitted now
+ * IN job_desc_msg - description of resource allocation request
+ * OUT will_run_resp - job run time data
+ *      free using slurm_free_will_run_response_msg()
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+extern int slurm_job_will_run2 PARAMS((job_desc_msg_t *req,
+				will_run_response_msg_t **will_run_resp));
+
 /*
  * slurm_sbcast_lookup - retrieve info for an existing resource allocation
  *	including a credential needed for sbcast
- * IN jobid - job allocation identifier
+ * IN job_id - job allocation identifier
+ * IN step_id - step allocation identifier (or NO_VAL for entire job)
  * OUT info - job allocation information including a credential for sbcast
  * RET 0 on success, otherwise return -1 and set errno to indicate the error
  * NOTE: free the "resp" using slurm_free_sbcast_cred_msg
  */
-extern int slurm_sbcast_lookup PARAMS((uint32_t jobid,
+extern int slurm_sbcast_lookup PARAMS((uint32_t job_id, uint32_t step_id,
 				       job_sbcast_cred_msg_t **info));
 
 extern void slurm_free_sbcast_cred_msg PARAMS((job_sbcast_cred_msg_t * msg));
@@ -2753,9 +3158,23 @@ extern void slurm_free_sbcast_cred_msg PARAMS((job_sbcast_cred_msg_t * msg));
  * OUT
  *
  */
-extern int slurm_load_licenses PARAMS((time_t, license_info_msg_t **, uint16_t));
+extern int slurm_load_licenses PARAMS((time_t, license_info_msg_t **,
+				       uint16_t));
 extern void slurm_free_license_info_msg PARAMS((license_info_msg_t *));
 
+/* get the running assoc_mgr info
+ * IN assoc_mgr_info_request_msg_t: request filtering data returned
+ * OUT assoc_mgr_info_msg_t: returned structure filled in with
+ * assoc_mgr lists, must be freed by slurm_free_assoc_mgr_info_msg
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+extern int slurm_load_assoc_mgr_info PARAMS((assoc_mgr_info_request_msg_t *,
+					     assoc_mgr_info_msg_t **));
+extern void slurm_free_assoc_mgr_info_msg PARAMS ((assoc_mgr_info_msg_t *));
+extern void slurm_free_assoc_mgr_info_request_msg PARAMS (
+	(assoc_mgr_info_request_msg_t *));
+
+
 /*****************************************************************************\
  *	JOB/STEP SIGNALING FUNCTIONS
 \*****************************************************************************/
@@ -2787,12 +3206,8 @@ extern int slurm_kill_job_step PARAMS((uint32_t job_id, uint32_t step_id,
 /*
  * slurm_kill_job2()
  */
-extern int slurm_kill_job2 PARAMS((const char *, uint16_t, uint16_t));
-
-/*
- * slurm_kill_job_step2()
- */
-extern int slurm_kill_job_step2 PARAMS((const char *, uint16_t, uint16_t));
+extern int slurm_kill_job2 PARAMS((const char *job_id, uint16_t signal,
+				   uint16_t flags));
 
 /*
  * slurm_signal_job - send the specified signal to all steps of an existing job
@@ -3142,6 +3557,34 @@ extern int slurm_job_cpus_allocated_on_node_id PARAMS(
 extern int slurm_job_cpus_allocated_on_node PARAMS(
 	(job_resources_t *job_resrcs_ptr, const char *node_name));
 
+/*
+ * slurm_job_cpus_allocated_str_on_node_id -
+ *                        get the string representation of cpus allocated
+ *                        to a job on a node by node id
+ * IN cpus		- str where the resulting cpu list is returned
+ * IN cpus_len		- max size of cpus str
+ * IN job_resrcs_ptr	- pointer to job_resources structure
+ * IN node_id		- zero-origin node id in allocation
+ * RET 0 on success or -1 on error
+ */
+extern int slurm_job_cpus_allocated_str_on_node_id PARAMS(
+	(char *cpus, size_t cpus_len,
+	 job_resources_t *job_resrcs_ptr, int node_id));
+
+/*
+ * slurm_job_cpus_allocated_str_on_node -
+ *                        get the string representation of cpus allocated
+ *                        to a job on a node by node name
+ * IN cpus		- str where the resulting cpu list is returned
+ * IN cpus_len		- max size of cpus str
+ * IN job_resrcs_ptr	- pointer to job_resources structure
+ * IN node_name		- name of node
+ * RET 0 on success or -1 on error
+ */
+extern int slurm_job_cpus_allocated_str_on_node PARAMS(
+	(char *cpus, size_t cpus_len,
+	 job_resources_t *job_resrcs_ptr, const char *node_name));
+
 /*****************************************************************************\
  *	SLURM JOB CONTROL CONFIGURATION READ/PRINT/UPDATE FUNCTIONS
 \*****************************************************************************/
@@ -3441,16 +3884,20 @@ extern int slurm_load_node_single PARAMS((node_info_msg_t **resp,
 					 char *node_name, uint16_t show_flags));
 
 /*
- * slurm_node_energy - issue RPC to get the energy data on this machine
+ * slurm_get_node_energy_n - issue RPC to get the energy data of all
+ * configured sensors on the target machine
  * IN  host  - name of node to query, NULL if localhost
  * IN  delta - Use cache if data is newer than this in seconds
- * OUT acct_gather_energy_t structure on success or NULL other wise
- * RET 0 or a slurm error code
- * NOTE: free the response using slurm_acct_gather_energy_destroy
+ * OUT sensor_cnt - number of sensors
+ * OUT energy - array of acct_gather_energy_t structures on success or
+ *                NULL otherwise
+ * RET 0 on success or a slurm error code
+ * NOTE: free the response using xfree
  */
 extern int slurm_get_node_energy PARAMS(
 	(char *host, uint16_t delta,
-	 acct_gather_energy_t **acct_gather_energy));
+	 uint16_t *sensors_cnt,
+	 acct_gather_energy_t **energy));
 
 /*
  * slurm_free_node_info_msg - free the node information response message
@@ -3627,6 +4074,45 @@ extern void slurm_print_topo_info_msg PARAMS(
 extern void slurm_print_topo_record PARAMS((FILE * out, topo_info_t *topo_ptr,
 					    int one_liner));
 
+/*****************************************************************************\
+ *	SLURM POWERCAPPING READ/PRINT/UPDATE FUNCTIONS
+\*****************************************************************************/
+
+/*
+ * slurm_load_powercap - issue RPC to get slurm powercapping details
+ * IN powercap_info_msg_pptr - place to store a pointer to the result
+ * RET 0 or a slurm error code
+ * NOTE: free the response using slurm_free_powercap_info_msg
+ */
+extern int slurm_load_powercap PARAMS(
+	(powercap_info_msg_t **powercap_info_msg_pptr));
+
+/*
+ * slurm_free_powercap_info_msg - free the powercapping information
+ *	response message
+ * IN msg - pointer to powercapping information response message
+ * NOTE: buffer is loaded by slurm_load_powercap.
+ */
+extern void slurm_free_powercap_info_msg PARAMS((powercap_info_msg_t *msg));
+
+/*
+ * slurm_print_powercap_info_msg - output information about powercapping
+ *	configuration based upon message as loaded using slurm_load_powercap
+ * IN out - file to write to
+ * IN powercap_info_msg_ptr - powercapping information message pointer
+ * IN one_liner - print as a single line if not zero
+ */
+extern void slurm_print_powercap_info_msg PARAMS(
+	(FILE * out, powercap_info_msg_t *powercap_info_msg_ptr,
+	 int one_liner));
+
+/*
+ * slurm_update_powercap - issue RPC to update powercapping cap
+ * IN powercap_msg - description of powercapping updates
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+extern int slurm_update_powercap PARAMS((update_powercap_msg_t * powercap_msg));
+
 /*****************************************************************************\
  *	SLURM SELECT READ/PRINT/UPDATE FUNCTIONS
 \*****************************************************************************/
@@ -3743,6 +4229,21 @@ extern int slurm_update_partition PARAMS((update_part_msg_t * part_msg));
  */
 extern int slurm_delete_partition PARAMS((delete_part_msg_t * part_msg));
 
+/*****************************************************************************\
+ *      SLURM LAYOUT PRINT/UPDATE FUNCTIONS
+\*****************************************************************************/
+extern void slurm_print_layout_info PARAMS(
+	(FILE* out, layout_info_msg_t *layout_info_ptr, int one_liner ));
+
+extern int slurm_load_layout PARAMS(
+	(char *layout_type, char *entities, char *type,
+	 uint32_t no_relation, layout_info_msg_t **resp));
+
+extern int slurm_update_layout PARAMS((update_layout_msg_t * layout_info_msg));
+
+extern void slurm_free_layout_info_msg PARAMS(
+	(layout_info_msg_t * layout_info_msg));
+
 /*****************************************************************************\
  *	SLURM RESERVATION CONFIGURATION READ/PRINT/UPDATE FUNCTIONS
 \*****************************************************************************/
@@ -4120,7 +4621,6 @@ extern int slurm_checkpoint_tasks PARAMS((uint32_t job_id, uint16_t step_id,
 					  time_t begin_time, char *image_dir,
 					  uint16_t max_wait, char *nodelist));
 
-
 /*****************************************************************************\
  *      SLURM TRIGGER FUNCTIONS
 \*****************************************************************************/
@@ -4162,6 +4662,164 @@ extern void slurm_free_trigger_msg PARAMS((trigger_info_msg_t * trigger_free));
  */
 void slurm_init_trigger_msg PARAMS((trigger_info_t *trigger_info_msg));
 
+/*****************************************************************************\
+ *      SLURM BURST BUFFER FUNCTIONS
+\*****************************************************************************/
+#define BB_FLAG_DISABLE_PERSISTENT	0x0001	/* Disable regular user to create
+						 * and destroy persistent burst
+						 * buffers */
+#define BB_FLAG_ENABLE_PERSISTENT	0x0002	/* Allow regular user to create
+						 * and destroy persistent burst
+						 * buffers */
+#define BB_FLAG_EMULATE_CRAY		0x0004	/* Using dw_wlm_cli emulator */
+#define BB_FLAG_PRIVATE_DATA		0x0008	/* Buffers only visible to owner */
+
+#define BB_SIZE_IN_NODES	0x8000000000000000
+#define BB_STATE_PENDING	0x0000		/* Placeholder: no action started */
+#define BB_STATE_ALLOCATING	0x0001		/* Cray: bbs_setup started */
+#define BB_STATE_ALLOCATED	0x0002		/* Cray: bbs_setup started */
+#define BB_STATE_DELETING	0x0005		/* Cray: bbs_setup started */
+#define BB_STATE_DELETED	0x0006		/* Cray: bbs_setup started */
+#define BB_STATE_STAGING_IN	0x0011		/* Cray: bbs_data_in started */
+#define BB_STATE_STAGED_IN	0x0012		/* Cray: bbs_data_in complete */
+#define BB_STATE_RUNNING	0x0021		/* Job is running */
+#define BB_STATE_SUSPEND	0x0022		/* Job is suspended (future) */
+#define BB_STATE_STAGING_OUT	0x0031		/* Cray: bbs_post_run then
+						 * bbs_data_out started */
+#define BB_STATE_STAGED_OUT	0x0032		/* Cray: bbs_data_out complete */
+#define BB_STATE_TEARDOWN	0x0041		/* Cray: bbs_teardown started */
+#define BB_STATE_COMPLETE	0x0042		/* Cray: bbs_teardown complete */
+
+typedef struct {
+	uint64_t avail_cnt;	/* Total count of available resources, unused
+				 * by burst_buffer_resv_t */
+	uint64_t granularity;	/* Granularity of resource allocation size */
+	char *name;		/* Generic burst buffer resource, e.g. "nodes" */
+	uint64_t used_cnt;	/* Count of used resources */
+} burst_buffer_gres_t;
+
+typedef struct {
+	char *account;		/* Associated account (for limits) */
+	uint32_t array_job_id;
+	uint32_t array_task_id;
+	time_t create_time;	/* Time of creation */
+	uint32_t gres_cnt;	/* Count of records in gres_ptr */
+	burst_buffer_gres_t *gres_ptr;
+	uint32_t job_id;
+	char *name;		/* Name of persistent burst buffer */
+	char *partition;	/* Associated partition (for limits) */
+	char *qos;		/* Associated QOS (for limits) */
+	uint64_t size;		/* In bytes by default */
+	uint16_t state;		/* See BB_STATE_* */
+	uint32_t user_id;
+} burst_buffer_resv_t;
+
+typedef struct {
+	uint32_t user_id;
+	uint64_t used;
+} burst_buffer_use_t;
+
+typedef struct {
+	char *allow_users;
+	char *default_pool;		/* Name of default pool to use */
+	char *create_buffer;
+	char *deny_users;
+	char *destroy_buffer;
+	uint32_t flags;			/* See BB_FLAG_* above */
+	char *get_sys_state;
+	uint64_t granularity;		/* Granularity of resource allocation */
+	uint32_t gres_cnt;		/* Count of records in gres_ptr */
+	burst_buffer_gres_t *gres_ptr;
+	char *name;			/* Plugin name */
+	uint32_t stage_in_timeout;	/* Seconds or zero */
+	uint32_t stage_out_timeout;	/* Seconds or zero */
+	char *start_stage_in;
+	char *start_stage_out;
+	char *stop_stage_in;
+	char *stop_stage_out;
+	uint64_t total_space;		/* In bytes */
+	uint64_t used_space;		/* In bytes */
+
+	uint32_t  buffer_count;
+	burst_buffer_resv_t *burst_buffer_resv_ptr;
+
+	uint32_t  use_count;
+	burst_buffer_use_t *burst_buffer_use_ptr;
+} burst_buffer_info_t;
+
+typedef struct {
+	burst_buffer_info_t *burst_buffer_array;
+	uint32_t  record_count;		/* Elements in burst_buffer_array */
+} burst_buffer_info_msg_t;
+
+/*
+ * slurm_burst_buffer_state_string - translate burst buffer state number to
+ *	its string equivalent
+ */
+extern char *slurm_burst_buffer_state_string(uint16_t state);
+
+/*
+ * slurm_load_burst_buffer_info - issue RPC to get all Slurm burst buffer plugin
+ *	information
+ * IN burst_buffer_info_msg_pptr - place to store a burst buffer configuration
+ *	pointer
+ * RET 0 or a slurm error code
+ * NOTE: free the response using slurm_free_burst_buffer_info_msg
+ */
+extern int slurm_load_burst_buffer_info PARAMS(
+		(burst_buffer_info_msg_t **burst_buffer_info_msg_pptr));
+
+/*
+ * slurm_free_burst_buffer_info_msg - free buffer returned by
+ *	slurm_load_burst_buffer
+ * IN burst_buffer_info_msg_ptr - pointer to burst_buffer_info_msg_t
+ * RET 0 or a slurm error code
+ */
+extern void slurm_free_burst_buffer_info_msg PARAMS(
+		(burst_buffer_info_msg_t *burst_buffer_info_msg));
+
+/*
+ * slurm_print_burst_buffer_info_msg - output information about burst buffers
+ *	based upon message as loaded using slurm_load_burst_buffer
+ * IN out - file to write to
+ * IN info_ptr - burst_buffer information message pointer
+ * IN one_liner - print as a single line if true
+ * IN verbose - higher values to log additional details
+ */
+extern void slurm_print_burst_buffer_info_msg PARAMS(
+		(FILE *out, burst_buffer_info_msg_t *info_ptr, int one_liner,
+		 int verbosity));
+
+/*
+ * slurm_print_burst_buffer_record - output information about a specific Slurm
+ *	burst_buffer record based upon message as loaded using
+ *	slurm_load_burst_buffer_info()
+ * IN out - file to write to
+ * IN burst_buffer_ptr - an individual burst buffer record pointer
+ * IN one_liner - print as a single line if not zero
+ * IN verbose - higher values to log additional details
+ * NOTE: the function returns void; the formatted output is written
+ *	   directly to the supplied FILE stream.
+ */
+extern void slurm_print_burst_buffer_record PARAMS(
+			(FILE *out, burst_buffer_info_t *burst_buffer_ptr,
+			 int one_liner, int verbose));
+
+/*
+ * slurm_network_callerid - issue RPC to get the job id of a job from a remote
+ * slurmd based upon network socket information.
+ *
+ * IN req - Information about network connection in question
+ * OUT job_id -  ID of the job or NO_VAL
+ * OUT node_name - name of the remote slurmd
+ * IN node_name_size - size of the node_name buffer
+ * RET SLURM_PROTOCOL_SUCCESS or SLURM_FAILURE on error
+ */
+extern int slurm_network_callerid PARAMS(
+		(network_callerid_msg_t req, uint32_t *job_id,
+		char *node_name, int node_name_size));
+
+
 END_C_DECLS
 
 #endif
diff --git a/slurm/slurm_errno.h b/slurm/slurm_errno.h
index 210b5de39..7d35b10af 100644
--- a/slurm/slurm_errno.h
+++ b/slurm/slurm_errno.h
@@ -107,7 +107,7 @@ enum {
 	SLURMCTLD_COMMUNICATIONS_RECEIVE_ERROR,
 	SLURMCTLD_COMMUNICATIONS_SHUTDOWN_ERROR,
 
-	/* _info.c/communcation layer RESPONSE_SLURM_RC message codes */
+	/* _info.c/communication layer RESPONSE_SLURM_RC message codes */
 	SLURM_NO_CHANGE_IN_DATA =			1900,
 
 	/* slurmctld error codes */
@@ -202,6 +202,14 @@ enum {
 	ESLURM_JOB_NOT_FINISHED,
 	ESLURM_TRIGGER_DUP,
 	ESLURM_INTERNAL =				2090,
+	ESLURM_INVALID_BURST_BUFFER_CHANGE,
+	ESLURM_BURST_BUFFER_PERMISSION,
+	ESLURM_BURST_BUFFER_LIMIT,
+	ESLURM_INVALID_BURST_BUFFER_REQUEST,
+	ESLURM_PRIO_RESET_FAIL,
+	ESLURM_POWER_NOT_AVAIL,
+	ESLURM_POWER_RESERVED,
+	ESLURM_INVALID_POWERCAP,
 
 	/* switch specific error codes, specific values defined in plugin module */
 	ESLURM_SWITCH_MIN = 3000,
@@ -223,7 +231,7 @@ enum {
 	ESLURMD_CREDENTIAL_EXPIRED,
 	ESLURMD_CREDENTIAL_REVOKED,
 	ESLURMD_CREDENTIAL_REPLAYED,
-	ESLURMD_CREATE_BATCH_DIR_ERROR,
+	ESLURMD_CREATE_BATCH_DIR_ERROR =		4010,
 	ESLURMD_MODIFY_BATCH_DIR_ERROR,
 	ESLURMD_CREATE_BATCH_SCRIPT_ERROR,
 	ESLURMD_MODIFY_BATCH_SCRIPT_ERROR,
@@ -233,7 +241,7 @@ enum {
 	ESLURMD_SET_SID_ERROR,
 	ESLURMD_CANNOT_SPAWN_IO_THREAD,
 	ESLURMD_FORK_FAILED,
-	ESLURMD_EXECVE_FAILED,
+	ESLURMD_EXECVE_FAILED =				4020,
 	ESLURMD_IO_ERROR,
 	ESLURMD_PROLOG_FAILED,
 	ESLURMD_EPILOG_FAILED,
diff --git a/slurm/slurmdb.h b/slurm/slurmdb.h
index 0835d6f45..d323455f4 100644
--- a/slurm/slurmdb.h
+++ b/slurm/slurmdb.h
@@ -139,6 +139,7 @@ typedef enum {
 	SLURMDB_REMOVE_RES,
 	SLURMDB_MODIFY_RES,
 	SLURMDB_REMOVE_QOS_USAGE,
+	SLURMDB_ADD_TRES,
 } slurmdb_update_type_t;
 
 /* Define QOS flags */
@@ -154,6 +155,7 @@ typedef enum {
 #define	QOS_FLAG_NO_RESERVE          0x00000010
 #define	QOS_FLAG_REQ_RESV            0x00000020
 #define	QOS_FLAG_DENY_LIMIT          0x00000040
+#define	QOS_FLAG_OVER_PART_QOS       0x00000080
 
 /* Define Server Resource flags */
 #define	SLURMDB_RES_FLAG_BASE        0x0fffffff /* apply to get real flags */
@@ -186,7 +188,7 @@ typedef enum {
 #define CLUSTER_FLAG_BGP    0x00000004 /* This is a bluegene/p cluster */
 #define CLUSTER_FLAG_BGQ    0x00000008 /* This is a bluegene/q cluster */
 #define CLUSTER_FLAG_SC     0x00000010 /* This is a sun constellation cluster */
-#define CLUSTER_FLAG_XCPU   0x00000020 /* This has xcpu */
+#define CLUSTER_FLAG_XCPU   0x00000020 /* This has xcpu, removed v15.08 */
 #define CLUSTER_FLAG_AIX    0x00000040 /* This is an aix cluster */
 #define CLUSTER_FLAG_MULTSD 0x00000080 /* This cluster is multiple slurmd */
 #define CLUSTER_FLAG_CRAYXT 0x00000100 /* This cluster is a ALPS cray
@@ -200,27 +202,26 @@ typedef enum {
 #define CLUSTER_FLAG_CRAY   0x00000500 /* This cluster is a cray.
 					  Combo of CRAY_A | CRAY_N */
 
-/* Define assoc_mgr_association_usage_t below to avoid including
- * extraneous slurmdb headers */
-#ifndef __assoc_mgr_association_usage_t_defined
-#  define  __assoc_mgr_association_usage_t_defined
-/* opaque data type */
-typedef struct assoc_mgr_association_usage assoc_mgr_association_usage_t;
-#endif
-
-/* Define assoc_mgr_qos_usage_t below to avoid including
- * extraneous slurmdb headers */
-#ifndef __assoc_mgr_qos_usage_t_defined
-#  define  __assoc_mgr_qos_usage_t_defined
-/* opaque data type */
-typedef struct assoc_mgr_qos_usage assoc_mgr_qos_usage_t;
-#endif
-
 /********************************************/
 
 /* Association conditions used for queries of the database */
 
-/* slurmdb_association_cond_t is used in other structures below so
+/* slurmdb_tres_rec_t is used in other structures below so this needs
+ * to be declared before hand.
+ */
+typedef struct {
+	uint64_t alloc_secs; /* total amount of secs allocated if used in an
+				accounting_list */
+	uint32_t rec_count;  /* number of records alloc_secs is, DON'T PACK */
+	uint64_t count; /* Count of tres on a given cluster, 0 if
+			   listed generically. */
+	uint32_t id;    /* Database ID for the tres */
+	char *name;     /* Name of tres if type is generic like GRES
+			   or License. */
+	char *type;     /* Type of tres (CPU, MEM, etc) */
+} slurmdb_tres_rec_t;
+
+/* slurmdb_assoc_cond_t is used in other structures below so
  * this needs to be declared first.
  */
 typedef struct {
@@ -229,27 +230,8 @@ typedef struct {
 
 	List def_qos_id_list;   /* list of char * */
 
-	List fairshare_list;	/* fairshare number */
-
-	List grp_cpu_mins_list; /* list of char * */
-	List grp_cpu_run_mins_list; /* list of char * */
-	List grp_cpus_list; /* list of char * */
-	List grp_jobs_list;	/* list of char * */
-	List grp_mem_list;	/* list of char * */
-	List grp_nodes_list; /* list of char * */
-	List grp_submit_jobs_list; /* list of char * */
-	List grp_wall_list; /* list of char * */
-
 	List id_list;		/* list of char */
 
-	List max_cpu_mins_pj_list; /* list of char * */
-	List max_cpu_run_mins_list; /* list of char * */
-	List max_cpus_pj_list; /* list of char * */
-	List max_jobs_list;	/* list of char * */
-	List max_nodes_pj_list; /* list of char * */
-	List max_submit_jobs_list; /* list of char * */
-	List max_wall_pj_list; /* list of char * */
-
 	uint16_t only_defs;  /* only send back defaults */
 
 	List parent_acct_list;	/* name of parent account */
@@ -269,7 +251,7 @@ typedef struct {
 	uint16_t without_parent_info; /* don't give me parent id/name */
 	uint16_t without_parent_limits; /* don't give me limits from
 					 * parents */
-} slurmdb_association_cond_t;
+} slurmdb_assoc_cond_t;
 
 /* slurmdb_job_cond_t is used by slurmdb_archive_cond_t so it needs to
  * be defined before hand.
@@ -341,7 +323,7 @@ typedef struct {
 /************** alphabetical order of structures **************/
 
 typedef struct {
-	slurmdb_association_cond_t *assoc_cond;/* use acct_list here for
+	slurmdb_assoc_cond_t *assoc_cond;/* use acct_list here for
 						  names */
 	List description_list; /* list of char * */
 	List organization_list; /* list of char * */
@@ -351,7 +333,7 @@ typedef struct {
 } slurmdb_account_cond_t;
 
 typedef struct {
-	List assoc_list; /* list of slurmdb_association_rec_t *'s */
+	List assoc_list; /* list of slurmdb_assoc_rec_t *'s */
 	List coordinators; /* list of slurmdb_coord_rec_t *'s */
 	char *description;
 	char *name;
@@ -360,9 +342,9 @@ typedef struct {
 
 typedef struct {
 	uint64_t alloc_secs; /* number of cpu seconds allocated */
-	uint64_t consumed_energy; /* energy allocated in Joules */
 	uint32_t id;	/* association/wckey ID		*/
 	time_t period_start; /* when this record was started */
+	slurmdb_tres_rec_t tres_rec;
 } slurmdb_accounting_rec_t;
 
 typedef struct {
@@ -399,18 +381,33 @@ typedef struct {
 			     insert of jobs since past */
 } slurmdb_archive_rec_t;
 
-/* slurmdb_association_cond_t is defined above alphabetical */
+typedef struct {
+	uint64_t count;  /* Count of tres on a given cluster, 0 if
+			    listed generically. */
+	List id_list;    /* Database ID */
+	List name_list;  /* Name of tres if type is generic like GRES
+			    or License. */
+	List type_list;  /* Type of tres (CPU, MEM, etc) */
+	uint16_t with_deleted;
+} slurmdb_tres_cond_t;
+
+/* slurmdb_tres_rec_t is defined above alphabetical */
 
-typedef struct slurmdb_association_rec {
-	List accounting_list; 	   /* list of slurmdb_accounting_rec_t *'s */
+/* slurmdb_assoc_cond_t is defined above alphabetical */
+
+/* This has slurmdb_assoc_rec_t's in it so we define the struct afterwards. */
+typedef struct slurmdb_assoc_usage slurmdb_assoc_usage_t;
+
+typedef struct slurmdb_assoc_rec {
+	List accounting_list; /* list of slurmdb_accounting_rec_t *'s */
 	char *acct;		   /* account/project associated to
-				    * association */
-	struct slurmdb_association_rec *assoc_next; /* next association with
+				    * assoc */
+	struct slurmdb_assoc_rec *assoc_next; /* next assoc with
 						       * same hash index
 						       * based off the
 						       * account/user
 						       * DOESN'T GET PACKED */
-	struct slurmdb_association_rec *assoc_next_id; /* next association with
+	struct slurmdb_assoc_rec *assoc_next_id; /* next assoc with
 							* same hash index
 							* DOESN'T GET PACKED */
 	char *cluster;		   /* cluster associated to association */
@@ -418,29 +415,37 @@ typedef struct slurmdb_association_rec {
 	uint32_t def_qos_id;       /* Which QOS id is this
 				    * associations default */
 
-	uint64_t grp_cpu_mins;     /* max number of cpu minutes the
-				    * underlying group of
-				    * associations can run for */
-	uint64_t grp_cpu_run_mins; /* max number of cpu minutes the
-				    * underlying group of
-				    * assoiciations can
-				    * having running at one time */
-	uint32_t grp_cpus;         /* max number of cpus the
-				    * underlying group of
-				    * associations can allocate at one time */
 	uint32_t grp_jobs;	   /* max number of jobs the
 				    * underlying group of associations can run
 				    * at one time */
-	uint32_t grp_mem;          /* max amount of memory the
-				    * underlying group of
-				    * associations can allocate at once */
-	uint32_t grp_nodes;        /* max number of nodes the
-				    * underlying group of
-				    * associations can allocate at once */
 	uint32_t grp_submit_jobs;  /* max number of jobs the
 				    * underlying group of
 				    * associations can submit at
 				    * one time */
+	char *grp_tres;            /* max number of cpus the
+				    * underlying group of
+				    * associations can allocate at one time */
+	uint64_t *grp_tres_ctld;   /* grp_tres broken out in an array
+				    * based off the ordering of the total
+				    * number of TRES in the system
+				    * (DON'T PACK) */
+	char *grp_tres_mins;       /* max number of cpu minutes the
+				    * underlying group of
+				    * associations can run for */
+	uint64_t *grp_tres_mins_ctld; /* grp_tres_mins broken out in an array
+				       * based off the ordering of the total
+				       * number of TRES in the system
+				       * (DON'T PACK) */
+	char *grp_tres_run_mins;   /* max number of cpu minutes the
+				    * underlying group of
+				    * associations can
+				    * have running at one time */
+	uint64_t *grp_tres_run_mins_ctld; /* grp_tres_run_mins
+					   * broken out in an array
+					   * based off the ordering
+					   * of the total number of TRES in
+					   * the system
+					   * (DON'T PACK) */
 	uint32_t grp_wall;         /* total time in hours the
 				    * underlying group of
 				    * associations can run for */
@@ -454,19 +459,37 @@ typedef struct slurmdb_association_rec {
 				    * associations and jobs as a left
 				    * most container used with rgt */
 
-	uint64_t max_cpu_mins_pj;  /* max number of cpu seconds this
-				    * association can have per job */
-	uint64_t max_cpu_run_mins; /* max number of cpu minutes this
-				    * association can
-				    * having running at one time */
-	uint32_t max_cpus_pj;      /* max number of cpus this
-				    * association can allocate per job */
 	uint32_t max_jobs;	   /* max number of jobs this
 				    * association can run at one time */
-	uint32_t max_nodes_pj;     /* max number of nodes this
-				    * association can allocate per job */
 	uint32_t max_submit_jobs;  /* max number of jobs that can be
 				      submitted by association */
+	char *max_tres_mins_pj;    /* max number of cpu seconds this
+				    * association can have per job */
+	uint64_t *max_tres_mins_ctld; /* max_tres_mins broken out in an array
+				       * based off the ordering of the
+				       * total number of TRES in the system
+				       * (DON'T PACK) */
+	char *max_tres_run_mins;   /* max number of cpu minutes this
+				    * association can
+				    * have running at one time */
+	uint64_t *max_tres_run_mins_ctld; /* max_tres_run_mins
+					   * broken out in an array
+					   * based off the ordering
+					   * of the total number of TRES in
+					   * the system
+					   * (DON'T PACK) */
+	char *max_tres_pj;         /* max number of cpus this
+				    * association can allocate per job */
+	uint64_t *max_tres_ctld;   /* max_tres broken out in an array
+				    * based off the ordering of the
+				    * total number of TRES in the system
+				    * (DON'T PACK) */
+	char *max_tres_pn;         /* max number of TRES this
+				    * association can allocate per node */
+	uint64_t *max_tres_pn_ctld;   /* max_tres_pn broken out in an array
+				       * based off the ordering of the
+				       * total number of TRES in the system
+				       * (DON'T PACK) */
 	uint32_t max_wall_pj;      /* longest time this
 				    * association can run a job */
 
@@ -485,9 +508,65 @@ typedef struct slurmdb_association_rec {
 				    * association */
 
 	uint32_t uid;		   /* user ID */
-	assoc_mgr_association_usage_t *usage;
-	char *user;		   /* user associated to association */
-} slurmdb_association_rec_t;
+	slurmdb_assoc_usage_t *usage;
+	char *user;		   /* user associated to assoc */
+} slurmdb_assoc_rec_t;
+
+struct slurmdb_assoc_usage {
+	List children_list;     /* list of children associations
+				 * (DON'T PACK) */
+	uint64_t *grp_used_tres; /* array of active tres counts */
+	uint64_t *grp_used_tres_run_secs; /* array of running tres secs
+					   * (DON'T PACK for state file) */
+
+	double grp_used_wall;   /* group count of time used in running jobs */
+	double fs_factor;	/* Fairshare factor. Not used by all algorithms
+				 * (DON'T PACK for state file) */
+	uint32_t level_shares;  /* number of shares on this level of
+				 * the tree (DON'T PACK for state file) */
+
+	slurmdb_assoc_rec_t *parent_assoc_ptr; /* ptr to direct
+						* parent assoc
+						* set in slurmctld
+						* (DON'T PACK) */
+
+	slurmdb_assoc_rec_t *fs_assoc_ptr;    /* ptr to fairshare parent
+					       * assoc if fairshare
+					       * == SLURMDB_FS_USE_PARENT
+					       * set in slurmctld
+					       * (DON'T PACK) */
+
+	double shares_norm;     /* normalized shares
+				 * (DON'T PACK for state file) */
+
+	uint32_t tres_cnt; /* size of the tres arrays,
+			    * (DON'T PACK for state file) */
+	long double usage_efctv;/* effective, normalized usage
+				 * (DON'T PACK for state file) */
+	long double usage_norm;	/* normalized usage
+				 * (DON'T PACK for state file) */
+	long double usage_raw;	/* measure of TRESBillableUnits usage */
+
+	long double *usage_tres_raw; /* measure of each TRES usage */
+	uint32_t used_jobs;	/* count of active jobs
+				 * (DON'T PACK for state file) */
+	uint32_t used_submit_jobs; /* count of jobs pending or running
+				    * (DON'T PACK for state file) */
+
+	/* Currently FAIR_TREE systems are defining data on
+	 * this struct but instead we could keep a void pointer to system
+	 * specific data. This would allow subsystems to define whatever data
+	 * they need without having to modify this struct; it would also save
+	 * space.
+	 */
+	long double level_fs;	/* (FAIR_TREE) Result of fairshare equation
+				 * compared to the association's siblings
+				 * (DON'T PACK for state file) */
+
+	bitstr_t *valid_qos;    /* qos available for this association
+				 * derived from the qos_list.
+				 * (DON'T PACK for state file) */
+};
 
 typedef struct {
 	uint16_t classification; /* how this machine is classified */
@@ -508,7 +587,6 @@ typedef struct {
 				    * DOESN'T GET PACKED */
 	char *control_host;
 	uint32_t control_port;
-	uint32_t cpu_count;
 	uint16_t dimensions; /* number of dimensions this cluster is */
 	int *dim_size; /* For convenience only.
 			* Size of each dimension For now only on
@@ -518,21 +596,21 @@ typedef struct {
 	char *name;
 	char *nodes;
 	uint32_t plugin_id_select; /* id of the select plugin */
-	slurmdb_association_rec_t *root_assoc; /* root association for
+	slurmdb_assoc_rec_t *root_assoc; /* root assoc for
 						* cluster */
 	uint16_t rpc_version; /* version of rpc this cluter is running */
+	char *tres_str;       /* comma separated list of TRES */
 } slurmdb_cluster_rec_t;
 
 typedef struct {
 	uint64_t alloc_secs; /* number of cpu seconds allocated */
-	uint64_t consumed_energy; /* energy allocated in Joules */
-	uint32_t cpu_count; /* number of cpus during time period */
 	uint64_t down_secs; /* number of cpu seconds down */
 	uint64_t idle_secs; /* number of cpu seconds idle */
 	uint64_t over_secs; /* number of cpu seconds overcommitted */
 	uint64_t pdown_secs; /* number of cpu seconds planned down */
 	time_t period_start; /* when this record was started */
 	uint64_t resv_secs; /* number of cpu seconds reserved */
+	slurmdb_tres_rec_t tres_rec;
 } slurmdb_cluster_accounting_rec_t;
 
 typedef struct {
@@ -564,7 +642,6 @@ typedef struct {
 	char *cluster;          /* Name of associated cluster */
 	char *cluster_nodes;    /* node list in cluster during time
 				 * period (only set in a cluster event) */
-	uint32_t cpu_count;     /* Number of CPUs effected by event */
 	uint16_t event_type;    /* type of event (slurmdb_event_type_t) */
 	char *node_name;        /* Name of node (only set in a node event) */
 	time_t period_end;      /* End of period */
@@ -574,6 +651,7 @@ typedef struct {
 	uint32_t reason_uid;    /* uid of that who set the reason */
 	uint16_t state;         /* State of node during time
 				   period (only set in a node event) */
+	char *tres_str;         /* TRES touched by this event */
 } slurmdb_event_rec_t;
 
 /* slurmdb_job_cond_t is defined above alphabetical */
@@ -586,7 +664,6 @@ typedef struct {
 
 typedef struct {
 	char    *account;
-	uint32_t alloc_cpus;
 	char	*alloc_gres;
 	uint32_t alloc_nodes;
 	uint32_t array_job_id;	/* job_id of a job array or 0 if N/A */
@@ -624,7 +701,7 @@ typedef struct {
 	char *resv_name;
 	uint32_t show_full;
 	time_t start;
-	uint16_t	state;
+	uint32_t state;
 	slurmdb_stats_t stats;
 	List    steps; /* list of slurmdb_step_rec_t *'s */
 	time_t submit;
@@ -635,6 +712,8 @@ typedef struct {
 	uint32_t tot_cpu_sec;
 	uint32_t tot_cpu_usec;
 	uint16_t track_steps;
+	char *tres_alloc_str;
+	char *tres_req_str;
 	uint32_t uid;
 	char 	*used_gres;
 	char    *user;
@@ -644,50 +723,112 @@ typedef struct {
 	uint32_t wckeyid;
 } slurmdb_job_rec_t;
 
+typedef struct {
+	List job_list; /* list of job pointers to submitted/running
+			  jobs (DON'T PACK) */
+	uint32_t grp_used_jobs;	/* count of active jobs (DON'T PACK
+				 * for state file) */
+	uint32_t grp_used_submit_jobs; /* count of jobs pending or running
+					* (DON'T PACK for state file) */
+	uint64_t *grp_used_tres; /* count of tres in use in this qos
+				 * (DON'T PACK for state file) */
+	uint64_t *grp_used_tres_run_secs; /* count of running tres secs
+					 * (DON'T PACK for state file) */
+	double grp_used_wall;   /* group count of time (minutes) used in
+				 * running jobs (DON'T PACK for state file) */
+	double norm_priority;/* normalized priority (DON'T PACK for
+			      * state file) */
+	uint32_t tres_cnt; /* size of the tres arrays,
+			    * (DON'T PACK for state file) */
+	long double usage_raw;	/* measure of resource usage (DON'T
+				 * PACK for state file) */
+
+	long double *usage_tres_raw; /* measure of each TRES usage (DON'T
+				      * PACK for state file)*/
+	List user_limit_list; /* slurmdb_used_limits_t's (DON'T PACK
+			       * for state file) */
+} slurmdb_qos_usage_t;
+
 typedef struct {
 	char *description;
 	uint32_t id;
 	uint32_t flags; /* flags for various things to enforce or
 			   override other limits */
 	uint32_t grace_time; /* preemption grace time */
-	uint64_t grp_cpu_mins; /* max number of cpu minutes all jobs
-				* running under this qos can run for */
-	uint64_t grp_cpu_run_mins; /* max number of cpu minutes all jobs
-				    * running under this qos can
-				    * having running at one time */
-	uint32_t grp_cpus; /* max number of cpus this qos
-			      can allocate at one time */
 	uint32_t grp_jobs;	/* max number of jobs this qos can run
 				 * at one time */
-	uint32_t grp_mem; /* max amount of memory this qos
-			     can allocate at one time */
-	uint32_t grp_nodes; /* max number of nodes this qos
-			       can allocate at once */
 	uint32_t grp_submit_jobs; /* max number of jobs this qos can submit at
 				   * one time */
+	char *grp_tres;            /* max number of tres ths qos can
+				    * allocate at one time */
+	uint64_t *grp_tres_ctld;   /* grp_tres broken out in an array
+				    * based off the ordering of the total
+				    * number of TRES in the system
+				    * (DON'T PACK) */
+	char *grp_tres_mins;       /* max number of tres minutes this
+				    * qos can run for */
+	uint64_t *grp_tres_mins_ctld; /* grp_tres_mins broken out in an array
+				       * based off the ordering of the total
+				       * number of TRES in the system
+				       * (DON'T PACK) */
+	char *grp_tres_run_mins;   /* max number of tres minutes this
+				    * qos can have running at one time */
+	uint64_t *grp_tres_run_mins_ctld; /* grp_tres_run_mins
+					   * broken out in an array
+					   * based off the ordering
+					   * of the total number of TRES in
+					   * the system
+					   * (DON'T PACK) */
 	uint32_t grp_wall; /* total time in hours this qos can run for */
 
-	uint64_t max_cpu_mins_pj; /* max number of cpu mins a job can
-				   * use with this qos */
-	uint64_t max_cpu_run_mins_pu; /* max number of cpu mins a user can
-				       * allocate at a given time when
-				       * using this qos (Not yet valid option) */
-	uint32_t max_cpus_pj; /* max number of cpus a job can
-			       * allocate with this qos */
-	uint32_t max_cpus_pu; /* max number of cpus a user can
-			       * allocate with this qos at one time */
 	uint32_t max_jobs_pu;	/* max number of jobs a user can
 				 * run with this qos at one time */
-	uint32_t max_nodes_pj; /* max number of nodes a job can
-				* allocate with this qos at one time */
-	uint32_t max_nodes_pu; /* max number of nodes a user can
-				* allocate with this qos at one time */
 	uint32_t max_submit_jobs_pu; /* max number of jobs a user can
 					submit with this qos at once */
+	char *max_tres_mins_pj;    /* max number of tres seconds this
+				    * qos can have per job */
+	uint64_t *max_tres_mins_pj_ctld; /* max_tres_mins broken out in an array
+					  * based off the ordering of the
+					  * total number of TRES in the system
+					  * (DON'T PACK) */
+	char *max_tres_pj;         /* max number of tres this
+				    * qos can allocate per job */
+	uint64_t *max_tres_pj_ctld;   /* max_tres_pj broken out in an array
+				       * based off the ordering of the
+				       * total number of TRES in the system
+				       * (DON'T PACK) */
+	char *max_tres_pn;         /* max number of tres this
+				    * qos can allocate per node */
+	uint64_t *max_tres_pn_ctld;   /* max_tres_pn broken out in an array
+				       * based off the ordering of the
+				       * total number of TRES in the system
+				       * (DON'T PACK) */
+	char *max_tres_pu;         /* max number of tres this
+				    * QOS can allocate per user */
+	uint64_t *max_tres_pu_ctld;   /* max_tres broken out in an array
+				       * based off the ordering of the
+				       * total number of TRES in the system
+				       * (DON'T PACK) */
+	char *max_tres_run_mins_pu;   /* max number of tres minutes this
+				       * qos can have running at one
+				       * time, currently this doesn't
+				       * do anything.
+				       */
+	uint64_t *max_tres_run_mins_pu_ctld; /* max_tres_run_mins_pu
+					      * broken out in an array
+					      * based off the ordering
+					      * of the total number of TRES in
+					      * the system, currently
+					      * this doesn't do anything.
+					      * (DON'T PACK) */
 	uint32_t max_wall_pj; /* longest time this
 			       * qos can run a job */
-	uint32_t min_cpus_pj; /* min number of cpus a job can
-			       * allocate with this qos */
+	char *min_tres_pj; /* min number of tres a job can
+			    * allocate with this qos */
+	uint64_t *min_tres_pj_ctld;   /* min_tres_pj broken out in an array
+				       * based off the ordering of the
+				       * total number of TRES in the system
+				       * (DON'T PACK) */
 
 	char *name;
 	bitstr_t *preempt_bitstr; /* other qos' this qos can preempt */
@@ -697,7 +838,7 @@ typedef struct {
 	uint16_t preempt_mode;	/* See PREEMPT_MODE_* in slurm/slurm.h */
 	uint32_t priority;  /* ranged int needs to be a unint for
 			     * heterogeneous systems */
-	assoc_mgr_qos_usage_t *usage; /* For internal use only, DON'T PACK */
+	slurmdb_qos_usage_t *usage; /* For internal use only, DON'T PACK */
 	double usage_factor; /* factor to apply to usage in this qos */
 	double usage_thres; /* percent of effective usage of an
 			       association when breached will deny
@@ -725,11 +866,8 @@ typedef struct {
 } slurmdb_reservation_cond_t;
 
 typedef struct {
-	uint64_t alloc_secs; /* number of cpu seconds allocated */
 	char *assocs; /* comma separated list of associations */
 	char *cluster; /* cluster reservation is for */
-	uint32_t cpus; /* how many cpus are in reservation */
-	uint64_t down_secs; /* number of cpu seconds down */
 	uint32_t flags; /* flags for reservation. */
 	uint32_t id;   /* id of reservation. */
 	char *name; /* name of reservation */
@@ -740,6 +878,10 @@ typedef struct {
 	time_t time_start_prev; /* If start time was changed this is
 				 * the pervious start time.  Needed
 				 * for accounting */
+	char *tres_str;
+	List tres_list; /* list of slurmdb_tres_rec_t, only set when
+			 * job usage is requested.
+			 */
 } slurmdb_reservation_rec_t;
 
 typedef struct {
@@ -754,24 +896,26 @@ typedef struct {
 	time_t end;
 	int32_t exitcode;
 	slurmdb_job_rec_t *job_ptr;
-	uint32_t ncpus;
 	uint32_t nnodes;
 	char *nodes;
 	uint32_t ntasks;
 	char *pid_str;
-	uint32_t req_cpufreq;
+	uint32_t req_cpufreq_min;
+	uint32_t req_cpufreq_max;
+	uint32_t req_cpufreq_gov;
 	uint32_t requid;
 	time_t start;
-	enum job_states	state;
+	uint32_t state;
 	slurmdb_stats_t stats;
 	uint32_t stepid;	/* job's step number */
 	char *stepname;
 	uint32_t suspended;
 	uint32_t sys_cpu_sec;
 	uint32_t sys_cpu_usec;
-	uint16_t task_dist;
+	uint32_t task_dist;
 	uint32_t tot_cpu_sec;
 	uint32_t tot_cpu_usec;
+	char *tres_alloc_str;
 	uint32_t user_cpu_sec;
 	uint32_t user_cpu_usec;
 } slurmdb_step_rec_t;
@@ -836,19 +980,19 @@ typedef struct {
 /* Right now this is used in the slurmdb_qos_rec_t structure.  In the
  * user_limit_list. */
 typedef struct {
-	uint64_t cpu_run_mins; /* how many cpu mins are allocated
-				* currently */
-	uint32_t cpus; /* count of CPUs allocated */
 	uint32_t jobs;	/* count of active jobs */
-	uint32_t nodes;	/* count of nodes allocated */
 	uint32_t submit_jobs; /* count of jobs pending or running */
+	uint64_t *tres; /* array of TRES allocated */
+	uint64_t *tres_run_mins; /* array of how many TRES mins are
+				  * allocated currently, currently this doesn't
+				  * do anything and isn't set up. */
 	uint32_t uid;
 } slurmdb_used_limits_t;
 
 typedef struct {
 	uint16_t admin_level; /* really slurmdb_admin_level_t but for
 				 packing purposes needs to be uint16_t */
-	slurmdb_association_cond_t *assoc_cond; /* use user_list here for
+	slurmdb_assoc_cond_t *assoc_cond; /* use user_list here for
 						   names and acct_list for
 						   default accounts */
 	List def_acct_list; /* list of char * (We can't really use
@@ -903,8 +1047,7 @@ typedef struct {
 } slurmdb_wckey_cond_t;
 
 typedef struct {
-	List accounting_list; 	/* list of slurmdb_accounting_rec_t *'s */
-
+	List accounting_list; /* list of slurmdb_accounting_rec_t *'s */
 	char *cluster;		/* cluster associated */
 
 	uint32_t id;		/* id identifing a combination of
@@ -926,7 +1069,7 @@ typedef struct {
 } slurmdb_print_tree_t;
 
 typedef struct {
-	slurmdb_association_rec_t *assoc;
+	slurmdb_assoc_rec_t *assoc;
 	char *sort_name;
 	List children;
 } slurmdb_hierarchical_rec_t;
@@ -936,9 +1079,8 @@ typedef struct {
 typedef struct {
 	char *acct;
 	char *cluster;
-	uint64_t consumed_energy;
-	uint64_t cpu_secs;
 	char *parent_acct;
+	List tres_list; /* list of slurmdb_tres_rec_t *'s */
 	char *user;
 } slurmdb_report_assoc_rec_t;
 
@@ -946,49 +1088,44 @@ typedef struct {
 	char *acct;
 	List acct_list; /* list of char *'s */
 	List assoc_list; /* list of slurmdb_report_assoc_rec_t's */
-	uint64_t consumed_energy;
-	uint64_t cpu_secs;
 	char *name;
+	List tres_list; /* list of slurmdb_tres_rec_t *'s */
 	uid_t uid;
 } slurmdb_report_user_rec_t;
 
 typedef struct {
+	List accounting_list; /* list of slurmdb_accounting_rec_t *'s */
 	List assoc_list; /* list of slurmdb_report_assoc_rec_t *'s */
-	uint64_t consumed_energy;
-	uint32_t cpu_count;
-	uint64_t cpu_secs;
 	char *name;
+	List tres_list; /* list of slurmdb_tres_rec_t *'s */
 	List user_list; /* list of slurmdb_report_user_rec_t *'s */
 } slurmdb_report_cluster_rec_t;
 
 typedef struct {
+	uint32_t count; /* count of jobs */
 	List jobs; /* This should be a NULL destroy since we are just
 		    * putting a pointer to a slurmdb_job_rec_t here
 		    * not allocating any new memory */
 	uint32_t min_size; /* smallest size of job in cpus here 0 if first */
 	uint32_t max_size; /* largest size of job in cpus here INFINITE if
 			    * last */
-	uint32_t count; /* count of jobs */
-	uint64_t cpu_secs; /* how many cpus secs taken up by this
-			    * grouping */
+	List tres_list; /* list of slurmdb_tres_rec_t *'s */
 } slurmdb_report_job_grouping_t;
 
 typedef struct {
 	char *acct; /*account name */
 	uint32_t count; /* total count of jobs taken up by this acct */
-	uint64_t cpu_secs; /* how many cpus secs taken up by this
-			    * acct */
 	List groups; /* containing slurmdb_report_job_grouping_t's*/
 	uint32_t lft;
 	uint32_t rgt;
+	List tres_list; /* list of slurmdb_tres_rec_t *'s */
 } slurmdb_report_acct_grouping_t;
 
 typedef struct {
+	List acct_list; /* containing slurmdb_report_acct_grouping_t's */
 	char *cluster; /*cluster name */
 	uint32_t count; /* total count of jobs taken up by this cluster */
-	uint64_t cpu_secs; /* how many cpus secs taken up by this
-			    * cluster */
-	List acct_list; /* containing slurmdb_report_acct_grouping_t's */
+	List tres_list; /* list of slurmdb_tres_rec_t *'s */
 } slurmdb_report_cluster_grouping_t;
 
 /* global variable for cross cluster communication */
@@ -1053,39 +1190,39 @@ extern int slurmdb_archive_load(void *db_conn,
 
 /*
  * add associations to accounting system
- * IN:  association_list List of slurmdb_association_rec_t *
+ * IN:  assoc_list List of slurmdb_assoc_rec_t *
  * RET: SLURM_SUCCESS on success SLURM_ERROR else
  */
 extern int slurmdb_associations_add(void *db_conn, List assoc_list);
 
 /*
  * get info from the storage
- * IN:  slurmdb_association_cond_t *
- * RET: List of slurmdb_association_rec_t *
+ * IN:  slurmdb_assoc_cond_t *
+ * RET: List of slurmdb_assoc_rec_t *
  * note List needs to be freed with slurm_list_destroy() when called
  */
 extern List slurmdb_associations_get(void *db_conn,
-				     slurmdb_association_cond_t *assoc_cond);
+				     slurmdb_assoc_cond_t *assoc_cond);
 
 /*
  * modify existing associations in the accounting system
- * IN:  slurmdb_association_cond_t *assoc_cond
- * IN:  slurmdb_association_rec_t *assoc
+ * IN:  slurmdb_assoc_cond_t *assoc_cond
+ * IN:  slurmdb_assoc_rec_t *assoc
  * RET: List containing (char *'s) else NULL on error
  * note List needs to be freed with slurm_list_destroy() when called
  */
 extern List slurmdb_associations_modify(void *db_conn,
-					slurmdb_association_cond_t *assoc_cond,
-					slurmdb_association_rec_t *assoc);
+					slurmdb_assoc_cond_t *assoc_cond,
+					slurmdb_assoc_rec_t *assoc);
 
 /*
  * remove associations from accounting system
- * IN:  slurmdb_association_cond_t *assoc_cond
+ * IN:  slurmdb_assoc_cond_t *assoc_cond
  * RET: List containing (char *'s) else NULL on error
  * note List needs to be freed with slurm_list_destroy() when called
  */
 extern List slurmdb_associations_remove(
-	void *db_conn, slurmdb_association_cond_t *assoc_cond);
+	void *db_conn, slurmdb_assoc_cond_t *assoc_cond);
 
 /************** cluster functions **************/
 
@@ -1129,20 +1266,20 @@ extern List slurmdb_clusters_remove(void *db_conn,
 /************** cluster report functions **************/
 
 /* report for clusters of account per user
- * IN: slurmdb_association_cond_t *assoc_cond
+ * IN: slurmdb_assoc_cond_t *assoc_cond
  * RET: List containing (slurmdb_report_cluster_rec_t *'s) else NULL on error
  * note List needs to be freed with slurm_list_destroy() when called
  */
 extern List slurmdb_report_cluster_account_by_user(void *db_conn,
-						   slurmdb_association_cond_t *assoc_cond);
+						   slurmdb_assoc_cond_t *assoc_cond);
 
 /* report for clusters of users per account
- * IN: slurmdb_association_cond_t *assoc_cond
+ * IN: slurmdb_assoc_cond_t *assoc_cond
  * RET: List containing (slurmdb_report_cluster_rec_t *'s) else NULL on error
  * note List needs to be freed with slurm_list_destroy() when called
  */
 extern List slurmdb_report_cluster_user_by_account(void *db_conn,
-						   slurmdb_association_cond_t *assoc_cond);
+						   slurmdb_assoc_cond_t *assoc_cond);
 
 /* report for clusters of wckey per user
  * IN: slurmdb_wckey_cond_t *wckey_cond
@@ -1255,12 +1392,12 @@ extern List slurmdb_jobs_get(void *db_conn, slurmdb_job_cond_t *job_cond);
 
 /*
  * get info from the storage
- * IN:  slurmdb_association_cond_t *
- * RET: List of slurmdb_association_rec_t *
+ * IN:  slurmdb_assoc_cond_t *
+ * RET: List of slurmdb_assoc_rec_t *
  * note List needs to be freed with slurm_list_destroy() when called
  */
 extern List slurmdb_problems_get(void *db_conn,
-				 slurmdb_association_cond_t *assoc_cond);
+				 slurmdb_assoc_cond_t *assoc_cond);
 
 /*
  * get info from the storage
@@ -1286,14 +1423,28 @@ extern List slurmdb_txn_get(void *db_conn, slurmdb_txn_cond_t *txn_cond);
  * the position of the id in the select plugin array, as well as sets up the
  * control_addr and dim_size parts of the structure.
  *
- * IN: cluster_names - comman separated string of cluster names
+ * IN: cluster_names - comma separated string of cluster names
  * RET: List of slurmdb_cluster_rec_t *
  * note List needs to bbe freed with slurm_list_destroy() when called
  */
 extern List slurmdb_get_info_cluster(char *cluster_names);
 
+/*
+ * get the first cluster that will run a job
+ * IN: req - description of resource allocation request
+ * IN: cluster_names - comma separated string of cluster names
+ * OUT: cluster_rec - record of selected cluster or NULL if none found or
+ * 		      cluster_names is NULL
+ * RET: SLURM_SUCCESS on success SLURM_ERROR else
+ * note cluster_rec needs to be freed with slurmdb_destroy_cluster_rec() when
+ * called
+ */
+extern int slurmdb_get_first_avail_cluster(job_desc_msg_t *req,
+        char *cluster_names, slurmdb_cluster_rec_t **cluster_rec);
+
 /************** helper functions **************/
-extern void slurmdb_destroy_user_defs(void *object);
+extern void slurmdb_destroy_assoc_usage(void *object);
+extern void slurmdb_destroy_qos_usage(void *object);
 extern void slurmdb_destroy_user_rec(void *object);
 extern void slurmdb_destroy_account_rec(void *object);
 extern void slurmdb_destroy_coord_rec(void *object);
@@ -1301,9 +1452,12 @@ extern void slurmdb_destroy_clus_res_rec(void *object);
 extern void slurmdb_destroy_cluster_accounting_rec(void *object);
 extern void slurmdb_destroy_cluster_rec(void *object);
 extern void slurmdb_destroy_accounting_rec(void *object);
-extern void slurmdb_destroy_association_rec(void *object);
+extern void slurmdb_free_assoc_mgr_state_msg(void *object);
+extern void slurmdb_free_assoc_rec_members(slurmdb_assoc_rec_t *assoc);
+extern void slurmdb_destroy_assoc_rec(void *object);
 extern void slurmdb_destroy_event_rec(void *object);
 extern void slurmdb_destroy_job_rec(void *object);
+extern void slurmdb_free_qos_rec_members(slurmdb_qos_rec_t *qos);
 extern void slurmdb_destroy_qos_rec(void *object);
 extern void slurmdb_destroy_reservation_rec(void *object);
 extern void slurmdb_destroy_step_rec(void *object);
@@ -1311,6 +1465,8 @@ extern void slurmdb_destroy_res_rec(void *object);
 extern void slurmdb_destroy_txn_rec(void *object);
 extern void slurmdb_destroy_wckey_rec(void *object);
 extern void slurmdb_destroy_archive_rec(void *object);
+extern void slurmdb_destroy_tres_rec_noalloc(void *object);
+extern void slurmdb_destroy_tres_rec(void *object);
 extern void slurmdb_destroy_report_assoc_rec(void *object);
 extern void slurmdb_destroy_report_user_rec(void *object);
 extern void slurmdb_destroy_report_cluster_rec(void *object);
@@ -1318,7 +1474,8 @@ extern void slurmdb_destroy_report_cluster_rec(void *object);
 extern void slurmdb_destroy_user_cond(void *object);
 extern void slurmdb_destroy_account_cond(void *object);
 extern void slurmdb_destroy_cluster_cond(void *object);
-extern void slurmdb_destroy_association_cond(void *object);
+extern void slurmdb_destroy_tres_cond(void *object);
+extern void slurmdb_destroy_assoc_cond(void *object);
 extern void slurmdb_destroy_event_cond(void *object);
 extern void slurmdb_destroy_job_cond(void *object);
 extern void slurmdb_destroy_job_modify_cond(void *object);
@@ -1340,18 +1497,20 @@ extern void slurmdb_destroy_report_job_grouping(void *object);
 extern void slurmdb_destroy_report_acct_grouping(void *object);
 extern void slurmdb_destroy_report_cluster_grouping(void *object);
 
-extern void slurmdb_init_association_rec(slurmdb_association_rec_t *assoc,
+extern void slurmdb_init_assoc_rec(slurmdb_assoc_rec_t *assoc,
 					 bool free_it);
 extern void slurmdb_init_clus_res_rec(slurmdb_clus_res_rec_t *clus_res,
 				      bool free_it);
 extern void slurmdb_init_cluster_rec(slurmdb_cluster_rec_t *cluster,
 				     bool free_it);
 extern void slurmdb_init_qos_rec(slurmdb_qos_rec_t *qos,
-				 bool free_it);
+				 bool free_it, uint32_t init_val);
 extern void slurmdb_init_res_rec(slurmdb_res_rec_t *res,
 				 bool free_it);
 extern void slurmdb_init_wckey_rec(slurmdb_wckey_rec_t *wckey,
 				   bool free_it);
+extern void slurmdb_init_tres_cond(slurmdb_tres_cond_t *tres,
+				    bool free_it);
 extern void slurmdb_init_cluster_cond(slurmdb_cluster_cond_t *cluster,
 				      bool free_it);
 extern void slurmdb_init_res_cond(slurmdb_res_cond_t *cluster,
@@ -1440,11 +1599,29 @@ extern List slurmdb_qos_modify(void *db_conn,
  */
 extern List slurmdb_qos_remove(void *db_conn, slurmdb_qos_cond_t *qos_cond);
 
+/************** tres functions **************/
+
+/*
+ * add tres's to accounting system
+ * IN:  tres_list List of char *
+ * RET: SLURM_SUCCESS on success SLURM_ERROR else
+ */
+extern int slurmdb_tres_add(void *db_conn, uint32_t uid, List tres_list);
+
+/*
+ * get info from the storage
+ * IN:  slurmdb_tres_cond_t *
+ * RET: List of slurmdb_tres_rec_t *
+ * note List needs to be freed with slurm_list_destroy() when called
+ */
+extern List slurmdb_tres_get(void *db_conn, slurmdb_tres_cond_t *tres_cond);
+
+
 /************** usage functions **************/
 
 /*
  * get info from the storage
- * IN/OUT:  in void * (slurmdb_association_rec_t *) or
+ * IN/OUT:  in void * (slurmdb_assoc_rec_t *) or
  *          (slurmdb_wckey_rec_t *) of (slurmdb_cluster_rec_t *) with
  *          the id, and cluster set.
  * IN:  type what type is 'in'
diff --git a/src/Makefile.am b/src/Makefile.am
index 24bd9624f..47807fc65 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -3,6 +3,7 @@ SUBDIRS =		\
 	common		\
 	database	\
 	db_api		\
+	layouts		\
 	plugins		\
 	sacct		\
 	sacctmgr	\
diff --git a/src/Makefile.in b/src/Makefile.in
index e077e0038..79501548f 100644
--- a/src/Makefile.in
+++ b/src/Makefile.in
@@ -97,6 +97,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -105,10 +106,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -121,7 +124,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -182,9 +185,9 @@ am__define_uniq_tagged_files = \
   done | $(am__uniquify_input)`
 ETAGS = etags
 CTAGS = ctags
-DIST_SUBDIRS = api common database db_api plugins sacct sacctmgr \
-	salloc sattach sbatch sbcast scancel scontrol sdiag sinfo \
-	slurmctld slurmd slurmdbd smap smd sprio squeue sreport \
+DIST_SUBDIRS = api common database db_api layouts plugins sacct \
+	sacctmgr salloc sattach sbatch sbcast scancel scontrol sdiag \
+	sinfo slurmctld slurmd slurmdbd smap smd sprio squeue sreport \
 	srun_cr sshare sstat strigger sview srun
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
 am__relativize = \
@@ -253,6 +256,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -302,8 +307,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -322,6 +331,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -365,6 +377,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -388,6 +401,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -449,10 +463,10 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = api common database db_api plugins sacct sacctmgr salloc \
-	sattach sbatch sbcast scancel scontrol sdiag sinfo slurmctld \
-	slurmd slurmdbd smap smd sprio squeue sreport srun_cr sshare \
-	sstat strigger sview $(am__append_1)
+SUBDIRS = api common database db_api layouts plugins sacct sacctmgr \
+	salloc sattach sbatch sbcast scancel scontrol sdiag sinfo \
+	slurmctld slurmd slurmdbd smap smd sprio squeue sreport \
+	srun_cr sshare sstat strigger sview $(am__append_1)
 all: all-recursive
 
 .SUFFIXES:
diff --git a/src/api/Makefile.am b/src/api/Makefile.am
index 8ab56c692..c5053cc52 100644
--- a/src/api/Makefile.am
+++ b/src/api/Makefile.am
@@ -82,6 +82,8 @@ slurmapi_src =           \
 	allocate.c       \
 	allocate_msg.c   \
 	block_info.c     \
+	burst_buffer_info.c \
+	assoc_mgr_info.c    \
 	cancel.c         \
 	checkpoint.c     \
 	complete.c       \
@@ -90,24 +92,31 @@ slurmapi_src =           \
 	init_msg.c       \
 	job_info.c       \
 	job_step_info.c  \
+	layout_info.c    \
+	license_info.c   \
 	node_info.c      \
 	partition_info.c \
+	pmi_server.c     \
+	pmi_server.h     \
+	powercap_info.c  \
 	reservation_info.c \
 	signal.c         \
 	slurm_get_statistics.c \
 	slurm_hostlist.c \
-	slurm_pmi.c slurm_pmi.h	\
-	step_ctx.c step_ctx.h \
-	step_io.c step_io.h \
-	step_launch.c step_launch.h \
-	pmi_server.c pmi_server.h \
+	slurm_pmi.c      \
+	slurm_pmi.h	 \
+	step_ctx.c       \
+	step_ctx.h       \
+	step_io.c        \
+	step_io.h        \
+	step_launch.c    \
+	step_launch.h    \
 	submit.c         \
 	suspend.c        \
 	topo_info.c      \
 	triggers.c       \
 	reconfigure.c    \
-	update_config.c  \
-	license_info.c
+	update_config.c
 
 common_dir = $(top_builddir)/src/common
 
diff --git a/src/api/Makefile.in b/src/api/Makefile.in
index ef4c3edd6..00b3eb972 100644
--- a/src/api/Makefile.in
+++ b/src/api/Makefile.in
@@ -103,6 +103,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -111,10 +112,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -127,7 +130,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -182,14 +185,15 @@ libslurm_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
 am__DEPENDENCIES_1 = $(common_dir)/libcommon.la \
 	$(common_dir)/libspank.la $(common_dir)/libeio.la
 libslurmhelper_la_DEPENDENCIES = $(am__DEPENDENCIES_1)
-am__objects_1 = allocate.lo allocate_msg.lo block_info.lo cancel.lo \
-	checkpoint.lo complete.lo config_info.lo front_end_info.lo \
-	init_msg.lo job_info.lo job_step_info.lo node_info.lo \
-	partition_info.lo reservation_info.lo signal.lo \
-	slurm_get_statistics.lo slurm_hostlist.lo slurm_pmi.lo \
-	step_ctx.lo step_io.lo step_launch.lo pmi_server.lo submit.lo \
-	suspend.lo topo_info.lo triggers.lo reconfigure.lo \
-	update_config.lo license_info.lo
+am__objects_1 = allocate.lo allocate_msg.lo block_info.lo \
+	burst_buffer_info.lo assoc_mgr_info.lo cancel.lo checkpoint.lo \
+	complete.lo config_info.lo front_end_info.lo init_msg.lo \
+	job_info.lo job_step_info.lo layout_info.lo license_info.lo \
+	node_info.lo partition_info.lo pmi_server.lo powercap_info.lo \
+	reservation_info.lo signal.lo slurm_get_statistics.lo \
+	slurm_hostlist.lo slurm_pmi.lo step_ctx.lo step_io.lo \
+	step_launch.lo submit.lo suspend.lo topo_info.lo triggers.lo \
+	reconfigure.lo update_config.lo
 am_libslurmhelper_la_OBJECTS = $(am__objects_1)
 libslurmhelper_la_OBJECTS = $(am_libslurmhelper_la_OBJECTS)
 PROGRAMS = $(noinst_PROGRAMS)
@@ -302,6 +306,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -352,8 +358,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -372,6 +382,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -415,6 +428,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -438,6 +452,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -575,6 +590,8 @@ slurmapi_src = \
 	allocate.c       \
 	allocate_msg.c   \
 	block_info.c     \
+	burst_buffer_info.c \
+	assoc_mgr_info.c    \
 	cancel.c         \
 	checkpoint.c     \
 	complete.c       \
@@ -583,24 +600,31 @@ slurmapi_src = \
 	init_msg.c       \
 	job_info.c       \
 	job_step_info.c  \
+	layout_info.c    \
+	license_info.c   \
 	node_info.c      \
 	partition_info.c \
+	pmi_server.c     \
+	pmi_server.h     \
+	powercap_info.c  \
 	reservation_info.c \
 	signal.c         \
 	slurm_get_statistics.c \
 	slurm_hostlist.c \
-	slurm_pmi.c slurm_pmi.h	\
-	step_ctx.c step_ctx.h \
-	step_io.c step_io.h \
-	step_launch.c step_launch.h \
-	pmi_server.c pmi_server.h \
+	slurm_pmi.c      \
+	slurm_pmi.h	 \
+	step_ctx.c       \
+	step_ctx.h       \
+	step_io.c        \
+	step_io.h        \
+	step_launch.c    \
+	step_launch.h    \
 	submit.c         \
 	suspend.c        \
 	topo_info.c      \
 	triggers.c       \
 	reconfigure.c    \
-	update_config.c  \
-	license_info.c
+	update_config.c
 
 common_dir = $(top_builddir)/src/common
 slurmapi_add = \
@@ -758,7 +782,9 @@ distclean-compile:
 
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/allocate.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/allocate_msg.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/assoc_mgr_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/block_info.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/burst_buffer_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cancel.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checkpoint.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/complete.Plo@am__quote@
@@ -767,11 +793,13 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/init_msg.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_step_info.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/layout_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/license_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/node_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/partition_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pmi.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pmi_server.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/powercap_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/reconfigure.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/reservation_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/signal.Plo@am__quote@
diff --git a/src/api/allocate.c b/src/api/allocate.c
index 373de9a92..833f67035 100644
--- a/src/api/allocate.c
+++ b/src/api/allocate.c
@@ -294,40 +294,24 @@ slurm_allocate_resources_blocking (const job_desc_msg_t *user_req,
  */
 int slurm_job_will_run (job_desc_msg_t *req)
 {
-	slurm_msg_t req_msg, resp_msg;
-	will_run_response_msg_t *will_run_resp;
+	will_run_response_msg_t *will_run_resp = NULL;
 	char buf[64];
 	bool host_set = false;
 	int rc;
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 	char *type = "processors";
-	/* req.immediate = true;    implicit */
+
 	if ((req->alloc_node == NULL) &&
 	    (gethostname_short(buf, sizeof(buf)) == 0)) {
 		req->alloc_node = buf;
 		host_set = true;
 	}
-	slurm_msg_t_init(&req_msg);
-	req_msg.msg_type = REQUEST_JOB_WILL_RUN;
-	req_msg.data     = req;
-
-	rc = slurm_send_recv_controller_msg(&req_msg, &resp_msg);
 
-	if (host_set)
-		req->alloc_node = NULL;
+	rc = slurm_job_will_run2(req, &will_run_resp);
 
-	if (rc < 0)
-		return SLURM_SOCKET_ERROR;
-
-	switch (resp_msg.msg_type) {
-	case RESPONSE_SLURM_RC:
-		if (_handle_rc_msg(&resp_msg) < 0)
-			return SLURM_PROTOCOL_ERROR;
-		break;
-	case RESPONSE_JOB_WILL_RUN:
+	if ((rc == 0) && will_run_resp) {
 		if (cluster_flags & CLUSTER_FLAG_BG)
 			type = "cnodes";
-		will_run_resp = (will_run_response_msg_t *) resp_msg.data;
 		slurm_make_time_str(&will_run_resp->start_time,
 				    buf, sizeof(buf));
 		info("Job %u to start at %s using %u %s"
@@ -346,11 +330,51 @@ int slurm_job_will_run (job_desc_msg_t *req)
 					sep = ",";
 				xstrfmtcat(job_list, "%s%u", sep, *job_id_ptr);
 			}
+			list_iterator_destroy(itr);
 			info("  Preempts: %s", job_list);
 			xfree(job_list);
 		}
 
 		slurm_free_will_run_response_msg(will_run_resp);
+	}
+
+	if (host_set)
+		req->alloc_node = NULL;
+
+	return rc;
+}
+
+/*
+ * slurm_job_will_run2 - determine if a job would execute immediately if
+ * 	submitted now
+ * IN job_desc_msg - description of resource allocation request
+ * OUT will_run_resp - job run time data
+ * 	free using slurm_free_will_run_response_msg()
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+int slurm_job_will_run2 (job_desc_msg_t *req,
+			 will_run_response_msg_t **will_run_resp)
+{
+	slurm_msg_t req_msg, resp_msg;
+	int rc;
+	/* req.immediate = true;    implicit */
+
+	slurm_msg_t_init(&req_msg);
+	req_msg.msg_type = REQUEST_JOB_WILL_RUN;
+	req_msg.data     = req;
+
+	rc = slurm_send_recv_controller_msg(&req_msg, &resp_msg);
+
+	if (rc < 0)
+		return SLURM_SOCKET_ERROR;
+
+	switch (resp_msg.msg_type) {
+	case RESPONSE_SLURM_RC:
+		if (_handle_rc_msg(&resp_msg) < 0)
+			return SLURM_PROTOCOL_ERROR;
+		break;
+	case RESPONSE_JOB_WILL_RUN:
+		*will_run_resp = (will_run_response_msg_t *) resp_msg.data;
 		break;
 	default:
 		slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
@@ -486,18 +510,21 @@ slurm_allocation_lookup_lite(uint32_t jobid,
 /*
  * slurm_sbcast_lookup - retrieve info for an existing resource allocation
  *	including a credential needed for sbcast
- * IN jobid - job allocation identifier
+ * IN job_id - job allocation identifier
+ * IN step_id - step allocation identifier (or NO_VAL for entire job)
  * OUT info - job allocation information including a credential for sbcast
  * RET 0 on success, otherwise return -1 and set errno to indicate the error
  * NOTE: free the "resp" using slurm_free_sbcast_cred_msg
  */
-int slurm_sbcast_lookup(uint32_t jobid, job_sbcast_cred_msg_t **info)
+int slurm_sbcast_lookup(uint32_t job_id, uint32_t step_id,
+			job_sbcast_cred_msg_t **info)
 {
-	job_alloc_info_msg_t req;
+	step_alloc_info_msg_t req;
 	slurm_msg_t req_msg;
 	slurm_msg_t resp_msg;
 
-	req.job_id = jobid;
+	req.job_id = job_id;
+	req.step_id = step_id;
 	slurm_msg_t_init(&req_msg);
 	slurm_msg_t_init(&resp_msg);
 	req_msg.msg_type = REQUEST_JOB_SBCAST_CRED;
@@ -723,7 +750,8 @@ static void _destroy_allocation_response_socket(listen_t *listen)
 static int
 _handle_msg(slurm_msg_t *msg, resource_allocation_response_msg_t **resp)
 {
-	uid_t req_uid   = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid   = g_slurm_auth_get_uid(msg->auth_cred,
+					       slurm_get_auth_info());
 	uid_t uid       = getuid();
 	uid_t slurm_uid = (uid_t) slurm_get_slurm_user_id();
 	int rc = 0;
@@ -782,20 +810,20 @@ _accept_msg_connection(int listen_fd,
 		slurm_free_msg(msg);
 
 		if (errno == EINTR) {
-			slurm_close_accepted_conn(conn_fd);
+			slurm_close(conn_fd);
 			*resp = NULL;
 			return 0;
 		}
 
 		error("_accept_msg_connection[%s]: %m", host);
-		slurm_close_accepted_conn(conn_fd);
+		slurm_close(conn_fd);
 		return SLURM_ERROR;
 	}
 
 	rc = _handle_msg(msg, resp); /* handle_msg frees msg */
 	slurm_free_msg(msg);
 
-	slurm_close_accepted_conn(conn_fd);
+	slurm_close(conn_fd);
 	return rc;
 }
 
diff --git a/src/api/allocate_msg.c b/src/api/allocate_msg.c
index 679b65d54..cdc88efbe 100644
--- a/src/api/allocate_msg.c
+++ b/src/api/allocate_msg.c
@@ -105,6 +105,7 @@ extern allocation_msg_thread_t *slurm_allocation_msg_thr_create(
 	struct allocation_msg_thread *msg_thr = NULL;
 	int cc;
 	uint16_t *ports;
+	uint16_t eio_timeout;
 
 	debug("Entering slurm_allocation_msg_thr_create()");
 
@@ -136,7 +137,8 @@ extern allocation_msg_thread_t *slurm_allocation_msg_thr_create(
 	debug("port from net_stream_listen is %hu", *port);
 	obj = eio_obj_create(sock, &message_socket_ops, (void *)msg_thr);
 
-	msg_thr->handle = eio_handle_create();
+	eio_timeout = slurm_get_srun_eio_timeout();
+	msg_thr->handle = eio_handle_create(eio_timeout);
 	if (!msg_thr->handle) {
 		error("failed to create eio handle");
 		xfree(msg_thr);
@@ -261,7 +263,8 @@ _handle_msg(void *arg, slurm_msg_t *msg)
 {
 	struct allocation_msg_thread *msg_thr =
 		(struct allocation_msg_thread *)arg;
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 	uid_t uid = getuid();
 
 	if ((req_uid != slurm_uid) && (req_uid != 0) && (req_uid != uid)) {
diff --git a/src/plugins/slurmctld/dynalloc/msg.h b/src/api/assoc_mgr_info.c
similarity index 60%
rename from src/plugins/slurmctld/dynalloc/msg.h
rename to src/api/assoc_mgr_info.c
index 089617593..bb6635ccf 100644
--- a/src/plugins/slurmctld/dynalloc/msg.h
+++ b/src/api/assoc_mgr_info.c
@@ -1,9 +1,11 @@
 /*****************************************************************************\
- *  msg.h - Message/communcation manager for dynalloc (resource dynamic allocation) plugin
+ *  assoc_mgr_info.c - get the current slurmctld assoc_mgr information
  *****************************************************************************
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Written by Jimmy Cao <Jimmy.Cao@emc.com>, Ralph Castain <rhc@open-mpi.org>
- *  All rights reserved.
+ *  Copyright (C) 2014 CSCS
+ *  Copyright (C) 2015 SchedMD LLC.
+ *  Produced at CSCS
+ *  Written by Stephen Trofinoff
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://slurm.schedmd.com/>.
@@ -35,58 +37,56 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#ifndef DYNALLOC_MSG_H_
-#define DYNALLOC_MSG_H_
-
-#if HAVE_CONFIG_H
+#ifdef HAVE_CONFIG_H
 #  include "config.h"
-#  if HAVE_INTTYPES_H
-#    include <inttypes.h>
-#  else
-#    if HAVE_STDINT_H
-#      include <stdint.h>
-#    endif
-#  endif  /* HAVE_INTTYPES_H */
-#else   /* !HAVE_CONFIG_H */
-#  include <inttypes.h>
-#endif  /*  HAVE_CONFIG_H */
+#endif
 
-#include <ctype.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <strings.h>
-#include <unistd.h>
-#include <sys/stat.h>
-#include <sys/types.h>
+#include <string.h>
 
+#include "slurm/slurm.h"
 #include "slurm/slurm_errno.h"
 
-#include "src/common/bitstring.h"
-#include "src/common/hostlist.h"
-#include "src/common/log.h"
-#include "src/common/parse_config.h"
-#include "src/common/read_config.h"
 #include "src/common/slurm_protocol_api.h"
-#include "src/common/slurm_protocol_interface.h"
-#include "src/common/uid.h"
-#include "src/common/xmalloc.h"
-#include "src/common/xsignal.h"
-#include "src/common/xstring.h"
-#include "src/slurmctld/slurmctld.h"
 
-/*
- * Spawn message hander thread
+/* slurm_load_assoc_mgr_info()
+ *
+ * Load requested controller assoc_mgr state.
+ *
  */
-extern int spawn_msg_thread(void);
+extern int
+slurm_load_assoc_mgr_info(assoc_mgr_info_request_msg_t *req,
+			  assoc_mgr_info_msg_t **resp)
+{
+	int cc;
+	slurm_msg_t msg_request;
+	slurm_msg_t msg_reply;
 
-/*
- * Terminate message hander thread
- */
-extern void	term_msg_thread(void);
+	slurm_msg_t_init(&msg_request);
+	slurm_msg_t_init(&msg_reply);
 
-/*
- * Send message
- */
-extern void	send_reply(slurm_fd_t new_fd, char *response);
+	msg_request.msg_type = REQUEST_ASSOC_MGR_INFO;
+	msg_request.data = req;
+
+	cc = slurm_send_recv_controller_msg(&msg_request, &msg_reply);
+	if (cc < 0)
+		return SLURM_ERROR;
+
+	switch (msg_reply.msg_type) {
+		case RESPONSE_ASSOC_MGR_INFO:
+			*resp = msg_reply.data;
+			break;
+		case RESPONSE_SLURM_RC:
+			cc = ((return_code_msg_t *)msg_reply.data)->return_code;
+			slurm_free_return_code_msg(msg_reply.data);
+			if (cc) /* slurm_seterrno_ret() is a macro ... sigh */
+				slurm_seterrno(cc);
+			return -1;
+		default:
+			slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
+		break;
+	}
 
-#endif /* DYNALLOC_MSG_H_ */
+	return SLURM_PROTOCOL_SUCCESS;
+}
diff --git a/src/api/block_info.c b/src/api/block_info.c
index e6b414c11..84b2e4f8c 100644
--- a/src/api/block_info.c
+++ b/src/api/block_info.c
@@ -125,10 +125,11 @@ char *slurm_sprint_block_info(
 
 	/****** Line 1 ******/
 	convert_num_unit((float)block_ptr->cnode_cnt, tmp1, sizeof(tmp1),
-			 UNIT_NONE);
+			 UNIT_NONE, CONVERT_NUM_UNIT_EXACT);
 	if (cluster_flags & CLUSTER_FLAG_BGQ) {
 		convert_num_unit((float)block_ptr->cnode_err_cnt, tmp2,
-				 sizeof(tmp2), UNIT_NONE);
+				 sizeof(tmp2), UNIT_NONE,
+				 CONVERT_NUM_UNIT_EXACT);
 		tmp_char = xstrdup_printf("%s/%s", tmp1, tmp2);
 	} else
 		tmp_char = tmp1;
diff --git a/src/api/burst_buffer_info.c b/src/api/burst_buffer_info.c
new file mode 100644
index 000000000..135cb5de2
--- /dev/null
+++ b/src/api/burst_buffer_info.c
@@ -0,0 +1,410 @@
+/*****************************************************************************\
+ *  burst_buffer_info.c - get/print the burst buffer state information
+ *****************************************************************************
+ *  Copyright (C) 2014-2015 SchedMD LLC.
+ *  Written by Morris Jette <jette@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#ifdef HAVE_SYS_SYSLOG_H
+#  include <sys/syslog.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syslog.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+
+#include "slurm/slurm.h"
+
+#include "src/common/parse_time.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/uid.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+
+/* Reformat a numeric value with an appropriate suffix.
+ * The input units are bytes */
+static void _get_size_str(char *buf, size_t buf_size, uint64_t num)
+{
+	uint64_t tmp64;
+
+	if ((num == NO_VAL64) || (num == INFINITE64)) {
+		snprintf(buf, buf_size, "INFINITE");
+	} else if (num == 0) {
+		snprintf(buf, buf_size, "0");
+	} else if ((num % ((uint64_t)1024 * 1024 * 1024 * 1024 * 1024)) == 0) {
+		tmp64 = num / ((uint64_t)1024 * 1024 * 1024 * 1024 * 1024);
+		snprintf(buf, buf_size, "%"PRIu64"P", tmp64);
+	} else if ((num % ((uint64_t)1024 * 1024 * 1024 * 1024)) == 0) {
+		tmp64 = num / ((uint64_t)1024 * 1024 * 1024 * 1024);
+		snprintf(buf, buf_size, "%"PRIu64"T", tmp64);
+	} else if ((num % ((uint64_t)1024 * 1024 * 1024)) == 0) {
+		tmp64 = num / ((uint64_t)1024 * 1024 * 1024);
+		snprintf(buf, buf_size, "%"PRIu64"G", tmp64);
+	} else if ((num % ((uint64_t)1024 * 1024)) == 0) {
+		tmp64 = num / ((uint64_t)1024 * 1024);
+		snprintf(buf, buf_size, "%"PRIu64"M", tmp64);
+	} else if ((num % 1024) == 0) {
+		tmp64 = num / 1024;
+		snprintf(buf, buf_size, "%"PRIu64"K", tmp64);
+	} else {
+		tmp64 = num;
+		snprintf(buf, buf_size, "%"PRIu64"", tmp64);
+	}
+}
+
+/*
+ * slurm_load_burst_buffer_info - issue RPC to get slurm all burst buffer plugin
+ *	information
+ * IN burst_buffer_info_msg_pptr - place to store a burst buffer configuration
+ *	pointer
+ * RET 0 or a slurm error code
+ * NOTE: free the response using slurm_free_burst_buffer_info_msg
+ */
+extern int slurm_load_burst_buffer_info(burst_buffer_info_msg_t **
+					burst_buffer_info_msg_pptr)
+{
+	int rc;
+	slurm_msg_t req_msg;
+	slurm_msg_t resp_msg;
+
+	slurm_msg_t_init(&req_msg);
+	slurm_msg_t_init(&resp_msg);
+	req_msg.msg_type = REQUEST_BURST_BUFFER_INFO;
+	req_msg.data     = NULL;
+
+	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
+		return SLURM_ERROR;
+
+	switch (resp_msg.msg_type) {
+	case RESPONSE_BURST_BUFFER_INFO:
+		*burst_buffer_info_msg_pptr = (burst_buffer_info_msg_t *)
+					      resp_msg.data;
+		break;
+	case RESPONSE_SLURM_RC:
+		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
+		slurm_free_return_code_msg(resp_msg.data);
+		if (rc)
+			slurm_seterrno_ret(rc);
+		*burst_buffer_info_msg_pptr = NULL;
+		break;
+	default:
+		slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
+		break;
+	}
+
+	return SLURM_PROTOCOL_SUCCESS;
+}
+
+/*
+ * slurm_print_burst_buffer_info_msg - output information about burst buffers
+ *	based upon message as loaded using slurm_load_burst_buffer
+ * IN out - file to write to
+ * IN info_ptr - burst_buffer information message pointer
+ * IN one_liner - print as a single line if true
+ * IN verbose - higher values to log additional details
+ */
+extern void slurm_print_burst_buffer_info_msg(FILE *out,
+		 burst_buffer_info_msg_t *info_ptr, int one_liner,
+		 int verbose)
+{
+	int i;
+	burst_buffer_info_t *burst_buffer_ptr;
+
+	if (info_ptr->record_count == 0) {
+		error("No burst buffer information available");
+		return;
+	}
+
+	for (i = 0, burst_buffer_ptr = info_ptr->burst_buffer_array;
+	     i < info_ptr->record_count; i++, burst_buffer_ptr++) {
+		slurm_print_burst_buffer_record(out, burst_buffer_ptr,
+						one_liner, verbose);
+	}
+}
+
+static void _print_burst_buffer_resv(FILE *out,
+				     burst_buffer_resv_t* burst_buffer_ptr,
+				     int one_liner, bool verbose)
+{
+	char sz_buf[32], time_buf[64], tmp_line[512];
+	char *out_buf = NULL;
+	int i;
+
+	/****** Line 1 ******/
+	if (burst_buffer_ptr->name) {
+		snprintf(tmp_line, sizeof(tmp_line),
+			"    Name=%s ", burst_buffer_ptr->name);
+	} else if (burst_buffer_ptr->array_task_id == NO_VAL) {
+		snprintf(tmp_line, sizeof(tmp_line),
+			"    JobID=%u ", burst_buffer_ptr->job_id);
+	} else {
+		snprintf(tmp_line, sizeof(tmp_line),
+			"    JobID=%u_%u(%u) ",
+			burst_buffer_ptr->array_job_id,
+		        burst_buffer_ptr->array_task_id,
+		        burst_buffer_ptr->job_id);
+	}
+	xstrcat(out_buf, tmp_line);
+	_get_size_str(sz_buf, sizeof(sz_buf), burst_buffer_ptr->size);
+	if (burst_buffer_ptr->create_time) {
+		slurm_make_time_str(&burst_buffer_ptr->create_time, time_buf,
+				    sizeof(time_buf));
+	} else {
+		time_t now = time(NULL);
+		slurm_make_time_str(&now, time_buf, sizeof(time_buf));
+	}
+	if (verbose) {
+		snprintf(tmp_line, sizeof(tmp_line),
+			 "Account=%s CreateTime=%s Partition=%s QOS=%s "
+			 "Size=%s State=%s UserID=%s(%u)",
+			 burst_buffer_ptr->account,  time_buf,
+			 burst_buffer_ptr->partition, burst_buffer_ptr->qos,
+			 sz_buf, bb_state_string(burst_buffer_ptr->state),
+			 uid_to_string(burst_buffer_ptr->user_id),
+			 burst_buffer_ptr->user_id);
+	} else {
+		snprintf(tmp_line, sizeof(tmp_line),
+			 "CreateTime=%s Size=%s State=%s UserID=%s(%u)",
+			 time_buf, sz_buf,
+			 bb_state_string(burst_buffer_ptr->state),
+			 uid_to_string(burst_buffer_ptr->user_id),
+			 burst_buffer_ptr->user_id);
+	}
+	xstrcat(out_buf, tmp_line);
+
+	/* Gres includes "nodes" on Cray systems */
+	for (i = 0; i < burst_buffer_ptr->gres_cnt; i++) {
+		if (i == 0)
+			xstrcat(out_buf, " Gres=");
+		else
+			xstrcat(out_buf, ",");
+		_get_size_str(sz_buf, sizeof(sz_buf),
+			      burst_buffer_ptr->gres_ptr[i].used_cnt);
+		snprintf(tmp_line, sizeof(tmp_line), "%s:%s",
+			 burst_buffer_ptr->gres_ptr[i].name, sz_buf);
+		xstrcat(out_buf, tmp_line);
+	}
+
+	xstrcat(out_buf, "\n");
+	fprintf(out, "%s", out_buf);
+	xfree(out_buf);
+}
+
+static void _print_burst_buffer_use(FILE *out,
+				    burst_buffer_use_t* usage_ptr,
+				    int one_liner)
+{
+	char tmp_line[512], sz_buf[32];
+	char *out_buf = NULL;
+
+	_get_size_str(sz_buf, sizeof(sz_buf), usage_ptr->used);
+	snprintf(tmp_line, sizeof(tmp_line),
+		 "    UserID=%s(%u) Used=%s",
+	         uid_to_string(usage_ptr->user_id), usage_ptr->user_id, sz_buf);
+
+	xstrcat(out_buf, tmp_line);
+	xstrcat(out_buf, "\n");
+	fprintf(out, "%s", out_buf);
+	xfree(out_buf);
+}
+
+/*
+ * slurm_print_burst_buffer_record - output information about a specific Slurm
+ *	burst_buffer record based upon message as loaded using
+ *	slurm_load_burst_buffer_info()
+ * IN out - file to write to
+ * IN burst_buffer_ptr - an individual burst buffer record pointer
+ * IN one_liner - print as a single line if not zero
+ * IN verbose - higher values to log additional details
+ * RET out - char * containing formatted output (must be freed after call)
+ *	   NULL is returned on failure.
+ */
+extern void slurm_print_burst_buffer_record(FILE *out,
+		burst_buffer_info_t *burst_buffer_ptr, int one_liner,
+		int verbose)
+{
+	char tmp_line[512];
+	char g_sz_buf[32],t_sz_buf[32], u_sz_buf[32];
+	char *out_buf = NULL;
+	burst_buffer_resv_t *bb_resv_ptr;
+	burst_buffer_use_t  *bb_use_ptr;
+	bool has_acl = false;
+	int i;
+
+	/****** Line ******/
+	_get_size_str(g_sz_buf, sizeof(t_sz_buf),
+		      burst_buffer_ptr->granularity);
+	_get_size_str(t_sz_buf, sizeof(t_sz_buf),
+		      burst_buffer_ptr->total_space);
+	_get_size_str(u_sz_buf, sizeof(u_sz_buf),
+		      burst_buffer_ptr->used_space);
+	snprintf(tmp_line, sizeof(tmp_line),
+		 "Name=%s DefaultPool=%s Granularity=%s TotalSpace=%s "
+		 "UsedSpace=%s",
+		 burst_buffer_ptr->name, burst_buffer_ptr->default_pool,
+		 g_sz_buf, t_sz_buf, u_sz_buf);
+	xstrcat(out_buf, tmp_line);
+	if (!one_liner)
+		xstrcat(out_buf, "\n");
+
+	/****** Line (optional) ******/
+	/* Gres includes "nodes" on Cray systems */
+	for (i = 0; i < burst_buffer_ptr->gres_cnt; i++) {
+		_get_size_str(t_sz_buf, sizeof(t_sz_buf),
+			      burst_buffer_ptr->gres_ptr[i].avail_cnt);
+		_get_size_str(u_sz_buf, sizeof(u_sz_buf),
+			      burst_buffer_ptr->gres_ptr[i].used_cnt);
+		snprintf(tmp_line, sizeof(tmp_line),
+			 "  Gres[%d] Name=%s AvailCount=%s UsedCount=%s",
+			 i, burst_buffer_ptr->gres_ptr[i].name,
+			 t_sz_buf, u_sz_buf);
+		xstrcat(out_buf, tmp_line);
+		if (!one_liner)
+			xstrcat(out_buf, "\n");
+	}
+
+	/****** Line ******/
+	snprintf(tmp_line, sizeof(tmp_line),
+		"  StageInTimeout=%u StageOutTimeout=%u Flags=%s",
+		burst_buffer_ptr->stage_in_timeout,
+		burst_buffer_ptr->stage_out_timeout,
+		slurm_bb_flags2str(burst_buffer_ptr->flags));
+	xstrcat(out_buf, tmp_line);
+	if (!one_liner)
+		xstrcat(out_buf, "\n");
+
+	/****** Line (optional) ******/
+	if (burst_buffer_ptr->allow_users) {
+		snprintf(tmp_line, sizeof(tmp_line),
+			"  AllowUsers=%s", burst_buffer_ptr->allow_users);
+		xstrcat(out_buf, tmp_line);
+		has_acl = true;
+	} else if (burst_buffer_ptr->deny_users) {
+		snprintf(tmp_line, sizeof(tmp_line),
+			"  DenyUsers=%s", burst_buffer_ptr->deny_users);
+		xstrcat(out_buf, tmp_line);
+		has_acl = true;
+	}
+	if (has_acl && !one_liner)
+		xstrcat(out_buf, "\n");
+
+	/****** Line (optional) ******/
+	if (burst_buffer_ptr->create_buffer) {
+		snprintf(tmp_line, sizeof(tmp_line),
+			"  CreateBuffer=%s", burst_buffer_ptr->create_buffer);
+		xstrcat(out_buf, tmp_line);
+		if (!one_liner)
+			xstrcat(out_buf, "\n");
+	}
+
+	/****** Line (optional) ******/
+	if (burst_buffer_ptr->destroy_buffer) {
+		snprintf(tmp_line, sizeof(tmp_line),
+			"  DestroyBuffer=%s", burst_buffer_ptr->destroy_buffer);
+		xstrcat(out_buf, tmp_line);
+		if (!one_liner)
+			xstrcat(out_buf, "\n");
+	}
+
+	/****** Line ******/
+	snprintf(tmp_line, sizeof(tmp_line),
+		"  GetSysState=%s", burst_buffer_ptr->get_sys_state);
+	xstrcat(out_buf, tmp_line);
+	if (!one_liner)
+		xstrcat(out_buf, "\n");
+
+	/****** Line (optional) ******/
+	if (burst_buffer_ptr->start_stage_in) {
+		snprintf(tmp_line, sizeof(tmp_line),
+			"  StartStageIn=%s", burst_buffer_ptr->start_stage_in);
+		xstrcat(out_buf, tmp_line);
+		if (!one_liner)
+			xstrcat(out_buf, "\n");
+	}
+
+	/****** Line ******/
+	if (burst_buffer_ptr->start_stage_out) {
+		snprintf(tmp_line, sizeof(tmp_line),
+			"  StartStageIn=%s", burst_buffer_ptr->start_stage_out);
+		xstrcat(out_buf, tmp_line);
+		if (!one_liner)
+			xstrcat(out_buf, "\n");
+	}
+
+	/****** Line (optional) ******/
+	if (burst_buffer_ptr->stop_stage_in) {
+		snprintf(tmp_line, sizeof(tmp_line),
+			"  StopStageIn=%s", burst_buffer_ptr->stop_stage_in);
+		xstrcat(out_buf, tmp_line);
+		if (!one_liner)
+			xstrcat(out_buf, "\n");
+	}
+
+	/****** Line (optional) ******/
+	if (burst_buffer_ptr->stop_stage_out) {
+		snprintf(tmp_line, sizeof(tmp_line),
+			"  StopStageIn=%s", burst_buffer_ptr->stop_stage_out);
+		xstrcat(out_buf, tmp_line);
+		if (!one_liner)
+			xstrcat(out_buf, "\n");
+	}
+
+	if (one_liner)
+		xstrcat(out_buf, "\n");
+	fprintf(out, "%s", out_buf);
+	xfree(out_buf);
+
+	/****** Lines (optional) ******/
+	if (burst_buffer_ptr->buffer_count)
+		fprintf(out, "  Allocated Buffers:\n");
+	for (i = 0, bb_resv_ptr = burst_buffer_ptr->burst_buffer_resv_ptr;
+	     i < burst_buffer_ptr->buffer_count; i++, bb_resv_ptr++) {
+		 _print_burst_buffer_resv(out, bb_resv_ptr, one_liner, verbose);
+	}
+
+	/****** Lines (optional) ******/
+	if (burst_buffer_ptr->use_count)
+		fprintf(out, "  Per User Buffer Use:\n");
+	for (i = 0, bb_use_ptr = burst_buffer_ptr->burst_buffer_use_ptr;
+	     i < burst_buffer_ptr->use_count; i++, bb_use_ptr++) {
+		 _print_burst_buffer_use(out, bb_use_ptr, one_liner);
+	}
+}
diff --git a/src/api/cancel.c b/src/api/cancel.c
index 85795e4bf..4549088cf 100644
--- a/src/api/cancel.c
+++ b/src/api/cancel.c
@@ -112,7 +112,7 @@ slurm_kill_job_step (uint32_t job_id, uint32_t step_id, uint16_t signal)
 	req.signal      = signal;
 	req.flags	= 0;
 	msg.msg_type    = REQUEST_CANCEL_JOB_STEP;
-        msg.data        = &req;
+	msg.data        = &req;
 
 	if (slurm_send_recv_controller_rc_msg(&msg, &rc) < 0)
 		return SLURM_FAILURE;
@@ -126,7 +126,7 @@ slurm_kill_job_step (uint32_t job_id, uint32_t step_id, uint16_t signal)
 /* slurm_kill_job2()
  */
 int
-slurm_kill_job2(const char *job_id, uint16_t signal, uint16_t batch_flag)
+slurm_kill_job2(const char *job_id, uint16_t signal, uint16_t flags)
 {
 	int cc;
 	slurm_msg_t msg;
@@ -144,7 +144,7 @@ slurm_kill_job2(const char *job_id, uint16_t signal, uint16_t batch_flag)
 	req.sjob_id     = xstrdup(job_id);
 	req.job_step_id = NO_VAL;
 	req.signal      = signal;
-	req.flags	= 0;
+	req.flags	= flags;
 	msg.msg_type    = REQUEST_KILL_JOB;
         msg.data        = &req;
 
diff --git a/src/api/config_info.c b/src/api/config_info.c
index 9fd10f51d..dc44a73c4 100644
--- a/src/api/config_info.c
+++ b/src/api/config_info.c
@@ -140,7 +140,7 @@ void slurm_write_ctl_conf ( slurm_ctl_conf_info_msg_t * slurm_ctl_conf_ptr,
 	debug("Writing slurm.conf file: %s", path);
 
 	if ( ( fp = fopen(path, "w") ) == NULL ) {
-		fprintf(stderr, "Could not create file %s: %s\n", path, 
+		fprintf(stderr, "Could not create file %s: %s\n", path,
 			strerror(errno));
 		xfree(path);
 		return;
@@ -157,7 +157,7 @@ void slurm_write_ctl_conf ( slurm_ctl_conf_info_msg_t * slurm_ctl_conf_ptr,
 	ret_list = slurm_ctl_conf_2_key_pairs(slurm_ctl_conf_ptr);
 	if (ret_list) {
 		_write_key_pairs(fp, ret_list);
-		list_destroy((List)ret_list);
+		FREE_NULL_LIST(ret_list);
 	}
 
 	_write_group_header (fp, "NODES");
@@ -310,7 +310,7 @@ void slurm_print_ctl_conf ( FILE* out,
 	ret_list = slurm_ctl_conf_2_key_pairs(slurm_ctl_conf_ptr);
 	if (ret_list) {
 		slurm_print_key_pairs(out, ret_list, tmp_str);
-		list_destroy((List)ret_list);
+		FREE_NULL_LIST(ret_list);
 	}
 
 	slurm_print_key_pairs(out, slurm_ctl_conf_ptr->acct_gather_conf,
@@ -367,6 +367,11 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(tmp_str);
 	list_append(ret_list, key_pair);
 
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("AccountingStorageTRES");
+	key_pair->value = xstrdup(slurm_ctl_conf_ptr->accounting_storage_tres);
+	list_append(ret_list, key_pair);
+
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("AccountingStorageType");
 	key_pair->value = xstrdup(slurm_ctl_conf_ptr->accounting_storage_type);
@@ -379,10 +384,8 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("AccountingStoreJobComment");
-	if (slurm_ctl_conf_ptr->acctng_store_job_comment)
-		key_pair->value = xstrdup("YES");
-	else
-		key_pair->value = xstrdup("NO");
+	key_pair->value = xstrdup(
+		slurm_ctl_conf_ptr->acctng_store_job_comment ? "Yes" : "No");
 	list_append(ret_list, key_pair);
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
@@ -414,11 +417,10 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(slurm_ctl_conf_ptr->acct_gather_profile_type);
 	list_append(ret_list, key_pair);
 
-	snprintf(tmp_str, sizeof(tmp_str), "%u",
-		 slurm_ctl_conf_ptr->use_spec_resources);
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("AllowSpecResourcesUsage");
-	key_pair->value = xstrdup(tmp_str);
+	key_pair->value = xstrdup_printf(
+		"%u", slurm_ctl_conf_ptr->use_spec_resources);
 	list_append(ret_list, key_pair);
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
@@ -455,6 +457,11 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(tmp_str);
 	list_append(ret_list, key_pair);
 
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("BurstBufferType");
+	key_pair->value = xstrdup(slurm_ctl_conf_ptr->bb_type);
+	list_append(ret_list, key_pair);
+
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("CacheGroups");
 	if (slurm_ctl_conf_ptr->group_info & GROUP_CACHE)
@@ -507,6 +514,13 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(tmp_str);
 	list_append(ret_list, key_pair);
 
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("CpuFreqGovernors");
+	cpu_freq_govlist_to_string(tmp_str, sizeof(tmp_str),
+			   slurm_ctl_conf_ptr->cpu_freq_govs);
+	key_pair->value = xstrdup(tmp_str);
+	list_append(ret_list, key_pair);
+
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("CryptoType");
 	key_pair->value = xstrdup(slurm_ctl_conf_ptr->crypto_type);
@@ -541,25 +555,21 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	list_append(ret_list, key_pair);
 	key_pair->name = xstrdup("DisableRootJobs");
-	if (slurm_ctl_conf_ptr->disable_root_jobs)
-		key_pair->value = xstrdup("YES");
-	else
-		key_pair->value = xstrdup("NO");
+	key_pair->value = xstrdup(
+		slurm_ctl_conf_ptr->disable_root_jobs ? "Yes" : "No");
 
-	snprintf(tmp_str, sizeof(tmp_str), "%u",
-		 slurm_ctl_conf_ptr->dynalloc_port);
 	key_pair = xmalloc(sizeof(config_key_pair_t));
-	key_pair->name = xstrdup("DynAllocPort");
+	key_pair->name = xstrdup("EioTimeout");
+	snprintf(tmp_str, sizeof(tmp_str), "%u",
+		 slurm_ctl_conf_ptr->eio_timeout);
 	key_pair->value = xstrdup(tmp_str);
 	list_append(ret_list, key_pair);
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	list_append(ret_list, key_pair);
 	key_pair->name = xstrdup("EnforcePartLimits");
-	if (slurm_ctl_conf_ptr->enforce_part_limits)
-		key_pair->value = xstrdup("YES");
-	else
-		key_pair->value = xstrdup("NO");
+	key_pair->value = xstrdup(
+		slurm_ctl_conf_ptr->enforce_part_limits ? "Yes" : "No");
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("Epilog");
@@ -674,13 +684,6 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(slurm_ctl_conf_ptr->health_check_program);
 	list_append(ret_list, key_pair);
 
-	if (cluster_flags & CLUSTER_FLAG_XCPU) {
-		key_pair = xmalloc(sizeof(config_key_pair_t));
-		key_pair->name = xstrdup("HAVE_XCPU");
-		key_pair->value = xstrdup("1");
-		list_append(ret_list, key_pair);
-	}
-
 	snprintf(tmp_str, sizeof(tmp_str), "%u sec",
 		 slurm_ctl_conf_ptr->inactive_limit);
 	key_pair = xmalloc(sizeof(config_key_pair_t));
@@ -759,11 +762,10 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(tmp_str);
 	list_append(ret_list, key_pair);
 
-	snprintf(tmp_str, sizeof(tmp_str), "%u",
-		 slurm_ctl_conf_ptr->job_requeue);
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("JobRequeue");
-	key_pair->value = xstrdup(tmp_str);
+	key_pair->value = xstrdup_printf(
+		"%u", slurm_ctl_conf_ptr->job_requeue);
 	list_append(ret_list, key_pair);
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
@@ -786,7 +788,8 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 		 slurm_ctl_conf_ptr->kill_on_bad_exit);
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("KillOnBadExit");
-	key_pair->value = xstrdup(tmp_str);
+	key_pair->value = xstrdup_printf(
+		"%u", slurm_ctl_conf_ptr->kill_on_bad_exit);
 	list_append(ret_list, key_pair);
 
 	snprintf(tmp_str, sizeof(tmp_str), "%u sec",
@@ -796,6 +799,11 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(tmp_str);
 	list_append(ret_list, key_pair);
 
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("LaunchParameters");
+	key_pair->value = xstrdup(slurm_ctl_conf_ptr->launch_params);
+	list_append(ret_list, key_pair);
+
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("LaunchType");
 	key_pair->value = xstrdup(slurm_ctl_conf_ptr->launch_type);
@@ -879,10 +887,8 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("MemLimitEnforce");
-	if (slurm_ctl_conf_ptr->mem_limit_enforce)
-		key_pair->value = xstrdup("yes");
-	else
-		key_pair->value = xstrdup("no");
+	key_pair->value = xstrdup(
+		slurm_ctl_conf_ptr->mem_limit_enforce ? "Yes" : "No");
 	list_append(ret_list, key_pair);
 
 	snprintf(tmp_str, sizeof(tmp_str), "%u sec",
@@ -909,10 +915,15 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(slurm_ctl_conf_ptr->mpi_params);
 	list_append(ret_list, key_pair);
 
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("MsgAggregationParams");
+	key_pair->value = xstrdup(slurm_ctl_conf_ptr->msg_aggr_params);
+	list_append(ret_list, key_pair);
+
 	if (cluster_flags & CLUSTER_FLAG_MULTSD) {
 		key_pair = xmalloc(sizeof(config_key_pair_t));
 		key_pair->name = xstrdup("MULTIPLE_SLURMD");
-		key_pair->value = xstrdup("1");
+		key_pair->value = xstrdup("Yes");
 		list_append(ret_list, key_pair);
 	}
 
@@ -943,6 +954,16 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(slurm_ctl_conf_ptr->plugstack);
 	list_append(ret_list, key_pair);
 
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("PowerParameters");
+	key_pair->value = xstrdup(slurm_ctl_conf_ptr->power_parameters);
+	list_append(ret_list, key_pair);
+
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("PowerPlugin");
+	key_pair->value = xstrdup(slurm_ctl_conf_ptr->power_plugin);
+	list_append(ret_list, key_pair);
+
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("PreemptMode");
 	key_pair->value = xstrdup(preempt_mode_string(slurm_ctl_conf_ptr->
@@ -979,11 +1000,11 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 		key_pair->value = xstrdup(tmp_str);
 		list_append(ret_list, key_pair);
 
-		snprintf(tmp_str, sizeof(tmp_str), "%u",
-			 slurm_ctl_conf_ptr->priority_favor_small);
 		key_pair = xmalloc(sizeof(config_key_pair_t));
 		key_pair->name = xstrdup("PriorityFavorSmall");
-		key_pair->value = xstrdup(tmp_str);
+		key_pair->value = xstrdup(
+			slurm_ctl_conf_ptr->priority_favor_small ?
+			"Yes" : "No");
 		list_append(ret_list, key_pair);
 
 		key_pair = xmalloc(sizeof(config_key_pair_t));
@@ -1045,6 +1066,12 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 		key_pair->name = xstrdup("PriorityWeightQOS");
 		key_pair->value = xstrdup(tmp_str);
 		list_append(ret_list, key_pair);
+
+		key_pair = xmalloc(sizeof(config_key_pair_t));
+		key_pair->name = xstrdup("PriorityWeightTRES");
+		key_pair->value =
+			xstrdup(slurm_ctl_conf_ptr->priority_weight_tres);
+		list_append(ret_list, key_pair);
 	}
 
 
@@ -1065,6 +1092,13 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(slurm_ctl_conf_ptr->prolog);
 	list_append(ret_list, key_pair);
 
+	snprintf(tmp_str, sizeof(tmp_str), "%u",
+		 slurm_ctl_conf_ptr->prolog_epilog_timeout);
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("PrologEpilogTimeout");
+	key_pair->value = xstrdup(tmp_str);
+	list_append(ret_list, key_pair);
+
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("PrologSlurmctld");
 	key_pair->value = xstrdup(slurm_ctl_conf_ptr->prolog_slurmctld);
@@ -1182,11 +1216,10 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(tmp_str);
 	list_append(ret_list, key_pair);
 
-	snprintf(tmp_str, sizeof(tmp_str), "%u",
-		 slurm_ctl_conf_ptr->schedrootfltr);
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("SchedulerRootFilter");
-	key_pair->value = xstrdup(tmp_str);
+	key_pair->value = xstrdup_printf(
+		"%u", slurm_ctl_conf_ptr->schedrootfltr);
 	list_append(ret_list, key_pair);
 
 	snprintf(tmp_str, sizeof(tmp_str), "%u sec",
@@ -1387,18 +1420,23 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(tmp_str);
 	list_append(ret_list, key_pair);
 
-	if (!slurm_ctl_conf_ptr->suspend_time)
+	if (slurm_ctl_conf_ptr->suspend_time == 0) {
 		snprintf(tmp_str, sizeof(tmp_str), "NONE");
-	else
+	} else {
 		snprintf(tmp_str, sizeof(tmp_str), "%d sec",
 			 ((int)slurm_ctl_conf_ptr->suspend_time - 1));
+	}
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("SuspendTime");
 	key_pair->value = xstrdup(tmp_str);
 	list_append(ret_list, key_pair);
 
-	snprintf(tmp_str, sizeof(tmp_str), "%u sec",
-		 slurm_ctl_conf_ptr->suspend_timeout);
+	if (slurm_ctl_conf_ptr->suspend_timeout == 0) {
+		snprintf(tmp_str, sizeof(tmp_str), "NONE");
+	} else {
+		snprintf(tmp_str, sizeof(tmp_str), "%u sec",
+			 slurm_ctl_conf_ptr->suspend_timeout);
+	}
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("SuspendTimeout");
 	key_pair->value = xstrdup(tmp_str);
@@ -1436,16 +1474,20 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(slurm_ctl_conf_ptr->tmp_fs);
 	list_append(ret_list, key_pair);
 
+	key_pair = xmalloc(sizeof(config_key_pair_t));
+	key_pair->name = xstrdup("TopologyParam");
+	key_pair->value = xstrdup(slurm_ctl_conf_ptr->topology_param);
+	list_append(ret_list, key_pair);
+
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("TopologyPlugin");
 	key_pair->value = xstrdup(slurm_ctl_conf_ptr->topology_plugin);
 	list_append(ret_list, key_pair);
 
-	snprintf(tmp_str, sizeof(tmp_str), "%u",
-		 slurm_ctl_conf_ptr->track_wckey);
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("TrackWCKey");
-	key_pair->value = xstrdup(tmp_str);
+	key_pair->value = xstrdup(
+		slurm_ctl_conf_ptr->track_wckey ? "Yes" : "No");
 	list_append(ret_list, key_pair);
 
 	snprintf(tmp_str, sizeof(tmp_str), "%u",
@@ -1455,11 +1497,9 @@ extern void *slurm_ctl_conf_2_key_pairs (slurm_ctl_conf_t* slurm_ctl_conf_ptr)
 	key_pair->value = xstrdup(tmp_str);
 	list_append(ret_list, key_pair);
 
-	snprintf(tmp_str, sizeof(tmp_str), "%u",
-		 slurm_ctl_conf_ptr->use_pam);
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("UsePam");
-	key_pair->value = xstrdup(tmp_str);
+	key_pair->value = xstrdup_printf("%u", slurm_ctl_conf_ptr->use_pam);
 	list_append(ret_list, key_pair);
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
@@ -1710,18 +1750,13 @@ static void _write_key_pairs(FILE* out, void *key_pairs)
 			continue;
 		}
 
-		/* Certain values need to be changed to prevent
-		 * errors in reading/parsing the conf file */
-		if (!strcasecmp(key_pair->name, "SuspendTime") &&
-		    !strcasecmp(key_pair->value, "NONE"))
-			strcpy(key_pair->value, "0");
-
 		/* Comment out certain key_pairs */
 		/* - TaskPluginParam=(null type) is not a NULL but
 		 * it does imply no value */
 		if ((key_pair->value == NULL) ||
 		    (strlen(key_pair->value) == 0) ||
 		    !strcasecmp(key_pair->value, "(null type)") ||
+		    !strcasecmp(key_pair->value, "(null)") ||
 		    !strcasecmp(key_pair->value, "N/A") ||
 		    (!strcasecmp(key_pair->name, "KeepAliveTime") &&
 		     !strcasecmp(key_pair->value, "SYSTEM_DEFAULT")) ||
@@ -1733,9 +1768,11 @@ static void _write_key_pairs(FILE* out, void *key_pairs)
 			      key_pair->name,
 			      key_pair->value);
 		} else {
-			key_pair->value = strtok(key_pair->value, " (");
+		/* Only write out the value itself. Use strtok to strip
+		 * any trailing units (e.g. "60 sec" becomes "60") */
+			temp = strtok(key_pair->value, " (");
 			temp = xstrdup_printf("%s=%s",
-					      key_pair->name, key_pair->value);
+					      key_pair->name, temp);
 		}
 
 		if (!strcasecmp(key_pair->name, "ControlMachine") ||
@@ -1869,84 +1906,84 @@ static void _write_key_pairs(FILE* out, void *key_pairs)
 	while ((temp = list_next(iter)))
 		fprintf(out, "%s\n", temp);
 	list_iterator_destroy(iter);
-	list_destroy((List)control_list);
+	FREE_NULL_LIST(control_list);
 
 	_write_group_header (out, "LOGGING & OTHER PATHS");
 	iter = list_iterator_create(logging_list);
 	while ((temp = list_next(iter)))
 		fprintf(out, "%s\n", temp);
 	list_iterator_destroy(iter);
-	list_destroy((List)logging_list);
+	FREE_NULL_LIST(logging_list);
 
 	_write_group_header (out, "ACCOUNTING");
 	iter = list_iterator_create(accounting_list);
-	while((temp = list_next(iter)))
+	while ((temp = list_next(iter)))
 		fprintf(out, "%s\n", temp);
 	list_iterator_destroy(iter);
-	list_destroy((List)accounting_list);
+	FREE_NULL_LIST(accounting_list);
 
 	_write_group_header (out, "SCHEDULING & ALLOCATION");
 	iter = list_iterator_create(sched_list);
 	while ((temp = list_next(iter)))
 		fprintf(out, "%s\n", temp);
 	list_iterator_destroy(iter);
-	list_destroy((List)sched_list);
+	FREE_NULL_LIST(sched_list);
 
 	_write_group_header (out, "TOPOLOGY");
 	iter = list_iterator_create(topology_list);
-	while((temp = list_next(iter)))
+	while ((temp = list_next(iter)))
 		fprintf(out, "%s\n", temp);
 	list_iterator_destroy(iter);
-	list_destroy((List)topology_list);
+	FREE_NULL_LIST(topology_list);
 
 	_write_group_header (out, "TIMERS");
 	iter = list_iterator_create(timers_list);
 	while ((temp = list_next(iter)))
 		fprintf(out, "%s\n", temp);
 	list_iterator_destroy(iter);
-	list_destroy((List)timers_list);
+	FREE_NULL_LIST(timers_list);
 
 	_write_group_header (out, "POWER");
 	iter = list_iterator_create(power_list);
-	while((temp = list_next(iter)))
+	while ((temp = list_next(iter)))
 		fprintf(out, "%s\n", temp);
 	list_iterator_destroy(iter);
-	list_destroy((List)power_list);
+	FREE_NULL_LIST(power_list);
 
 	_write_group_header (out, "DEBUG");
 	iter = list_iterator_create(debug_list);
 	while ((temp = list_next(iter)))
 		fprintf(out, "%s\n", temp);
 	list_iterator_destroy(iter);
-	list_destroy((List)debug_list);
+	FREE_NULL_LIST(debug_list);
 
 	_write_group_header (out, "EPILOG & PROLOG");
 	iter = list_iterator_create(proepilog_list);
 	while ((temp = list_next(iter)))
 		fprintf(out, "%s\n", temp);
 	list_iterator_destroy(iter);
-	list_destroy((List)proepilog_list);
+	FREE_NULL_LIST(proepilog_list);
 
 	_write_group_header (out, "PROCESS TRACKING");
 	iter = list_iterator_create(proctrac_list);
 	while ((temp = list_next(iter)))
 		fprintf(out, "%s\n", temp);
 	list_iterator_destroy(iter);
-	list_destroy((List)proctrac_list);
+	FREE_NULL_LIST(proctrac_list);
 
 	_write_group_header (out, "RESOURCE CONFINEMENT");
 	iter = list_iterator_create(resconf_list);
 	while ((temp = list_next(iter)))
 		fprintf(out, "%s\n", temp);
 	list_iterator_destroy(iter);
-	list_destroy((List)resconf_list);
+	FREE_NULL_LIST(resconf_list);
 
 	_write_group_header (out, "OTHER");
 	iter = list_iterator_create(other_list);
 	while ((temp = list_next(iter)))
 		fprintf(out, "%s\n", temp);
 	list_iterator_destroy(iter);
-	list_destroy((List)other_list);
+	FREE_NULL_LIST(other_list);
 
 }
 
@@ -1992,7 +2029,7 @@ static void _write_group_header(FILE* out, char * header)
 	left = ((comlen - hdrlen) / 2) - 1;
 	right = left;
 	if ((comlen - hdrlen) % 2)
-		right++; 
+		right++;
 
 	fprintf(out, "#\n");
 	for (i = 0; i < comlen; i++)
diff --git a/src/api/init_msg.c b/src/api/init_msg.c
index 4d47f6061..69eaeea05 100644
--- a/src/api/init_msg.c
+++ b/src/api/init_msg.c
@@ -64,6 +64,9 @@ void slurm_init_job_desc_msg(job_desc_msg_t * job_desc_msg)
 	job_desc_msg->core_spec		= (uint16_t) NO_VAL;
 	job_desc_msg->cores_per_socket	= (uint16_t) NO_VAL;
 	job_desc_msg->cpu_bind_type	= (uint16_t) NO_VAL;
+	job_desc_msg->cpu_freq_min	= NO_VAL;
+	job_desc_msg->cpu_freq_max	= NO_VAL;
+	job_desc_msg->cpu_freq_gov	= NO_VAL;
 	job_desc_msg->cpus_per_task	= (uint16_t) NO_VAL;
 	job_desc_msg->geometry[0]       = (uint16_t) NO_VAL;
 	job_desc_msg->group_id		= NO_VAL;
@@ -91,7 +94,7 @@ void slurm_init_job_desc_msg(job_desc_msg_t * job_desc_msg)
 	job_desc_msg->rotate		= (uint16_t) NO_VAL;
 	job_desc_msg->shared		= (uint16_t) NO_VAL;
 	job_desc_msg->sockets_per_node	= (uint16_t) NO_VAL;
-	job_desc_msg->task_dist		= (uint16_t) NO_VAL;
+	job_desc_msg->task_dist		= NO_VAL;
 	job_desc_msg->threads_per_core	= (uint16_t) NO_VAL;
 	job_desc_msg->time_limit	= NO_VAL;
 	job_desc_msg->time_min		= NO_VAL;
@@ -147,6 +150,7 @@ void slurm_init_resv_desc_msg (resv_desc_msg_t * resv_msg)
 	resv_msg->end_time	= (time_t) NO_VAL;
 	resv_msg->flags		= NO_VAL;
 	resv_msg->start_time	= (time_t) NO_VAL;
+	resv_msg->resv_watts	= NO_VAL;
 }
 
 /*
diff --git a/src/api/job_info.c b/src/api/job_info.c
index 97448f3a2..6a64b639b 100644
--- a/src/api/job_info.c
+++ b/src/api/job_info.c
@@ -3,6 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2010-2014 SchedMD <http://www.schedmd.com>.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -55,6 +56,7 @@
 #include "slurm/slurm.h"
 #include "slurm/slurm_errno.h"
 
+#include "src/common/cpu_frequency.h"
 #include "src/common/forward.h"
 #include "src/common/macros.h"
 #include "src/common/node_select.h"
@@ -282,7 +284,8 @@ static void _sprint_range(char *str, uint32_t str_size,
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 
 	if (cluster_flags & CLUSTER_FLAG_BG) {
-		convert_num_unit((float)lower, tmp, sizeof(tmp), UNIT_NONE);
+		convert_num_unit((float)lower, tmp, sizeof(tmp), UNIT_NONE,
+			CONVERT_NUM_UNIT_EXACT);
 	} else {
 		snprintf(tmp, sizeof(tmp), "%u", lower);
 	}
@@ -290,7 +293,8 @@ static void _sprint_range(char *str, uint32_t str_size,
     		char tmp2[128];
 		if (cluster_flags & CLUSTER_FLAG_BG) {
 			convert_num_unit((float)upper, tmp2,
-					 sizeof(tmp2), UNIT_NONE);
+					 sizeof(tmp2), UNIT_NONE,
+					 CONVERT_NUM_UNIT_EXACT);
 		} else {
 			snprintf(tmp2, sizeof(tmp2), "%u", upper);
 		}
@@ -331,7 +335,7 @@ extern char *
 slurm_sprint_job_info ( job_info_t * job_ptr, int one_liner )
 {
 	int i, j, k;
-	char time_str[32], *group_name, *user_name;
+	char time_str[32], *group_name, *spec_name, *user_name;
 	char tmp1[128], tmp2[128], tmp3[128], tmp4[128], tmp5[128], tmp6[128];
 	char *tmp6_ptr;
 	char tmp_line[1024];
@@ -651,15 +655,6 @@ line6:
 			max_nodes = min_nodes;
 	} else if (IS_JOB_PENDING(job_ptr)) {
 		min_nodes = job_ptr->num_nodes;
-		if ((min_nodes == 1) && (job_ptr->num_cpus > 1)
-		    && job_ptr->ntasks_per_node
-		    && (job_ptr->ntasks_per_node != (uint16_t) NO_VAL)) {
-			int node_cnt2 = job_ptr->num_cpus;
-			node_cnt2 = (node_cnt2 + job_ptr->ntasks_per_node - 1)
-				    / job_ptr->ntasks_per_node;
-			if (min_nodes < node_cnt2)
-				min_nodes = node_cnt2;
-		}
 		max_nodes = job_ptr->max_nodes;
 		if (max_nodes && (max_nodes < min_nodes))
 			min_nodes = max_nodes;
@@ -696,6 +691,16 @@ line6:
 		xstrcat(out, "\n   ");
 
 	/****** Line 16 ******/
+	/* TRES should already have been converted from its simple form here */
+	snprintf(tmp_line, sizeof(tmp_line), "TRES=%s",
+		 job_ptr->tres_alloc_str);
+	xstrcat(out, tmp_line);
+	if (one_liner)
+		xstrcat(out, " ");
+	else
+		xstrcat(out, "\n   ");
+
+	/****** Line 17 ******/
 	if (job_ptr->sockets_per_node == (uint16_t) NO_VAL)
 		strcpy(tmp1, "*");
 	else
@@ -718,13 +723,20 @@ line6:
 		strcpy(tmp5, "*");
 	else
 		snprintf(tmp5, sizeof(tmp5), "%u", job_ptr->ntasks_per_core);
-	if (job_ptr->core_spec == (uint16_t) NO_VAL)
+	if (job_ptr->core_spec == (uint16_t) NO_VAL) {
+		spec_name = "Core";
 		strcpy(tmp6, "*");
-	else
+	} else if (job_ptr->core_spec & CORE_SPEC_THREAD) {
+		spec_name = "Thread";
+		i = job_ptr->core_spec & (~CORE_SPEC_THREAD);
+		snprintf(tmp6, sizeof(tmp6), "%d", i);
+	} else {
+		spec_name = "Core";
 		snprintf(tmp6, sizeof(tmp6), "%u", job_ptr->core_spec);
+	}
 	snprintf(tmp_line, sizeof(tmp_line),
-		 "Socks/Node=%s NtasksPerN:B:S:C=%s:%s:%s:%s CoreSpec=%s",
-		 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6);
+		 "Socks/Node=%s NtasksPerN:B:S:C=%s:%s:%s:%s %sSpec=%s",
+		 tmp1, tmp2, tmp3, tmp4, tmp5, spec_name, tmp6);
 	xstrcat(out, tmp_line);
 	if (one_liner)
 		xstrcat(out, " ");
@@ -892,7 +904,7 @@ line6:
 		hostlist_destroy(hl);
 		hostlist_destroy(hl_last);
 	}
-	/****** Line 17 ******/
+	/****** Line 18 ******/
 line15:
 	if (job_ptr->pn_min_memory & MEM_PER_CPU) {
 		job_ptr->pn_min_memory &= (~MEM_PER_CPU);
@@ -902,7 +914,8 @@ line15:
 
 	if (cluster_flags & CLUSTER_FLAG_BG) {
 		convert_num_unit((float)job_ptr->pn_min_cpus,
-				 tmp1, sizeof(tmp1), UNIT_NONE);
+				 tmp1, sizeof(tmp1), UNIT_NONE,
+				 CONVERT_NUM_UNIT_EXACT);
 		snprintf(tmp_line, sizeof(tmp_line), "MinCPUsNode=%s",	tmp1);
 	} else {
 		snprintf(tmp_line, sizeof(tmp_line), "MinCPUsNode=%u",
@@ -911,9 +924,9 @@ line15:
 
 	xstrcat(out, tmp_line);
 	convert_num_unit((float)job_ptr->pn_min_memory, tmp1, sizeof(tmp1),
-			 UNIT_MEGA);
+			 UNIT_MEGA, CONVERT_NUM_UNIT_EXACT);
 	convert_num_unit((float)job_ptr->pn_min_tmp_disk, tmp2, sizeof(tmp2),
-			 UNIT_MEGA);
+			 UNIT_MEGA, CONVERT_NUM_UNIT_EXACT);
 	snprintf(tmp_line, sizeof(tmp_line),
 		 " MinMemory%s=%s MinTmpDiskNode=%s",
 		 tmp6_ptr, tmp1, tmp2);
@@ -923,7 +936,7 @@ line15:
 	else
 		xstrcat(out, "\n   ");
 
-	/****** Line 18 ******/
+	/****** Line 19 ******/
 	snprintf(tmp_line, sizeof(tmp_line),
 		 "Features=%s Gres=%s Reservation=%s",
 		 job_ptr->features, job_ptr->gres, job_ptr->resv_name);
@@ -933,19 +946,26 @@ line15:
 	else
 		xstrcat(out, "\n   ");
 
-	/****** Line 19 ******/
+	/****** Line 20 ******/
+	if (job_ptr->shared == 0)
+		tmp6_ptr = "0";
+	else if (job_ptr->shared == 1)
+		tmp6_ptr = "1";
+	else if (job_ptr->shared == 2)
+		tmp6_ptr = "USER";
+	else
+		tmp6_ptr = "OK";
 	snprintf(tmp_line, sizeof(tmp_line),
 		 "Shared=%s Contiguous=%d Licenses=%s Network=%s",
-		 (job_ptr->shared == 0 ? "0" :
-		  job_ptr->shared == 1 ? "1" : "OK"),
-		 job_ptr->contiguous, job_ptr->licenses, job_ptr->network);
+		 tmp6_ptr, job_ptr->contiguous, job_ptr->licenses,
+		 job_ptr->network);
 	xstrcat(out, tmp_line);
 	if (one_liner)
 		xstrcat(out, " ");
 	else
 		xstrcat(out, "\n   ");
 
-	/****** Line 20 ******/
+	/****** Line 21 ******/
 	snprintf(tmp_line, sizeof(tmp_line), "Command=%s",
 		 job_ptr->command);
 	xstrcat(out, tmp_line);
@@ -954,13 +974,13 @@ line15:
 	else
 		xstrcat(out, "\n   ");
 
-	/****** Line 21 ******/
+	/****** Line 22 ******/
 	snprintf(tmp_line, sizeof(tmp_line), "WorkDir=%s",
 		 job_ptr->work_dir);
 	xstrcat(out, tmp_line);
 
 	if (cluster_flags & CLUSTER_FLAG_BG) {
-		/****** Line 22 (optional) ******/
+		/****** Line 23 (optional) ******/
 		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
 					       select_buf, sizeof(select_buf),
 					       SELECT_PRINT_BG_ID);
@@ -974,7 +994,7 @@ line15:
 			xstrcat(out, tmp_line);
 		}
 
-		/****** Line 23 (optional) ******/
+		/****** Line 24 (optional) ******/
 		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
 					       select_buf, sizeof(select_buf),
 					       SELECT_PRINT_MIXED_SHORT);
@@ -987,7 +1007,7 @@ line15:
 		}
 
 		if (cluster_flags & CLUSTER_FLAG_BGL) {
-			/****** Line 24 (optional) ******/
+			/****** Line 25 (optional) ******/
 			select_g_select_jobinfo_sprint(
 				job_ptr->select_jobinfo,
 				select_buf, sizeof(select_buf),
@@ -1002,7 +1022,7 @@ line15:
 				xstrcat(out, tmp_line);
 			}
 		}
-		/****** Line 25 (optional) ******/
+		/****** Line 26 (optional) ******/
 		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
 					       select_buf, sizeof(select_buf),
 					       SELECT_PRINT_LINUX_IMAGE);
@@ -1020,7 +1040,7 @@ line15:
 
 			xstrcat(out, tmp_line);
 		}
-		/****** Line 26 (optional) ******/
+		/****** Line 27 (optional) ******/
 		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
 					       select_buf, sizeof(select_buf),
 					       SELECT_PRINT_MLOADER_IMAGE);
@@ -1033,7 +1053,7 @@ line15:
 				 "MloaderImage=%s", select_buf);
 			xstrcat(out, tmp_line);
 		}
-		/****** Line 27 (optional) ******/
+		/****** Line 28 (optional) ******/
 		select_g_select_jobinfo_sprint(job_ptr->select_jobinfo,
 					       select_buf, sizeof(select_buf),
 					       SELECT_PRINT_RAMDISK_IMAGE);
@@ -1052,7 +1072,7 @@ line15:
 		}
 	}
 
-	/****** Line 28 (optional) ******/
+	/****** Line 29 (optional) ******/
 	if (job_ptr->comment) {
 		if (one_liner)
 			xstrcat(out, " ");
@@ -1063,7 +1083,7 @@ line15:
 		xstrcat(out, tmp_line);
 	}
 
-	/****** Line 29 (optional) ******/
+	/****** Line 30 (optional) ******/
 	if (job_ptr->batch_flag) {
 		if (one_liner)
 			xstrcat(out, " ");
@@ -1073,7 +1093,7 @@ line15:
 		xstrfmtcat(out, "StdErr=%s", tmp_line);
 	}
 
-	/****** Line 30 (optional) ******/
+	/****** Line 31 (optional) ******/
 	if (job_ptr->batch_flag) {
 		if (one_liner)
 			xstrcat(out, " ");
@@ -1083,7 +1103,7 @@ line15:
 		xstrfmtcat(out, "StdIn=%s", tmp_line);
 	}
 
-	/****** Line 31 (optional) ******/
+	/****** Line 32 (optional) ******/
 	if (job_ptr->batch_flag) {
 		if (one_liner)
 			xstrcat(out, " ");
@@ -1093,7 +1113,7 @@ line15:
 		xstrfmtcat(out, "StdOut=%s", tmp_line);
 	}
 
-	/****** Line 32 (optional) ******/
+	/****** Line 33 (optional) ******/
 	if (job_ptr->batch_script) {
 		if (one_liner)
 			xstrcat(out, " ");
@@ -1103,7 +1123,7 @@ line15:
 		xstrcat(out, job_ptr->batch_script);
 	}
 
-	/****** Line 33 (optional) ******/
+	/****** Line 34 (optional) ******/
 	if (job_ptr->req_switch) {
 		char time_buf[32];
 		if (one_liner)
@@ -1117,7 +1137,58 @@ line15:
 		xstrcat(out, tmp_line);
 	}
 
-	/****** Line 34 (optional) ******/
+	/****** Line 35 (optional) ******/
+	if (job_ptr->burst_buffer) {
+		if (one_liner)
+			xstrcat(out, " ");
+		else
+			xstrcat(out, "\n   ");
+		snprintf(tmp_line, sizeof(tmp_line), "BurstBuffer=%s",
+			 job_ptr->burst_buffer);
+		xstrcat(out, tmp_line);
+	}
+
+	/****** Line 36 (optional) ******/
+	if (cpu_freq_debug(NULL, NULL, tmp1, sizeof(tmp1),
+			   job_ptr->cpu_freq_gov, job_ptr->cpu_freq_min,
+			   job_ptr->cpu_freq_max, NO_VAL) != 0) {
+		if (one_liner)
+			xstrcat(out, " ");
+		else
+			xstrcat(out, "\n   ");
+		xstrcat(out, tmp1);
+	}
+
+	/****** Line 37 ******/
+	if (one_liner)
+		xstrcat(out, " ");
+	else
+		xstrcat(out, "\n   ");
+	snprintf(tmp_line, sizeof(tmp_line),
+		 "Power=%s SICP=%u",
+		 power_flags_str(job_ptr->power_flags), job_ptr->sicp_mode);
+	xstrcat(out, tmp_line);
+
+	/****** Line 38 (optional) ******/
+	if (job_ptr->bitflags) {
+		if (one_liner)
+			xstrcat(out, " ");
+		else
+			xstrcat(out, "\n   ");
+		if (job_ptr->bitflags & KILL_INV_DEP) {
+			snprintf(tmp_line,
+				 sizeof(tmp_line),
+				 "KillOInInvalidDependent=Yes");
+		}
+		if (job_ptr->bitflags & NO_KILL_INV_DEP) {
+			snprintf(tmp_line,
+				 sizeof(tmp_line),
+				 "KillOInInvalidDependent=No");
+		}
+		xstrcat(out, tmp_line);
+	}
+
+	/****** END OF JOB RECORD ******/
 	if (one_liner)
 		xstrcat(out, "\n");
 	else
@@ -1558,3 +1629,150 @@ extern int slurm_job_cpus_allocated_on_node(job_resources_t *job_resrcs_ptr,
 
 	return slurm_job_cpus_allocated_on_node_id(job_resrcs_ptr, node_id);
 }
+
+int slurm_job_cpus_allocated_str_on_node_id(char *cpus,
+					    size_t cpus_len,
+					    job_resources_t *job_resrcs_ptr,
+					    int node_id)
+{
+	int start_node = -1; /* start at -1 so the index into the
+			      * sock_core_rep_count[] array lines up correctly */
+	uint32_t threads = 1;
+	int inx = 0;
+	bitstr_t *cpu_bitmap;
+	int j, k, bit_inx, bit_reps;
+
+	if (!job_resrcs_ptr || node_id < 0)
+		slurm_seterrno_ret(EINVAL);
+
+	/* find index in sock_core_rep_count[] for this node id
+	 */
+	do {
+		start_node += job_resrcs_ptr->sock_core_rep_count[inx];
+		inx++;
+	} while (start_node < node_id);
+	/* back to previous index since inx is always one step further
+	 * after previous loop
+	 */
+	inx--;
+
+	bit_reps = job_resrcs_ptr->sockets_per_node[inx] *
+		job_resrcs_ptr->cores_per_socket[inx];
+
+	/* get the number of threads per core on this node
+	 */
+	if (job_node_ptr)
+		threads = job_node_ptr->node_array[node_id].threads;
+	bit_inx = 0;
+	cpu_bitmap = bit_alloc(bit_reps * threads);
+	for (j = 0; j < bit_reps; j++) {
+		if (bit_test(job_resrcs_ptr->core_bitmap, bit_inx)){
+			for (k = 0; k < threads; k++)
+				bit_set(cpu_bitmap,
+					(j * threads) + k);
+		}
+		bit_inx++;
+	}
+	bit_fmt(cpus, cpus_len, cpu_bitmap);
+	FREE_NULL_BITMAP(cpu_bitmap);
+
+	return SLURM_SUCCESS;
+}
+
+int slurm_job_cpus_allocated_str_on_node(char *cpus,
+					 size_t cpus_len,
+					 job_resources_t *job_resrcs_ptr,
+					 const char *node)
+{
+	hostlist_t node_hl;
+	int node_id;
+
+	if (!job_resrcs_ptr || !node || !job_resrcs_ptr->nodes)
+		slurm_seterrno_ret(EINVAL);
+
+	node_hl = hostlist_create(job_resrcs_ptr->nodes);
+	node_id = hostlist_find(node_hl, node);
+	hostlist_destroy(node_hl);
+	if (node_id == -1)
+		return SLURM_ERROR;
+
+	return slurm_job_cpus_allocated_str_on_node_id(cpus,
+						       cpus_len,
+						       job_resrcs_ptr,
+						       node_id);
+}
+
+/*
+ * slurm_network_callerid - issue RPC to get the job id of a job from a remote
+ * slurmd based upon network socket information.
+ *
+ * IN req - Information about network connection in question
+ * OUT job_id -  ID of the job or NO_VAL
+ * OUT node_name - name of the remote slurmd
+ * IN node_name_size - size of the node_name buffer
+ * RET SLURM_PROTOCOL_SUCCESS or SLURM_FAILURE on error
+ */
+extern int
+slurm_network_callerid (network_callerid_msg_t req, uint32_t *job_id,
+	char *node_name, int node_name_size)
+{
+	int rc;
+	slurm_msg_t resp_msg;
+	slurm_msg_t req_msg;
+	network_callerid_resp_t *resp;
+	struct sockaddr_in addr;
+	uint32_t target_slurmd; /* change for IPv6 support */
+
+	debug("slurm_network_callerid RPC: start");
+
+	slurm_msg_t_init(&req_msg);
+	slurm_msg_t_init(&resp_msg);
+
+	/* ip_src is the IP we want to talk to. Hopefully there's a slurmd
+	 * listening there */
+	memset(&addr, 0, sizeof(addr));
+	addr.sin_family = req.af;
+
+	/* TODO: until IPv6 support is added to Slurm, we must hope that the
+	 * other end is IPv4 */
+	if (req.af == AF_INET6) {
+		error("IPv6 is not yet supported in Slurm");
+		/* For testing IPv6 callerid prior to Slurm IPv6 RPC support,
+		 * set a sane target, uncomment the following and comment out
+		 * the return code:
+		addr.sin_family = AF_INET;
+		target_slurmd = inet_addr("127.0.0.1"); //choose a test target
+		*/
+		return SLURM_FAILURE;
+	} else
+		memcpy(&target_slurmd, req.ip_src, 4);
+
+	addr.sin_addr.s_addr = target_slurmd;
+	addr.sin_port = htons(slurm_get_slurmd_port());
+	req_msg.address = addr;
+
+	req_msg.msg_type = REQUEST_NETWORK_CALLERID;
+	req_msg.data     = &req;
+
+	if (slurm_send_recv_node_msg(&req_msg, &resp_msg, 0) < 0)
+		return SLURM_ERROR;
+
+	switch (resp_msg.msg_type) {
+		case RESPONSE_NETWORK_CALLERID:
+			resp = (network_callerid_resp_t*)resp_msg.data;
+			*job_id = resp->job_id;
+			strncpy(node_name, resp->node_name, node_name_size);
+			break;
+		case RESPONSE_SLURM_RC:
+			rc = ((return_code_msg_t *) resp_msg.data)->return_code;
+			if (rc)
+				slurm_seterrno_ret(rc);
+			break;
+		default:
+			slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
+			break;
+	}
+
+	slurm_free_network_callerid_msg(resp_msg.data);
+	return SLURM_PROTOCOL_SUCCESS;
+}
diff --git a/src/api/job_step_info.c b/src/api/job_step_info.c
index 7d163ea02..f8f769a77 100644
--- a/src/api/job_step_info.c
+++ b/src/api/job_step_info.c
@@ -165,7 +165,8 @@ slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr,
 				limit_str, sizeof(limit_str));
 	if (job_step_ptr->array_job_id) {
 		if (job_step_ptr->step_id == INFINITE) {	/* Pending */
-			snprintf(tmp_line, sizeof(tmp_line), "StepId=%u_%u.TBD ",
+			snprintf(tmp_line, sizeof(tmp_line),
+				 "StepId=%u_%u.TBD ",
 				 job_step_ptr->array_job_id,
 				 job_step_ptr->array_task_id);
 		} else {
@@ -200,7 +201,7 @@ slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr,
 		 job_state_string(job_step_ptr->state));
 	xstrcat(out, tmp_line);
 	if (cluster_flags & CLUSTER_FLAG_BG) {
-		char *io_nodes;
+		char *io_nodes = NULL;
 		select_g_select_jobinfo_get(job_step_ptr->select_jobinfo,
 					    SELECT_JOBDATA_IONODES,
 					    &io_nodes);
@@ -236,11 +237,12 @@ slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr,
 					    SELECT_JOBDATA_NODE_CNT,
 					    &nodes);
 		convert_num_unit((float)nodes, tmp_node_cnt,
-				 sizeof(tmp_node_cnt), UNIT_NONE);
+				 sizeof(tmp_node_cnt), UNIT_NONE,
+				 CONVERT_NUM_UNIT_EXACT);
 	} else {
 		convert_num_unit((float)_nodes_in_list(job_step_ptr->nodes),
 				 tmp_node_cnt, sizeof(tmp_node_cnt),
-				 UNIT_NONE);
+				 UNIT_NONE, CONVERT_NUM_UNIT_EXACT);
 	}
 
 	snprintf(tmp_line, sizeof(tmp_line),
@@ -254,6 +256,15 @@ slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr,
 		xstrcat(out, "\n   ");
 
 	/****** Line 4 ******/
+	snprintf(tmp_line, sizeof(tmp_line), "TRES=%s",
+		 job_step_ptr->tres_alloc_str);
+	xstrcat(out, tmp_line);
+	if (one_liner)
+		xstrcat(out, " ");
+	else
+		xstrcat(out, "\n   ");
+
+	/****** Line 5 ******/
 	snprintf(tmp_line, sizeof(tmp_line),
 		"ResvPorts=%s Checkpoint=%u CheckpointDir=%s",
 		 job_step_ptr->resv_ports,
@@ -264,19 +275,18 @@ slurm_sprint_job_step_info ( job_step_info_t * job_step_ptr,
 	else
 		xstrcat(out, "\n   ");
 
-	/****** Line 5 ******/
-	if (job_step_ptr->cpu_freq == NO_VAL) {
-		snprintf(tmp_line, sizeof(tmp_line),
-			 "CPUFreqReq=Default\n\n");
-	} else if (job_step_ptr->cpu_freq & CPU_FREQ_RANGE_FLAG) {
-		char buf[32];
-		cpu_freq_to_string(buf, sizeof(buf), job_step_ptr->cpu_freq);
-		snprintf(tmp_line, sizeof(tmp_line), "CPUFreqReq=%s\n\n", buf);
+	/****** Line 6 ******/
+	if (cpu_freq_debug(NULL, NULL, tmp_line, sizeof(tmp_line),
+			   job_step_ptr->cpu_freq_gov,
+			   job_step_ptr->cpu_freq_min,
+			   job_step_ptr->cpu_freq_max, NO_VAL) != 0) {
+		xstrcat(out, tmp_line);
 	} else {
-		snprintf(tmp_line, sizeof(tmp_line),
-			 "CPUFreqReq=%u\n\n", job_step_ptr->cpu_freq);
+		xstrcat(out, "CPUFreqReq=Default");
 	}
-	xstrcat(out, tmp_line);
+	xstrfmtcat(out, " Dist=%s",
+		   slurm_step_layout_type_name(job_step_ptr->task_dist));
+	xstrcat(out, "\n\n");
 
 	return out;
 }
@@ -473,7 +483,7 @@ extern int slurm_job_step_stat(uint32_t job_id, uint32_t step_id,
 		}
 	}
 	list_iterator_destroy(itr);
-	list_destroy(ret_list);
+	FREE_NULL_LIST(ret_list);
 
 	if (resp_out->stats_list)
 		list_sort(resp_out->stats_list, (ListCmpF)_sort_stats_by_name);
@@ -581,7 +591,7 @@ extern int slurm_job_step_get_pids(uint32_t job_id, uint32_t step_id,
                 }
         }
         list_iterator_destroy(itr);
-        list_destroy(ret_list);
+        FREE_NULL_LIST(ret_list);
 
  	if (resp_out->pid_list)
 		list_sort(resp_out->pid_list, (ListCmpF)_sort_pids_by_name);
@@ -606,8 +616,7 @@ extern void slurm_job_step_pids_response_msg_free(void *object)
 	job_step_pids_response_msg_t *step_pids_msg =
 		(job_step_pids_response_msg_t *) object;
 	if (step_pids_msg) {
-		if (step_pids_msg->pid_list)
-			list_destroy(step_pids_msg->pid_list);
+		FREE_NULL_LIST(step_pids_msg->pid_list);
 		xfree(step_pids_msg);
 	}
 }
@@ -622,8 +631,7 @@ extern void slurm_job_step_stat_response_msg_free(void *object)
 	job_step_stat_response_msg_t *step_stat_msg =
 		(job_step_stat_response_msg_t *) object;
 	if (step_stat_msg) {
-		if (step_stat_msg->stats_list)
-			list_destroy(step_stat_msg->stats_list);
+		FREE_NULL_LIST(step_stat_msg->stats_list);
 		xfree(step_stat_msg);
 	}
 }
diff --git a/src/api/layout_info.c b/src/api/layout_info.c
new file mode 100644
index 000000000..40106e670
--- /dev/null
+++ b/src/api/layout_info.c
@@ -0,0 +1,125 @@
+/*****************************************************************************\
+ *  layout_info.c - get/print the layout information of slurm
+ *****************************************************************************
+ *  Copyright (C) 2015
+ *  Written by Bull - Thomas Cadeau
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#ifdef HAVE_SYS_SYSLOG_H
+#  include <sys/syslog.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syslog.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+
+#include "slurm/slurm.h"
+
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+
+/*
+ * slurm_load_layout - issue RPC to get slurm specific layout information
+ *	if changed since update_time
+ * OUT resp - place to store a node configuration pointer
+ * IN l_type - type of layout
+ * RET 0 or a slurm error code
+ * NOTE: free the response using slurm_free_layout_info_msg
+ */
+/*extern int slurm_load_layout (node_info_msg_t **resp, char* l_type)
+{
+	return SLURM_PROTOCOL_SUCCESS;
+}*/
+
+extern int slurm_load_layout (char *layout_type, char *entities, char *type,
+			      uint32_t no_relation, layout_info_msg_t **resp)
+{
+	int rc;
+	slurm_msg_t req_msg;
+	slurm_msg_t resp_msg;
+	layout_info_request_msg_t req;
+
+	slurm_msg_t_init(&req_msg);
+	slurm_msg_t_init(&resp_msg);
+
+	req.layout_type  = layout_type;
+	req.entities     = entities;
+	req.type         = type;
+	req.no_relation  = no_relation;
+	req_msg.msg_type = REQUEST_LAYOUT_INFO;
+	req_msg.data     = &req;
+
+	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
+		return SLURM_ERROR;
+
+	switch (resp_msg.msg_type) {
+	case RESPONSE_LAYOUT_INFO:
+		*resp = (layout_info_msg_t *) resp_msg.data;
+		break;
+	case RESPONSE_SLURM_RC:
+		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
+		slurm_free_return_code_msg(resp_msg.data);
+		if (rc)
+			slurm_seterrno_ret(rc);
+		*resp = NULL;
+		break;
+	default:
+		slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
+		break;
+	}
+
+	return SLURM_PROTOCOL_SUCCESS;
+}
+
+void slurm_print_layout_info ( FILE* out, layout_info_msg_t *layout_info_ptr,
+			       int one_liner )
+{
+	char *nl;
+	int i;
+	for (i = 0; i < layout_info_ptr->record_count; i++) {
+		if (one_liner) {
+			while ((nl = strchr(layout_info_ptr->records[i], '\n')))
+				nl[0] = ' ';
+		}
+		fprintf ( out, "%s", layout_info_ptr->records[i]);
+	}
+}
diff --git a/src/api/node_info.c b/src/api/node_info.c
index 6e9f00916..d7a40e921 100644
--- a/src/api/node_info.c
+++ b/src/api/node_info.c
@@ -128,7 +128,7 @@ slurm_sprint_node_table (node_info_t * node_ptr,
 {
 	uint32_t my_state = node_ptr->node_state;
 	char *cloud_str = "", *comp_str = "", *drain_str = "", *power_str = "";
-	char load_str[32], tmp_line[512], time_str[32];
+	char load_str[32], mem_str[32], tmp_line[512], time_str[32], owner_str[32];
 	char *out = NULL, *reason_str = NULL, *select_reason_str = NULL;
 	uint16_t err_cpus = 0, alloc_cpus = 0;
 	int cpus_per_node = 1;
@@ -278,13 +278,18 @@ slurm_sprint_node_table (node_info_t * node_ptr,
 		snprintf(tmp_line, sizeof(tmp_line), "OS=%s ", node_ptr->os);
 		xstrcat(out, tmp_line);
 	}
+	if (node_ptr->free_mem == NO_VAL) {
+		strcpy(mem_str, "N/A");
+	} else {
+		snprintf(mem_str, sizeof(mem_str), "%u", node_ptr->free_mem);
+	}
 	slurm_get_select_nodeinfo(node_ptr->select_nodeinfo,
 				  SELECT_NODEDATA_MEM_ALLOC,
 				  NODE_STATE_ALLOCATED,
 				  &alloc_memory);
 	snprintf(tmp_line, sizeof(tmp_line),
-		 "RealMemory=%u AllocMem=%u Sockets=%u Boards=%u",
-		 node_ptr->real_memory, alloc_memory,
+		 "RealMemory=%u AllocMem=%u FreeMem=%s Sockets=%u Boards=%u",
+		 node_ptr->real_memory, alloc_memory, mem_str,
 		 node_ptr->sockets, node_ptr->boards);
 	xstrcat(out, tmp_line);
 	if (one_liner)
@@ -317,12 +322,22 @@ slurm_sprint_node_table (node_info_t * node_ptr,
 	}
 
 	/****** Line 8 ******/
-
+	if (node_ptr->owner == NO_VAL) {
+		snprintf(owner_str, sizeof(owner_str), "N/A");
+	} else {
+		char *user_name;
+		user_name = uid_to_string((uid_t) node_ptr->owner);
+		snprintf(owner_str, sizeof(owner_str), "%s(%u)",
+			 user_name, node_ptr->owner);
+		xfree(user_name);
+	}
 	snprintf(tmp_line, sizeof(tmp_line),
-		 "State=%s%s%s%s%s ThreadsPerCore=%u TmpDisk=%u Weight=%u",
+		 "State=%s%s%s%s%s ThreadsPerCore=%u TmpDisk=%u Weight=%u "
+		 "Owner=%s",
 		 node_state_string(my_state),
 		 cloud_str, comp_str, drain_str, power_str,
-		 node_ptr->threads, node_ptr->tmp_disk, node_ptr->weight);
+		 node_ptr->threads, node_ptr->tmp_disk, node_ptr->weight,
+		 owner_str);
 	xstrcat(out, tmp_line);
 	if (one_liner)
 		xstrcat(out, " ");
@@ -352,15 +367,29 @@ slurm_sprint_node_table (node_info_t * node_ptr,
 	else
 		xstrcat(out, "\n   ");
 
-	/****** power Line ******/
+	/****** Power Management Line ******/
+	if (!node_ptr->power || (node_ptr->power->cap_watts == NO_VAL)) {
+		snprintf(tmp_line, sizeof(tmp_line), "CapWatts=n/a");
+	} else {
+		snprintf(tmp_line, sizeof(tmp_line), "CapWatts=%u",
+			 node_ptr->power->cap_watts);
+	}
+	xstrcat(out, tmp_line);
+	if (one_liner)
+		xstrcat(out, " ");
+	else
+		xstrcat(out, "\n   ");
+
+	/****** Power Consumption Line ******/
 	if (!node_ptr->energy || node_ptr->energy->current_watts == NO_VAL)
 		snprintf(tmp_line, sizeof(tmp_line), "CurrentWatts=n/s "
 				"LowestJoules=n/s ConsumedJoules=n/s");
 	else
 		snprintf(tmp_line, sizeof(tmp_line), "CurrentWatts=%u "
-				"LowestJoules=%u ConsumedJoules=%u",
+				"LowestJoules=%"PRIu64" "
+				"ConsumedJoules=%"PRIu64"",
 				node_ptr->energy->current_watts,
-				node_ptr->energy->base_watts,
+				node_ptr->energy->base_consumed_energy,
 				node_ptr->energy->consumed_energy);
 	xstrcat(out, tmp_line);
 	if (one_liner)
@@ -373,7 +402,7 @@ slurm_sprint_node_table (node_info_t * node_ptr,
 	    || node_ptr->ext_sensors->consumed_energy == NO_VAL)
 		snprintf(tmp_line, sizeof(tmp_line), "ExtSensorsJoules=n/s ");
 	else
-		snprintf(tmp_line, sizeof(tmp_line), "ExtSensorsJoules=%u ",
+		snprintf(tmp_line, sizeof(tmp_line), "ExtSensorsJoules=%"PRIu64" ",
 			 node_ptr->ext_sensors->consumed_energy);
 	xstrcat(out, tmp_line);
 	if (!node_ptr->ext_sensors
@@ -562,15 +591,19 @@ extern int slurm_load_node_single (node_info_msg_t **resp,
 }
 
 /*
- * slurm_node_energy - issue RPC to get the energy data on this machine
+ * slurm_get_node_energy_n - issue RPC to get the energy data of all
+ * configured sensors on the target machine
  * IN  host  - name of node to query, NULL if localhost
  * IN  delta - Use cache if data is newer than this in seconds
- * OUT acct_gather_energy_t structure on success or NULL other wise
- * RET 0 or a slurm error code
- * NOTE: free the response using slurm_acct_gather_energy_destroy
+ * OUT nb_sensors - number of sensors
+ * OUT energy - array of acct_gather_energy_t structures on success or
+ *                NULL other wise
+ * RET 0 on success or a slurm error code
+ * NOTE: free the response using xfree
  */
 extern int slurm_get_node_energy(char *host, uint16_t delta,
-				 acct_gather_energy_t **acct_gather_energy)
+				 uint16_t *sensor_cnt,
+				 acct_gather_energy_t **energy)
 {
 	int rc;
 	slurm_msg_t req_msg;
@@ -624,8 +657,10 @@ extern int slurm_get_node_energy(char *host, uint16_t delta,
 		g_slurm_auth_destroy(resp_msg.auth_cred);
 	switch (resp_msg.msg_type) {
 	case RESPONSE_ACCT_GATHER_ENERGY:
-		*acct_gather_energy = ((acct_gather_node_resp_msg_t *)
-				       resp_msg.data)->energy;
+		*sensor_cnt = ((acct_gather_node_resp_msg_t *)
+			       resp_msg.data)->sensor_cnt;
+		*energy = ((acct_gather_node_resp_msg_t *)
+			   resp_msg.data)->energy;
 		((acct_gather_node_resp_msg_t *) resp_msg.data)->energy = NULL;
 		slurm_free_acct_gather_node_resp_msg(resp_msg.data);
 		break;
diff --git a/src/api/partition_info.c b/src/api/partition_info.c
index f64ef0388..92260b06d 100644
--- a/src/api/partition_info.c
+++ b/src/api/partition_info.c
@@ -188,6 +188,14 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
 	else
 		sprintf(tmp_line, " Default=NO");
 	xstrcat(out, tmp_line);
+
+	if (part_ptr->qos_char)
+		snprintf(tmp_line, sizeof(tmp_line), " QoS=%s",
+			 part_ptr->qos_char);
+	else
+		sprintf(tmp_line, " QoS=N/A");
+	xstrcat(out, tmp_line);
+
 	if (one_liner)
 		xstrcat(out, " ");
 	else
@@ -224,6 +232,11 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
 	else
 		sprintf(tmp_line, " DisableRootJobs=NO");
 	xstrcat(out, tmp_line);
+	if (part_ptr->flags & PART_FLAG_EXCLUSIVE_USER)
+		sprintf(tmp_line, " ExclusiveUser=YES");
+	else
+		sprintf(tmp_line, " ExclusiveUser=NO");
+	xstrcat(out, tmp_line);
 	sprintf(tmp_line, " GraceTime=%u", part_ptr->grace_time);
 	xstrcat(out, tmp_line);
 	if (part_ptr->flags & PART_FLAG_HIDDEN)
@@ -243,7 +256,8 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
 	else {
 		if (cluster_flags & CLUSTER_FLAG_BG)
 			convert_num_unit((float)part_ptr->max_nodes,
-					 tmp1, sizeof(tmp1), UNIT_NONE);
+					 tmp1, sizeof(tmp1), UNIT_NONE,
+					 CONVERT_NUM_UNIT_EXACT);
 		else
 			snprintf(tmp1, sizeof(tmp1),"%u", part_ptr->max_nodes);
 
@@ -261,7 +275,8 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
 	xstrcat(out, tmp_line);
 	if (cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)part_ptr->min_nodes, tmp1, sizeof(tmp1),
-				 UNIT_NONE);
+				 UNIT_NONE,
+				 CONVERT_NUM_UNIT_EXACT);
 	else
 		snprintf(tmp1, sizeof(tmp1), "%u", part_ptr->min_nodes);
 	sprintf(tmp_line, " MinNodes=%s", tmp1);
@@ -353,7 +368,8 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
 
 	if (cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)part_ptr->total_cpus, tmp1,
-				 sizeof(tmp1), UNIT_NONE);
+				 sizeof(tmp1), UNIT_NONE,
+				 CONVERT_NUM_UNIT_EXACT);
 	else
 		snprintf(tmp1, sizeof(tmp1), "%u", part_ptr->total_cpus);
 
@@ -362,7 +378,8 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
 
 	if (cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)part_ptr->total_nodes, tmp2,
-				 sizeof(tmp2), UNIT_NONE);
+				 sizeof(tmp2), UNIT_NONE,
+				 CONVERT_NUM_UNIT_EXACT);
 	else
 		snprintf(tmp2, sizeof(tmp2), "%u", part_ptr->total_nodes);
 
@@ -414,6 +431,18 @@ char *slurm_sprint_partition_info ( partition_info_t * part_ptr,
 		xstrcat(out, tmp_line);
 	}
 
+	/****** Line 10 ******/
+	if (part_ptr->billing_weights_str) {
+		if (one_liner)
+			xstrcat(out, " ");
+		else
+			xstrcat(out, "\n   ");
+
+		snprintf(tmp_line, sizeof(tmp_line), "TRESBillingWeights=%s",
+			part_ptr->billing_weights_str);
+		xstrcat(out, tmp_line);
+	}
+
 	if (one_liner)
 		xstrcat(out, "\n");
 	else
diff --git a/src/api/pmi_server.c b/src/api/pmi_server.c
index 48b1b3db8..b54555efe 100644
--- a/src/api/pmi_server.c
+++ b/src/api/pmi_server.c
@@ -66,7 +66,7 @@ static int pmi_kvs_no_dup_keys = 1;
 struct barrier_resp {
 	uint16_t port;
 	char *hostname;
-};				/* details for barrier task communcations */
+};				/* details for barrier task communications */
 struct barrier_resp *barrier_ptr = NULL;
 uint32_t barrier_resp_cnt = 0;	/* tasks having reached barrier */
 uint32_t barrier_cnt = 0;	/* tasks needing to reach barrier */
diff --git a/src/api/powercap_info.c b/src/api/powercap_info.c
new file mode 100644
index 000000000..e6f99f648
--- /dev/null
+++ b/src/api/powercap_info.c
@@ -0,0 +1,149 @@
+/*****************************************************************************\
+ *  powercap_info.c - Definitions for power capping configuration display
+ *****************************************************************************
+ *  Copyright (C) 2013 CEA/DAM/DIF
+ *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#ifdef HAVE_SYS_SYSLOG_H
+#  include <sys/syslog.h>
+#endif
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <syslog.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <unistd.h>
+
+#include "slurm/slurm.h"
+
+#include "src/common/parse_time.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+
+/*
+ * slurm_load_powercap - issue RPC to get slurm powercapping details
+ * IN powercap_info_msg_pptr - place to store a pointer to the result
+ * RET 0 or a slurm error code
+ * NOTE: free the response using slurm_free_powercap_info_msg
+ */
+extern int slurm_load_powercap(powercap_info_msg_t **resp)
+{
+	int rc;
+	slurm_msg_t req_msg;
+	slurm_msg_t resp_msg;
+
+	slurm_msg_t_init(&req_msg);
+	slurm_msg_t_init(&resp_msg);
+	req_msg.msg_type = REQUEST_POWERCAP_INFO;
+	req_msg.data     = NULL;
+
+	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
+		return SLURM_ERROR;
+
+	switch (resp_msg.msg_type) {
+	case RESPONSE_POWERCAP_INFO:
+		*resp = (powercap_info_msg_t *) resp_msg.data;
+		break;
+	case RESPONSE_SLURM_RC:
+		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
+		slurm_free_return_code_msg(resp_msg.data);
+		if (rc)
+			slurm_seterrno_ret(rc);
+		*resp = NULL;
+		break;
+	default:
+		slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
+		break;
+	}
+
+	return SLURM_PROTOCOL_SUCCESS;
+}
+
+/*
+ * slurm_print_powercap_info_msg - output information about powercapping
+ *	configuration based upon message as loaded using slurm_load_powercap
+ * IN out - file to write to
+ * IN powercap_info_msg_ptr - powercapping information message pointer
+ * IN one_liner - print as a single line if not zero
+ */
+extern void slurm_print_powercap_info_msg(FILE * out, powercap_info_msg_t *ptr,
+					  int one_liner)
+{
+	char tmp_line[512];
+	char *out_buf = NULL;
+
+	if (ptr->power_cap == 0) {
+		/****** Line 1 ******/
+		snprintf(tmp_line, sizeof(tmp_line),
+			 "Powercapping disabled by configuration."
+			 " See PowerParameters in `man slurm.conf'");
+		xstrcat(out_buf, tmp_line);
+		xstrcat(out_buf, "\n");
+		fprintf(out, "%s", out_buf);
+		xfree(out_buf);
+	} else {
+		/****** Line 1 ******/
+		snprintf(tmp_line, sizeof(tmp_line),
+			 "MinWatts=%u CurrentWatts=%u ",
+			 ptr->min_watts, ptr->cur_max_watts);
+		xstrcat(out_buf, tmp_line);
+		if (ptr->power_cap == INFINITE) {
+			snprintf(tmp_line, sizeof(tmp_line),
+				 "PowerCap=INFINITE ");
+		} else {
+			snprintf(tmp_line, sizeof(tmp_line),
+				 "PowerCap=%u ", ptr->power_cap);
+		}
+		xstrcat(out_buf, tmp_line);
+		snprintf(tmp_line, sizeof(tmp_line),
+			 "PowerFloor=%u PowerChangeRate=%u ",
+			 ptr->power_floor, ptr->power_change);
+		xstrcat(out_buf, tmp_line);
+		snprintf(tmp_line, sizeof(tmp_line),
+			 "AdjustedMaxWatts=%u MaxWatts=%u",
+			 ptr->adj_max_watts, ptr->max_watts);
+		xstrcat(out_buf, tmp_line);
+
+		xstrcat(out_buf, "\n");
+		fprintf(out, "%s", out_buf);
+		xfree(out_buf);
+	}
+}
diff --git a/src/api/reservation_info.c b/src/api/reservation_info.c
index 6505a7a41..2f74be6fd 100644
--- a/src/api/reservation_info.c
+++ b/src/api/reservation_info.c
@@ -148,13 +148,27 @@ char *slurm_sprint_reservation_info ( reserve_info_t * resv_ptr,
 		xstrcat(out, " ");
 	else
 		xstrcat(out, "\n   ");
-
 	/****** Line 3 ******/
+	snprintf(tmp_line, sizeof(tmp_line),
+		 "TRES=%s", resv_ptr->tres_str);
+	xfree(flag_str);
+	xstrcat(out, tmp_line);
+	if (one_liner)
+		xstrcat(out, " ");
+	else
+		xstrcat(out, "\n   ");
+
+	/****** Line 4 ******/
+	if (resv_ptr->resv_watts != NO_VAL) {
+		snprintf(tmp1, 32, "%u", resv_ptr->resv_watts);
+	} else
+		snprintf(tmp1, 32, "n/a");
 	if ((resv_ptr->start_time <= now) && (resv_ptr->end_time >= now))
 		state = "ACTIVE";
 	snprintf(tmp_line, sizeof(tmp_line),
-		 "Users=%s Accounts=%s Licenses=%s State=%s",
-		 resv_ptr->users, resv_ptr->accounts, resv_ptr->licenses, state);
+		 "Users=%s Accounts=%s Licenses=%s State=%s BurstBuffer=%s "
+		 "Watts=%s", resv_ptr->users, resv_ptr->accounts,
+		  resv_ptr->licenses, state, resv_ptr->burst_buffer, tmp1);
 	xstrcat(out, tmp_line);
 	if (one_liner)
 		xstrcat(out, "\n");
diff --git a/src/api/slurm_pmi.c b/src/api/slurm_pmi.c
index faeaf35b5..36334b801 100644
--- a/src/api/slurm_pmi.c
+++ b/src/api/slurm_pmi.c
@@ -181,7 +181,7 @@ int slurm_send_kvs_comm_set(struct kvs_comm_set *kvs_set_ptr,
 	msg_send.msg_type = PMI_KVS_PUT_REQ;
 	msg_send.data = (void *) kvs_set_ptr;
 
-	/* Send the RPC to the local srun communcation manager.
+	/* Send the RPC to the local srun communication manager.
 	 * Since the srun can be sent thousands of messages at
 	 * the same time and refuse some connections, retry as
 	 * needed. Spread out messages by task's rank. Also
@@ -264,7 +264,7 @@ int  slurm_get_kvs_comm_set(struct kvs_comm_set **kvs_set_ptr,
 	msg_send.msg_type = PMI_KVS_GET_REQ;
 	msg_send.data = &data;
 
-	/* Send the RPC to the local srun communcation manager.
+	/* Send the RPC to the local srun communication manager.
 	 * Since the srun can be sent thousands of messages at
 	 * the same time and refuse some connections, retry as
 	 * needed. Wait until all key-pairs have been sent by
@@ -307,7 +307,7 @@ int  slurm_get_kvs_comm_set(struct kvs_comm_set **kvs_set_ptr,
 		if (errno == EINTR)
 			continue;
 		error("slurm_receive_msg: %m");
-		slurm_close_accepted_conn(srun_fd);
+		slurm_close(srun_fd);
 		return errno;
 	}
 	if (msg_rcv.auth_cred)
@@ -315,13 +315,13 @@ int  slurm_get_kvs_comm_set(struct kvs_comm_set **kvs_set_ptr,
 
 	if (msg_rcv.msg_type != PMI_KVS_GET_RESP) {
 		error("slurm_get_kvs_comm_set msg_type=%d", msg_rcv.msg_type);
-		slurm_close_accepted_conn(srun_fd);
+		slurm_close(srun_fd);
 		return SLURM_UNEXPECTED_MSG_ERROR;
 	}
 	if (slurm_send_rc_msg(&msg_rcv, SLURM_SUCCESS) < 0)
 		error("slurm_send_rc_msg: %m");
 
-	slurm_close_accepted_conn(srun_fd);
+	slurm_close(srun_fd);
 	*kvs_set_ptr = msg_rcv.data;
 
 	rc = _forward_comm_set(*kvs_set_ptr);
diff --git a/src/api/step_ctx.c b/src/api/step_ctx.c
index e576308ae..e5409ad43 100644
--- a/src/api/step_ctx.c
+++ b/src/api/step_ctx.c
@@ -101,6 +101,8 @@ _job_fake_cred(struct slurm_step_ctx_struct *ctx)
 	arg.step_hostlist  = ctx->step_req->node_list;
 	arg.step_mem_limit = 0;
 
+	arg.job_gres_list     = NULL;
+	arg.job_constraints   = NULL;
 	arg.job_core_bitmap   = bit_alloc(node_cnt);
 	bit_nset(arg.job_core_bitmap,  0, node_cnt-1);
 	arg.step_core_bitmap  = bit_alloc(node_cnt);
@@ -126,7 +128,9 @@ static job_step_create_request_msg_t *_create_step_request(
 	step_req->min_nodes = step_params->min_nodes;
 	step_req->max_nodes = step_params->max_nodes;
 	step_req->cpu_count = step_params->cpu_count;
-	step_req->cpu_freq  = step_params->cpu_freq;
+	step_req->cpu_freq_min = step_params->cpu_freq_min;
+	step_req->cpu_freq_max = step_params->cpu_freq_max;
+	step_req->cpu_freq_gov = step_params->cpu_freq_gov;
 	step_req->num_tasks = step_params->task_count;
 	step_req->relative = step_params->relative;
 	step_req->resv_port_cnt = step_params->resv_port_cnt;
@@ -249,6 +253,8 @@ slurm_step_ctx_create_timeout (const slurm_step_ctx_params_t *step_params,
 	rc = slurm_job_step_create(step_req, &step_resp);
 	if ((rc < 0) &&
 	    ((errno == ESLURM_NODES_BUSY) ||
+	     (errno == ESLURM_POWER_NOT_AVAIL) ||
+	     (errno == ESLURM_POWER_RESERVED) ||
 	     (errno == ESLURM_PORTS_BUSY) ||
 	     (errno == ESLURM_INTERCONNECT_BUSY))) {
 		struct pollfd fds;
diff --git a/src/api/step_io.c b/src/api/step_io.c
index 596b18e00..8694a96f2 100644
--- a/src/api/step_io.c
+++ b/src/api/step_io.c
@@ -305,15 +305,16 @@ _server_read(eio_obj_t *obj, List objs)
 
 		n = io_hdr_read_fd(obj->fd, &s->header);
 		if (n <= 0) { /* got eof or error on socket read */
-
-			if (getenv("SLURM_PTY_PORT") == NULL)
-				error("\
-%s: fd %d got error or unexpected eof reading header",
-				      __func__, obj->fd);
-
-			if (s->cio->sls)
-				step_launch_notify_io_failure(s->cio->sls,
-							      s->node_id);
+			if (n < 0) { /* Error */
+				if (getenv("SLURM_PTY_PORT") == NULL) {
+					error("%s: fd %d error reading header: %m",
+					      __func__, obj->fd);
+				}
+				if (s->cio->sls) {
+					step_launch_notify_io_failure(
+						s->cio->sls, s->node_id);
+				}
+			}
 			close(obj->fd);
 			obj->fd = -1;
 			s->in_eof = true;
@@ -1090,6 +1091,7 @@ client_io_handler_create(slurm_step_io_fds_t fds,
 	uint32_t siglen;
 	char *sig;
 	uint16_t *ports;
+	uint16_t eio_timeout;
 
 	cio = (client_io_t *)xmalloc(sizeof(client_io_t));
 	if (cio == NULL)
@@ -1112,7 +1114,8 @@ client_io_handler_create(slurm_step_io_fds_t fds,
 	memcpy(cio->io_key, sig, siglen);
 	/* no need to free "sig", it is just a pointer into the credential */
 
-	cio->eio = eio_handle_create();
+	eio_timeout = slurm_get_srun_eio_timeout();
+	cio->eio = eio_handle_create(eio_timeout);
 
 	/* Compute number of listening sockets needed to allow
 	 * all of the slurmds to establish IO streams with srun, without
diff --git a/src/api/step_launch.c b/src/api/step_launch.c
index e74b2eb52..9aa2c23f8 100644
--- a/src/api/step_launch.c
+++ b/src/api/step_launch.c
@@ -64,20 +64,22 @@
 
 #include "slurm/slurm.h"
 
-#include "src/common/hostlist.h"
-#include "src/common/slurm_protocol_api.h"
-#include "src/common/slurm_protocol_defs.h"
-#include "src/common/xmalloc.h"
-#include "src/common/xstring.h"
+#include "src/common/cpu_frequency.h"
 #include "src/common/eio.h"
-#include "src/common/net.h"
 #include "src/common/fd.h"
-#include "src/common/slurm_auth.h"
 #include "src/common/forward.h"
+#include "src/common/hostlist.h"
+#include "src/common/mpi.h"
+#include "src/common/net.h"
 #include "src/common/plugstack.h"
+#include "src/common/slurm_auth.h"
 #include "src/common/slurm_cred.h"
-#include "src/common/mpi.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/slurm_protocol_defs.h"
+#include "src/common/slurm_time.h"
 #include "src/common/uid.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
 
 #include "src/api/step_launch.h"
 #include "src/api/step_ctx.h"
@@ -140,7 +142,9 @@ void slurm_step_launch_params_t_init (slurm_step_launch_params_t *ptr)
 	ptr->buffered_stdio = true;
 	memcpy(&ptr->local_fds, &fds, sizeof(fds));
 	ptr->gid = getgid();
-	ptr->cpu_freq = NO_VAL;
+	ptr->cpu_freq_min = NO_VAL;
+	ptr->cpu_freq_max = NO_VAL;
+	ptr->cpu_freq_gov = NO_VAL;
 }
 
 /*
@@ -256,9 +260,12 @@ int slurm_step_launch (slurm_step_ctx_t *ctx,
 	launch.task_epilog	= params->task_epilog;
 	launch.cpu_bind_type	= params->cpu_bind_type;
 	launch.cpu_bind		= params->cpu_bind;
-	launch.cpu_freq		= params->cpu_freq;
+	launch.cpu_freq_min	= params->cpu_freq_min;
+	launch.cpu_freq_max	= params->cpu_freq_max;
+	launch.cpu_freq_gov	= params->cpu_freq_gov;
 	launch.mem_bind_type	= params->mem_bind_type;
 	launch.mem_bind		= params->mem_bind;
+	launch.accel_bind_type	= params->accel_bind_type;
 	launch.multi_prog	= params->multi_prog ? 1 : 0;
 	launch.cpus_per_task	= params->cpus_per_task;
 	launch.task_dist	= params->task_dist;
@@ -433,9 +440,12 @@ int slurm_step_launch_add (slurm_step_ctx_t *ctx,
 	launch.task_epilog	= params->task_epilog;
 	launch.cpu_bind_type	= params->cpu_bind_type;
 	launch.cpu_bind		= params->cpu_bind;
-	launch.cpu_freq		= params->cpu_freq;
+	launch.cpu_freq_min	= params->cpu_freq_min;
+	launch.cpu_freq_max	= params->cpu_freq_max;
+	launch.cpu_freq_gov	= params->cpu_freq_gov;
 	launch.mem_bind_type	= params->mem_bind_type;
 	launch.mem_bind		= params->mem_bind;
+	launch.accel_bind_type	= params->accel_bind_type;
 	launch.multi_prog	= params->multi_prog ? 1 : 0;
 	launch.cpus_per_task	= params->cpus_per_task;
 	launch.task_dist	= params->task_dist;
@@ -776,6 +786,7 @@ void slurm_step_launch_fwd_signal(slurm_step_ctx_t *ctx, int signo)
 	slurm_msg_t_init(&req);
 	req.msg_type = REQUEST_SIGNAL_TASKS;
 	req.data     = &msg;
+	req.protocol_version = ctx->step_resp->use_protocol_ver;
 
 	debug3("sending signal %d to job %u on host %s",
 	       signo, ctx->job_id, name);
@@ -802,7 +813,7 @@ void slurm_step_launch_fwd_signal(slurm_step_ctx_t *ctx, int signo)
 		}
 	}
 	list_iterator_destroy(itr);
-	list_destroy(ret_list);
+	FREE_NULL_LIST(ret_list);
 nothing_left:
 	debug2("All tasks have been signalled");
 
@@ -1012,11 +1023,13 @@ static int _msg_thr_create(struct step_launch_state *sls, int num_nodes)
 	int i, rc = SLURM_SUCCESS;
 	pthread_attr_t attr;
 	uint16_t *ports;
+	uint16_t eio_timeout;
 
 	debug("Entering _msg_thr_create()");
 	slurm_uid = (uid_t) slurm_get_slurm_user_id();
 
-	sls->msg_handle = eio_handle_create();
+	eio_timeout = slurm_get_srun_eio_timeout();
+	sls->msg_handle = eio_handle_create(eio_timeout);
 	sls->num_resp_port = _estimate_nports(num_nodes, 48);
 	sls->resp_port = xmalloc(sizeof(uint16_t) * sls->num_resp_port);
 
@@ -1445,7 +1458,8 @@ static void
 _handle_msg(void *arg, slurm_msg_t *msg)
 {
 	struct step_launch_state *sls = (struct step_launch_state *)arg;
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 	uid_t uid = getuid();
 	srun_user_msg_t *um;
 	int rc;
@@ -1566,6 +1580,7 @@ static int _fail_step_tasks(slurm_step_ctx_t *ctx, char *node, int ret_code)
 	slurm_msg_t_init(&req);
 	req.msg_type = REQUEST_STEP_COMPLETE;
 	req.data = &msg;
+	req.protocol_version = ctx->step_resp->use_protocol_ver;
 
 	if (slurm_send_recv_controller_rc_msg(&req, &rc) < 0)
 	       return SLURM_ERROR;
@@ -1609,6 +1624,7 @@ static int _launch_tasks(slurm_step_ctx_t *ctx,
 	slurm_msg_t_init(&msg);
 	msg.msg_type = REQUEST_LAUNCH_TASKS;
 	msg.data = launch_msg;
+	msg.protocol_version = ctx->step_resp->use_protocol_ver;
 
 #ifdef HAVE_FRONT_END
 	slurm_cred_get_args(ctx->step_resp->cred, &cred_args);
@@ -1653,7 +1669,7 @@ static int _launch_tasks(slurm_step_ctx_t *ctx,
 		}
 	}
 	list_iterator_destroy(ret_itr);
-	list_destroy(ret_list);
+	FREE_NULL_LIST(ret_list);
 
 	if (tot_rc != SLURM_SUCCESS)
 		return tot_rc;
@@ -1734,7 +1750,7 @@ _exec_prog(slurm_msg_t *msg)
 	}
 	if (checkpoint) {
 		/* OpenMPI specific checkpoint support */
-		info("Checkpoint started at %s", slurm_ctime(&now));
+		info("Checkpoint started at %s", slurm_ctime2(&now));
 		for (i=0; (exec_msg->argv[i] && (i<2)); i++) {
 			argv[i] = exec_msg->argv[i];
 		}
@@ -1781,10 +1797,10 @@ fini:	if (checkpoint) {
 		now = time(NULL);
 		if (exit_code) {
 			info("Checkpoint completion code %d at %s",
-			     exit_code, slurm_ctime(&now));
+			     exit_code, slurm_ctime2(&now));
 		} else {
 			info("Checkpoint completed successfully at %s",
-			     slurm_ctime(&now));
+			     slurm_ctime2(&now));
 		}
 		if (buf[0])
 			info("Checkpoint location: %s", buf);
diff --git a/src/api/topo_info.c b/src/api/topo_info.c
index c4bc55e20..83fec843f 100644
--- a/src/api/topo_info.c
+++ b/src/api/topo_info.c
@@ -126,7 +126,21 @@ extern void slurm_print_topo_info_msg(
 }
 
 
-
+static int _print_topo_record(const char *print, const char* record,
+			       const int size, char **out_buf)
+{
+	int len = 0;
+
+	if (size <= 0)
+		return 0;
+	if (print && print[0]) {
+		char tmp_line[size];
+		snprintf(tmp_line, size, "%s=%s ", record, print);
+		len = size - strlen(tmp_line);
+		xstrcat(*out_buf, tmp_line);
+	}
+	return len;
+}
 /*
  * slurm_print_topo_record - output information about a specific Slurm topology
  *	record based upon message as loaded using slurm_load_topo
@@ -140,24 +154,30 @@ extern void slurm_print_topo_record(FILE * out, topo_info_t *topo_ptr,
 				    int one_liner)
 {
 	char tmp_line[512];
+	char *buf;
 	char *out_buf = NULL;
+	int max_len = 0, len;
+
+	buf = getenv("SLURM_TOPO_LEN");
+	if (buf)
+		max_len = atoi(buf);
+	if (max_len <= 0)
+		max_len = 512;
+
+	if (max_len < sizeof(tmp_line))
+		len = max_len;
+	else
+		len =  sizeof(tmp_line);
 
 	/****** Line 1 ******/
-	snprintf(tmp_line, sizeof(tmp_line),
+	snprintf(tmp_line, len,
 		"SwitchName=%s Level=%u LinkSpeed=%u ",
 		topo_ptr->name, topo_ptr->level, topo_ptr->link_speed);
 	xstrcat(out_buf, tmp_line);
+	len = max_len - strlen(tmp_line);
 
-	if (topo_ptr->nodes && topo_ptr->nodes[0]) {
-		snprintf(tmp_line, sizeof(tmp_line),
-			 "Nodes=%s ", topo_ptr->nodes);
-		xstrcat(out_buf, tmp_line);
-	}
-	if (topo_ptr->switches && topo_ptr->switches[0]) {
-		snprintf(tmp_line, sizeof(tmp_line),
-			 "Switches=%s ", topo_ptr->switches);
-		xstrcat(out_buf, tmp_line);
-	}
+	len = _print_topo_record(topo_ptr->nodes, "Nodes", len, &out_buf);
+	(void)_print_topo_record(topo_ptr->switches, "Switches", len, &out_buf);
 
 	xstrcat(out_buf, "\n");
 	fprintf(out, "%s", out_buf);
diff --git a/src/api/update_config.c b/src/api/update_config.c
index 4877729f4..be0f78d7a 100644
--- a/src/api/update_config.c
+++ b/src/api/update_config.c
@@ -129,6 +129,17 @@ slurm_update_node ( update_node_msg_t * node_msg)
 {
 	return _slurm_update ((void *) node_msg, REQUEST_UPDATE_NODE);
 }
+/*
+ * slurm_update_layout - issue RPC to a layout's configuration per request,
+ *	only usable by user root
+ * IN layout_msg - command line (same format as conf)
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+int
+slurm_update_layout ( update_layout_msg_t * layout_msg)
+{
+	return _slurm_update ((void *) layout_msg, REQUEST_UPDATE_LAYOUT);
+}
 
 /*
  * slurm_create_partition - create a new partition, only usable by user root
@@ -165,6 +176,17 @@ slurm_delete_partition ( delete_part_msg_t * part_msg )
 	return _slurm_update ((void *) part_msg, REQUEST_DELETE_PARTITION);
 }
 
+/*
+ * slurm_update_powercap - issue RPC to update powercapping cap 
+ * IN powercap_msg - description of powercapping updates
+ * RET 0 on success, otherwise return -1 and set errno to indicate the error
+ */
+int
+slurm_update_powercap ( update_powercap_msg_t * powercap_msg )
+{
+	return _slurm_update ((void *) powercap_msg, REQUEST_UPDATE_POWERCAP);
+}
+
 /*
  * slurm_create_reservation - create a new reservation, only usable by user root
  * IN resv_msg - description of reservation
diff --git a/src/common/Makefile.am b/src/common/Makefile.am
index 3d49b5a29..353ec5263 100644
--- a/src/common/Makefile.am
+++ b/src/common/Makefile.am
@@ -39,6 +39,7 @@ libcommon_la_SOURCES = 			\
 	xsignal.c xsignal.h		\
 	strnatcmp.c strnatcmp.h		\
 	forward.c forward.h     	\
+	msg_aggr.c msg_aggr.h     	\
 	strlcpy.c strlcpy.h		\
 	list.c list.h 			\
 	xtree.c xtree.h			\
@@ -55,6 +56,7 @@ libcommon_la_SOURCES = 			\
 	parse_spec.c parse_spec.h	\
 	plugin.c plugin.h		\
 	plugrack.c plugrack.h		\
+	power.c power.h			\
 	print_fields.c print_fields.h	\
 	read_config.c read_config.h	\
 	node_select.c node_select.h	\
@@ -95,7 +97,8 @@ libcommon_la_SOURCES = 			\
 	slurm_acct_gather_infiniband.c slurm_acct_gather_infiniband.h \
 	slurm_acct_gather_filesystem.c slurm_acct_gather_filesystem.h \
 	slurm_jobcomp.c slurm_jobcomp.h	\
-	slurm_route.c slurm_route.h \
+	slurm_route.c slurm_route.h	\
+	slurm_time.c slurm_time.h	\
 	slurm_topology.c slurm_topology.h \
 	switch.c switch.h		\
 	arg_desc.c arg_desc.h		\
@@ -126,7 +129,9 @@ libcommon_la_SOURCES = 			\
 	entity.h entity.c		\
 	layout.h layout.c		\
 	layouts_mgr.h layouts_mgr.c	\
-	xcgroup_read_config.c xcgroup_read_config.h
+	mapping.c mapping.h		\
+	xcgroup_read_config.c xcgroup_read_config.h \
+	callerid.c callerid.h
 
 EXTRA_libcommon_la_SOURCES = 		\
 	$(extra_unsetenv_src)		\
diff --git a/src/common/Makefile.in b/src/common/Makefile.in
index 028a107e2..db471a01c 100644
--- a/src/common/Makefile.in
+++ b/src/common/Makefile.in
@@ -111,6 +111,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -119,10 +120,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -135,7 +138,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -148,12 +151,13 @@ libcommon_la_DEPENDENCIES = $(am__DEPENDENCIES_1)
 am__libcommon_la_SOURCES_DIST = cpu_frequency.c cpu_frequency.h \
 	assoc_mgr.c assoc_mgr.h xmalloc.c xmalloc.h xassert.c \
 	xassert.h xstring.c xstring.h xsignal.c xsignal.h strnatcmp.c \
-	strnatcmp.h forward.c forward.h strlcpy.c strlcpy.h list.c \
-	list.h xtree.c xtree.h xhash.c xhash.h net.c net.h log.c log.h \
-	cbuf.c cbuf.h safeopen.c safeopen.h bitstring.c bitstring.h \
-	mpi.c mpi.h pack.c pack.h parse_config.c parse_config.h \
-	parse_value.c parse_value.h parse_spec.c parse_spec.h plugin.c \
-	plugin.h plugrack.c plugrack.h print_fields.c print_fields.h \
+	strnatcmp.h forward.c forward.h msg_aggr.c msg_aggr.h \
+	strlcpy.c strlcpy.h list.c list.h xtree.c xtree.h xhash.c \
+	xhash.h net.c net.h log.c log.h cbuf.c cbuf.h safeopen.c \
+	safeopen.h bitstring.c bitstring.h mpi.c mpi.h pack.c pack.h \
+	parse_config.c parse_config.h parse_value.c parse_value.h \
+	parse_spec.c parse_spec.h plugin.c plugin.h plugrack.c \
+	plugrack.h power.c power.h print_fields.c print_fields.h \
 	read_config.c read_config.h node_select.c node_select.h env.c \
 	env.h fd.c fd.h slurm_cred.h slurm_cred.c slurm_errno.c \
 	slurm_ext_sensors.c slurm_ext_sensors.h slurm_priority.c \
@@ -175,45 +179,47 @@ am__libcommon_la_SOURCES_DIST = cpu_frequency.c cpu_frequency.h \
 	slurm_acct_gather_profile.h slurm_acct_gather_infiniband.c \
 	slurm_acct_gather_infiniband.h slurm_acct_gather_filesystem.c \
 	slurm_acct_gather_filesystem.h slurm_jobcomp.c slurm_jobcomp.h \
-	slurm_route.c slurm_route.h slurm_topology.c slurm_topology.h \
-	switch.c switch.h arg_desc.c arg_desc.h macros.h malloc.c \
-	malloc.h getopt.h getopt.c getopt1.c unsetenv.c unsetenv.h \
-	slurm_selecttype_info.c slurm_selecttype_info.h \
-	slurm_resource_info.c slurm_resource_info.h hostlist.c \
-	hostlist.h slurm_step_layout.c slurm_step_layout.h \
-	checkpoint.c checkpoint.h job_resources.c job_resources.h \
-	parse_time.c parse_time.h job_options.c job_options.h \
-	global_defaults.c timers.c timers.h slurm_xlator.h stepd_api.c \
-	stepd_api.h write_labelled_message.c write_labelled_message.h \
-	proc_args.c proc_args.h slurm_strcasestr.c slurm_strcasestr.h \
-	node_conf.h node_conf.c gres.h gres.c entity.h entity.c \
-	layout.h layout.c layouts_mgr.h layouts_mgr.c \
-	xcgroup_read_config.c xcgroup_read_config.h
+	slurm_route.c slurm_route.h slurm_time.c slurm_time.h \
+	slurm_topology.c slurm_topology.h switch.c switch.h arg_desc.c \
+	arg_desc.h macros.h malloc.c malloc.h getopt.h getopt.c \
+	getopt1.c unsetenv.c unsetenv.h slurm_selecttype_info.c \
+	slurm_selecttype_info.h slurm_resource_info.c \
+	slurm_resource_info.h hostlist.c hostlist.h \
+	slurm_step_layout.c slurm_step_layout.h checkpoint.c \
+	checkpoint.h job_resources.c job_resources.h parse_time.c \
+	parse_time.h job_options.c job_options.h global_defaults.c \
+	timers.c timers.h slurm_xlator.h stepd_api.c stepd_api.h \
+	write_labelled_message.c write_labelled_message.h proc_args.c \
+	proc_args.h slurm_strcasestr.c slurm_strcasestr.h node_conf.h \
+	node_conf.c gres.h gres.c entity.h entity.c layout.h layout.c \
+	layouts_mgr.h layouts_mgr.c mapping.c mapping.h \
+	xcgroup_read_config.c xcgroup_read_config.h callerid.c \
+	callerid.h
 @HAVE_UNSETENV_FALSE@am__objects_1 = unsetenv.lo
 am_libcommon_la_OBJECTS = cpu_frequency.lo assoc_mgr.lo xmalloc.lo \
 	xassert.lo xstring.lo xsignal.lo strnatcmp.lo forward.lo \
-	strlcpy.lo list.lo xtree.lo xhash.lo net.lo log.lo cbuf.lo \
-	safeopen.lo bitstring.lo mpi.lo pack.lo parse_config.lo \
-	parse_value.lo parse_spec.lo plugin.lo plugrack.lo \
-	print_fields.lo read_config.lo node_select.lo env.lo fd.lo \
-	slurm_cred.lo slurm_errno.lo slurm_ext_sensors.lo \
-	slurm_priority.lo slurm_protocol_api.lo slurm_protocol_pack.lo \
-	slurm_protocol_util.lo slurm_protocol_socket_implementation.lo \
-	slurm_protocol_defs.lo slurm_rlimits_info.lo slurmdb_defs.lo \
-	slurmdb_pack.lo slurmdbd_defs.lo working_cluster.lo uid.lo \
-	util-net.lo slurm_auth.lo slurm_acct_gather.lo \
-	slurm_accounting_storage.lo slurm_jobacct_gather.lo \
-	slurm_acct_gather_energy.lo slurm_acct_gather_profile.lo \
-	slurm_acct_gather_infiniband.lo \
+	msg_aggr.lo strlcpy.lo list.lo xtree.lo xhash.lo net.lo log.lo \
+	cbuf.lo safeopen.lo bitstring.lo mpi.lo pack.lo \
+	parse_config.lo parse_value.lo parse_spec.lo plugin.lo \
+	plugrack.lo power.lo print_fields.lo read_config.lo \
+	node_select.lo env.lo fd.lo slurm_cred.lo slurm_errno.lo \
+	slurm_ext_sensors.lo slurm_priority.lo slurm_protocol_api.lo \
+	slurm_protocol_pack.lo slurm_protocol_util.lo \
+	slurm_protocol_socket_implementation.lo slurm_protocol_defs.lo \
+	slurm_rlimits_info.lo slurmdb_defs.lo slurmdb_pack.lo \
+	slurmdbd_defs.lo working_cluster.lo uid.lo util-net.lo \
+	slurm_auth.lo slurm_acct_gather.lo slurm_accounting_storage.lo \
+	slurm_jobacct_gather.lo slurm_acct_gather_energy.lo \
+	slurm_acct_gather_profile.lo slurm_acct_gather_infiniband.lo \
 	slurm_acct_gather_filesystem.lo slurm_jobcomp.lo \
-	slurm_route.lo slurm_topology.lo switch.lo arg_desc.lo \
-	malloc.lo getopt.lo getopt1.lo $(am__objects_1) \
+	slurm_route.lo slurm_time.lo slurm_topology.lo switch.lo \
+	arg_desc.lo malloc.lo getopt.lo getopt1.lo $(am__objects_1) \
 	slurm_selecttype_info.lo slurm_resource_info.lo hostlist.lo \
 	slurm_step_layout.lo checkpoint.lo job_resources.lo \
 	parse_time.lo job_options.lo global_defaults.lo timers.lo \
 	stepd_api.lo write_labelled_message.lo proc_args.lo \
 	slurm_strcasestr.lo node_conf.lo gres.lo entity.lo layout.lo \
-	layouts_mgr.lo xcgroup_read_config.lo
+	layouts_mgr.lo mapping.lo xcgroup_read_config.lo callerid.lo
 am__EXTRA_libcommon_la_SOURCES_DIST = unsetenv.c unsetenv.h \
 	uthash/LICENSE uthash/README uthash/uthash.h
 libcommon_la_OBJECTS = $(am_libcommon_la_OBJECTS)
@@ -352,6 +358,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -402,8 +410,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -422,6 +434,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -465,6 +480,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -488,6 +504,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -570,6 +587,7 @@ libcommon_la_SOURCES = \
 	xsignal.c xsignal.h		\
 	strnatcmp.c strnatcmp.h		\
 	forward.c forward.h     	\
+	msg_aggr.c msg_aggr.h     	\
 	strlcpy.c strlcpy.h		\
 	list.c list.h 			\
 	xtree.c xtree.h			\
@@ -586,6 +604,7 @@ libcommon_la_SOURCES = \
 	parse_spec.c parse_spec.h	\
 	plugin.c plugin.h		\
 	plugrack.c plugrack.h		\
+	power.c power.h			\
 	print_fields.c print_fields.h	\
 	read_config.c read_config.h	\
 	node_select.c node_select.h	\
@@ -626,7 +645,8 @@ libcommon_la_SOURCES = \
 	slurm_acct_gather_infiniband.c slurm_acct_gather_infiniband.h \
 	slurm_acct_gather_filesystem.c slurm_acct_gather_filesystem.h \
 	slurm_jobcomp.c slurm_jobcomp.h	\
-	slurm_route.c slurm_route.h \
+	slurm_route.c slurm_route.h	\
+	slurm_time.c slurm_time.h	\
 	slurm_topology.c slurm_topology.h \
 	switch.c switch.h		\
 	arg_desc.c arg_desc.h		\
@@ -657,7 +677,9 @@ libcommon_la_SOURCES = \
 	entity.h entity.c		\
 	layout.h layout.c		\
 	layouts_mgr.h layouts_mgr.c	\
-	xcgroup_read_config.c xcgroup_read_config.h
+	mapping.c mapping.h		\
+	xcgroup_read_config.c xcgroup_read_config.h \
+	callerid.c callerid.h
 
 EXTRA_libcommon_la_SOURCES = \
 	$(extra_unsetenv_src)		\
@@ -767,6 +789,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/arg_desc.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/assoc_mgr.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/bitstring.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/callerid.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cbuf.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/checkpoint.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpu_frequency.Plo@am__quote@
@@ -789,7 +812,9 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/list.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/log.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/malloc.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mapping.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mpi.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/msg_aggr.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/net.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/node_conf.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/node_select.Plo@am__quote@
@@ -802,6 +827,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugin.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugrack.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/plugstack.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/power.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/print_fields.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_args.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/read_config.Plo@am__quote@
@@ -830,6 +856,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_selecttype_info.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_step_layout.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_strcasestr.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_time.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurm_topology.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmdb_defs.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmdb_pack.Plo@am__quote@
diff --git a/src/common/assoc_mgr.c b/src/common/assoc_mgr.c
index 951989b2a..e74ec3ec3 100644
--- a/src/common/assoc_mgr.c
+++ b/src/common/assoc_mgr.c
@@ -41,22 +41,26 @@
 #include <sys/types.h>
 #include <pwd.h>
 #include <fcntl.h>
+#include <stdlib.h>
 
 #include "src/common/uid.h"
 #include "src/common/xstring.h"
 #include "src/common/slurm_priority.h"
 #include "src/slurmdbd/read_config.h"
 
-#define ASSOC_USAGE_VERSION 1
-
 #define ASSOC_HASH_SIZE 1000
 #define ASSOC_HASH_ID_INX(_assoc_id)	(_assoc_id % ASSOC_HASH_SIZE)
 
-slurmdb_association_rec_t *assoc_mgr_root_assoc = NULL;
+slurmdb_assoc_rec_t *assoc_mgr_root_assoc = NULL;
 uint32_t g_qos_max_priority = 0;
 uint32_t g_qos_count = 0;
 uint32_t g_user_assoc_count = 0;
-List assoc_mgr_association_list = NULL;
+uint32_t g_tres_count = 0;
+
+List assoc_mgr_tres_list = NULL;
+slurmdb_tres_rec_t **assoc_mgr_tres_array = NULL;
+char **assoc_mgr_tres_name_array = NULL;
+List assoc_mgr_assoc_list = NULL;
 List assoc_mgr_res_list = NULL;
 List assoc_mgr_qos_list = NULL;
 List assoc_mgr_user_list = NULL;
@@ -66,8 +70,8 @@ static char *assoc_mgr_cluster_name = NULL;
 static int setup_children = 0;
 static assoc_mgr_lock_flags_t assoc_mgr_locks;
 static assoc_init_args_t init_setup;
-static slurmdb_association_rec_t **assoc_hash_id = NULL;
-static slurmdb_association_rec_t **assoc_hash = NULL;
+static slurmdb_assoc_rec_t **assoc_hash_id = NULL;
+static slurmdb_assoc_rec_t **assoc_hash = NULL;
 
 static pthread_mutex_t locks_mutex = PTHREAD_MUTEX_INITIALIZER;
 static pthread_cond_t locks_cond = PTHREAD_COND_INITIALIZER;
@@ -85,7 +89,7 @@ static int _get_str_inx(char *name)
 	return index;
 }
 
-static int _assoc_hash_index(slurmdb_association_rec_t *assoc)
+static int _assoc_hash_index(slurmdb_assoc_rec_t *assoc)
 {
 	int index;
 
@@ -115,16 +119,16 @@ static int _assoc_hash_index(slurmdb_association_rec_t *assoc)
 
 }
 
-static void _add_assoc_hash(slurmdb_association_rec_t *assoc)
+static void _add_assoc_hash(slurmdb_assoc_rec_t *assoc)
 {
 	int inx = ASSOC_HASH_ID_INX(assoc->id);
 
 	if (!assoc_hash_id)
 		assoc_hash_id = xmalloc(ASSOC_HASH_SIZE *
-					sizeof(slurmdb_association_rec_t *));
+				     sizeof(slurmdb_assoc_rec_t *));
 	if (!assoc_hash)
 		assoc_hash = xmalloc(ASSOC_HASH_SIZE *
-				     sizeof(slurmdb_association_rec_t *));
+				     sizeof(slurmdb_assoc_rec_t *));
 
 	assoc->assoc_next_id = assoc_hash_id[inx];
 	assoc_hash_id[inx] = assoc;
@@ -134,10 +138,10 @@ static void _add_assoc_hash(slurmdb_association_rec_t *assoc)
 	assoc_hash[inx] = assoc;
 }
 
-static bool _remove_from_assoc_list(slurmdb_association_rec_t *assoc)
+static bool _remove_from_assoc_list(slurmdb_assoc_rec_t *assoc)
 {
-	slurmdb_association_rec_t *assoc_ptr;
-	ListIterator itr = list_iterator_create(assoc_mgr_association_list);
+	slurmdb_assoc_rec_t *assoc_ptr;
+	ListIterator itr = list_iterator_create(assoc_mgr_assoc_list);
 
 	while ((assoc_ptr = list_next(itr))) {
 		if (assoc_ptr == assoc) {
@@ -155,9 +159,9 @@ static bool _remove_from_assoc_list(slurmdb_association_rec_t *assoc)
  * IN job_id - requested job's id
  * RET pointer to the job's record, NULL on error
  */
-static slurmdb_association_rec_t *_find_assoc_rec_id(uint32_t assoc_id)
+static slurmdb_assoc_rec_t *_find_assoc_rec_id(uint32_t assoc_id)
 {
-	slurmdb_association_rec_t *assoc;
+	slurmdb_assoc_rec_t *assoc;
 
 	if (!assoc_hash_id) {
 		debug2("_find_assoc_rec_id: no associations added yet");
@@ -181,10 +185,10 @@ static slurmdb_association_rec_t *_find_assoc_rec_id(uint32_t assoc_id)
  * IN assoc - requested association info
  * RET pointer to the assoc_ptr's record, NULL on error
  */
-static slurmdb_association_rec_t *_find_assoc_rec(
-	slurmdb_association_rec_t *assoc)
+static slurmdb_assoc_rec_t *_find_assoc_rec(
+	slurmdb_assoc_rec_t *assoc)
 {
-	slurmdb_association_rec_t *assoc_ptr;
+	slurmdb_assoc_rec_t *assoc_ptr;
 	int inx;
 
 	if (assoc->id)
@@ -264,10 +268,10 @@ static slurmdb_association_rec_t *_find_assoc_rec(
  *	assoc_count - count of assoc list entries
  *	assoc_hash - hash table into assoc records
  */
-static void _delete_assoc_hash(slurmdb_association_rec_t *assoc)
+static void _delete_assoc_hash(slurmdb_assoc_rec_t *assoc)
 {
-	slurmdb_association_rec_t *assoc_ptr = assoc;
-	slurmdb_association_rec_t **assoc_pptr;
+	slurmdb_assoc_rec_t *assoc_ptr = assoc;
+	slurmdb_assoc_rec_t **assoc_pptr;
 
 	xassert(assoc);
 
@@ -304,9 +308,9 @@ static void _delete_assoc_hash(slurmdb_association_rec_t *assoc)
 
 
 static void _normalize_assoc_shares_fair_tree(
-	slurmdb_association_rec_t *assoc)
+	slurmdb_assoc_rec_t *assoc)
 {
-	slurmdb_association_rec_t *fs_assoc = assoc;
+	slurmdb_assoc_rec_t *fs_assoc = assoc;
 	double shares_norm = 0.0;
 	if (assoc->shares_raw == SLURMDB_FS_USE_PARENT)
 		fs_assoc = assoc->usage->fs_assoc_ptr;
@@ -320,9 +324,9 @@ static void _normalize_assoc_shares_fair_tree(
 
 /* you should check for assoc == NULL before this function */
 static void _normalize_assoc_shares_traditional(
-		slurmdb_association_rec_t *assoc)
+		slurmdb_assoc_rec_t *assoc)
 {
-	slurmdb_association_rec_t *assoc2 = assoc;
+	slurmdb_assoc_rec_t *assoc2 = assoc;
 
 	if ((assoc->shares_raw == SLURMDB_FS_USE_PARENT)
 	    && assoc->usage->fs_assoc_ptr) {
@@ -362,18 +366,23 @@ static void _normalize_assoc_shares_traditional(
 }
 
 
-static int _addto_used_info(slurmdb_association_rec_t *assoc1,
-			    slurmdb_association_rec_t *assoc2)
+static int _addto_used_info(slurmdb_assoc_rec_t *assoc1,
+			    slurmdb_assoc_rec_t *assoc2)
 {
+	int i;
+
 	if (!assoc1 || !assoc2)
 		return SLURM_ERROR;
 
-	assoc1->usage->grp_used_cpus += assoc2->usage->grp_used_cpus;
-	assoc1->usage->grp_used_mem += assoc2->usage->grp_used_mem;
-	assoc1->usage->grp_used_nodes += assoc2->usage->grp_used_nodes;
+	for (i=0; i < assoc1->usage->tres_cnt; i++) {
+		assoc1->usage->grp_used_tres[i] +=
+			assoc2->usage->grp_used_tres[i];
+		assoc1->usage->grp_used_tres_run_secs[i] +=
+			assoc2->usage->grp_used_tres_run_secs[i];
+		assoc1->usage->usage_tres_raw[i] +=
+			assoc2->usage->usage_tres_raw[i];
+	}
 	assoc1->usage->grp_used_wall += assoc2->usage->grp_used_wall;
-	assoc1->usage->grp_used_cpu_run_secs +=
-		assoc2->usage->grp_used_cpu_run_secs;
 
 	assoc1->usage->used_jobs += assoc2->usage->used_jobs;
 	assoc1->usage->used_submit_jobs += assoc2->usage->used_submit_jobs;
@@ -382,15 +391,17 @@ static int _addto_used_info(slurmdb_association_rec_t *assoc1,
 	return SLURM_SUCCESS;
 }
 
-static int _clear_used_assoc_info(slurmdb_association_rec_t *assoc)
+static int _clear_used_assoc_info(slurmdb_assoc_rec_t *assoc)
 {
+	int i;
+
 	if (!assoc || !assoc->usage)
 		return SLURM_ERROR;
 
-	assoc->usage->grp_used_cpus = 0;
-	assoc->usage->grp_used_mem = 0;
-	assoc->usage->grp_used_nodes = 0;
-	assoc->usage->grp_used_cpu_run_secs = 0;
+	for (i=0; i<assoc->usage->tres_cnt; i++) {
+		assoc->usage->grp_used_tres[i] = 0;
+		assoc->usage->grp_used_tres_run_secs[i] = 0;
+	}
 
 	assoc->usage->used_jobs  = 0;
 	assoc->usage->used_submit_jobs = 0;
@@ -406,6 +417,7 @@ static void _clear_qos_user_limit_info(slurmdb_qos_rec_t *qos_ptr)
 {
 	slurmdb_used_limits_t *used_limits = NULL;
 	ListIterator itr = NULL;
+	int i;
 
 	if (!qos_ptr->usage->user_limit_list
 	    || !list_count(qos_ptr->usage->user_limit_list))
@@ -413,16 +425,12 @@ static void _clear_qos_user_limit_info(slurmdb_qos_rec_t *qos_ptr)
 
 	itr = list_iterator_create(qos_ptr->usage->user_limit_list);
 	while ((used_limits = list_next(itr))) {
-		used_limits->cpu_run_mins = 0; /* Currently isn't used
-						  in the code but put
-						  here for future
-						  reference when/if it
-						  is.
-					       */
-		used_limits->cpus = 0;
 		used_limits->jobs = 0;
-		used_limits->nodes = 0;
 		used_limits->submit_jobs = 0;
+		for (i=0; i<qos_ptr->usage->tres_cnt; i++) {
+			used_limits->tres[i] = 0;
+			used_limits->tres_run_mins[i] = 0;
+		}
 	}
 	list_iterator_destroy(itr);
 
@@ -431,16 +439,17 @@ static void _clear_qos_user_limit_info(slurmdb_qos_rec_t *qos_ptr)
 
 static int _clear_used_qos_info(slurmdb_qos_rec_t *qos)
 {
+	int i;
+
 	if (!qos || !qos->usage)
 		return SLURM_ERROR;
 
-	qos->usage->grp_used_cpus = 0;
-	qos->usage->grp_used_mem = 0;
-	qos->usage->grp_used_nodes = 0;
-	qos->usage->grp_used_cpu_run_secs = 0;
-
 	qos->usage->grp_used_jobs  = 0;
 	qos->usage->grp_used_submit_jobs = 0;
+	for (i=0; i<qos->usage->tres_cnt; i++) {
+		qos->usage->grp_used_tres[i] = 0;
+		qos->usage->grp_used_tres_run_secs[i] = 0;
+	}
 	/* do not reset usage_raw or grp_used_wall.
 	 * if you need to reset it do it
 	 * else where since sometimes we call this and do not want
@@ -456,7 +465,7 @@ static int _change_user_name(slurmdb_user_rec_t *user)
 {
 	int rc = SLURM_SUCCESS;
 	ListIterator itr = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	slurmdb_wckey_rec_t *wckey = NULL;
 	uid_t pw_uid;
 
@@ -470,8 +479,8 @@ static int _change_user_name(slurmdb_user_rec_t *user)
 	} else
 		user->uid = pw_uid;
 
-	if (assoc_mgr_association_list) {
-		itr = list_iterator_create(assoc_mgr_association_list);
+	if (assoc_mgr_assoc_list) {
+		itr = list_iterator_create(assoc_mgr_assoc_list);
 		while ((assoc = list_next(itr))) {
 			if (!assoc->user)
 				continue;
@@ -509,9 +518,9 @@ static int _change_user_name(slurmdb_user_rec_t *user)
 	return rc;
 }
 
-static int _grab_parents_qos(slurmdb_association_rec_t *assoc)
+static int _grab_parents_qos(slurmdb_assoc_rec_t *assoc)
 {
-	slurmdb_association_rec_t *parent_assoc = NULL;
+	slurmdb_assoc_rec_t *parent_assoc = NULL;
 	char *qos_char = NULL;
 	ListIterator itr = NULL;
 
@@ -537,7 +546,7 @@ static int _grab_parents_qos(slurmdb_association_rec_t *assoc)
 	return SLURM_SUCCESS;
 }
 
-static int _local_update_assoc_qos_list(slurmdb_association_rec_t *assoc,
+static int _local_update_assoc_qos_list(slurmdb_assoc_rec_t *assoc,
 					List new_qos_list)
 {
 	ListIterator new_qos_itr = NULL, curr_qos_itr = NULL;
@@ -600,7 +609,7 @@ static int _local_update_assoc_qos_list(slurmdb_association_rec_t *assoc,
 }
 
 /* locks should be put in place before calling this function USER_WRITE */
-static void _set_user_default_acct(slurmdb_association_rec_t *assoc)
+static void _set_user_default_acct(slurmdb_assoc_rec_t *assoc)
 {
 	xassert(assoc);
 	xassert(assoc->acct);
@@ -657,10 +666,10 @@ static void _set_user_default_wckey(slurmdb_wckey_rec_t *wckey)
 
 /* Return first parent that is not SLURMDB_FS_USE_PARENT unless
  * direct is set */
-static slurmdb_association_rec_t* _find_assoc_parent(
-	slurmdb_association_rec_t *assoc, bool direct)
+static slurmdb_assoc_rec_t* _find_assoc_parent(
+	slurmdb_assoc_rec_t *assoc, bool direct)
 {
-	slurmdb_association_rec_t *parent = NULL, *prev_parent;
+	slurmdb_assoc_rec_t *parent = NULL, *prev_parent;
 	xassert(assoc);
 
 	parent = assoc;
@@ -698,18 +707,18 @@ static slurmdb_association_rec_t* _find_assoc_parent(
 
 /* locks should be put in place before calling this function
  * ASSOC_WRITE, USER_WRITE */
-static int _set_assoc_parent_and_user(slurmdb_association_rec_t *assoc,
+static int _set_assoc_parent_and_user(slurmdb_assoc_rec_t *assoc,
 				      int reset)
 {
 	xassert(assoc_mgr_user_list);
 
-	if (!assoc || !assoc_mgr_association_list) {
+	if (!assoc || !assoc_mgr_assoc_list) {
 		error("you didn't give me an association");
 		return SLURM_ERROR;
 	}
 
 	if (!assoc->usage)
-		assoc->usage = create_assoc_mgr_association_usage();
+		assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 
 	if (assoc->parent_id) {
 		/* Here we need the direct parent (parent_assoc_ptr)
@@ -736,7 +745,8 @@ static int _set_assoc_parent_and_user(slurmdb_association_rec_t *assoc,
 		if (assoc->usage->fs_assoc_ptr && setup_children) {
 			if (!assoc->usage->fs_assoc_ptr->usage)
 				assoc->usage->fs_assoc_ptr->usage =
-					create_assoc_mgr_association_usage();
+					slurmdb_create_assoc_usage(
+						g_tres_count);
 			if (!assoc->usage->
 			    fs_assoc_ptr->usage->children_list)
 				assoc->usage->
@@ -754,8 +764,8 @@ static int _set_assoc_parent_and_user(slurmdb_association_rec_t *assoc,
 			      "itself as it's parent",
 			      assoc->id);
 		}
-	} else {
-		slurmdb_association_rec_t *last_root = assoc_mgr_root_assoc;
+	} else if (assoc_mgr_root_assoc != assoc) {
+		slurmdb_assoc_rec_t *last_root = assoc_mgr_root_assoc;
 
 		assoc_mgr_root_assoc = assoc;
 		/* set up new root since if running off cache the
@@ -765,6 +775,9 @@ static int _set_assoc_parent_and_user(slurmdb_association_rec_t *assoc,
 				last_root->usage->usage_raw;
 			assoc_mgr_root_assoc->usage->usage_norm =
 				last_root->usage->usage_norm;
+			memcpy(assoc_mgr_root_assoc->usage->usage_tres_raw,
+			       last_root->usage->usage_tres_raw,
+			       sizeof(long double) * g_tres_count);
 		}
 	}
 
@@ -824,16 +837,16 @@ static void _set_qos_norm_priority(slurmdb_qos_rec_t *qos)
 		return;
 
 	if (!qos->usage)
-		qos->usage = create_assoc_mgr_qos_usage();
+		qos->usage = slurmdb_create_qos_usage(g_tres_count);
 	qos->usage->norm_priority =
 		(double)qos->priority / (double)g_qos_max_priority;
 }
 
-static uint32_t _get_children_level_shares(slurmdb_association_rec_t *assoc)
+static uint32_t _get_children_level_shares(slurmdb_assoc_rec_t *assoc)
 {
 	List children = assoc->usage->children_list;
 	ListIterator itr = NULL;
-	slurmdb_association_rec_t *child;
+	slurmdb_assoc_rec_t *child;
 	uint32_t sum = 0;
 
 	if (!children || list_is_empty(children))
@@ -852,12 +865,12 @@ static uint32_t _get_children_level_shares(slurmdb_association_rec_t *assoc)
 }
 
 
-static void _set_children_level_shares(slurmdb_association_rec_t *assoc,
+static void _set_children_level_shares(slurmdb_assoc_rec_t *assoc,
 				       uint32_t level_shares)
 {
 	List children = assoc->usage->children_list;
 	ListIterator itr = NULL;
-	slurmdb_association_rec_t *child;
+	slurmdb_assoc_rec_t *child;
 
 	if (!children || list_is_empty(children))
 		return;
@@ -871,30 +884,30 @@ static void _set_children_level_shares(slurmdb_association_rec_t *assoc,
 	list_iterator_destroy(itr);
 }
 
-
 /* transfer slurmdb assoc list to be assoc_mgr assoc list */
 /* locks should be put in place before calling this function
  * ASSOC_WRITE, USER_WRITE */
-static int _post_association_list(void)
+static int _post_assoc_list(void)
 {
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	ListIterator itr = NULL;
 	int reset = 1;
 	//DEF_TIMERS;
 
-	if (!assoc_mgr_association_list)
+	if (!assoc_mgr_assoc_list)
 		return SLURM_ERROR;
 
 	xfree(assoc_hash_id);
 	xfree(assoc_hash);
 
-	itr = list_iterator_create(assoc_mgr_association_list);
+	itr = list_iterator_create(assoc_mgr_assoc_list);
 
 	//START_TIMER;
 	g_user_assoc_count = 0;
 	while ((assoc = list_next(itr))) {
 		_set_assoc_parent_and_user(assoc, reset);
 		_add_assoc_hash(assoc);
+		assoc_mgr_set_assoc_tres_cnt(assoc);
 		reset = 0;
 	}
 
@@ -917,7 +930,7 @@ static int _post_association_list(void)
 	}
 	list_iterator_destroy(itr);
 
-	slurmdb_sort_hierarchical_assoc_list(assoc_mgr_association_list);
+	slurmdb_sort_hierarchical_assoc_list(assoc_mgr_assoc_list);
 
 	//END_TIMER2("load_associations");
 	return SLURM_SUCCESS;
@@ -985,13 +998,15 @@ static int _post_qos_list(List qos_list)
 			qos->flags = 0;
 
 		if (!qos->usage)
-			qos->usage = create_assoc_mgr_qos_usage();
+			qos->usage = slurmdb_create_qos_usage(g_tres_count);
 		/* get the highest qos value to create bitmaps from */
 		if (qos->id > g_qos_count)
 			g_qos_count = qos->id;
 
 		if (qos->priority > g_qos_max_priority)
 			g_qos_max_priority = qos->priority;
+
+		assoc_mgr_set_qos_tres_cnt(qos);
 	}
 	/* Since in the database id's don't start at 1
 	   instead of 0 we need to ignore the 0 bit and start
@@ -1051,44 +1066,254 @@ static int _post_res_list(List res_list)
 	return SLURM_SUCCESS;
 }
 
-static int _get_assoc_mgr_association_list(void *db_conn, int enforce)
+/* tres write lock should be locked before calling this return 1 if
+ * callback is needed */
+static int _post_tres_list(List new_list, int new_cnt)
+{
+	ListIterator itr;
+	slurmdb_tres_rec_t *tres_rec, **new_array;
+	char **new_name_array;
+	bool changed_size = false, changed_pos = false;
+	int i, new_size, new_name_size, max_cnt = MAX(new_cnt, g_tres_count);
+
+	xassert(new_list);
+
+	new_size = sizeof(slurmdb_tres_rec_t) * new_cnt;
+	new_array = xmalloc(new_size);
+
+	new_name_size = sizeof(char *) * new_cnt;
+	new_name_array = xmalloc(new_name_size);
+
+	list_sort(new_list, (ListCmpF)slurmdb_sort_tres_by_id_asc);
+
+	/* we don't care if it gets smaller */
+	if (new_cnt > g_tres_count)
+		changed_size = true;
+
+	/* Set up the new array to see if we need to update any other
+	   arrays with current values.
+	*/
+	i = 0;
+	itr = list_iterator_create(new_list);
+	while ((tres_rec = list_next(itr))) {
+
+		new_array[i] = tres_rec;
+
+		new_name_array[i] = xstrdup_printf(
+			"%s%s%s",
+			tres_rec->type,
+			tres_rec->name ? "/" : "",
+			tres_rec->name ? tres_rec->name : "");
+
+		/* This should only happen if a new static TRES are added. */
+		if (assoc_mgr_tres_array && (i < g_tres_count) &&
+		    (new_array[i]->id != assoc_mgr_tres_array[i]->id))
+			changed_pos = true;
+		i++;
+	}
+	list_iterator_destroy(itr);
+
+	if (changed_size || changed_pos) {
+		if (assoc_mgr_assoc_list) {
+			slurmdb_assoc_rec_t *assoc_rec;
+			uint64_t grp_tres[new_cnt], grp_tres_mins[new_cnt],
+				grp_tres_run_mins[new_cnt], max_tres[new_cnt],
+				max_tres_pn[new_cnt], max_tres_mins[new_cnt],
+				max_tres_run_mins[new_cnt];
+
+			/* update the associations and such here */
+			itr = list_iterator_create(assoc_mgr_assoc_list);
+			while ((assoc_rec = list_next(itr))) {
+				if (changed_size) {
+					xrealloc(assoc_rec->grp_tres_ctld,
+						 new_size);
+					xrealloc(assoc_rec->grp_tres_mins_ctld,
+						 new_size);
+					xrealloc(assoc_rec->
+						 grp_tres_run_mins_ctld,
+						 new_size);
+					xrealloc(assoc_rec->max_tres_ctld,
+						 new_size);
+					xrealloc(assoc_rec->max_tres_pn_ctld,
+						 new_size);
+					xrealloc(assoc_rec->max_tres_mins_ctld,
+						 new_size);
+					xrealloc(assoc_rec->
+						 grp_tres_run_mins_ctld,
+						 new_size);
+				}
+
+				if (changed_pos) {
+					int pos;
+					int array_size =
+						sizeof(uint64_t) * new_cnt;
+					memset(grp_tres, 0, array_size);
+					memset(grp_tres_mins, 0, array_size);
+					memset(grp_tres_run_mins,
+					       0, array_size);
+					memset(max_tres, 0, array_size);
+					memset(max_tres_pn, 0, array_size);
+					memset(max_tres_mins, 0, array_size);
+					memset(max_tres_run_mins,
+					       0, array_size);
+					for (i=0; i<new_cnt; i++) {
+						if (!new_array[i])
+							break;
+
+						pos = slurmdb_get_new_tres_pos(
+							new_array,
+							assoc_mgr_tres_array,
+							i, max_cnt);
+
+						if (pos == NO_VAL)
+							continue;
+						grp_tres[i] = assoc_rec->
+							grp_tres_ctld[pos];
+						grp_tres_mins[i] = assoc_rec->
+							grp_tres_mins_ctld[pos];
+						grp_tres_run_mins[i] =
+							assoc_rec->
+							grp_tres_run_mins_ctld[
+								pos];
+						max_tres[i] = assoc_rec->
+							max_tres_ctld[pos];
+						max_tres_pn[i] = assoc_rec->
+							max_tres_pn_ctld[pos];
+						max_tres_mins[i] = assoc_rec->
+							max_tres_mins_ctld[pos];
+						max_tres_run_mins[i] =
+							assoc_rec->
+							max_tres_run_mins_ctld[
+								pos];
+					}
+					memcpy(assoc_rec->grp_tres_ctld,
+					       grp_tres, array_size);
+					memcpy(assoc_rec->grp_tres_mins_ctld,
+					       grp_tres_mins, array_size);
+					memcpy(assoc_rec->
+					       grp_tres_run_mins_ctld,
+					       grp_tres_run_mins, array_size);
+					memcpy(assoc_rec->max_tres_ctld,
+					       max_tres, array_size);
+					memcpy(assoc_rec->max_tres_pn_ctld,
+					       max_tres_pn, array_size);
+					memcpy(assoc_rec->max_tres_mins_ctld,
+					       max_tres_mins, array_size);
+					memcpy(assoc_rec->
+					       max_tres_run_mins_ctld,
+					       max_tres_run_mins, array_size);
+				}
+			}
+			list_iterator_destroy(itr);
+		}
+
+	}
+	xfree(assoc_mgr_tres_array);
+	assoc_mgr_tres_array = new_array;
+	new_array = NULL;
+
+	if (assoc_mgr_tres_name_array) {
+		for (i=0; i<g_tres_count; i++)
+			xfree(assoc_mgr_tres_name_array[i]);
+		xfree(assoc_mgr_tres_name_array);
+	}
+	assoc_mgr_tres_name_array = new_name_array;
+	new_name_array = NULL;
+
+	FREE_NULL_LIST(assoc_mgr_tres_list);
+	assoc_mgr_tres_list = new_list;
+	new_list = NULL;
+
+	g_tres_count = new_cnt;
+
+	return (changed_size || changed_pos) ? 1 : 0;
+}
+
+static int _get_assoc_mgr_tres_list(void *db_conn, int enforce)
 {
-	slurmdb_association_cond_t assoc_q;
+	slurmdb_tres_cond_t tres_q;
 	uid_t uid = getuid();
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
+	List new_list = NULL;
+	char *tres_req_str;
+	int changed;
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   WRITE_LOCK, NO_LOCK, NO_LOCK };
+
+	memset(&tres_q, 0, sizeof(slurmdb_tres_cond_t));
+
+	assoc_mgr_lock(&locks);
+
+	/* If this exists we only want/care about tracking/caching these TRES */
+	if ((tres_req_str = slurm_get_accounting_storage_tres())) {
+		tres_q.type_list = list_create(slurm_destroy_char);
+		slurm_addto_char_list(tres_q.type_list, tres_req_str);
+		xfree(tres_req_str);
+	}
+	new_list = acct_storage_g_get_tres(
+		db_conn, uid, &tres_q);
+
+	FREE_NULL_LIST(tres_q.type_list);
+
+	if (!new_list) {
+		assoc_mgr_unlock(&locks);
+		if (enforce & ACCOUNTING_ENFORCE_ASSOCS) {
+			error("_get_assoc_mgr_tres_list: "
+			      "no list was made.");
+			return SLURM_ERROR;
+		} else {
+			return SLURM_SUCCESS;
+		}
+	}
+
+	changed = _post_tres_list(new_list, list_count(new_list));
+
+	assoc_mgr_unlock(&locks);
+
+	if (changed && init_setup.update_cluster_tres) {
+		/* update jobs here, this needs to be outside of the
+		 * assoc_mgr locks */
+		init_setup.update_cluster_tres();
+	}
+
+	return SLURM_SUCCESS;
+}
+
+static int _get_assoc_mgr_assoc_list(void *db_conn, int enforce)
+{
+	slurmdb_assoc_cond_t assoc_q;
+	uid_t uid = getuid();
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, WRITE_LOCK, NO_LOCK };
 
 //	DEF_TIMERS;
 	assoc_mgr_lock(&locks);
-	if (assoc_mgr_association_list)
-		list_destroy(assoc_mgr_association_list);
+	FREE_NULL_LIST(assoc_mgr_assoc_list);
 
-	memset(&assoc_q, 0, sizeof(slurmdb_association_cond_t));
+	memset(&assoc_q, 0, sizeof(slurmdb_assoc_cond_t));
 	if (assoc_mgr_cluster_name) {
 		assoc_q.cluster_list = list_create(NULL);
 		list_append(assoc_q.cluster_list, assoc_mgr_cluster_name);
 	} else if ((enforce & ACCOUNTING_ENFORCE_ASSOCS) && !slurmdbd_conf) {
-		error("_get_assoc_mgr_association_list: "
+		error("_get_assoc_mgr_assoc_list: "
 		      "no cluster name here going to get "
 		      "all associations.");
 	}
 
 //	START_TIMER;
-	assoc_mgr_association_list =
-		acct_storage_g_get_associations(db_conn, uid, &assoc_q);
-//	END_TIMER2("get_associations");
+	assoc_mgr_assoc_list =
+		acct_storage_g_get_assocs(db_conn, uid, &assoc_q);
+//	END_TIMER2("get_assocs");
 
-	if (assoc_q.cluster_list)
-		list_destroy(assoc_q.cluster_list);
+	FREE_NULL_LIST(assoc_q.cluster_list);
 
-	if (!assoc_mgr_association_list) {
+	if (!assoc_mgr_assoc_list) {
 		/* create list so we don't keep calling this if there
 		   isn't anything there */
-		assoc_mgr_association_list =
-			list_create(slurmdb_destroy_association_rec);
+		assoc_mgr_assoc_list =
+			list_create(slurmdb_destroy_assoc_rec);
 		assoc_mgr_unlock(&locks);
 		if (enforce & ACCOUNTING_ENFORCE_ASSOCS) {
-			error("_get_assoc_mgr_association_list: "
+			error("_get_assoc_mgr_assoc_list: "
 			      "no list was made.");
 			return SLURM_ERROR;
 		} else {
@@ -1098,7 +1323,7 @@ static int _get_assoc_mgr_association_list(void *db_conn, int enforce)
 		}
 	}
 
-	_post_association_list();
+	_post_assoc_list();
 
 	assoc_mgr_unlock(&locks);
 
@@ -1109,12 +1334,11 @@ static int _get_assoc_mgr_res_list(void *db_conn, int enforce)
 {
 	slurmdb_res_cond_t res_q;
 	uid_t uid = getuid();
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, WRITE_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 	assoc_mgr_lock(&locks);
-	if (assoc_mgr_res_list)
-		list_destroy(assoc_mgr_res_list);
+	FREE_NULL_LIST(assoc_mgr_res_list);
 
 	slurmdb_init_res_cond(&res_q, 0);
 	if (assoc_mgr_cluster_name) {
@@ -1129,8 +1353,7 @@ static int _get_assoc_mgr_res_list(void *db_conn, int enforce)
 
 	assoc_mgr_res_list = acct_storage_g_get_res(db_conn, uid, &res_q);
 
-	if (res_q.cluster_list)
-		list_destroy(res_q.cluster_list);
+	FREE_NULL_LIST(res_q.cluster_list);
 
 	if (!assoc_mgr_res_list) {
 		assoc_mgr_unlock(&locks);
@@ -1153,8 +1376,8 @@ static int _get_assoc_mgr_qos_list(void *db_conn, int enforce)
 {
 	uid_t uid = getuid();
 	List new_list = NULL;
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 	new_list = acct_storage_g_get_qos(db_conn, uid, NULL);
 
@@ -1184,15 +1407,14 @@ static int _get_assoc_mgr_user_list(void *db_conn, int enforce)
 {
 	slurmdb_user_cond_t user_q;
 	uid_t uid = getuid();
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, WRITE_LOCK, NO_LOCK };
 
 	memset(&user_q, 0, sizeof(slurmdb_user_cond_t));
 	user_q.with_coords = 1;
 
 	assoc_mgr_lock(&locks);
-	if (assoc_mgr_user_list)
-		list_destroy(assoc_mgr_user_list);
+	FREE_NULL_LIST(assoc_mgr_user_list);
 	assoc_mgr_user_list = acct_storage_g_get_users(db_conn, uid, &user_q);
 
 	if (!assoc_mgr_user_list) {
@@ -1217,13 +1439,12 @@ static int _get_assoc_mgr_wckey_list(void *db_conn, int enforce)
 {
 	slurmdb_wckey_cond_t wckey_q;
 	uid_t uid = getuid();
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, WRITE_LOCK, WRITE_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, WRITE_LOCK, WRITE_LOCK };
 
 //	DEF_TIMERS;
 	assoc_mgr_lock(&locks);
-	if (assoc_mgr_wckey_list)
-		list_destroy(assoc_mgr_wckey_list);
+	FREE_NULL_LIST(assoc_mgr_wckey_list);
 
 	memset(&wckey_q, 0, sizeof(slurmdb_wckey_cond_t));
 	if (assoc_mgr_cluster_name) {
@@ -1240,8 +1461,7 @@ static int _get_assoc_mgr_wckey_list(void *db_conn, int enforce)
 		acct_storage_g_get_wckeys(db_conn, uid, &wckey_q);
 //	END_TIMER2("get_wckeys");
 
-	if (wckey_q.cluster_list)
-		list_destroy(wckey_q.cluster_list);
+	FREE_NULL_LIST(wckey_q.cluster_list);
 
 	if (!assoc_mgr_wckey_list) {
 		/* create list so we don't keep calling this if there
@@ -1266,50 +1486,60 @@ static int _get_assoc_mgr_wckey_list(void *db_conn, int enforce)
 	return SLURM_SUCCESS;
 }
 
-static int _refresh_assoc_mgr_association_list(void *db_conn, int enforce)
+/* This only gets a new list if available dropping the old one if
+ * needed
+ */
+static int _refresh_assoc_mgr_tres_list(void *db_conn, int enforce)
+{
+	/* this function does both get and refresh */
+	_get_assoc_mgr_tres_list(db_conn, enforce);
+
+	return SLURM_SUCCESS;
+}
+
+static int _refresh_assoc_mgr_assoc_list(void *db_conn, int enforce)
 {
-	slurmdb_association_cond_t assoc_q;
+	slurmdb_assoc_cond_t assoc_q;
 	List current_assocs = NULL;
 	uid_t uid = getuid();
 	ListIterator curr_itr = NULL;
 	ListIterator assoc_mgr_itr = NULL;
-	slurmdb_association_rec_t *curr_assoc = NULL, *assoc = NULL;
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
+	slurmdb_assoc_rec_t *curr_assoc = NULL, *assoc = NULL;
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, WRITE_LOCK, NO_LOCK };
 //	DEF_TIMERS;
 
-	memset(&assoc_q, 0, sizeof(slurmdb_association_cond_t));
+	memset(&assoc_q, 0, sizeof(slurmdb_assoc_cond_t));
 	if (assoc_mgr_cluster_name) {
 		assoc_q.cluster_list = list_create(NULL);
 		list_append(assoc_q.cluster_list, assoc_mgr_cluster_name);
 	} else if ((enforce & ACCOUNTING_ENFORCE_ASSOCS) && !slurmdbd_conf) {
-		error("_refresh_assoc_mgr_association_list: "
+		error("_refresh_assoc_mgr_assoc_list: "
 		      "no cluster name here going to get "
 		      "all associations.");
 	}
 
 	assoc_mgr_lock(&locks);
 
-	current_assocs = assoc_mgr_association_list;
+	current_assocs = assoc_mgr_assoc_list;
 
 //	START_TIMER;
-	assoc_mgr_association_list =
-		acct_storage_g_get_associations(db_conn, uid, &assoc_q);
-//	END_TIMER2("get_associations");
+	assoc_mgr_assoc_list =
+		acct_storage_g_get_assocs(db_conn, uid, &assoc_q);
+//	END_TIMER2("get_assocs");
 
-	if (assoc_q.cluster_list)
-		list_destroy(assoc_q.cluster_list);
+	FREE_NULL_LIST(assoc_q.cluster_list);
 
-	if (!assoc_mgr_association_list) {
-		assoc_mgr_association_list = current_assocs;
+	if (!assoc_mgr_assoc_list) {
+		assoc_mgr_assoc_list = current_assocs;
 		assoc_mgr_unlock(&locks);
 
-		error("_refresh_assoc_mgr_association_list: "
+		error("_refresh_assoc_mgr_assoc_list: "
 		      "no new list given back keeping cached one.");
 		return SLURM_ERROR;
 	}
 
-	_post_association_list();
+	_post_assoc_list();
 
 	if (!current_assocs) {
 		assoc_mgr_unlock(&locks);
@@ -1317,7 +1547,7 @@ static int _refresh_assoc_mgr_association_list(void *db_conn, int enforce)
 	}
 
 	curr_itr = list_iterator_create(current_assocs);
-	assoc_mgr_itr = list_iterator_create(assoc_mgr_association_list);
+	assoc_mgr_itr = list_iterator_create(assoc_mgr_assoc_list);
 
 	/* add used limits We only look for the user associations to
 	 * do the parents since a parent may have moved */
@@ -1343,8 +1573,7 @@ static int _refresh_assoc_mgr_association_list(void *db_conn, int enforce)
 
 	assoc_mgr_unlock(&locks);
 
-	if (current_assocs)
-		list_destroy(current_assocs);
+	FREE_NULL_LIST(current_assocs);
 
 	return SLURM_SUCCESS;
 }
@@ -1357,8 +1586,8 @@ static int _refresh_assoc_mgr_res_list(void *db_conn, int enforce)
 	slurmdb_res_cond_t res_q;
 	List current_res = NULL;
 	uid_t uid = getuid();
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, WRITE_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 	slurmdb_init_res_cond(&res_q, 0);
 	if (assoc_mgr_cluster_name) {
@@ -1373,8 +1602,7 @@ static int _refresh_assoc_mgr_res_list(void *db_conn, int enforce)
 
 	current_res = acct_storage_g_get_res(db_conn, uid, &res_q);
 
-	if (res_q.cluster_list)
-		list_destroy(res_q.cluster_list);
+	FREE_NULL_LIST(res_q.cluster_list);
 
 	if (!current_res) {
 		error("_refresh_assoc_mgr_res_list: "
@@ -1402,8 +1630,8 @@ static int _refresh_assoc_mgr_qos_list(void *db_conn, int enforce)
 {
 	List current_qos = NULL;
 	uid_t uid = getuid();
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 	current_qos = acct_storage_g_get_qos(db_conn, uid, NULL);
 
@@ -1416,8 +1644,7 @@ static int _refresh_assoc_mgr_qos_list(void *db_conn, int enforce)
 
 	assoc_mgr_lock(&locks);
 
-	if (assoc_mgr_qos_list)
-		list_destroy(assoc_mgr_qos_list);
+	FREE_NULL_LIST(assoc_mgr_qos_list);
 
 	assoc_mgr_qos_list = current_qos;
 
@@ -1434,8 +1661,8 @@ static int _refresh_assoc_mgr_user_list(void *db_conn, int enforce)
 	List current_users = NULL;
 	slurmdb_user_cond_t user_q;
 	uid_t uid = getuid();
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, WRITE_LOCK, NO_LOCK };
 
 	memset(&user_q, 0, sizeof(slurmdb_user_cond_t));
 	user_q.with_coords = 1;
@@ -1451,8 +1678,7 @@ static int _refresh_assoc_mgr_user_list(void *db_conn, int enforce)
 
 	assoc_mgr_lock(&locks);
 
-	if (assoc_mgr_user_list)
-		list_destroy(assoc_mgr_user_list);
+	FREE_NULL_LIST(assoc_mgr_user_list);
 
 	assoc_mgr_user_list = current_users;
 
@@ -1469,8 +1695,8 @@ static int _refresh_assoc_wckey_list(void *db_conn, int enforce)
 	slurmdb_wckey_cond_t wckey_q;
 	List current_wckeys = NULL;
 	uid_t uid = getuid();
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, WRITE_LOCK, WRITE_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, WRITE_LOCK, WRITE_LOCK };
 
 	memset(&wckey_q, 0, sizeof(slurmdb_wckey_cond_t));
 	if (assoc_mgr_cluster_name) {
@@ -1484,8 +1710,7 @@ static int _refresh_assoc_wckey_list(void *db_conn, int enforce)
 
 	current_wckeys = acct_storage_g_get_wckeys(db_conn, uid, &wckey_q);
 
-	if (wckey_q.cluster_list)
-		list_destroy(wckey_q.cluster_list);
+	FREE_NULL_LIST(wckey_q.cluster_list);
 
 	if (!current_wckeys) {
 		error("_refresh_assoc_wckey_list: "
@@ -1496,8 +1721,7 @@ static int _refresh_assoc_wckey_list(void *db_conn, int enforce)
 	_post_wckey_list(current_wckeys);
 
 	assoc_mgr_lock(&locks);
-	if (assoc_mgr_wckey_list)
-		list_destroy(assoc_mgr_wckey_list);
+	FREE_NULL_LIST(assoc_mgr_wckey_list);
 
 	assoc_mgr_wckey_list = current_wckeys;
 	assoc_mgr_unlock(&locks);
@@ -1606,6 +1830,13 @@ extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args,
 	if (db_conn_errno != SLURM_SUCCESS)
 		return SLURM_ERROR;
 
+	/* get tres before association and qos since it is used there */
+	if ((!assoc_mgr_tres_list)
+	    && (init_setup.cache_level & ASSOC_MGR_CACHE_TRES))
+		if (_get_assoc_mgr_tres_list(db_conn, init_setup.enforce)
+		    == SLURM_ERROR)
+			return SLURM_ERROR;
+
 	/* get qos before association since it is used there */
 	if ((!assoc_mgr_qos_list)
 	    && (init_setup.cache_level & ASSOC_MGR_CACHE_QOS))
@@ -1620,16 +1851,16 @@ extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args,
 		    SLURM_ERROR)
 			return SLURM_ERROR;
 
-	if ((!assoc_mgr_association_list)
+	if ((!assoc_mgr_assoc_list)
 	    && (init_setup.cache_level & ASSOC_MGR_CACHE_ASSOC))
-		if (_get_assoc_mgr_association_list(db_conn, init_setup.enforce)
+		if (_get_assoc_mgr_assoc_list(db_conn, init_setup.enforce)
 		    == SLURM_ERROR)
 			return SLURM_ERROR;
 
-	if (assoc_mgr_association_list && !setup_children) {
-		slurmdb_association_rec_t *assoc = NULL;
+	if (assoc_mgr_assoc_list && !setup_children) {
+		slurmdb_assoc_rec_t *assoc = NULL;
 		ListIterator itr =
-			list_iterator_create(assoc_mgr_association_list);
+			list_iterator_create(assoc_mgr_assoc_list);
 		while ((assoc = list_next(itr))) {
 			log_assoc_rec(assoc, assoc_mgr_qos_list);
 		}
@@ -1653,7 +1884,7 @@ extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args,
 
 extern int assoc_mgr_fini(char *state_save_location)
 {
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, WRITE_LOCK,
 				   WRITE_LOCK, WRITE_LOCK, WRITE_LOCK,
 				   WRITE_LOCK };
 
@@ -1662,18 +1893,21 @@ extern int assoc_mgr_fini(char *state_save_location)
 
 	assoc_mgr_lock(&locks);
 
-	if (assoc_mgr_association_list)
-		list_destroy(assoc_mgr_association_list);
-	if (assoc_mgr_res_list)
-		list_destroy(assoc_mgr_res_list);
-	if (assoc_mgr_qos_list)
-		list_destroy(assoc_mgr_qos_list);
-	if (assoc_mgr_user_list)
-		list_destroy(assoc_mgr_user_list);
-	if (assoc_mgr_wckey_list)
-		list_destroy(assoc_mgr_wckey_list);
+	FREE_NULL_LIST(assoc_mgr_assoc_list);
+	FREE_NULL_LIST(assoc_mgr_tres_list);
+	FREE_NULL_LIST(assoc_mgr_res_list);
+	FREE_NULL_LIST(assoc_mgr_qos_list);
+	FREE_NULL_LIST(assoc_mgr_user_list);
+	FREE_NULL_LIST(assoc_mgr_wckey_list);
+	if (assoc_mgr_tres_name_array) {
+		int i;
+		for (i=0; i<g_tres_count; i++)
+			xfree(assoc_mgr_tres_name_array[i]);
+		xfree(assoc_mgr_tres_name_array);
+	}
+	xfree(assoc_mgr_tres_array);
 	xfree(assoc_mgr_cluster_name);
-	assoc_mgr_association_list = NULL;
+	assoc_mgr_assoc_list = NULL;
 	assoc_mgr_res_list = NULL;
 	assoc_mgr_qos_list = NULL;
 	assoc_mgr_user_list = NULL;
@@ -1712,6 +1946,11 @@ extern void assoc_mgr_lock(assoc_mgr_lock_t *locks)
 	else if (locks->res == WRITE_LOCK)
 		_wr_wrlock(RES_LOCK);
 
+	if (locks->tres == READ_LOCK)
+		_wr_rdlock(TRES_LOCK);
+	else if (locks->tres == WRITE_LOCK)
+		_wr_wrlock(TRES_LOCK);
+
 	if (locks->user == READ_LOCK)
 		_wr_rdlock(USER_LOCK);
 	else if (locks->user == WRITE_LOCK)
@@ -1735,6 +1974,11 @@ extern void assoc_mgr_unlock(assoc_mgr_lock_t *locks)
 	else if (locks->user == WRITE_LOCK)
 		_wr_wrunlock(USER_LOCK);
 
+	if (locks->tres == READ_LOCK)
+		_wr_rdunlock(TRES_LOCK);
+	else if (locks->tres == WRITE_LOCK)
+		_wr_wrunlock(TRES_LOCK);
+
 	if (locks->res == READ_LOCK)
 		_wr_rdunlock(RES_LOCK);
 	else if (locks->res == WRITE_LOCK)
@@ -1756,70 +2000,18 @@ extern void assoc_mgr_unlock(assoc_mgr_lock_t *locks)
 		_wr_wrunlock(ASSOC_LOCK);
 }
 
-extern assoc_mgr_association_usage_t *create_assoc_mgr_association_usage()
-{
-	assoc_mgr_association_usage_t *usage =
-		xmalloc(sizeof(assoc_mgr_association_usage_t));
-
-	usage->level_shares = NO_VAL;
-	usage->shares_norm = (double)NO_VAL;
-	usage->usage_efctv = 0;
-	usage->usage_norm = (long double)NO_VAL;
-	usage->usage_raw = 0;
-	usage->level_fs = 0;
-	usage->fs_factor = 0;
-
-	return usage;
-}
-
-extern void destroy_assoc_mgr_association_usage(void *object)
-{
-	assoc_mgr_association_usage_t *usage =
-		(assoc_mgr_association_usage_t *)object;
-
-	if (usage) {
-		if (usage->children_list)
-			list_destroy(usage->children_list);
-		FREE_NULL_BITMAP(usage->valid_qos);
-
-		xfree(usage);
-	}
-}
-
-extern assoc_mgr_qos_usage_t *create_assoc_mgr_qos_usage()
-{
-	assoc_mgr_qos_usage_t *usage =
-		xmalloc(sizeof(assoc_mgr_qos_usage_t));
-
-	return usage;
-}
-
-extern void destroy_assoc_mgr_qos_usage(void *object)
-{
-	assoc_mgr_qos_usage_t *usage =
-		(assoc_mgr_qos_usage_t *)object;
-
-	if (usage) {
-		if (usage->job_list)
-			list_destroy(usage->job_list);
-		if (usage->user_limit_list)
-			list_destroy(usage->user_limit_list);
-		xfree(usage);
-	}
-}
-
 /* Since the returned assoc_list is full of pointers from the
- * assoc_mgr_association_list assoc_mgr_lock_t READ_LOCK on
- * associations must be set before calling this function and while
+ * assoc_mgr_assoc_list assoc_mgr_lock_t READ_LOCK on
+ * assocs must be set before calling this function and while
  * handling it after a return.
  */
 extern int assoc_mgr_get_user_assocs(void *db_conn,
-				     slurmdb_association_rec_t *assoc,
+				     slurmdb_assoc_rec_t *assoc,
 				     int enforce,
 				     List assoc_list)
 {
 	ListIterator itr = NULL;
-	slurmdb_association_rec_t *found_assoc = NULL;
+	slurmdb_assoc_rec_t *found_assoc = NULL;
 	int set = 0;
 
 	xassert(assoc);
@@ -1830,17 +2022,17 @@ extern int assoc_mgr_get_user_assocs(void *db_conn,
 	   association list because we need qos and user lists before
 	   the association list can be made.
 	*/
-	if (!assoc_mgr_association_list)
-		if (assoc_mgr_refresh_lists(db_conn) == SLURM_ERROR)
+	if (!assoc_mgr_assoc_list)
+		if (assoc_mgr_refresh_lists(db_conn, 0) == SLURM_ERROR)
 			return SLURM_ERROR;
 
-	if ((!assoc_mgr_association_list
-	     || !list_count(assoc_mgr_association_list))
+	if ((!assoc_mgr_assoc_list
+	     || !list_count(assoc_mgr_assoc_list))
 	    && !(enforce & ACCOUNTING_ENFORCE_ASSOCS)) {
 		return SLURM_SUCCESS;
 	}
 
-	itr = list_iterator_create(assoc_mgr_association_list);
+	itr = list_iterator_create(assoc_mgr_assoc_list);
 	while ((found_assoc = list_next(itr))) {
 		if (assoc->uid != found_assoc->uid) {
 			debug4("not the right user %u != %u",
@@ -1861,15 +2053,122 @@ extern int assoc_mgr_get_user_assocs(void *db_conn,
 	return SLURM_SUCCESS;
 }
 
+extern int assoc_mgr_fill_in_tres(void *db_conn,
+				  slurmdb_tres_rec_t *tres,
+				  int enforce,
+				  slurmdb_tres_rec_t **tres_pptr,
+				  bool locked)
+{
+	ListIterator itr;
+	slurmdb_tres_rec_t *found_tres = NULL;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+
+	if (tres_pptr)
+		*tres_pptr = NULL;
+
+	/* Since we might be locked we can't come in here and try to
+	 * get the list since we would need the WRITE_LOCK to do that,
+	 * so just return as this would only happen on a system not
+	 * talking to the database.
+	 */
+	if (!assoc_mgr_tres_list) {
+		int rc = SLURM_SUCCESS;
+
+		if (enforce & ACCOUNTING_ENFORCE_TRES) {
+			error("No TRES list available, this should never "
+			      "happen when running with the database, "
+			      "make sure it is configured.");
+			rc = SLURM_ERROR;
+		}
+		return rc;
+	}
+
+	if ((!assoc_mgr_tres_list
+	     || !list_count(assoc_mgr_tres_list))
+	    && !(enforce & ACCOUNTING_ENFORCE_TRES))
+		return SLURM_SUCCESS;
+
+	if (!tres->id) {
+		if (!tres->type ||
+		    ((!strncasecmp(tres->type, "gres:", 5) ||
+		      !strncasecmp(tres->type, "license:", 8))
+		     && !tres->name)) {
+			if (enforce & ACCOUNTING_ENFORCE_TRES) {
+				error("get_assoc_id: "
+				      "Not enough info to "
+				      "get an association");
+				return SLURM_ERROR;
+			} else {
+				return SLURM_SUCCESS;
+			}
+		}
+	}
+	/* info("looking for tres of (%d)%s:%s", */
+	/*      tres->id, tres->type, tres->name); */
+	if (!locked)
+		assoc_mgr_lock(&locks);
+
+	itr = list_iterator_create(assoc_mgr_tres_list);
+	while ((found_tres = list_next(itr))) {
+		if (tres->id) {
+			if (tres->id == found_tres->id)
+				break;
+		} else if ((tres->type
+			    && !strcasecmp(tres->type, found_tres->type))
+			   && ((!tres->name && !found_tres->name)
+			       || ((tres->name && found_tres->name) &&
+				   !strcasecmp(tres->name,
+					       found_tres->name))))
+			break;
+	}
+	list_iterator_destroy(itr);
+
+	if (!found_tres) {
+		if (!locked)
+			assoc_mgr_unlock(&locks);
+		if (enforce & ACCOUNTING_ENFORCE_TRES)
+			return SLURM_ERROR;
+		else
+			return SLURM_SUCCESS;
+	}
+	debug3("found correct tres");
+	if (tres_pptr)
+		*tres_pptr = found_tres;
+
+	tres->id           = found_tres->id;
+
+	if (!tres->type)
+		tres->type = found_tres->type;
+	else {
+		xfree(tres->type);
+		tres->type = xstrdup(found_tres->type);
+	}
+
+	if (!tres->name)
+		tres->name = found_tres->name;
+	else {
+		xfree(tres->name);
+		tres->name = xstrdup(found_tres->name);
+	}
+
+	tres->count        = found_tres->count;
+
+	if (!locked)
+		assoc_mgr_unlock(&locks);
+
+	return SLURM_SUCCESS;
+}
+
 extern int assoc_mgr_fill_in_assoc(void *db_conn,
-				   slurmdb_association_rec_t *assoc,
+				   slurmdb_assoc_rec_t *assoc,
 				   int enforce,
-				   slurmdb_association_rec_t **assoc_pptr,
+				   slurmdb_assoc_rec_t **assoc_pptr,
 				   bool locked)
 {
-	slurmdb_association_rec_t * ret_assoc = NULL;
-	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	slurmdb_assoc_rec_t * ret_assoc = NULL;
+	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 	if (assoc_pptr)
 		*assoc_pptr = NULL;
@@ -1879,7 +2178,7 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn,
 	 * so just return as this would only happen on a system not
 	 * talking to the database.
 	 */
-	if (!assoc_mgr_association_list) {
+	if (!assoc_mgr_assoc_list) {
 		int rc = SLURM_SUCCESS;
 
 		if (enforce & ACCOUNTING_ENFORCE_ASSOCS) {
@@ -1890,8 +2189,8 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn,
 		return rc;
 	}
 
-	if ((!assoc_mgr_association_list
-	     || !list_count(assoc_mgr_association_list))
+	if ((!assoc_mgr_assoc_list
+	     || !list_count(assoc_mgr_assoc_list))
 	    && !(enforce & ACCOUNTING_ENFORCE_ASSOCS))
 		return SLURM_SUCCESS;
 
@@ -1981,12 +2280,13 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn,
 	if (!assoc->cluster)
 		assoc->cluster = ret_assoc->cluster;
 
-	assoc->grp_cpu_mins    = ret_assoc->grp_cpu_mins;
-	assoc->grp_cpu_run_mins= ret_assoc->grp_cpu_run_mins;
-	assoc->grp_cpus        = ret_assoc->grp_cpus;
+	if (!assoc->grp_tres_mins)
+		assoc->grp_tres_mins    = ret_assoc->grp_tres_mins;
+	if (!assoc->grp_tres_run_mins)
+		assoc->grp_tres_run_mins= ret_assoc->grp_tres_run_mins;
+	if (!assoc->grp_tres)
+		assoc->grp_tres        = ret_assoc->grp_tres;
 	assoc->grp_jobs        = ret_assoc->grp_jobs;
-	assoc->grp_mem         = ret_assoc->grp_mem;
-	assoc->grp_nodes       = ret_assoc->grp_nodes;
 	assoc->grp_submit_jobs = ret_assoc->grp_submit_jobs;
 	assoc->grp_wall        = ret_assoc->grp_wall;
 
@@ -1994,11 +2294,15 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn,
 
 	assoc->lft             = ret_assoc->lft;
 
-	assoc->max_cpu_mins_pj = ret_assoc->max_cpu_mins_pj;
-	assoc->max_cpu_run_mins= ret_assoc->max_cpu_run_mins;
-	assoc->max_cpus_pj     = ret_assoc->max_cpus_pj;
+	if (!assoc->max_tres_mins_pj)
+		assoc->max_tres_mins_pj = ret_assoc->max_tres_mins_pj;
+	if (!assoc->max_tres_run_mins)
+		assoc->max_tres_run_mins = ret_assoc->max_tres_run_mins;
+	if (!assoc->max_tres_pj)
+		assoc->max_tres_pj     = ret_assoc->max_tres_pj;
+	if (!assoc->max_tres_pn)
+		assoc->max_tres_pn     = ret_assoc->max_tres_pn;
 	assoc->max_jobs        = ret_assoc->max_jobs;
-	assoc->max_nodes_pj    = ret_assoc->max_nodes_pj;
 	assoc->max_submit_jobs = ret_assoc->max_submit_jobs;
 	assoc->max_wall_pj     = ret_assoc->max_wall_pj;
 
@@ -2028,10 +2332,9 @@ extern int assoc_mgr_fill_in_assoc(void *db_conn,
 
 	/* if (!assoc->usage->children_list) */
 	/* 	assoc->usage->children_list = ret_assoc->usage->children_list; */
-	/* assoc->usage->grp_used_cpus   = ret_assoc->usage->grp_used_cpus; */
-	/* assoc->usage->grp_used_cpu_run_mins  = */
-	/* 	ret_assoc->usage->grp_used_cpu_run_mins; */
-	/* assoc->usage->grp_used_nodes  = ret_assoc->usage->grp_used_nodes; */
+	/* assoc->usage->grp_used_tres   = ret_assoc->usage->grp_used_tres; */
+	/* assoc->usage->grp_used_tres_run_mins  = */
+	/* 	ret_assoc->usage->grp_used_tres_run_mins; */
 	/* assoc->usage->grp_used_wall   = ret_assoc->usage->grp_used_wall; */
 
 	/* assoc->usage->level_shares    = ret_assoc->usage->level_shares; */
@@ -2064,8 +2367,8 @@ extern int assoc_mgr_fill_in_user(void *db_conn, slurmdb_user_rec_t *user,
 {
 	ListIterator itr = NULL;
 	slurmdb_user_rec_t * found_user = NULL;
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, READ_LOCK, NO_LOCK };
 
 	if (user_pptr)
 		*user_pptr = NULL;
@@ -2134,8 +2437,8 @@ extern int assoc_mgr_fill_in_qos(void *db_conn, slurmdb_qos_rec_t *qos,
 {
 	ListIterator itr = NULL;
 	slurmdb_qos_rec_t * found_qos = NULL;
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 	if (qos_pptr)
 		*qos_pptr = NULL;
@@ -2194,26 +2497,32 @@ extern int assoc_mgr_fill_in_qos(void *db_conn, slurmdb_qos_rec_t *qos,
 	qos->id = found_qos->id;
 
 	qos->grace_time      = found_qos->grace_time;
-	qos->grp_cpu_mins    = found_qos->grp_cpu_mins;
-	qos->grp_cpu_run_mins= found_qos->grp_cpu_run_mins;
-	qos->grp_cpus        = found_qos->grp_cpus;
+	if (!qos->grp_tres_mins)
+		qos->grp_tres_mins    = found_qos->grp_tres_mins;
+	if (!qos->grp_tres_run_mins)
+		qos->grp_tres_run_mins= found_qos->grp_tres_run_mins;
+	if (!qos->grp_tres)
+		qos->grp_tres        = found_qos->grp_tres;
 	qos->grp_jobs        = found_qos->grp_jobs;
-	qos->grp_mem         = found_qos->grp_mem;
-	qos->grp_nodes       = found_qos->grp_nodes;
 	qos->grp_submit_jobs = found_qos->grp_submit_jobs;
 	qos->grp_wall        = found_qos->grp_wall;
 
-	qos->max_cpu_mins_pj = found_qos->max_cpu_mins_pj;
-	qos->max_cpu_run_mins_pu = found_qos->max_cpu_run_mins_pu;
-	qos->max_cpus_pj     = found_qos->max_cpus_pj;
-	qos->max_cpus_pu     = found_qos->max_cpus_pu;
+	if (!qos->max_tres_mins_pj)
+		qos->max_tres_mins_pj = found_qos->max_tres_mins_pj;
+	if (!qos->max_tres_run_mins_pu)
+		qos->max_tres_run_mins_pu = found_qos->max_tres_run_mins_pu;
+	if (!qos->max_tres_pj)
+		qos->max_tres_pj     = found_qos->max_tres_pj;
+	if (!qos->max_tres_pn)
+		qos->max_tres_pn     = found_qos->max_tres_pn;
+	if (!qos->max_tres_pu)
+		qos->max_tres_pu     = found_qos->max_tres_pu;
 	qos->max_jobs_pu     = found_qos->max_jobs_pu;
-	qos->max_nodes_pj    = found_qos->max_nodes_pj;
-	qos->max_nodes_pu    = found_qos->max_nodes_pu;
 	qos->max_submit_jobs_pu = found_qos->max_submit_jobs_pu;
 	qos->max_wall_pj     = found_qos->max_wall_pj;
 
-	qos->min_cpus_pj     = found_qos->min_cpus_pj;
+	if (!qos->min_tres_pj)
+		qos->min_tres_pj     = found_qos->min_tres_pj;
 
 	if (!qos->name)
 		qos->name = found_qos->name;
@@ -2231,11 +2540,10 @@ extern int assoc_mgr_fill_in_qos(void *db_conn, slurmdb_qos_rec_t *qos,
 	   is really in existance here, if they really want it they can
 	   use the pointer that is returned. */
 
-	/* qos->usage->grp_used_cpus   = found_qos->usage->grp_used_cpus; */
-	/* qos->usage->grp_used_cpu_run_mins  = */
-	/* 	found_qos->usage->grp_used_cpu_run_mins; */
+	/* qos->usage->grp_used_tres   = found_qos->usage->grp_used_tres; */
+	/* qos->usage->grp_used_tres_run_mins  = */
+	/* 	found_qos->usage->grp_used_tres_run_mins; */
 	/* qos->usage->grp_used_jobs   = found_qos->usage->grp_used_jobs; */
-	/* qos->usage->grp_used_nodes  = found_qos->usage->grp_used_nodes; */
 	/* qos->usage->grp_used_submit_jobs = */
 	/* 	found_qos->usage->grp_used_submit_jobs; */
 	/* qos->usage->grp_used_wall   = found_qos->usage->grp_used_wall; */
@@ -2263,8 +2571,8 @@ extern int assoc_mgr_fill_in_wckey(void *db_conn, slurmdb_wckey_rec_t *wckey,
 	ListIterator itr = NULL;
 	slurmdb_wckey_rec_t * found_wckey = NULL;
 	slurmdb_wckey_rec_t * ret_wckey = NULL;
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, NO_LOCK, READ_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, READ_LOCK };
 
 	if (wckey_pptr)
 		*wckey_pptr = NULL;
@@ -2427,8 +2735,8 @@ extern slurmdb_admin_level_t assoc_mgr_get_admin_level(void *db_conn,
 {
 	ListIterator itr = NULL;
 	slurmdb_user_rec_t * found_user = NULL;
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, READ_LOCK, NO_LOCK };
 
 	if (!assoc_mgr_user_list)
 		if (_get_assoc_mgr_user_list(db_conn, 0) == SLURM_ERROR)
@@ -2461,8 +2769,8 @@ extern bool assoc_mgr_is_user_acct_coord(void *db_conn,
 	ListIterator itr = NULL;
 	slurmdb_coord_rec_t *acct = NULL;
 	slurmdb_user_rec_t * found_user = NULL;
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, READ_LOCK, NO_LOCK };
 
 	if (!acct_name)
 		return false;
@@ -2504,34 +2812,38 @@ extern bool assoc_mgr_is_user_acct_coord(void *db_conn,
 	return false;
 }
 
-extern List assoc_mgr_get_shares(void *db_conn,
-				 uid_t uid, List acct_list, List user_list)
+extern void assoc_mgr_get_shares(void *db_conn,
+				 uid_t uid, shares_request_msg_t *req_msg,
+				 shares_response_msg_t *resp_msg)
 {
 	ListIterator itr = NULL;
 	ListIterator user_itr = NULL;
 	ListIterator acct_itr = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
-	association_shares_object_t *share = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
+	assoc_shares_object_t *share = NULL;
 	List ret_list = NULL;
 	char *tmp_char = NULL;
 	slurmdb_user_rec_t user;
 	int is_admin=1;
 	uint16_t private_data = slurm_get_private_data();
-	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
 
-	if (!assoc_mgr_association_list
-	    || !list_count(assoc_mgr_association_list))
-		return NULL;
+	xassert(resp_msg);
+
+	if (!assoc_mgr_assoc_list || !list_count(assoc_mgr_assoc_list))
+		return;
 
 	memset(&user, 0, sizeof(slurmdb_user_rec_t));
 	user.uid = uid;
 
-	if (user_list && list_count(user_list))
-		user_itr = list_iterator_create(user_list);
+	if (req_msg) {
+		if (req_msg->user_list && list_count(req_msg->user_list))
+			user_itr = list_iterator_create(req_msg->user_list);
 
-	if (acct_list && list_count(acct_list))
-		acct_itr = list_iterator_create(acct_list);
+		if (req_msg->acct_list && list_count(req_msg->acct_list))
+			acct_itr = list_iterator_create(req_msg->acct_list);
+	}
 
 	if (private_data & PRIVATE_DATA_USAGE) {
 		uint32_t slurm_uid = slurm_get_slurm_user_id();
@@ -2553,10 +2865,19 @@ extern List assoc_mgr_get_shares(void *db_conn,
 		}
 	}
 
-	ret_list = list_create(slurm_destroy_association_shares_object);
+	resp_msg->assoc_shares_list = ret_list =
+		list_create(slurm_destroy_assoc_shares_object);
 
 	assoc_mgr_lock(&locks);
-	itr = list_iterator_create(assoc_mgr_association_list);
+
+	resp_msg->tres_cnt = g_tres_count;
+
+	/* DON'T FREE, since this shouldn't change while the slurmctld
+	 * is running we should be ok.
+	*/
+	resp_msg->tres_names = assoc_mgr_tres_name_array;
+
+	itr = list_iterator_create(assoc_mgr_assoc_list);
 	while ((assoc = list_next(itr))) {
 		if (user_itr && assoc->user) {
 			while ((tmp_char = list_next(user_itr))) {
@@ -2617,7 +2938,7 @@ extern List assoc_mgr_get_shares(void *db_conn,
 		}
 	is_user:
 
-		share = xmalloc(sizeof(association_shares_object_t));
+		share = xmalloc(sizeof(assoc_shares_object_t));
 		list_append(ret_list, share);
 
 		share->assoc_id = assoc->id;
@@ -2631,11 +2952,28 @@ extern List assoc_mgr_get_shares(void *db_conn,
 		share->shares_norm = assoc->usage->shares_norm;
 		share->usage_raw = (uint64_t)assoc->usage->usage_raw;
 
-		share->grp_cpu_mins = assoc->grp_cpu_mins;
-		share->cpu_run_mins = assoc->usage->grp_used_cpu_run_secs / 60;
+		share->usage_tres_raw = xmalloc(
+			sizeof(long double) * g_tres_count);
+		memcpy(share->usage_tres_raw,
+		       assoc->usage->usage_tres_raw,
+		       sizeof(long double) * g_tres_count);
+
+		share->tres_grp_mins = xmalloc(sizeof(uint64_t) * g_tres_count);
+		memcpy(share->tres_grp_mins, assoc->grp_tres_mins_ctld,
+		       sizeof(uint64_t) * g_tres_count);
+		share->tres_run_secs = xmalloc(sizeof(uint64_t) * g_tres_count);
+		memcpy(share->tres_run_secs,
+		       assoc->usage->grp_used_tres_run_secs,
+		       sizeof(uint64_t) * g_tres_count);
 		share->fs_factor = assoc->usage->fs_factor;
 		share->level_fs = assoc->usage->level_fs;
 
+		if (assoc->partition) {
+			share->partition =  xstrdup(assoc->partition);
+		} else {
+			share->partition = NULL;
+		}
+
 		if (assoc->user) {
 			/* We only calculate user effective usage when
 			 * we need it
@@ -2669,104 +3007,401 @@ end_it:
 	/* The ret_list should already be sorted correctly, so no need
 	   to do it again.
 	*/
-	return ret_list;
+	return;
 }
 
-/*
- * assoc_mgr_update - update the association manager
- * IN update_list: updates to perform
- * RET: error code
- * NOTE: the items in update_list are not deleted
- */
-extern int assoc_mgr_update(List update_list)
+extern void assoc_mgr_info_get_pack_msg(
+	char **buffer_ptr, int *buffer_size,
+	assoc_mgr_info_request_msg_t *msg, uid_t uid,
+	void *db_conn, uint16_t protocol_version)
 {
-	int rc = SLURM_SUCCESS;
 	ListIterator itr = NULL;
-	slurmdb_update_object_t *object = NULL;
+	ListIterator user_itr = NULL, acct_itr = NULL, qos_itr = NULL;
+	slurmdb_qos_rec_t *qos_rec = NULL;
+	slurmdb_assoc_rec_t *assoc_rec = NULL;
+	List ret_list = NULL, tmp_list;
+	char *tmp_char = NULL;
+	slurmdb_user_rec_t user, *user_rec = NULL;
+	int is_admin=1;
+	void *object;
+	uint32_t flags = 0;
 
-	xassert(update_list);
-	itr = list_iterator_create(update_list);
-	while ((object = list_next(itr))) {
-		if (!object->objects || !list_count(object->objects))
-			continue;
+	uint16_t private_data = slurm_get_private_data();
+	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK, NO_LOCK, READ_LOCK,
+				   READ_LOCK, READ_LOCK, NO_LOCK };
+	Buf buffer;
 
-		switch(object->type) {
-		case SLURMDB_MODIFY_USER:
-		case SLURMDB_ADD_USER:
-		case SLURMDB_REMOVE_USER:
-		case SLURMDB_ADD_COORD:
-		case SLURMDB_REMOVE_COORD:
-			rc = assoc_mgr_update_users(object);
-			break;
-		case SLURMDB_ADD_ASSOC:
-		case SLURMDB_MODIFY_ASSOC:
-		case SLURMDB_REMOVE_ASSOC:
-		case SLURMDB_REMOVE_ASSOC_USAGE:
-			rc = assoc_mgr_update_assocs(object);
-			break;
-		case SLURMDB_ADD_QOS:
-		case SLURMDB_MODIFY_QOS:
-		case SLURMDB_REMOVE_QOS:
-		case SLURMDB_REMOVE_QOS_USAGE:
-			rc = assoc_mgr_update_qos(object);
-			break;
-		case SLURMDB_ADD_WCKEY:
-		case SLURMDB_MODIFY_WCKEY:
-		case SLURMDB_REMOVE_WCKEY:
-			rc = assoc_mgr_update_wckeys(object);
-			break;
-		case SLURMDB_ADD_RES:
-		case SLURMDB_MODIFY_RES:
-		case SLURMDB_REMOVE_RES:
-			rc = assoc_mgr_update_res(object);
-			break;
-		case SLURMDB_ADD_CLUSTER:
-		case SLURMDB_REMOVE_CLUSTER:
-			/* These are used in the accounting_storage
-			   plugins for rollback purposes, just skip here.
-			*/
-			break;
-		case SLURMDB_UPDATE_NOTSET:
-		default:
-			error("unknown type set in "
-			      "update_object: %d",
-			      object->type);
-			break;
-		}
-	}
-	list_iterator_destroy(itr);
-	return rc;
-}
+	buffer_ptr[0] = NULL;
+	*buffer_size = 0;
 
-extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
-{
-	slurmdb_association_rec_t * rec = NULL;
-	slurmdb_association_rec_t * object = NULL;
-	ListIterator itr = NULL;
-	int rc = SLURM_SUCCESS;
-	int parents_changed = 0;
-	int run_update_resvs = 0;
-	int resort = 0;
-	List remove_list = NULL;
-	List update_list = NULL;
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   WRITE_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
+	memset(&user, 0, sizeof(slurmdb_user_rec_t));
+	user.uid = uid;
 
-	assoc_mgr_lock(&locks);
-	if (!assoc_mgr_association_list) {
-		assoc_mgr_unlock(&locks);
-		return SLURM_SUCCESS;
+	if (msg) {
+		if (msg->user_list && list_count(msg->user_list))
+			user_itr = list_iterator_create(msg->user_list);
+
+		if (msg->acct_list && list_count(msg->acct_list))
+			acct_itr = list_iterator_create(msg->acct_list);
+
+		if (msg->qos_list && list_count(msg->qos_list))
+			qos_itr = list_iterator_create(msg->qos_list);
+		flags = msg->flags;
 	}
 
-	while ((object = list_pop(update->objects))) {
-		bool update_jobs = false;
-		if (object->cluster && assoc_mgr_cluster_name) {
-			/* only update the local clusters assocs */
-			if (strcasecmp(object->cluster,
-				       assoc_mgr_cluster_name)) {
-				slurmdb_destroy_association_rec(object);
-				continue;
-			}
+	if (private_data & (PRIVATE_DATA_USAGE | PRIVATE_DATA_USERS)) {
+		uint32_t slurm_uid = slurm_get_slurm_user_id();
+		is_admin = 0;
+		/* Check permissions of the requesting user.
+		 */
+		if ((uid == slurm_uid || uid == 0)
+		    || assoc_mgr_get_admin_level(db_conn, uid)
+		    >= SLURMDB_ADMIN_OPERATOR)
+			is_admin = 1;
+		else {
+			if (assoc_mgr_fill_in_user(
+				    db_conn, &user,
+				    ACCOUNTING_ENFORCE_ASSOCS, NULL)
+			    == SLURM_ERROR) {
+				debug3("User %d not found", user.uid);
+				goto end_it;
+			}
+		}
+	}
+
+	/* This is where we start to pack */
+	buffer = init_buf(BUF_SIZE);
+
+	packstr_array(assoc_mgr_tres_name_array, g_tres_count, buffer);
+
+	ret_list = list_create(NULL);
+
+	assoc_mgr_lock(&locks);
+
+	if (!(flags & ASSOC_MGR_INFO_FLAG_ASSOC))
+		goto no_assocs;
+
+	itr = list_iterator_create(assoc_mgr_assoc_list);
+	while ((assoc_rec = list_next(itr))) {
+		if (user_itr && assoc_rec->user) {
+			while ((tmp_char = list_next(user_itr))) {
+				if (!xstrcasecmp(tmp_char, assoc_rec->user))
+					break;
+			}
+			list_iterator_reset(user_itr);
+			/* not correct user */
+			if (!tmp_char)
+				continue;
+		}
+
+		if (acct_itr) {
+			while ((tmp_char = list_next(acct_itr))) {
+				if (!xstrcasecmp(tmp_char, assoc_rec->acct))
+					break;
+			}
+			list_iterator_reset(acct_itr);
+			/* not correct account */
+			if (!tmp_char)
+				continue;
+		}
+
+		if (private_data & PRIVATE_DATA_USAGE) {
+			if (!is_admin) {
+				ListIterator itr = NULL;
+				slurmdb_coord_rec_t *coord = NULL;
+
+				if (assoc_rec->user &&
+				    !strcmp(assoc_rec->user, user.name))
+					goto is_user;
+
+				if (!user.coord_accts) {
+					debug4("This user isn't a coord.");
+					goto bad_user;
+				}
+
+				if (!assoc_rec->acct) {
+					debug("No account name given "
+					      "in association.");
+					goto bad_user;
+				}
+
+				itr = list_iterator_create(user.coord_accts);
+				while ((coord = list_next(itr))) {
+					if (!strcasecmp(coord->name,
+							assoc_rec->acct))
+						break;
+				}
+				list_iterator_destroy(itr);
+
+				if (coord)
+					goto is_user;
+
+			bad_user:
+				continue;
+			}
+		}
+	is_user:
+
+		list_append(ret_list, assoc_rec);
+	}
+	list_iterator_destroy(itr);
+
+no_assocs:
+
+	/* pack the associations requested/allowed */
+	pack32(list_count(ret_list), buffer);
+	itr = list_iterator_create(ret_list);
+	while ((object = list_next(itr)))
+		slurmdb_pack_assoc_rec_with_usage(
+			object, protocol_version, buffer);
+	list_iterator_destroy(itr);
+	list_flush(ret_list);
+
+	if (!(flags & ASSOC_MGR_INFO_FLAG_QOS)) {
+		tmp_list = ret_list;
+		goto no_qos;
+	}
+
+	/* now filter out the qos */
+	if (qos_itr) {
+		while ((tmp_char = list_next(qos_itr)))
+			if ((qos_rec = list_find_first(
+				     assoc_mgr_qos_list,
+				     slurmdb_find_tres_in_list,
+				     &tmp_char)))
+				list_append(ret_list, user_rec);
+		tmp_list = ret_list;
+	} else
+		tmp_list = assoc_mgr_qos_list;
+
+no_qos:
+	/* pack the qos requested */
+	if (tmp_list) {
+		pack32(list_count(tmp_list), buffer);
+		itr = list_iterator_create(tmp_list);
+		while ((object = list_next(itr)))
+			slurmdb_pack_qos_rec_with_usage(
+				object, protocol_version, buffer);
+		list_iterator_destroy(itr);
+	} else
+		pack32(0, buffer);
+
+	if (qos_itr)
+		list_flush(ret_list);
+
+	if (!(flags & ASSOC_MGR_INFO_FLAG_USERS) || !assoc_mgr_user_list)
+		goto no_users;
+
+	/* now filter out the users */
+	itr = list_iterator_create(assoc_mgr_user_list);
+	while ((user_rec = list_next(itr))) {
+		if (!is_admin && (private_data & PRIVATE_DATA_USERS) &&
+		    xstrcasecmp(user_rec->name, user.name))
+			continue;
+
+		if (user_itr) {
+			while ((tmp_char = list_next(user_itr)))
+				if (xstrcasecmp(tmp_char, user_rec->name))
+					break;
+			list_iterator_reset(user_itr);
+			/* not correct user */
+			if (!tmp_char)
+				continue;
+		}
+
+		list_append(ret_list, user_rec);
+	}
+
+no_users:
+
+	/* pack the users requested/allowed */
+	pack32(list_count(ret_list), buffer);
+	itr = list_iterator_create(ret_list);
+	while ((object = list_next(itr)))
+		slurmdb_pack_user_rec(object, protocol_version, buffer);
+	list_iterator_destroy(itr);
+//	list_flush(ret_list);
+
+	FREE_NULL_LIST(ret_list);
+
+	assoc_mgr_unlock(&locks);
+
+	/* put the real record count in the message body header */
+	*buffer_size = get_buf_offset(buffer);
+	buffer_ptr[0] = xfer_buf_data(buffer);
+
+end_it:
+	if (user_itr)
+		list_iterator_destroy(user_itr);
+	if (acct_itr)
+		list_iterator_destroy(acct_itr);
+	if (qos_itr)
+		list_iterator_destroy(qos_itr);
+
+	return;
+}
+
+extern int assoc_mgr_info_unpack_msg(
+	assoc_mgr_info_msg_t **object, Buf buffer, uint16_t protocol_version)
+{
+	assoc_mgr_info_msg_t *object_ptr =
+		xmalloc(sizeof(assoc_mgr_info_msg_t));
+	void *list_object = NULL;
+	uint32_t count;
+	int i;
+
+	*object = object_ptr;
+
+	safe_unpackstr_array(&object_ptr->tres_names, &object_ptr->tres_cnt,
+			     buffer);
+
+	safe_unpack32(&count, buffer);
+	if (count) {
+		object_ptr->assoc_list =
+			list_create(slurmdb_destroy_assoc_rec);
+		for (i=0; i<count; i++) {
+			if (slurmdb_unpack_assoc_rec_with_usage(
+				    &list_object, protocol_version,
+				    buffer)
+			    != SLURM_SUCCESS)
+				goto unpack_error;
+			list_append(object_ptr->assoc_list, list_object);
+		}
+	}
+
+	safe_unpack32(&count, buffer);
+	if (count) {
+		object_ptr->qos_list =
+			list_create(slurmdb_destroy_qos_rec);
+		for (i=0; i<count; i++) {
+			if (slurmdb_unpack_qos_rec_with_usage(
+				    &list_object, protocol_version, buffer)
+			    != SLURM_SUCCESS)
+				goto unpack_error;
+			list_append(object_ptr->qos_list, list_object);
+		}
+	}
+
+	safe_unpack32(&count, buffer);
+	if (count) {
+		object_ptr->user_list =
+			list_create(slurmdb_destroy_user_rec);
+		for (i=0; i<count; i++) {
+			if (slurmdb_unpack_user_rec(
+				    &list_object, protocol_version, buffer)
+			    != SLURM_SUCCESS)
+				goto unpack_error;
+			list_append(object_ptr->user_list, list_object);
+		}
+	}
+
+	return SLURM_SUCCESS;
+unpack_error:
+	slurm_free_assoc_mgr_info_msg(object_ptr);
+	*object = NULL;
+	return SLURM_ERROR;
+}
+
+/*
+ * assoc_mgr_update - update the association manager
+ * IN update_list: updates to perform
+ * RET: error code
+ * NOTE: the items in update_list are not deleted
+ */
+extern int assoc_mgr_update(List update_list, bool locked)
+{
+	int rc = SLURM_SUCCESS;
+	ListIterator itr = NULL;
+	slurmdb_update_object_t *object = NULL;
+
+	xassert(update_list);
+	itr = list_iterator_create(update_list);
+	while ((object = list_next(itr))) {
+		if (!object->objects || !list_count(object->objects))
+			continue;
+
+		switch(object->type) {
+		case SLURMDB_MODIFY_USER:
+		case SLURMDB_ADD_USER:
+		case SLURMDB_REMOVE_USER:
+		case SLURMDB_ADD_COORD:
+		case SLURMDB_REMOVE_COORD:
+			rc = assoc_mgr_update_users(object, locked);
+			break;
+		case SLURMDB_ADD_ASSOC:
+		case SLURMDB_MODIFY_ASSOC:
+		case SLURMDB_REMOVE_ASSOC:
+		case SLURMDB_REMOVE_ASSOC_USAGE:
+			rc = assoc_mgr_update_assocs(object, locked);
+			break;
+		case SLURMDB_ADD_QOS:
+		case SLURMDB_MODIFY_QOS:
+		case SLURMDB_REMOVE_QOS:
+		case SLURMDB_REMOVE_QOS_USAGE:
+			rc = assoc_mgr_update_qos(object, locked);
+			break;
+		case SLURMDB_ADD_WCKEY:
+		case SLURMDB_MODIFY_WCKEY:
+		case SLURMDB_REMOVE_WCKEY:
+			rc = assoc_mgr_update_wckeys(object, locked);
+			break;
+		case SLURMDB_ADD_RES:
+		case SLURMDB_MODIFY_RES:
+		case SLURMDB_REMOVE_RES:
+			rc = assoc_mgr_update_res(object, locked);
+			break;
+		case SLURMDB_ADD_CLUSTER:
+		case SLURMDB_REMOVE_CLUSTER:
+			/* These are used in the accounting_storage
+			   plugins for rollback purposes, just skip here.
+			*/
+			break;
+		case SLURMDB_ADD_TRES:
+			rc = assoc_mgr_update_tres(object, locked);
+			break;
+		case SLURMDB_UPDATE_NOTSET:
+		default:
+			error("unknown type set in "
+			      "update_object: %d",
+			      object->type);
+			break;
+		}
+	}
+	list_iterator_destroy(itr);
+	return rc;
+}
+
+extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update, bool locked)
+{
+	slurmdb_assoc_rec_t * rec = NULL;
+	slurmdb_assoc_rec_t * object = NULL;
+	ListIterator itr = NULL;
+	int rc = SLURM_SUCCESS, i;
+	int parents_changed = 0;
+	int run_update_resvs = 0;
+	int resort = 0;
+	List remove_list = NULL;
+	List update_list = NULL;
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK,
+				   READ_LOCK, WRITE_LOCK, NO_LOCK };
+
+	if (!locked)
+		assoc_mgr_lock(&locks);
+	if (!assoc_mgr_assoc_list) {
+		if (!locked)
+			assoc_mgr_unlock(&locks);
+		return SLURM_SUCCESS;
+	}
+
+	while ((object = list_pop(update->objects))) {
+		bool update_jobs = false;
+		if (object->cluster && assoc_mgr_cluster_name) {
+			/* only update the local clusters assocs */
+			if (strcasecmp(object->cluster,
+				       assoc_mgr_cluster_name)) {
+				slurmdb_destroy_assoc_rec(object);
+				continue;
+			}
 		} else if (assoc_mgr_cluster_name) {
 			error("We don't have a cluster here, no "
 			      "idea if this is our association.");
@@ -2801,25 +3436,47 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 				}
 			}
 
-			if (object->grp_cpu_mins != (uint64_t)NO_VAL)
-				rec->grp_cpu_mins = object->grp_cpu_mins;
-			if (object->grp_cpu_run_mins != (uint64_t)NO_VAL)
-				rec->grp_cpu_run_mins =
-					object->grp_cpu_run_mins;
-			if (object->grp_cpus != NO_VAL) {
+			if (object->grp_tres) {
 				update_jobs = true;
-				rec->grp_cpus = object->grp_cpus;
+				/* If we have a blank string that
+				 * means it is cleared.
+				 */
+				xfree(rec->grp_tres);
+				if (object->grp_tres[0]) {
+					rec->grp_tres = object->grp_tres;
+					object->grp_tres = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->grp_tres_ctld,
+					rec->grp_tres, INFINITE64, 1);
 			}
-			if (object->grp_jobs != NO_VAL)
-				rec->grp_jobs = object->grp_jobs;
-			if (object->grp_mem != NO_VAL) {
-				update_jobs = true;
-				rec->grp_mem = object->grp_mem;
+
+			if (object->grp_tres_mins) {
+				xfree(rec->grp_tres_mins);
+				if (object->grp_tres_mins[0]) {
+					rec->grp_tres_mins =
+						object->grp_tres_mins;
+					object->grp_tres_mins = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->grp_tres_mins_ctld,
+					rec->grp_tres_mins, INFINITE64, 1);
 			}
-			if (object->grp_nodes != NO_VAL) {
-				update_jobs = true;
-				rec->grp_nodes = object->grp_nodes;
+
+			if (object->grp_tres_run_mins) {
+				xfree(rec->grp_tres_run_mins);
+				if (object->grp_tres_run_mins[0]) {
+					rec->grp_tres_run_mins =
+						object->grp_tres_run_mins;
+					object->grp_tres_run_mins = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->grp_tres_run_mins_ctld,
+					rec->grp_tres_run_mins, INFINITE64, 1);
 			}
+
+			if (object->grp_jobs != NO_VAL)
+				rec->grp_jobs = object->grp_jobs;
 			if (object->grp_submit_jobs != NO_VAL)
 				rec->grp_submit_jobs = object->grp_submit_jobs;
 			if (object->grp_wall != NO_VAL) {
@@ -2832,21 +3489,56 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 				resort = 1;
 			}
 
-			if (object->max_cpu_mins_pj != (uint64_t)NO_VAL)
-				rec->max_cpu_mins_pj = object->max_cpu_mins_pj;
-			if (object->max_cpu_run_mins != (uint64_t)NO_VAL)
-				rec->max_cpu_run_mins =
-					object->max_cpu_run_mins;
-			if (object->max_cpus_pj != NO_VAL) {
+			if (object->max_tres_pj) {
 				update_jobs = true;
-				rec->max_cpus_pj = object->max_cpus_pj;
+				xfree(rec->max_tres_pj);
+				if (object->max_tres_pj[0]) {
+					rec->max_tres_pj = object->max_tres_pj;
+					object->max_tres_pj = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->max_tres_ctld,
+					rec->max_tres_pj, INFINITE64, 1);
 			}
-			if (object->max_jobs != NO_VAL)
-				rec->max_jobs = object->max_jobs;
-			if (object->max_nodes_pj != NO_VAL) {
+
+			if (object->max_tres_pn) {
 				update_jobs = true;
-				rec->max_nodes_pj = object->max_nodes_pj;
+				xfree(rec->max_tres_pn);
+				if (object->max_tres_pn[0]) {
+					rec->max_tres_pn = object->max_tres_pn;
+					object->max_tres_pn = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->max_tres_pn_ctld,
+					rec->max_tres_pn, INFINITE64, 1);
+			}
+
+			if (object->max_tres_mins_pj) {
+				xfree(rec->max_tres_mins_pj);
+				if (object->max_tres_mins_pj[0]) {
+					rec->max_tres_mins_pj =
+						object->max_tres_mins_pj;
+					object->max_tres_mins_pj = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->max_tres_mins_ctld,
+					rec->max_tres_mins_pj, INFINITE64, 1);
 			}
+
+			if (object->max_tres_run_mins) {
+				xfree(rec->max_tres_run_mins);
+				if (object->max_tres_run_mins[0]) {
+					rec->max_tres_run_mins =
+						object->max_tres_run_mins;
+					object->max_tres_run_mins = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->max_tres_run_mins_ctld,
+					rec->max_tres_run_mins, INFINITE64, 1);
+			}
+
+			if (object->max_jobs != NO_VAL)
+				rec->max_jobs = object->max_jobs;
 			if (object->max_submit_jobs != NO_VAL)
 				rec->max_submit_jobs = object->max_submit_jobs;
 			if (object->max_wall_pj != NO_VAL) {
@@ -2945,7 +3637,8 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 
 			if (!object->usage)
 				object->usage =
-					create_assoc_mgr_association_usage();
+					slurmdb_create_assoc_usage(
+						g_tres_count);
 			/* If is_def is uninitialized the value will
 			   be NO_VAL, so if it isn't 1 make it 0.
 			*/
@@ -2955,7 +3648,9 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 			/* Set something so we know to add it to the hash */
 			object->uid = INFINITE;
 
-			list_append(assoc_mgr_association_list, object);
+			assoc_mgr_set_assoc_tres_cnt(object);
+
+			list_append(assoc_mgr_assoc_list, object);
 
 			object = NULL;
 			parents_changed = 1; /* set since we need to
@@ -2988,10 +3683,10 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 				*/
 				if (!remove_list)
 					remove_list = list_create(
-						slurmdb_destroy_association_rec);
+						slurmdb_destroy_assoc_rec);
 				list_append(remove_list, rec);
 			} else
-				slurmdb_destroy_association_rec(rec);
+				slurmdb_destroy_assoc_rec(rec);
 			break;
 		case SLURMDB_REMOVE_ASSOC_USAGE:
 			if (!rec) {
@@ -3004,7 +3699,7 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 			break;
 		}
 
-		slurmdb_destroy_association_rec(object);
+		slurmdb_destroy_assoc_rec(object);
 	}
 
 	/* We have to do this after the entire list is processed since
@@ -3014,9 +3709,9 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 		int reset = 1;
 		g_user_assoc_count = 0;
 		slurmdb_sort_hierarchical_assoc_list(
-			assoc_mgr_association_list);
+			assoc_mgr_assoc_list);
 
-		itr = list_iterator_create(assoc_mgr_association_list);
+		itr = list_iterator_create(assoc_mgr_assoc_list);
 		/* flush the children lists */
 		if (setup_children) {
 			while ((object = list_next(itr))) {
@@ -3034,6 +3729,8 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 			if (!object->user) {
 				_clear_used_assoc_info(object);
 				object->usage->usage_raw = 0;
+				for (i=0; i<object->usage->tres_cnt; i++)
+					object->usage->usage_tres_raw[i] = 0;
 				object->usage->grp_used_wall = 0;
 			}
 
@@ -3047,7 +3744,6 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 
 			if (addit)
 				_add_assoc_hash(object);
-
 			reset = 0;
 		}
 		/* Now that we have set up the parents correctly we
@@ -3092,9 +3788,10 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 		list_iterator_destroy(itr);
 	} else if (resort)
 		slurmdb_sort_hierarchical_assoc_list(
-			assoc_mgr_association_list);
+			assoc_mgr_assoc_list);
 
-	assoc_mgr_unlock(&locks);
+	if (!locked)
+		assoc_mgr_unlock(&locks);
 
 	/* This needs to happen outside of the
 	   assoc_mgr_lock */
@@ -3103,7 +3800,7 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 		while ((rec = list_next(itr)))
 			init_setup.remove_assoc_notify(rec);
 		list_iterator_destroy(itr);
-		list_destroy(remove_list);
+		FREE_NULL_LIST(remove_list);
 	}
 
 	if (update_list) {
@@ -3111,7 +3808,7 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 		while ((rec = list_next(itr)))
 			init_setup.update_assoc_notify(rec);
 		list_iterator_destroy(itr);
-		list_destroy(update_list);
+		FREE_NULL_LIST(update_list);
 	}
 
 	if (run_update_resvs && init_setup.update_resvs)
@@ -3120,19 +3817,21 @@ extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update)
 	return rc;
 }
 
-extern int assoc_mgr_update_wckeys(slurmdb_update_object_t *update)
+extern int assoc_mgr_update_wckeys(slurmdb_update_object_t *update, bool locked)
 {
 	slurmdb_wckey_rec_t * rec = NULL;
 	slurmdb_wckey_rec_t * object = NULL;
 	ListIterator itr = NULL;
 	int rc = SLURM_SUCCESS;
 	uid_t pw_uid;
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, WRITE_LOCK, WRITE_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, WRITE_LOCK, WRITE_LOCK };
 
-	assoc_mgr_lock(&locks);
+	if (!locked)
+		assoc_mgr_lock(&locks);
 	if (!assoc_mgr_wckey_list) {
-		assoc_mgr_unlock(&locks);
+		if (!locked)
+			assoc_mgr_unlock(&locks);
 		return SLURM_SUCCESS;
 	}
 
@@ -3235,12 +3934,13 @@ extern int assoc_mgr_update_wckeys(slurmdb_update_object_t *update)
 		slurmdb_destroy_wckey_rec(object);
 	}
 	list_iterator_destroy(itr);
-	assoc_mgr_unlock(&locks);
+	if (!locked)
+		assoc_mgr_unlock(&locks);
 
 	return rc;
 }
 
-extern int assoc_mgr_update_users(slurmdb_update_object_t *update)
+extern int assoc_mgr_update_users(slurmdb_update_object_t *update, bool locked)
 {
 	slurmdb_user_rec_t * rec = NULL;
 	slurmdb_user_rec_t * object = NULL;
@@ -3248,12 +3948,14 @@ extern int assoc_mgr_update_users(slurmdb_update_object_t *update)
 	ListIterator itr = NULL;
 	int rc = SLURM_SUCCESS;
 	uid_t pw_uid;
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, WRITE_LOCK, WRITE_LOCK };
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, WRITE_LOCK, WRITE_LOCK };
 
-	assoc_mgr_lock(&locks);
+	if (!locked)
+		assoc_mgr_lock(&locks);
 	if (!assoc_mgr_user_list) {
-		assoc_mgr_unlock(&locks);
+		if (!locked)
+			assoc_mgr_unlock(&locks);
 		return SLURM_SUCCESS;
 	}
 
@@ -3341,8 +4043,7 @@ extern int assoc_mgr_update_users(slurmdb_update_object_t *update)
 				if (rec->coord_accts)
 					list_flush(rec->coord_accts);
 			} else {
-				if (rec->coord_accts)
-					list_destroy(rec->coord_accts);
+				FREE_NULL_LIST(rec->coord_accts);
 				rec->coord_accts = object->coord_accts;
 				object->coord_accts = NULL;
 			}
@@ -3354,30 +4055,33 @@ extern int assoc_mgr_update_users(slurmdb_update_object_t *update)
 		slurmdb_destroy_user_rec(object);
 	}
 	list_iterator_destroy(itr);
-	assoc_mgr_unlock(&locks);
+	if (!locked)
+		assoc_mgr_unlock(&locks);
 
 	return rc;
 }
 
-extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
+extern int assoc_mgr_update_qos(slurmdb_update_object_t *update, bool locked)
 {
 	slurmdb_qos_rec_t *rec = NULL;
 	slurmdb_qos_rec_t *object = NULL;
 
 	ListIterator itr = NULL, assoc_itr = NULL;
 
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	int rc = SLURM_SUCCESS;
 	bool resize_qos_bitstr = 0;
 	int redo_priority = 0;
 	List remove_list = NULL;
 	List update_list = NULL;
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
-	assoc_mgr_lock(&locks);
+	if (!locked)
+		assoc_mgr_lock(&locks);
 	if (!assoc_mgr_qos_list) {
-		assoc_mgr_unlock(&locks);
+		if (!locked)
+			assoc_mgr_unlock(&locks);
 		return SLURM_SUCCESS;
 	}
 
@@ -3400,7 +4104,11 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 			}
 
 			if (!object->usage)
-				object->usage = create_assoc_mgr_qos_usage();
+				object->usage = slurmdb_create_qos_usage(
+					g_tres_count);
+
+			assoc_mgr_set_qos_tres_cnt(object);
+
 			list_append(assoc_mgr_qos_list, object);
 /* 			char *tmp = get_qos_complete_str_bitstr( */
 /* 				assoc_mgr_qos_list, */
@@ -3446,25 +4154,49 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 
 			if (object->grace_time != NO_VAL)
 				rec->grace_time = object->grace_time;
-			if (object->grp_cpu_mins != (uint64_t)NO_VAL)
-				rec->grp_cpu_mins = object->grp_cpu_mins;
-			if (object->grp_cpu_run_mins != (uint64_t)NO_VAL)
-				rec->grp_cpu_run_mins =
-					object->grp_cpu_run_mins;
-			if (object->grp_cpus != NO_VAL) {
+
+			if (object->grp_tres) {
 				update_jobs = true;
-				rec->grp_cpus = object->grp_cpus;
+				/* If we have a blank string that
+				 * means it is cleared.
+				 */
+				xfree(rec->grp_tres);
+				if (object->grp_tres[0]) {
+					rec->grp_tres = object->grp_tres;
+					object->grp_tres = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->grp_tres_ctld, rec->grp_tres,
+					INFINITE64, 1);
+
 			}
-			if (object->grp_jobs != NO_VAL)
-				rec->grp_jobs = object->grp_jobs;
-			if (object->grp_mem != NO_VAL) {
-				update_jobs = true;
-				rec->grp_mem = object->grp_mem;
+
+			if (object->grp_tres_mins) {
+				xfree(rec->grp_tres_mins);
+				if (object->grp_tres_mins[0]) {
+					rec->grp_tres_mins =
+						object->grp_tres_mins;
+					object->grp_tres_mins = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->grp_tres_mins_ctld,
+					rec->grp_tres_mins, INFINITE64, 1);
 			}
-			if (object->grp_nodes != NO_VAL) {
-				update_jobs = true;
-				rec->grp_nodes = object->grp_nodes;
+
+			if (object->grp_tres_run_mins) {
+				xfree(rec->grp_tres_run_mins);
+				if (object->grp_tres_run_mins[0]) {
+					rec->grp_tres_run_mins =
+						object->grp_tres_run_mins;
+					object->grp_tres_run_mins = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->grp_tres_run_mins_ctld,
+					rec->grp_tres_run_mins, INFINITE64, 1);
 			}
+
+			if (object->grp_jobs != NO_VAL)
+				rec->grp_jobs = object->grp_jobs;
 			if (object->grp_submit_jobs != NO_VAL)
 				rec->grp_submit_jobs = object->grp_submit_jobs;
 			if (object->grp_wall != NO_VAL) {
@@ -3472,29 +4204,69 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 				rec->grp_wall = object->grp_wall;
 			}
 
-			if (object->max_cpu_mins_pj != (uint64_t)NO_VAL)
-				rec->max_cpu_mins_pj = object->max_cpu_mins_pj;
-			if (object->max_cpu_run_mins_pu != (uint64_t)NO_VAL)
-				rec->max_cpu_run_mins_pu =
-					object->max_cpu_run_mins_pu;
-			if (object->max_cpus_pj != NO_VAL) {
+			if (object->max_tres_pj) {
 				update_jobs = true;
-				rec->max_cpus_pj = object->max_cpus_pj;
+				xfree(rec->max_tres_pj);
+				if (object->max_tres_pj[0]) {
+					rec->max_tres_pj = object->max_tres_pj;
+					object->max_tres_pj = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->max_tres_pj_ctld,
+					rec->max_tres_pj, INFINITE64, 1);
 			}
-			if (object->max_cpus_pu != NO_VAL) {
+
+			if (object->max_tres_pn) {
 				update_jobs = true;
-				rec->max_cpus_pu = object->max_cpus_pu;
+				xfree(rec->max_tres_pn);
+				if (object->max_tres_pn[0]) {
+					rec->max_tres_pn = object->max_tres_pn;
+					object->max_tres_pn = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->max_tres_pn_ctld,
+					rec->max_tres_pn, INFINITE64, 1);
 			}
-			if (object->max_jobs_pu != NO_VAL)
-				rec->max_jobs_pu = object->max_jobs_pu;
-			if (object->max_nodes_pj != NO_VAL) {
+
+			if (object->max_tres_pu) {
 				update_jobs = true;
-				rec->max_nodes_pj = object->max_nodes_pj;
+				xfree(rec->max_tres_pu);
+				if (object->max_tres_pu[0]) {
+					rec->max_tres_pu = object->max_tres_pu;
+					object->max_tres_pu = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->max_tres_pu_ctld,
+					rec->max_tres_pu, INFINITE64, 1);
 			}
-			if (object->max_nodes_pu != NO_VAL) {
-				update_jobs = true;
-				rec->max_nodes_pu = object->max_nodes_pu;
+
+			if (object->max_tres_mins_pj) {
+				xfree(rec->max_tres_mins_pj);
+				if (object->max_tres_mins_pj[0]) {
+					rec->max_tres_mins_pj =
+						object->max_tres_mins_pj;
+					object->max_tres_mins_pj = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->max_tres_mins_pj_ctld,
+					rec->max_tres_mins_pj, INFINITE64, 1);
 			}
+
+			if (object->max_tres_run_mins_pu) {
+				xfree(rec->max_tres_run_mins_pu);
+				if (object->max_tres_run_mins_pu[0]) {
+					rec->max_tres_run_mins_pu =
+						object->max_tres_run_mins_pu;
+					object->max_tres_run_mins_pu = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->max_tres_run_mins_pu_ctld,
+					rec->max_tres_run_mins_pu,
+					INFINITE64, 1);
+			}
+
+			if (object->max_jobs_pu != NO_VAL)
+				rec->max_jobs_pu = object->max_jobs_pu;
 			if (object->max_submit_jobs_pu != NO_VAL)
 				rec->max_submit_jobs_pu =
 					object->max_submit_jobs_pu;
@@ -3503,8 +4275,16 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 				rec->max_wall_pj = object->max_wall_pj;
 			}
 
-			if (object->min_cpus_pj != NO_VAL)
-				rec->min_cpus_pj = object->min_cpus_pj;
+			if (object->min_tres_pj) {
+				xfree(rec->min_tres_pj);
+				if (object->min_tres_pj[0]) {
+					rec->min_tres_pj = object->min_tres_pj;
+					object->min_tres_pj = NULL;
+				}
+				assoc_mgr_set_tres_cnt_array(
+					&rec->min_tres_pj_ctld,
+					rec->min_tres_pj, INFINITE64, 1);
+			}
 
 			if (object->preempt_bitstr) {
 				if (rec->preempt_bitstr)
@@ -3577,13 +4357,13 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 			} else
 				list_delete_item(itr);
 
-			if (!assoc_mgr_association_list)
+			if (!assoc_mgr_assoc_list)
 				break;
 			/* Remove this qos from all the associations
 			   on this cluster.
 			*/
 			assoc_itr = list_iterator_create(
-				assoc_mgr_association_list);
+				assoc_mgr_assoc_list);
 			while ((assoc = list_next(assoc_itr))) {
 
 				if (assoc->def_qos_id == object->id)
@@ -3625,9 +4405,9 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 				bit_realloc(object->preempt_bitstr,
 					    g_qos_count);
 		}
-		if (assoc_mgr_association_list) {
+		if (assoc_mgr_assoc_list) {
 			assoc_itr = list_iterator_create(
-				assoc_mgr_association_list);
+				assoc_mgr_assoc_list);
 			while ((assoc = list_next(assoc_itr))) {
 				if (!assoc->usage->valid_qos)
 					continue;
@@ -3648,7 +4428,8 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 
 	list_iterator_destroy(itr);
 
-	assoc_mgr_unlock(&locks);
+	if (!locked)
+		assoc_mgr_unlock(&locks);
 
 	/* This needs to happen outside of the
 	   assoc_mgr_lock */
@@ -3657,7 +4438,7 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 		while ((rec = list_next(itr)))
 			init_setup.remove_qos_notify(rec);
 		list_iterator_destroy(itr);
-		list_destroy(remove_list);
+		FREE_NULL_LIST(remove_list);
 	}
 
 	if (update_list) {
@@ -3665,25 +4446,27 @@ extern int assoc_mgr_update_qos(slurmdb_update_object_t *update)
 		while ((rec = list_next(itr)))
 			init_setup.update_qos_notify(rec);
 		list_iterator_destroy(itr);
-		list_destroy(update_list);
+		FREE_NULL_LIST(update_list);
 	}
 
 	return rc;
 }
 
-extern int assoc_mgr_update_res(slurmdb_update_object_t *update)
+extern int assoc_mgr_update_res(slurmdb_update_object_t *update, bool locked)
 {
 	slurmdb_res_rec_t *rec = NULL;
 	slurmdb_res_rec_t *object = NULL;
 
 	ListIterator itr = NULL;
 	int rc = SLURM_SUCCESS;
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, WRITE_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
-	assoc_mgr_lock(&locks);
+	if (!locked)
+		assoc_mgr_lock(&locks);
 	if (!assoc_mgr_res_list) {
-		assoc_mgr_unlock(&locks);
+		if (!locked)
+			assoc_mgr_unlock(&locks);
 		return SLURM_SUCCESS;
 	}
 
@@ -3815,7 +4598,71 @@ extern int assoc_mgr_update_res(slurmdb_update_object_t *update)
 		slurmdb_destroy_res_rec(object);
 	}
 	list_iterator_destroy(itr);
-	assoc_mgr_unlock(&locks);
+	if (!locked)
+		assoc_mgr_unlock(&locks);
+	return rc;
+}
+
+extern int assoc_mgr_update_tres(slurmdb_update_object_t *update, bool locked)
+{
+	slurmdb_tres_rec_t *rec = NULL;
+	slurmdb_tres_rec_t *object = NULL;
+
+	ListIterator itr = NULL;
+	List tmp_list = assoc_mgr_tres_list;
+	bool changed = false, freeit = false;
+	int rc = SLURM_SUCCESS;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   WRITE_LOCK, NO_LOCK, NO_LOCK };
+	if (!locked)
+		assoc_mgr_lock(&locks);
+
+	if (!tmp_list) {
+		tmp_list = list_create(slurmdb_destroy_tres_rec);
+		freeit = true;
+	}
+
+	itr = list_iterator_create(tmp_list);
+	while ((object = list_pop(update->objects))) {
+		list_iterator_reset(itr);
+		while ((rec = list_next(itr))) {
+			if (object->id == rec->id)
+				break;
+		}
+
+		switch (update->type) {
+		case SLURMDB_ADD_TRES:
+			if (rec) {
+				//rc = SLURM_ERROR;
+				break;
+			}
+			if (!object->id) {
+				error("trying to add resource without an id!  "
+				      "This should never happen.");
+				break;
+			}
+			list_append(tmp_list, object);
+			object = NULL;
+			changed = true;
+			break;
+		default:
+			break;
+		}
+
+		slurmdb_destroy_tres_rec(object);
+	}
+	list_iterator_destroy(itr);
+	if (changed) {
+		/* We want to run this on the assoc_mgr_tres_list, but we need
+		 * to make a tmp variable since _post_tres_list will set
+		 * assoc_mgr_tres_list for us.
+		 */
+		_post_tres_list(tmp_list, list_count(tmp_list));
+	} else if (freeit)
+		FREE_NULL_LIST(tmp_list);
+
+	if (!locked)
+		assoc_mgr_unlock(&locks);
 	return rc;
 }
 
@@ -3823,21 +4670,21 @@ extern int assoc_mgr_validate_assoc_id(void *db_conn,
 				       uint32_t assoc_id,
 				       int enforce)
 {
-	slurmdb_association_rec_t * found_assoc = NULL;
-	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	slurmdb_assoc_rec_t * found_assoc = NULL;
+	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 	/* Call assoc_mgr_refresh_lists instead of just getting the
 	   association list because we need qos and user lists before
 	   the association list can be made.
 	*/
-	if (!assoc_mgr_association_list)
-		if (assoc_mgr_refresh_lists(db_conn) == SLURM_ERROR)
+	if (!assoc_mgr_assoc_list)
+		if (assoc_mgr_refresh_lists(db_conn, 0) == SLURM_ERROR)
 			return SLURM_ERROR;
 
 	assoc_mgr_lock(&locks);
-	if ((!assoc_mgr_association_list
-	     || !list_count(assoc_mgr_association_list))
+	if ((!assoc_mgr_assoc_list
+	     || !list_count(assoc_mgr_assoc_list))
 	    && !(enforce & ACCOUNTING_ENFORCE_ASSOCS)) {
 		assoc_mgr_unlock(&locks);
 		return SLURM_SUCCESS;
@@ -3855,14 +4702,14 @@ extern int assoc_mgr_validate_assoc_id(void *db_conn,
 extern void assoc_mgr_clear_used_info(void)
 {
 	ListIterator itr = NULL;
-	slurmdb_association_rec_t * found_assoc = NULL;
+	slurmdb_assoc_rec_t * found_assoc = NULL;
 	slurmdb_qos_rec_t * found_qos = NULL;
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 	assoc_mgr_lock(&locks);
-	if (assoc_mgr_association_list) {
-		itr = list_iterator_create(assoc_mgr_association_list);
+	if (assoc_mgr_assoc_list) {
+		itr = list_iterator_create(assoc_mgr_assoc_list);
 		while ((found_assoc = list_next(itr))) {
 			_clear_used_assoc_info(found_assoc);
 		}
@@ -3882,8 +4729,9 @@ extern void assoc_mgr_clear_used_info(void)
 
 static void _reset_children_usages(List children_list)
 {
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	ListIterator itr = NULL;
+	int i;
 
 	if (!children_list || !list_count(children_list))
 		return;
@@ -3892,6 +4740,9 @@ static void _reset_children_usages(List children_list)
 	while ((assoc = list_next(itr))) {
 		assoc->usage->usage_raw = 0.0;
 		assoc->usage->grp_used_wall = 0.0;
+		for (i=0; i<assoc->usage->tres_cnt; i++)
+			assoc->usage->usage_tres_raw[i] = 0;
+
 		if (assoc->user)
 			continue;
 
@@ -3900,13 +4751,84 @@ static void _reset_children_usages(List children_list)
 	list_iterator_destroy(itr);
 }
 
-extern void assoc_mgr_remove_assoc_usage(slurmdb_association_rec_t *assoc)
+/* tres read lock needs to be locked before calling this. */
+static char *_make_usage_tres_raw_str(long double *tres_cnt)
+{
+	int i;
+	char *tres_str = NULL;
+
+	if (!tres_cnt)
+		return NULL;
+
+	for (i=0; i<g_tres_count; i++) {
+		if (!assoc_mgr_tres_array[i] || !tres_cnt[i])
+			continue;
+		xstrfmtcat(tres_str, "%s%u=%Lf", tres_str ? "," : "",
+			   assoc_mgr_tres_array[i]->id, tres_cnt[i]);
+	}
+
+	return tres_str;
+}
+
+static void _set_usage_tres_raw(long double *tres_cnt, char *tres_str)
+{
+	char *tmp_str = tres_str;
+	int pos, id;
+	char *endptr;
+	slurmdb_tres_rec_t tres_rec;
+
+	xassert(tres_cnt);
+
+	if (!tres_str || !tres_str[0])
+		return;
+
+	if (tmp_str[0] == ',')
+		tmp_str++;
+
+	memset(&tres_rec, 0, sizeof(slurmdb_tres_rec_t));
+
+	while (tmp_str) {
+		id = atoi(tmp_str);
+		/* 0 isn't a valid tres id */
+		if (id <= 0) {
+			error("_set_usage_tres_raw: no id "
+			      "found at %s instead", tmp_str);
+			break;
+		}
+		if (!(tmp_str = strchr(tmp_str, '='))) {
+			error("_set_usage_tres_raw: "
+			      "no value found %s", tres_str);
+			break;
+		}
+
+		tres_rec.id = id;
+		pos = assoc_mgr_find_tres_pos(&tres_rec, true);
+		if (pos != -1) {
+			/* set the index to the count */
+			tres_cnt[pos] = strtold(++tmp_str, &endptr);
+		} else {
+			debug("_set_usage_tres_raw: "
+			       "no tres of id %u found in the array",
+			       tres_rec.id);
+		}
+		if (!(tmp_str = strchr(tmp_str, ',')))
+			break;
+		tmp_str++;
+	}
+
+
+	return;
+}
+
+extern void assoc_mgr_remove_assoc_usage(slurmdb_assoc_rec_t *assoc)
 {
 	char *child;
 	char *child_str;
 	long double old_usage_raw = 0.0;
+	long double old_usage_tres_raw[g_tres_count];
+	int i;
 	double old_grp_used_wall = 0.0;
-	slurmdb_association_rec_t *sav_assoc = assoc;
+	slurmdb_assoc_rec_t *sav_assoc = assoc;
 
 	xassert(assoc);
 	xassert(assoc->usage);
@@ -3921,6 +4843,10 @@ extern void assoc_mgr_remove_assoc_usage(slurmdb_association_rec_t *assoc)
 	info("Resetting usage for %s %s", child, child_str);
 
 	old_usage_raw = assoc->usage->usage_raw;
+	/* clang needs this memset to avoid a warning */
+	memset(old_usage_tres_raw, 0, sizeof(old_usage_tres_raw));
+	for (i=0; i<g_tres_count; i++)
+		old_usage_tres_raw[i] = assoc->usage->usage_tres_raw[i];
 	old_grp_used_wall = assoc->usage->grp_used_wall;
 /*
  *	Reset this association's raw and group usages and subtract its
@@ -3934,6 +4860,11 @@ extern void assoc_mgr_remove_assoc_usage(slurmdb_association_rec_t *assoc)
 		     assoc->id, assoc->user, assoc->acct);
 
 		assoc->usage->usage_raw -= old_usage_raw;
+
+		for (i=0; i<g_tres_count; i++)
+			assoc->usage->usage_tres_raw[i] -=
+				old_usage_tres_raw[i];
+
 		assoc->usage->grp_used_wall -= old_grp_used_wall;
 		assoc = assoc->usage->parent_assoc_ptr;
 	}
@@ -3947,6 +4878,8 @@ extern void assoc_mgr_remove_assoc_usage(slurmdb_association_rec_t *assoc)
 
 extern void assoc_mgr_remove_qos_usage(slurmdb_qos_rec_t *qos)
 {
+	int i;
+
 	xassert(qos);
 	xassert(qos->usage);
 
@@ -3954,19 +4887,24 @@ extern void assoc_mgr_remove_qos_usage(slurmdb_qos_rec_t *qos)
 
 	qos->usage->usage_raw = 0;
 	qos->usage->grp_used_wall = 0;
-	if (!qos->usage->grp_used_cpus)
-		qos->usage->grp_used_cpu_run_secs = 0;
+
+	for (i=0; i<qos->usage->tres_cnt; i++) {
+		qos->usage->usage_tres_raw[i] = 0;
+		if (!qos->usage->grp_used_tres[i])
+			qos->usage->grp_used_tres_run_secs[i] = 0;
+	}
 }
 
 extern int dump_assoc_mgr_state(char *state_save_location)
 {
 	static int high_buffer_size = (1024 * 1024);
 	int error_code = 0, log_fd;
-	char *old_file = NULL, *new_file = NULL, *reg_file = NULL;
+	char *old_file = NULL, *new_file = NULL, *reg_file = NULL,
+		*tmp_char = NULL;
 	dbd_list_msg_t msg;
 	Buf buffer = init_buf(high_buffer_size);
-	assoc_mgr_lock_t locks = { READ_LOCK, WRITE_LOCK,
-				   READ_LOCK, READ_LOCK, READ_LOCK, READ_LOCK};
+	assoc_mgr_lock_t locks = { READ_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK,
+				   READ_LOCK, READ_LOCK, READ_LOCK};
 	DEF_TIMERS;
 
 	START_TIMER;
@@ -3975,6 +4913,15 @@ extern int dump_assoc_mgr_state(char *state_save_location)
 	pack_time(time(NULL), buffer);
 
 	assoc_mgr_lock(&locks);
+	if (assoc_mgr_tres_list) {
+		memset(&msg, 0, sizeof(dbd_list_msg_t));
+		msg.my_list = assoc_mgr_tres_list;
+		/* let us know what to unpack */
+		pack16(DBD_ADD_TRES, buffer);
+		slurmdbd_pack_list_msg(&msg, SLURM_PROTOCOL_VERSION,
+				       DBD_ADD_TRES, buffer);
+	}
+
 	if (assoc_mgr_user_list) {
 		memset(&msg, 0, sizeof(dbd_list_msg_t));
 		msg.my_list = assoc_mgr_user_list;
@@ -4012,9 +4959,9 @@ extern int dump_assoc_mgr_state(char *state_save_location)
 	}
 	/* this needs to be done last so qos is set up
 	 * before hand when loading it back */
-	if (assoc_mgr_association_list) {
+	if (assoc_mgr_assoc_list) {
 		memset(&msg, 0, sizeof(dbd_list_msg_t));
-		msg.my_list = assoc_mgr_association_list;
+		msg.my_list = assoc_mgr_assoc_list;
 		/* let us know what to unpack */
 		pack16(DBD_ADD_ASSOCS, buffer);
 		slurmdbd_pack_list_msg(&msg, SLURM_PROTOCOL_VERSION,
@@ -4070,22 +5017,23 @@ extern int dump_assoc_mgr_state(char *state_save_location)
 
 	buffer = init_buf(high_buffer_size);
 	/* write header: version, time */
-	pack16(ASSOC_USAGE_VERSION, buffer);
+	pack16(SLURM_PROTOCOL_VERSION, buffer);
 	pack_time(time(NULL), buffer);
 
-	if (assoc_mgr_association_list) {
+	if (assoc_mgr_assoc_list) {
 		ListIterator itr = NULL;
-		slurmdb_association_rec_t *assoc = NULL;
-		itr = list_iterator_create(assoc_mgr_association_list);
+		slurmdb_assoc_rec_t *assoc = NULL;
+		itr = list_iterator_create(assoc_mgr_assoc_list);
 		while ((assoc = list_next(itr))) {
 			if (!assoc->user)
 				continue;
 
 			pack32(assoc->id, buffer);
-			/* we only care about the main part here so
-			   anything under 1 we are dropping
-			*/
-			pack64((uint64_t)assoc->usage->usage_raw, buffer);
+			packlongdouble(assoc->usage->usage_raw, buffer);
+			tmp_char = _make_usage_tres_raw_str(
+				assoc->usage->usage_tres_raw);
+			packstr(tmp_char, buffer);
+			xfree(tmp_char);
 			pack32(assoc->usage->grp_used_wall, buffer);
 		}
 		list_iterator_destroy(itr);
@@ -4139,7 +5087,7 @@ extern int dump_assoc_mgr_state(char *state_save_location)
 
 	buffer = init_buf(high_buffer_size);
 	/* write header: version, time */
-	pack16(ASSOC_USAGE_VERSION, buffer);
+	pack16(SLURM_PROTOCOL_VERSION, buffer);
 	pack_time(time(NULL), buffer);
 
 	if (assoc_mgr_qos_list) {
@@ -4148,10 +5096,11 @@ extern int dump_assoc_mgr_state(char *state_save_location)
 		itr = list_iterator_create(assoc_mgr_qos_list);
 		while ((qos = list_next(itr))) {
 			pack32(qos->id, buffer);
-			/* we only care about the main part here so
-			   anything under 1 we are dropping
-			*/
-			pack64((uint64_t)qos->usage->usage_raw, buffer);
+			packlongdouble(qos->usage->usage_raw, buffer);
+			tmp_char = _make_usage_tres_raw_str(
+				qos->usage->usage_tres_raw);
+			packstr(tmp_char, buffer);
+			xfree(tmp_char);
 			pack32(qos->usage->grp_used_wall, buffer);
 		}
 		list_iterator_destroy(itr);
@@ -4209,17 +5158,17 @@ extern int dump_assoc_mgr_state(char *state_save_location)
 
 extern int load_assoc_usage(char *state_save_location)
 {
-	int data_allocated, data_read = 0;
+	int data_allocated, data_read = 0, i;
 	uint32_t data_size = 0;
 	uint16_t ver = 0;
 	int state_fd;
 	char *data = NULL, *state_file;
-	Buf buffer;
+	Buf buffer = NULL;
 	time_t buf_time;
-	assoc_mgr_lock_t locks = { WRITE_LOCK, READ_LOCK,
-				   NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { WRITE_LOCK, READ_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
-	if (!assoc_mgr_association_list)
+	if (!assoc_mgr_assoc_list)
 		return SLURM_SUCCESS;
 
 	/* read the file */
@@ -4230,6 +5179,7 @@ extern int load_assoc_usage(char *state_save_location)
 	state_fd = open(state_file, O_RDONLY);
 	if (state_fd < 0) {
 		debug2("No Assoc usage file (%s) to recover", state_file);
+		goto unpack_error;
 	} else {
 		data_allocated = BUF_SIZE;
 		data = xmalloc(data_allocated);
@@ -4257,11 +5207,14 @@ extern int load_assoc_usage(char *state_save_location)
 	buffer = create_buf(data, data_size);
 
 	safe_unpack16(&ver, buffer);
-	debug3("Version in assoc_mgr_state header is %u", ver);
-	if (ver != ASSOC_USAGE_VERSION) {
+	debug3("Version in assoc_usage header is %u", ver);
+	/* We used to pack 1 here for the version, so we can't use
+	 * SLURM_MIN_PROTOCOL_VERSION to check until 2 versions after 15.08. */
+	if (ver > SLURM_PROTOCOL_VERSION) {
 		error("***********************************************");
-		error("Can not recover usage_mgr state, incompatible version, "
-		      "got %u need %u", ver, ASSOC_USAGE_VERSION);
+		error("Can not recover assoc_usage state, "
+		      "incompatible version, got %u need > %u <= %u", ver,
+		      SLURM_MIN_PROTOCOL_VERSION, SLURM_PROTOCOL_VERSION);
 		error("***********************************************");
 		free_buf(buffer);
 		assoc_mgr_unlock(&locks);
@@ -4273,12 +5226,24 @@ extern int load_assoc_usage(char *state_save_location)
 	while (remaining_buf(buffer) > 0) {
 		uint32_t assoc_id = 0;
 		uint32_t grp_used_wall = 0;
-		uint64_t usage_raw = 0;
-		slurmdb_association_rec_t *assoc = NULL;
-
-		safe_unpack32(&assoc_id, buffer);
-		safe_unpack64(&usage_raw, buffer);
-		safe_unpack32(&grp_used_wall, buffer);
+		long double usage_raw = 0;
+		slurmdb_assoc_rec_t *assoc = NULL;
+		char *tmp_str = NULL;
+		uint32_t tmp32;
+		long double usage_tres_raw[g_tres_count];
+
+		if (ver == SLURM_15_08_PROTOCOL_VERSION) {
+			safe_unpack32(&assoc_id, buffer);
+			safe_unpacklongdouble(&usage_raw, buffer);
+			safe_unpackstr_xmalloc(&tmp_str, &tmp32, buffer);
+			safe_unpack32(&grp_used_wall, buffer);
+		} else {
+			uint64_t tmp64;
+			safe_unpack32(&assoc_id, buffer);
+			safe_unpack64(&tmp64, buffer);
+			safe_unpack32(&grp_used_wall, buffer);
+			usage_raw = (long double)tmp64;
+		}
 		assoc = _find_assoc_rec_id(assoc_id);
 
 		/* We want to do this all the way up to and including
@@ -4289,14 +5254,21 @@ extern int load_assoc_usage(char *state_save_location)
 		if (assoc) {
 			assoc->usage->grp_used_wall = 0;
 			assoc->usage->usage_raw = 0;
+			for (i=0; i < g_tres_count; i++)
+				assoc->usage->usage_tres_raw[i] = 0;
+			memset(usage_tres_raw, 0, sizeof(usage_tres_raw));
+			_set_usage_tres_raw(usage_tres_raw, tmp_str);
 		}
-
 		while (assoc) {
 			assoc->usage->grp_used_wall += grp_used_wall;
-			assoc->usage->usage_raw += (long double)usage_raw;
-
+			assoc->usage->usage_raw += usage_raw;
+			for (i=0; i < g_tres_count; i++)
+				assoc->usage->usage_tres_raw[i] +=
+					usage_tres_raw[i];
 			assoc = assoc->usage->parent_assoc_ptr;
 		}
+
+		xfree(tmp_str);
 	}
 	assoc_mgr_unlock(&locks);
 
@@ -4317,11 +5289,11 @@ extern int load_qos_usage(char *state_save_location)
 	uint16_t ver = 0;
 	int state_fd;
 	char *data = NULL, *state_file;
-	Buf buffer;
+	Buf buffer = NULL;
 	time_t buf_time;
 	ListIterator itr = NULL;
-	assoc_mgr_lock_t locks = { NO_LOCK, READ_LOCK,
-				   WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, READ_LOCK, WRITE_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
 
 	if (!assoc_mgr_qos_list)
 		return SLURM_SUCCESS;
@@ -4334,6 +5306,7 @@ extern int load_qos_usage(char *state_save_location)
 	state_fd = open(state_file, O_RDONLY);
 	if (state_fd < 0) {
 		debug2("No Qos usage file (%s) to recover", state_file);
+		goto unpack_error;
 	} else {
 		data_allocated = BUF_SIZE;
 		data = xmalloc(data_allocated);
@@ -4361,11 +5334,14 @@ extern int load_qos_usage(char *state_save_location)
 	buffer = create_buf(data, data_size);
 
 	safe_unpack16(&ver, buffer);
-	debug3("Version in assoc_mgr_state header is %u", ver);
-	if (ver != ASSOC_USAGE_VERSION) {
+	debug3("Version in qos_usage header is %u", ver);
+	/* We used to pack 1 here for the version, so we can't use
+	 * SLURM_MIN_PROTOCOL_VERSION to check until 2 versions after 15.08. */
+	if (ver > SLURM_PROTOCOL_VERSION) {
 		error("***********************************************");
-		error("Can not recover usage_mgr state, incompatible version, "
-		      "got %u need %u", ver, ASSOC_USAGE_VERSION);
+		error("Can not recover qos_usage state, "
+		      "incompatible version, got %u need > %u <= %u", ver,
+		      SLURM_MIN_PROTOCOL_VERSION, SLURM_PROTOCOL_VERSION);
 		error("***********************************************");
 		free_buf(buffer);
 		assoc_mgr_unlock(&locks);
@@ -4378,20 +5354,34 @@ extern int load_qos_usage(char *state_save_location)
 	while (remaining_buf(buffer) > 0) {
 		uint32_t qos_id = 0;
 		uint32_t grp_used_wall = 0;
-		uint64_t usage_raw = 0;
+		uint32_t tmp32;
+		long double usage_raw = 0;
 		slurmdb_qos_rec_t *qos = NULL;
+		char *tmp_str = NULL;
 
-		safe_unpack32(&qos_id, buffer);
-		safe_unpack64(&usage_raw, buffer);
-		safe_unpack32(&grp_used_wall, buffer);
+		if (ver >= SLURM_15_08_PROTOCOL_VERSION) {
+			safe_unpack32(&qos_id, buffer);
+			safe_unpacklongdouble(&usage_raw, buffer);
+			safe_unpackstr_xmalloc(&tmp_str, &tmp32, buffer);
+			safe_unpack32(&grp_used_wall, buffer);
+		} else {
+			uint64_t tmp64 = 0;
+			safe_unpack32(&qos_id, buffer);
+			safe_unpack64(&tmp64, buffer);
+			safe_unpack32(&grp_used_wall, buffer);
+			usage_raw = (long double)tmp64;
+		}
 		while ((qos = list_next(itr)))
 			if (qos->id == qos_id)
 				break;
 		if (qos) {
 			qos->usage->grp_used_wall = grp_used_wall;
-			qos->usage->usage_raw = (long double)usage_raw;
+			qos->usage->usage_raw = usage_raw;
+			_set_usage_tres_raw(qos->usage->usage_tres_raw,
+					    tmp_str);
 		}
 
+		xfree(tmp_str);
 		list_iterator_reset(itr);
 	}
 	list_iterator_destroy(itr);
@@ -4417,12 +5407,12 @@ extern int load_assoc_mgr_state(char *state_save_location)
 	uint16_t ver = 0;
 	int state_fd;
 	char *data = NULL, *state_file;
-	Buf buffer;
+	Buf buffer = NULL;
 	time_t buf_time;
 	dbd_list_msg_t *msg = NULL;
 	assoc_mgr_lock_t locks = { WRITE_LOCK, READ_LOCK,
 				   WRITE_LOCK, WRITE_LOCK, WRITE_LOCK,
-				   WRITE_LOCK };
+				   WRITE_LOCK, WRITE_LOCK };
 
 	/* read the file */
 	state_file = xstrdup(state_save_location);
@@ -4432,7 +5422,7 @@ extern int load_assoc_mgr_state(char *state_save_location)
 	state_fd = open(state_file, O_RDONLY);
 	if (state_fd < 0) {
 		debug2("No association state file (%s) to recover", state_file);
-		return ENOENT;
+		goto unpack_error;
 	} else {
 		data_allocated = BUF_SIZE;
 		data = xmalloc(data_allocated);
@@ -4461,11 +5451,11 @@ extern int load_assoc_mgr_state(char *state_save_location)
 
 	safe_unpack16(&ver, buffer);
 	debug3("Version in assoc_mgr_state header is %u", ver);
-	if (ver > SLURM_PROTOCOL_VERSION || ver < SLURMDBD_MIN_VERSION) {
+	if (ver > SLURM_PROTOCOL_VERSION || ver < SLURM_MIN_PROTOCOL_VERSION) {
 		error("***********************************************");
 		error("Can not recover assoc_mgr state, incompatible version, "
 		      "got %u need > %u <= %u", ver,
-		      SLURMDBD_MIN_VERSION, SLURM_PROTOCOL_VERSION);
+		      SLURM_MIN_PROTOCOL_VERSION, SLURM_PROTOCOL_VERSION);
 		error("***********************************************");
 		free_buf(buffer);
 		assoc_mgr_unlock(&locks);
@@ -4476,7 +5466,29 @@ extern int load_assoc_mgr_state(char *state_save_location)
 	while (remaining_buf(buffer) > 0) {
 		safe_unpack16(&type, buffer);
 		switch(type) {
+		case DBD_ADD_TRES:
+			error_code = slurmdbd_unpack_list_msg(
+				&msg, ver, DBD_ADD_TRES, buffer);
+			if (error_code != SLURM_SUCCESS)
+				goto unpack_error;
+			else if (!msg->my_list) {
+				error("No tres retrieved");
+				break;
+			}
+			FREE_NULL_LIST(assoc_mgr_tres_list);
+			_post_tres_list(msg->my_list, list_count(msg->my_list));
+			/* assoc_mgr_tres_list gets set in _post_tres_list */
+			debug("Recovered %u tres",
+			      list_count(assoc_mgr_tres_list));
+			msg->my_list = NULL;
+			slurmdbd_free_list_msg(msg);
+			break;
 		case DBD_ADD_ASSOCS:
+			if (!g_tres_count)
+				fatal("load_assoc_mgr_state: "
+				      "Unable to run cache without TRES, "
+				      "please make sure you have a connection "
+				      "to your database to continue.");
 			error_code = slurmdbd_unpack_list_msg(
 				&msg, ver, DBD_ADD_ASSOCS, buffer);
 			if (error_code != SLURM_SUCCESS)
@@ -4485,13 +5497,12 @@ extern int load_assoc_mgr_state(char *state_save_location)
 				error("No associations retrieved");
 				break;
 			}
-			if (assoc_mgr_association_list)
-				list_destroy(assoc_mgr_association_list);
-			assoc_mgr_association_list = msg->my_list;
-			_post_association_list();
+			FREE_NULL_LIST(assoc_mgr_assoc_list);
+			assoc_mgr_assoc_list = msg->my_list;
+			_post_assoc_list();
 
 			debug("Recovered %u associations",
-			      list_count(assoc_mgr_association_list));
+			      list_count(assoc_mgr_assoc_list));
 			msg->my_list = NULL;
 			slurmdbd_free_list_msg(msg);
 			break;
@@ -4504,8 +5515,7 @@ extern int load_assoc_mgr_state(char *state_save_location)
 				error("No users retrieved");
 				break;
 			}
-			if (assoc_mgr_user_list)
-				list_destroy(assoc_mgr_user_list);
+			FREE_NULL_LIST(assoc_mgr_user_list);
 			assoc_mgr_user_list = msg->my_list;
 			_post_user_list(assoc_mgr_user_list);
 			debug("Recovered %u users",
@@ -4522,8 +5532,7 @@ extern int load_assoc_mgr_state(char *state_save_location)
 				error("No resources retrieved");
 				break;
 			}
-			if (assoc_mgr_res_list)
-				list_destroy(assoc_mgr_res_list);
+			FREE_NULL_LIST(assoc_mgr_res_list);
 			assoc_mgr_res_list = msg->my_list;
 			_post_res_list(assoc_mgr_res_list);
 			debug("Recovered %u resources",
@@ -4532,6 +5541,11 @@ extern int load_assoc_mgr_state(char *state_save_location)
 			slurmdbd_free_list_msg(msg);
 			break;
 		case DBD_ADD_QOS:
+			if (!g_tres_count)
+				fatal("load_assoc_mgr_state: "
+				      "Unable to run cache without TRES, "
+				      "please make sure you have a connection "
+				      "to your database to continue.");
 			error_code = slurmdbd_unpack_list_msg(
 				&msg, ver, DBD_ADD_QOS, buffer);
 			if (error_code != SLURM_SUCCESS)
@@ -4540,8 +5554,7 @@ extern int load_assoc_mgr_state(char *state_save_location)
 				error("No qos retrieved");
 				break;
 			}
-			if (assoc_mgr_qos_list)
-				list_destroy(assoc_mgr_qos_list);
+			FREE_NULL_LIST(assoc_mgr_qos_list);
 			assoc_mgr_qos_list = msg->my_list;
 			_post_qos_list(assoc_mgr_qos_list);
 			debug("Recovered %u qos",
@@ -4558,8 +5571,7 @@ extern int load_assoc_mgr_state(char *state_save_location)
 				error("No wckeys retrieved");
 				break;
 			}
-			if (assoc_mgr_wckey_list)
-				list_destroy(assoc_mgr_wckey_list);
+			FREE_NULL_LIST(assoc_mgr_wckey_list);
 			assoc_mgr_wckey_list = msg->my_list;
 			debug("Recovered %u wckeys",
 			      list_count(assoc_mgr_wckey_list));
@@ -4584,36 +5596,51 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern int assoc_mgr_refresh_lists(void *db_conn)
+extern int assoc_mgr_refresh_lists(void *db_conn, uint16_t cache_level)
 {
+	bool partial_list = 1;
+
+	if (!cache_level) {
+		cache_level = init_setup.cache_level;
+		partial_list = 0;
+	}
+
+	/* get tres before association and qos since it is used there */
+	if (cache_level & ASSOC_MGR_CACHE_TRES) {
+		if (_refresh_assoc_mgr_tres_list(
+			    db_conn, init_setup.enforce) == SLURM_ERROR)
+			return SLURM_ERROR;
+	}
+
 	/* get qos before association since it is used there */
-	if (init_setup.cache_level & ASSOC_MGR_CACHE_QOS)
+	if (cache_level & ASSOC_MGR_CACHE_QOS)
 		if (_refresh_assoc_mgr_qos_list(
 			    db_conn, init_setup.enforce) == SLURM_ERROR)
 			return SLURM_ERROR;
 
 	/* get user before association/wckey since it is used there */
-	if (init_setup.cache_level & ASSOC_MGR_CACHE_USER)
+	if (cache_level & ASSOC_MGR_CACHE_USER)
 		if (_refresh_assoc_mgr_user_list(
 			    db_conn, init_setup.enforce) == SLURM_ERROR)
 			return SLURM_ERROR;
 
-	if (init_setup.cache_level & ASSOC_MGR_CACHE_ASSOC) {
-		if (_refresh_assoc_mgr_association_list(
+	if (cache_level & ASSOC_MGR_CACHE_ASSOC) {
+		if (_refresh_assoc_mgr_assoc_list(
 			    db_conn, init_setup.enforce) == SLURM_ERROR)
 			return SLURM_ERROR;
 	}
-	if (init_setup.cache_level & ASSOC_MGR_CACHE_WCKEY)
+	if (cache_level & ASSOC_MGR_CACHE_WCKEY)
 		if (_refresh_assoc_wckey_list(
 			    db_conn, init_setup.enforce) == SLURM_ERROR)
 			return SLURM_ERROR;
 
-	if (init_setup.cache_level & ASSOC_MGR_CACHE_RES)
+	if (cache_level & ASSOC_MGR_CACHE_RES)
 		if (_refresh_assoc_mgr_res_list(
 			    db_conn, init_setup.enforce) == SLURM_ERROR)
 			return SLURM_ERROR;
 
-	running_cache = 0;
+	if (!partial_list)
+		running_cache = 0;
 
 	return SLURM_SUCCESS;
 }
@@ -4623,12 +5650,13 @@ extern int assoc_mgr_set_missing_uids()
 	uid_t pw_uid;
 	ListIterator itr = NULL;
 	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, WRITE_LOCK, WRITE_LOCK };
+				   NO_LOCK, NO_LOCK, NO_LOCK,
+				   WRITE_LOCK, WRITE_LOCK };
 
 	assoc_mgr_lock(&locks);
-	if (assoc_mgr_association_list) {
-		slurmdb_association_rec_t *object = NULL;
-		itr = list_iterator_create(assoc_mgr_association_list);
+	if (assoc_mgr_assoc_list) {
+		slurmdb_assoc_rec_t *object = NULL;
+		itr = list_iterator_create(assoc_mgr_assoc_list);
 		while ((object = list_next(itr))) {
 			if (object->user && (object->uid == NO_VAL)) {
 				if (uid_from_string(
@@ -4691,7 +5719,7 @@ extern int assoc_mgr_set_missing_uids()
 }
 
 /* you should check for assoc == NULL before this function */
-extern void assoc_mgr_normalize_assoc_shares(slurmdb_association_rec_t *assoc)
+extern void assoc_mgr_normalize_assoc_shares(slurmdb_assoc_rec_t *assoc)
 {
 	xassert(assoc);
 	/* Use slurmctld_conf.priority_flags directly instead of using a
@@ -4704,3 +5732,221 @@ extern void assoc_mgr_normalize_assoc_shares(slurmdb_association_rec_t *assoc)
 		_normalize_assoc_shares_traditional(assoc);
 }
 
+extern int assoc_mgr_find_tres_pos(slurmdb_tres_rec_t *tres_rec, bool locked)
+{
+	int i, tres_pos = -1;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+
+	if (!tres_rec->id && !tres_rec->type)
+		return tres_pos;
+
+	if (!locked)
+		assoc_mgr_lock(&locks);
+
+	xassert(assoc_mgr_tres_array);
+	xassert(g_tres_count);
+	xassert(assoc_mgr_tres_array[g_tres_count - 1]);
+
+	for (i=0; i<g_tres_count; i++) {
+		if (tres_rec->id &&
+		    assoc_mgr_tres_array[i]->id == tres_rec->id) {
+			tres_pos = i;
+			break;
+		} else if (!xstrcasecmp(assoc_mgr_tres_array[i]->type,
+				  tres_rec->type) &&
+			 !xstrcasecmp(assoc_mgr_tres_array[i]->name,
+				  tres_rec->name)) {
+			tres_pos = i;
+			break;
+		}
+	}
+
+	if (!locked)
+		assoc_mgr_unlock(&locks);
+
+	return tres_pos;
+}
+
+/* The assoc_mgr tres read lock needs to be locked before calling this
+ * function and while using the returned record */
+extern slurmdb_tres_rec_t *assoc_mgr_find_tres_rec(slurmdb_tres_rec_t *tres_rec)
+{
+	int pos = assoc_mgr_find_tres_pos(tres_rec, 1);
+
+	if (pos == -1)
+		return NULL;
+	else
+		return assoc_mgr_tres_array[pos];
+}
+
+extern int assoc_mgr_set_tres_cnt_array(uint64_t **tres_cnt, char *tres_str,
+					uint64_t init_val, bool locked)
+{
+	int array_size = sizeof(uint64_t) * g_tres_count;
+	int diff_cnt = 0, i;
+
+	xassert(tres_cnt);
+
+	/* When doing the cnt the string is always the
+	 * complete string, so always set everything to 0 to
+	 * catch anything that was removed.
+	 */
+	xfree(*tres_cnt);
+	if (!init_val)
+		*tres_cnt = xmalloc(array_size);
+	else {
+		*tres_cnt = xmalloc_nz(array_size);
+		for (i=0; i<g_tres_count; i++)
+			(*tres_cnt)[i] = init_val;
+	}
+
+	if (tres_str) {
+		List tmp_list = NULL;
+		/* info("got %s", tres_str); */
+		slurmdb_tres_list_from_string(
+			&tmp_list, tres_str, TRES_STR_FLAG_NONE);
+		if (tmp_list) {
+			slurmdb_tres_rec_t *tres_rec;
+			ListIterator itr = list_iterator_create(tmp_list);
+			while ((tres_rec = list_next(itr))) {
+				int pos = assoc_mgr_find_tres_pos(
+					tres_rec, locked);
+				if (pos == -1) {
+					debug2("assoc_mgr_set_tres_cnt_array: "
+					       "no tres "
+					       "of id %u found in the array",
+					       tres_rec->id);
+					continue;
+				}
+				/* set the index to the count */
+				(*tres_cnt)[pos] = tres_rec->count;
+				/* info("%d pos %d has count of %"PRIu64, */
+				/*      tres_rec->id, */
+				/*      pos, tres_rec->count); */
+			}
+			list_iterator_destroy(itr);
+			if (g_tres_count != list_count(tmp_list))
+				diff_cnt = 1;
+			FREE_NULL_LIST(tmp_list);
+		}
+	}
+	return diff_cnt;
+}
+
+/* tres read lock needs to be locked before this is called. */
+extern void assoc_mgr_set_assoc_tres_cnt(slurmdb_assoc_rec_t *assoc)
+{
+	/* This isn't needed on the dbd */
+	if (slurmdbd_conf)
+		return;
+
+	xassert(assoc_mgr_tres_array);
+
+	assoc_mgr_set_tres_cnt_array(&assoc->grp_tres_ctld, assoc->grp_tres,
+				     INFINITE64, 1);
+	assoc_mgr_set_tres_cnt_array(&assoc->grp_tres_mins_ctld,
+				     assoc->grp_tres_mins, INFINITE64, 1);
+	assoc_mgr_set_tres_cnt_array(&assoc->grp_tres_run_mins_ctld,
+				     assoc->grp_tres_run_mins, INFINITE64, 1);
+	assoc_mgr_set_tres_cnt_array(&assoc->max_tres_ctld,
+				     assoc->max_tres_pj, INFINITE64, 1);
+	assoc_mgr_set_tres_cnt_array(&assoc->max_tres_pn_ctld,
+				     assoc->max_tres_pn, INFINITE64, 1);
+	assoc_mgr_set_tres_cnt_array(&assoc->max_tres_mins_ctld,
+				     assoc->max_tres_mins_pj, INFINITE64, 1);
+	assoc_mgr_set_tres_cnt_array(&assoc->max_tres_run_mins_ctld,
+				     assoc->max_tres_run_mins, INFINITE64, 1);
+}
+
+/* tres read lock needs to be locked before this is called. */
+extern void assoc_mgr_set_qos_tres_cnt(slurmdb_qos_rec_t *qos)
+{
+	/* This isn't needed on the dbd */
+	if (slurmdbd_conf)
+		return;
+
+	xassert(assoc_mgr_tres_array);
+
+	assoc_mgr_set_tres_cnt_array(&qos->grp_tres_ctld, qos->grp_tres,
+				     INFINITE64, 1);
+	assoc_mgr_set_tres_cnt_array(&qos->grp_tres_mins_ctld,
+				     qos->grp_tres_mins, INFINITE64, 1);
+	assoc_mgr_set_tres_cnt_array(&qos->grp_tres_run_mins_ctld,
+				     qos->grp_tres_run_mins, INFINITE64, 1);
+	assoc_mgr_set_tres_cnt_array(&qos->max_tres_pj_ctld,
+				     qos->max_tres_pj, INFINITE64, 1);
+	assoc_mgr_set_tres_cnt_array(&qos->max_tres_pn_ctld,
+				     qos->max_tres_pn, INFINITE64, 1);
+	assoc_mgr_set_tres_cnt_array(&qos->max_tres_pu_ctld,
+				     qos->max_tres_pu, INFINITE64, 1);
+	assoc_mgr_set_tres_cnt_array(&qos->max_tres_mins_pj_ctld,
+				     qos->max_tres_mins_pj, INFINITE64, 1);
+	assoc_mgr_set_tres_cnt_array(&qos->max_tres_run_mins_pu_ctld,
+				     qos->max_tres_run_mins_pu, INFINITE64, 1);
+	assoc_mgr_set_tres_cnt_array(&qos->min_tres_pj_ctld,
+				     qos->min_tres_pj, INFINITE64, 1);
+}
+
+extern char *assoc_mgr_make_tres_str_from_array(
+	uint64_t *tres_cnt, uint32_t flags, bool locked)
+{
+	int i;
+	char *tres_str = NULL;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+
+	if (!tres_cnt)
+		return NULL;
+
+	if (!locked)
+		assoc_mgr_lock(&locks);
+
+	for (i=0; i<g_tres_count; i++) {
+		if (!assoc_mgr_tres_array[i] || !tres_cnt[i])
+			continue;
+		if (flags & TRES_STR_FLAG_SIMPLE)
+			xstrfmtcat(tres_str, "%s%u=%"PRIu64,
+				   tres_str ? "," : "",
+				   assoc_mgr_tres_array[i]->id, tres_cnt[i]);
+		else
+			xstrfmtcat(tres_str, "%s%s=%"PRIu64,
+				   tres_str ? "," : "",
+				   assoc_mgr_tres_name_array[i], tres_cnt[i]);
+
+	}
+
+	if (!locked)
+		assoc_mgr_unlock(&locks);
+
+	return tres_str;
+}
+
+/* READ lock needs to be set on associations before calling this. */
+extern void assoc_mgr_get_default_qos_info(
+	slurmdb_assoc_rec_t *assoc_ptr, slurmdb_qos_rec_t *qos_rec)
+{
+	xassert(qos_rec);
+
+	if (!qos_rec->name && !qos_rec->id) {
+		if (assoc_ptr && assoc_ptr->usage->valid_qos) {
+			if (assoc_ptr->def_qos_id)
+				qos_rec->id = assoc_ptr->def_qos_id;
+			else if (bit_set_count(assoc_ptr->usage->valid_qos)
+				 == 1)
+				qos_rec->id =
+					bit_ffs(assoc_ptr->usage->valid_qos);
+			else if (assoc_mgr_root_assoc
+				 && assoc_mgr_root_assoc->def_qos_id)
+				qos_rec->id = assoc_mgr_root_assoc->def_qos_id;
+			else
+				qos_rec->name = "normal";
+		} else if (assoc_mgr_root_assoc
+			   && assoc_mgr_root_assoc->def_qos_id)
+			qos_rec->id = assoc_mgr_root_assoc->def_qos_id;
+		else
+			qos_rec->name = "normal";
+	}
+
+	return;
+}
diff --git a/src/common/assoc_mgr.h b/src/common/assoc_mgr.h
index ea80d867e..bf8f4b165 100644
--- a/src/common/assoc_mgr.h
+++ b/src/common/assoc_mgr.h
@@ -35,6 +35,10 @@
  *  You should have received a copy of the GNU General Public License along
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+ *****************************************************************************
+ * NOTE: When using lock_slurmctld() and assoc_mgr_lock(), always call
+ * lock_slurmctld() before calling assoc_mgr_lock() and then call
+ * assoc_mgr_unlock() before calling unlock_slurmctld().
 \*****************************************************************************/
 
 #ifndef _SLURM_ASSOC_MGR_H
@@ -57,6 +61,7 @@
 #define ASSOC_MGR_CACHE_USER  0x0004
 #define ASSOC_MGR_CACHE_WCKEY 0x0008
 #define ASSOC_MGR_CACHE_RES   0x0010
+#define ASSOC_MGR_CACHE_TRES  0x0020
 #define ASSOC_MGR_CACHE_ALL   0xffff
 
 /* to lock or not */
@@ -65,6 +70,7 @@ typedef struct {
 	lock_level_t file;
 	lock_level_t qos;
 	lock_level_t res;
+	lock_level_t tres;
 	lock_level_t user;
 	lock_level_t wckey;
 } assoc_mgr_lock_t;
@@ -81,6 +87,7 @@ typedef enum {
 	FILE_LOCK,
 	QOS_LOCK,
 	RES_LOCK,
+	TRES_LOCK,
 	USER_LOCK,
 	WCKEY_LOCK,
 	ASSOC_MGR_ENTITY_COUNT
@@ -94,110 +101,34 @@ typedef struct {
  	uint16_t cache_level;
 	uint16_t enforce;
 	void (*add_license_notify) (slurmdb_res_rec_t *rec);
- 	void (*remove_assoc_notify) (slurmdb_association_rec_t *rec);
+	void (*remove_assoc_notify) (slurmdb_assoc_rec_t *rec);
 	void (*remove_license_notify) (slurmdb_res_rec_t *rec);
- 	void (*remove_qos_notify) (slurmdb_qos_rec_t *rec);
+	void (*remove_qos_notify) (slurmdb_qos_rec_t *rec);
 	void (*sync_license_notify) (List clus_res_list);
- 	void (*update_assoc_notify) (slurmdb_association_rec_t *rec);
+	void (*update_assoc_notify) (slurmdb_assoc_rec_t *rec);
+	void (*update_cluster_tres) (void);
 	void (*update_license_notify) (slurmdb_res_rec_t *rec);
- 	void (*update_qos_notify) (slurmdb_qos_rec_t *rec);
+	void (*update_qos_notify) (slurmdb_qos_rec_t *rec);
 	void (*update_resvs) ();
 } assoc_init_args_t;
 
-struct assoc_mgr_association_usage {
-	List children_list;     /* list of children associations
-				 * (DON'T PACK) */
-	uint32_t grp_used_cpus; /* count of active jobs in the group
-				 * (DON'T PACK) */
-	uint32_t grp_used_mem; /* count of active memory in the group
-				 * (DON'T PACK) */
-	uint32_t grp_used_nodes; /* count of active jobs in the group
-				  * (DON'T PACK) */
-	double grp_used_wall;   /* group count of time used in
-				 * running jobs (DON'T PACK) */
-	uint64_t grp_used_cpu_run_secs; /* count of running cpu secs
-					 * (DON'T PACK) */
-	double fs_factor;	/* Fairshare factor. Not used by all algorithms
-				 * (DON'T PACK) */
-	uint32_t level_shares;  /* number of shares on this level of
-				 * the tree (DON'T PACK) */
-
-	slurmdb_association_rec_t *parent_assoc_ptr; /* ptr to direct
-						      * parent assoc
-						      * set in slurmctld
-						      * (DON'T PACK) */
-
-	slurmdb_association_rec_t *fs_assoc_ptr;    /* ptr to fairshare parent
-						     * assoc if fairshare
-						     * == SLURMDB_FS_USE_PARENT
-						     * set in slurmctld
-						     * (DON'T PACK) */
-
-	double shares_norm;     /* normalized shares (DON'T PACK) */
-
-	long double usage_efctv;/* effective, normalized usage (DON'T PACK) */
-	long double usage_norm;	/* normalized usage (DON'T PACK) */
-	long double usage_raw;	/* measure of resource usage (DON'T PACK) */
-
-	uint32_t used_jobs;	/* count of active jobs (DON'T PACK) */
-	uint32_t used_submit_jobs; /* count of jobs pending or running
-				    * (DON'T PACK) */
-
-	/* Currently FAIR_TREE and TICKET_BASED systems are defining data on
-	 * this struct but instead we could keep a void pointer to system
-	 * specific data. This would allow subsystems to define whatever data
-	 * they need without having to modify this struct; it would also save
-	 * space.
-	 */
-	uint32_t tickets;       /* Number of tickets (for multifactor2
-				 * plugin). (DON'T PACK) */
-	unsigned active_seqno;  /* Sequence number for identifying
-				 * active associations (DON'T PACK) */
-
-	long double level_fs;	/* (FAIR_TREE) Result of fairshare equation
-				 * compared to the association's siblings (DON'T
-				 * PACK) */
-
-	bitstr_t *valid_qos;    /* qos available for this association
-				 * derived from the qos_list.
-				 * (DON'T PACK) */
-};
-
-struct assoc_mgr_qos_usage {
-	List job_list; /* list of job pointers to submitted/running
-			  jobs (DON'T PACK) */
-	uint32_t grp_used_cpus; /* count of cpus in use in this qos
-				 * (DON'T PACK) */
-	uint64_t grp_used_cpu_run_secs; /* count of running cpu secs
-					 * (DON'T PACK) */
-	uint32_t grp_used_jobs;	/* count of active jobs (DON'T PACK) */
-	uint32_t grp_used_mem; /* count of memory in use in this qos
-				* (DON'T PACK) */
-	uint32_t grp_used_nodes; /* count of nodes in use in this qos
-				  * (DON'T PACK) */
-	uint32_t grp_used_submit_jobs; /* count of jobs pending or running
-					* (DON'T PACK) */
-	double grp_used_wall;   /* group count of time (minutes) used in
-				 * running jobs (DON'T PACK) */
-	double norm_priority;/* normalized priority (DON'T PACK) */
-	long double usage_raw;	/* measure of resource usage (DON'T PACK) */
-
-	List user_limit_list; /* slurmdb_used_limits_t's (DON'T PACK) */
-};
-
-
-extern List assoc_mgr_association_list;
+extern List assoc_mgr_tres_list;
+extern slurmdb_tres_rec_t **assoc_mgr_tres_array;
+extern char **assoc_mgr_tres_name_array;
+extern List assoc_mgr_assoc_list;
 extern List assoc_mgr_res_list;
 extern List assoc_mgr_qos_list;
 extern List assoc_mgr_user_list;
 extern List assoc_mgr_wckey_list;
 
-extern slurmdb_association_rec_t *assoc_mgr_root_assoc;
+extern slurmdb_assoc_rec_t *assoc_mgr_root_assoc;
 
 extern uint32_t g_qos_max_priority; /* max priority in all qos's */
 extern uint32_t g_qos_count; /* count used for generating qos bitstr's */
 extern uint32_t g_user_assoc_count; /* Number of assocations which are users */
-
+extern uint32_t g_tres_count; /* Number of TRES from the database
+			       * which also is the number of elements
+			       * in the assoc_mgr_tres_array */
 
 extern int assoc_mgr_init(void *db_conn, assoc_init_args_t *args,
 			  int db_conn_errno);
@@ -205,14 +136,9 @@ extern int assoc_mgr_fini(char *state_save_location);
 extern void assoc_mgr_lock(assoc_mgr_lock_t *locks);
 extern void assoc_mgr_unlock(assoc_mgr_lock_t *locks);
 
-extern assoc_mgr_association_usage_t *create_assoc_mgr_association_usage();
-extern void destroy_assoc_mgr_association_usage(void *object);
-extern assoc_mgr_qos_usage_t *create_assoc_mgr_qos_usage();
-extern void destroy_assoc_mgr_qos_usage(void *object);
-
 /*
  * get info from the storage
- * IN:  assoc - slurmdb_association_rec_t with at least cluster and
+ * IN:  assoc - slurmdb_assoc_rec_t with at least cluster and
  *		    account set for account association.  To get user
  *		    association set user, and optional partition.
  *		    Sets "id" field with the association ID.
@@ -224,24 +150,45 @@ extern void destroy_assoc_mgr_qos_usage(void *object);
  * RET: SLURM_SUCCESS on success, else SLURM_ERROR
  *
  * NOTE: Since the returned assoc_list is full of pointers from the
- *       assoc_mgr_association_list assoc_mgr_lock_t READ_LOCK on
+ *       assoc_mgr_assoc_list assoc_mgr_lock_t READ_LOCK on
  *       associations must be set before calling this function and while
  *       handling it after a return.
  */
 extern int assoc_mgr_get_user_assocs(void *db_conn,
-				     slurmdb_association_rec_t *assoc,
+				     slurmdb_assoc_rec_t *assoc,
 				     int enforce,
 				     List assoc_list);
 
 /*
  * get info from the storage
- * IN/OUT:  assoc - slurmdb_association_rec_t with at least cluster and
+ * IN/OUT:  tres - slurmdb_tres_rec_t with at least id or type and
+ *                  optional name set.
+ * IN: enforce - return an error if no such tres exists
+ * IN/OUT: tres_pptr - if non-NULL then return a pointer to the
+ *			slurmdb_tres record in cache on success
+ *                      DO NOT FREE.
+ * IN: locked - If you plan on using tres_pptr after this function
+ *              you need to have an assoc_mgr_lock_t READ_LOCK for
+ *              tres while you use it before and after the
+ *              return.  This is not required if using the assoc for
+ *              non-pointer portions.
+ * RET: SLURM_SUCCESS on success, else SLURM_ERROR
+ */
+extern int assoc_mgr_fill_in_tres(void *db_conn,
+				   slurmdb_tres_rec_t *tres,
+				   int enforce,
+				   slurmdb_tres_rec_t **tres_pptr,
+				   bool locked);
+
+/*
+ * get info from the storage
+ * IN/OUT:  assoc - slurmdb_assoc_rec_t with at least cluster and
  *		    account set for account association.  To get user
  *		    association set user, and optional partition.
  *		    Sets "id" field with the association ID.
  * IN: enforce - return an error if no such association exists
  * IN/OUT: assoc_pptr - if non-NULL then return a pointer to the
- *			slurmdb_association record in cache on success
+ *			slurmdb_assoc record in cache on success
  *                      DO NOT FREE.
  * IN: locked - If you plan on using assoc_pptr after this function
  *              you need to have an assoc_mgr_lock_t READ_LOCK for
@@ -251,9 +198,9 @@ extern int assoc_mgr_get_user_assocs(void *db_conn,
  * RET: SLURM_SUCCESS on success, else SLURM_ERROR
  */
 extern int assoc_mgr_fill_in_assoc(void *db_conn,
-				   slurmdb_association_rec_t *assoc,
+				   slurmdb_assoc_rec_t *assoc,
 				   int enforce,
-				   slurmdb_association_rec_t **assoc_pptr,
+				   slurmdb_assoc_rec_t **assoc_pptr,
 				   bool locked);
 
 /*
@@ -319,57 +266,101 @@ extern bool assoc_mgr_is_user_acct_coord(void *db_conn, uint32_t uid,
 					char *acct);
 
 /*
- * get the share information from the association list in the form of
- * a list containing association_share_object_t's
+ * get the share information from the association list
+ * IN: uid: uid_t of user issuing the request
+ * IN: req_msg: info about request
+ * IN/OUT: resp_msg: message filled in with assoc_mgr info
+ */
+extern void assoc_mgr_get_shares(void *db_conn,
+				 uid_t uid, shares_request_msg_t *req_msg,
+				 shares_response_msg_t *resp_msg);
+
+/*
+ * get the state of the association manager and pack it up in buffer
+ * OUT buffer_ptr - the pointer is set to the allocated buffer.
+ * OUT buffer_size - set to size of the buffer in bytes
+ * IN: msg: request for various states
  * IN: uid: uid_t of user issuing the request
- * IN: acct_list: char * list of accounts you want (NULL for all)
- * IN: user_list: char * list of user names you want (NULL for all)
+ * IN: db_conn: needed if not already connected to the database or DBD
+ * IN: protocol_version: version of Slurm we are sending to.
  */
-extern List assoc_mgr_get_shares(
-	void *db_conn, uid_t uid, List acct_list, List user_list);
+extern void assoc_mgr_info_get_pack_msg(
+	char **buffer_ptr, int *buffer_size,
+	assoc_mgr_info_request_msg_t *msg, uid_t uid,
+	void *db_conn, uint16_t protocol_version);
+
+/*
+ * unpack the packing of the above assoc_mgr_get_pack_state_msg function.
+ * OUT: object - what to unpack into
+ * IN: buffer - buffer to unpack
+ * IN: version of Slurm this is packed in
+ * RET: SLURM_SUCCESS on SUCCESS, SLURM_ERROR else
+ */
+extern int assoc_mgr_info_unpack_msg(
+	assoc_mgr_info_msg_t **object, Buf buffer, uint16_t protocol_version);
 
 /*
  * assoc_mgr_update - update the association manager
  * IN update_list: updates to perform
+ * IN locked: if appropriate write locks are locked before calling or not
  * RET: error code
  * NOTE: the items in update_list are not deleted
  */
-extern int assoc_mgr_update(List update_list);
+extern int assoc_mgr_update(List update_list, bool locked);
 
 /*
  * update associations in cache
  * IN:  slurmdb_update_object_t *object
+ * IN   locked: if appropriate write locks are locked before calling or not
  * RET: SLURM_SUCCESS on success (or not found) SLURM_ERROR else
  */
-extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update);
+extern int assoc_mgr_update_assocs(slurmdb_update_object_t *update,
+				   bool locked);
 
 /*
  * update wckeys in cache
  * IN:  slurmdb_update_object_t *object
+ * IN   locked: if appropriate write locks are locked before calling or not
  * RET: SLURM_SUCCESS on success (or not found) SLURM_ERROR else
  */
-extern int assoc_mgr_update_wckeys(slurmdb_update_object_t *update);
+extern int assoc_mgr_update_wckeys(slurmdb_update_object_t *update,
+				   bool locked);
 
 /*
  * update qos in cache
  * IN:  slurmdb_update_object_t *object
+ * IN   locked: if appropriate write locks are locked before calling or not
  * RET: SLURM_SUCCESS on success (or not found) SLURM_ERROR else
  */
-extern int assoc_mgr_update_qos(slurmdb_update_object_t *update);
+extern int assoc_mgr_update_qos(slurmdb_update_object_t *update,
+				bool locked);
 
 /*
  * update cluster resources in cache
  * IN:  slurmdb_update_object_t *object
+ * IN   locked: if appropriate write locks are locked before calling or not
+ * RET: SLURM_SUCCESS on success (or not found) SLURM_ERROR else
+ */
+extern int assoc_mgr_update_res(slurmdb_update_object_t *update,
+				bool locked);
+
+/*
+ * update cluster tres in cache
+ * IN:  slurmdb_update_object_t *object
+ * IN   locked: if appropriate write locks are locked before calling or not
  * RET: SLURM_SUCCESS on success (or not found) SLURM_ERROR else
  */
-extern int assoc_mgr_update_res(slurmdb_update_object_t *update);
+extern int assoc_mgr_update_tres(slurmdb_update_object_t *update,
+				 bool locked);
 
 /*
  * update users in cache
  * IN:  slurmdb_update_object_t *object
+ * IN   locked: if appropriate write locks are locked before calling or not
  * RET: SLURM_SUCCESS on success (or not found) SLURM_ERROR else
  */
-extern int assoc_mgr_update_users(slurmdb_update_object_t *update);
+extern int assoc_mgr_update_users(slurmdb_update_object_t *update,
+				  bool locked);
 
 /*
  * validate that an association ID is still valid
@@ -390,10 +381,10 @@ extern void assoc_mgr_clear_used_info(void);
 
 /*
  * Remove the association's accumulated usage
- * IN:  slurmdb_association_rec_t *assoc
+ * IN:  slurmdb_assoc_rec_t *assoc
  * RET: SLURM_SUCCESS on success or else SLURM_ERROR
  */
-extern void assoc_mgr_remove_assoc_usage(slurmdb_association_rec_t *assoc);
+extern void assoc_mgr_remove_assoc_usage(slurmdb_assoc_rec_t *assoc);
 
 /*
  * Remove the QOS's accumulated usage
@@ -428,7 +419,7 @@ extern int load_assoc_mgr_state(char *state_save_location);
  * Refresh the lists if when running_cache is set this will load new
  * information from the database (if any) and update the cached list.
  */
-extern int assoc_mgr_refresh_lists(void *db_conn);
+extern int assoc_mgr_refresh_lists(void *db_conn, uint16_t cache_level);
 
 /*
  * Sets the uids of users added to the system after the start of the
@@ -439,6 +430,57 @@ extern int assoc_mgr_set_missing_uids();
 /* Normalize shares for an association. External so a priority plugin
  * can call it if needed.
  */
-extern void assoc_mgr_normalize_assoc_shares(slurmdb_association_rec_t *assoc);
+extern void assoc_mgr_normalize_assoc_shares(slurmdb_assoc_rec_t *assoc);
+
+/* Find the position of the given TRES ID or type/name in the
+ * assoc_mgr_tres_array, if the ID isn't found -1 is returned.
+ */
+extern int assoc_mgr_find_tres_pos(slurmdb_tres_rec_t *tres_rec, bool locked);
+
+/* calls assoc_mgr_find_tres_pos and returns the pointer in the
+ * assoc_mgr_tres_array.
+ * NOTE: The assoc_mgr tres read lock needs to be locked before calling this
+ * function and while using the returned record.
+ */
+extern slurmdb_tres_rec_t *assoc_mgr_find_tres_rec(
+	slurmdb_tres_rec_t *tres_rec);
+
+/* fills in allocates and sets tres_cnt based off tres_str
+ * OUT tres_cnt - array to be filled in g_tres_cnt in length
+ * IN tres_str - simple format of tres used with id and count set
+ * IN init_val - what the initial value is going to be set to
+ * IN locked - if the assoc_mgr tres read lock is locked or not.
+ * RET if positions changed in array from string 1 if nothing changed 0
+ */
+extern int assoc_mgr_set_tres_cnt_array(uint64_t **tres_cnt, char *tres_str,
+					uint64_t init_val, bool locked);
+
+/* Creates all the tres arrays for an association.
+ * NOTE: The assoc_mgr tres read lock needs to be locked before this
+ * is called. */
+extern void assoc_mgr_set_assoc_tres_cnt(slurmdb_assoc_rec_t *assoc);
+
+/* Creates all the tres arrays for a QOS.
+ * NOTE: The assoc_mgr tres read lock needs to be locked before this
+ * is called. */
+extern void assoc_mgr_set_qos_tres_cnt(slurmdb_qos_rec_t *qos);
+
+/* Make a simple tres string from a tres count array.
+ * IN tres_cnt - counts of each tres used
+ * IN flags - TRES_STR_FLAG_SIMPLE or 0 for formatted string
+ * IN locked - if the assoc_mgr tres read lock is locked or not.
+ * RET char * of simple tres string
+ */
+extern char *assoc_mgr_make_tres_str_from_array(
+	uint64_t *tres_cnt, uint32_t flags, bool locked);
+
+/* Fill in the default qos id or name given an association record.  If
+ * none is given it gives the default qos for the system.
+ * IN/OUT: qos_rec - fills in the name or id of the default qos
+ *
+ * NOTE: READ lock needs to be set on associations and QOS before
+ * calling this. */
+extern void assoc_mgr_get_default_qos_info(
+	slurmdb_assoc_rec_t *assoc_ptr, slurmdb_qos_rec_t *qos_rec);
 
 #endif /* _SLURM_ASSOC_MGR_H */
diff --git a/src/common/callerid.c b/src/common/callerid.c
new file mode 100644
index 000000000..770db5079
--- /dev/null
+++ b/src/common/callerid.c
@@ -0,0 +1,417 @@
+/*****************************************************************************\
+ *  callerid.c - Identify initiator of ssh connections, etc
+ *****************************************************************************
+ *  Copyright (C) 2015, Brigham Young University
+ *  Author:  Ryan Cox <ryan_cox@byu.edu>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#  if HAVE_INTTYPES_H
+#    include <inttypes.h>
+#  else
+#    if HAVE_STDINT_H
+#      include <stdint.h>
+#    endif
+#  endif  /* HAVE_INTTYPES_H */
+#else   /* !HAVE_CONFIG_H */
+#  include <inttypes.h>
+#endif  /*  HAVE_CONFIG_H */
+
+
+//#ifndef _GNU_SOURCE
+//#define _GNU_SOURCE
+//#endif
+#include <arpa/inet.h>
+#include <ctype.h>
+#if HAVE_DIRENT_H
+#  include <dirent.h>
+#endif
+#include <libgen.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "slurm/slurm.h"
+#include "src/common/callerid.h"
+#include "src/common/log.h"
+#include "src/common/xstring.h"
+#include "src/common/xmalloc.h"
+
+#ifndef _BSD_SOURCE
+#define _BSD_SOURCE
+#endif
+
+#ifndef PATH_PROCNET_TCP
+#define PATH_PROCNET_TCP "/proc/net/tcp"
+#endif
+#ifndef PATH_PROCNET_TCP6
+#define PATH_PROCNET_TCP6 "/proc/net/tcp6"
+#endif
+
+strong_alias(callerid_get_own_netinfo, slurm_callerid_get_own_netinfo);
+
+static int _match_inode(callerid_conn_t *conn_result, ino_t *inode_search,
+		callerid_conn_t *conn_row, ino_t inode_row, int af)
+{
+	if (*inode_search == inode_row) {
+		memcpy(&conn_result->ip_dst, &conn_row->ip_dst, 16);
+		memcpy(&conn_result->ip_src, &conn_row->ip_src, 16);
+		conn_result->port_src = conn_row->port_src;
+		conn_result->port_dst = conn_row->port_dst;
+		conn_result->af = af;
+		debug3("_match_inode matched");
+		return SLURM_SUCCESS;
+	}
+	return SLURM_FAILURE;
+}
+
+static int _match_conn(callerid_conn_t *conn_search, ino_t *inode_result,
+		callerid_conn_t *conn_row, ino_t inode_row, int af)
+{
+	int addrbytes = af == AF_INET ? 4 : 16;
+
+	if (conn_search->port_dst != conn_row->port_dst ||
+	    conn_search->port_src != conn_row->port_src ||
+	    memcmp((void*)&conn_search->ip_dst, (void*)&conn_row->ip_dst,
+		   addrbytes) !=0 ||
+	    memcmp((void*)&conn_search->ip_src, (void*)&conn_row->ip_src,
+		   addrbytes) !=0
+	   )
+		return SLURM_FAILURE;
+
+	debug3("_match_conn matched inode %lu", (long unsigned int)inode_row);
+	*inode_result = inode_row;
+	return SLURM_SUCCESS;
+}
+
+/* Note that /proc/net/tcp, etc. can be updated *while reading* but a read on
+ * each row is atomic: http://stackoverflow.com/a/5880485.
+ *
+ * This should be safe but may potentially miss an entry due to the entry
+ * moving up in the file as it's read.
+ */
+static int _find_match_in_tcp_file(
+		callerid_conn_t *conn,
+		ino_t *inode,
+		int af,
+		const char *path,
+		int (*match_func)(callerid_conn_t *,
+				   ino_t *, callerid_conn_t *, ino_t, int))
+{
+	int rc = SLURM_FAILURE;
+	FILE *fp;
+	char ip_dst_str[INET6_ADDRSTRLEN+1]; /* +1 for scanf to add \0 */
+	char ip_src_str[INET6_ADDRSTRLEN+1];
+	char line[1024];
+	int addrbytes, i, matches;
+	uint64_t inode_row;
+	callerid_conn_t conn_row;
+
+	addrbytes = af == AF_INET ? 4 : 16;
+
+	/* Zero out the IPs. Not strictly necessary but it will look much better
+	 * in a debugger since IPv4 only uses 4 out of 16 bytes. */
+	bzero(&conn_row.ip_dst, 16);
+	bzero(&conn_row.ip_src, 16);
+
+	fp = fopen(path, "r");
+	if (!fp)
+		return rc;
+
+	while( fgets(line, 1024, fp) != NULL ) {
+		matches = sscanf(line,
+			"%*s %[0-9A-Z]:%x %[0-9A-Z]:%x %*s %*s %*s %*s %*s %*s %"PRIu64"",
+			ip_dst_str, &conn_row.port_dst, ip_src_str,
+			&conn_row.port_src, &inode_row);
+
+		if (matches == EOF)
+			break;
+
+		/* Probably the header */
+		if (!matches)
+			continue;
+
+		/* Convert to usable forms */
+		inet_nsap_addr(ip_dst_str, (unsigned char*)&conn_row.ip_dst,
+				addrbytes);
+		inet_nsap_addr(ip_src_str, (unsigned char*)&conn_row.ip_src,
+				addrbytes);
+
+		/* Convert to network byte order. */
+		for (i=0; i < (addrbytes>>2); i++) {
+			conn_row.ip_dst.s6_addr32[i]
+				= htonl(conn_row.ip_dst.s6_addr32[i]);
+			conn_row.ip_src.s6_addr32[i]
+				= htonl(conn_row.ip_src.s6_addr32[i]);
+		}
+
+		/* Check if we matched */
+		rc = match_func(conn, inode, &conn_row, (ino_t)inode_row, af);
+		if (rc == SLURM_SUCCESS) {
+			char ip_src_str[INET6_ADDRSTRLEN];
+			char ip_dst_str[INET6_ADDRSTRLEN];
+
+			inet_ntop(af, &conn->ip_src, ip_src_str,
+					INET6_ADDRSTRLEN);
+			inet_ntop(af, &conn->ip_dst, ip_dst_str,
+					INET6_ADDRSTRLEN);
+			debug("network_callerid matched %s:%lu => %s:%lu with inode %lu",
+			      ip_src_str, (long unsigned int)conn->port_src,
+			      ip_dst_str, (long unsigned int)conn->port_dst,
+			      (long unsigned int)inode);
+			break;
+		}
+	}
+
+	fclose(fp);
+	return rc;
+}
+
+
+/* Search through /proc/$pid/fd/ symlinks for the specified inode
+ *
+ * All errors in this function should be silently ignored. Processes appear and
+ * disappear all the time. It is natural for processes to disappear in between
+ * operations such as readdir_r, stat, and others. We should detect errors but
+ * not log them.
+ */
+static int _find_inode_in_fddir(pid_t pid, ino_t inode)
+{
+	DIR *dirp;
+	struct dirent *entryp;
+	struct dirent *result;
+	int name_max;
+	char dirpath[1024];
+	char fdpath[1024];
+	int len, rc = SLURM_FAILURE;
+	struct stat statbuf;
+
+	snprintf(dirpath, 1024, "/proc/%d/fd", (pid_t)pid);
+	if ((dirp = opendir(dirpath)) == NULL) {
+		return SLURM_FAILURE;
+	}
+
+	/* Thus saith the man page readdir_r(3) */
+	name_max = pathconf(dirpath, _PC_NAME_MAX);
+	if (name_max == -1)	/* Limit not defined, or error */
+		name_max = 255;	/* Take a guess */
+	len = offsetof(struct dirent, d_name) + name_max + 1;
+	entryp = xmalloc(len);
+
+	while (1) {
+		readdir_r(dirp, entryp, &result);
+		if (!result)
+			break;
+		/* Ignore . and .. */
+		if (strncmp(entryp->d_name, ".", 1)==0)
+			continue;
+
+		/* This is a symlink. Follow it to get destination's inode. */
+		snprintf(fdpath, 1024, "%s/%s", dirpath, entryp->d_name);
+		if (stat(fdpath, &statbuf) != 0)
+			continue;
+		if (statbuf.st_ino == inode) {
+			debug3("_find_inode_in_fddir: found %lu at %s",
+			       (long unsigned int)inode, fdpath);
+			rc = SLURM_SUCCESS;
+			break;
+		}
+	}
+
+	closedir(dirp);
+	xfree(entryp);
+	return rc;
+}
+
+
+extern int callerid_find_inode_by_conn(callerid_conn_t conn, ino_t *inode)
+{
+	int rc;
+
+	rc = _find_match_in_tcp_file(&conn, inode, AF_INET, PATH_PROCNET_TCP,
+			_match_conn);
+	if (rc == SLURM_SUCCESS)
+		return SLURM_SUCCESS;
+
+	rc = _find_match_in_tcp_file(&conn, inode, AF_INET6, PATH_PROCNET_TCP6,
+			_match_conn);
+	if (rc == SLURM_SUCCESS)
+		return SLURM_SUCCESS;
+
+	/* Add new protocols here if needed, such as UDP */
+
+	return SLURM_FAILURE;
+}
+
+
+extern int callerid_find_conn_by_inode(callerid_conn_t *conn, ino_t inode)
+{
+	int rc;
+
+	rc = _find_match_in_tcp_file(conn, &inode, AF_INET, PATH_PROCNET_TCP,
+			_match_inode);
+	if (rc == SLURM_SUCCESS)
+		return SLURM_SUCCESS;
+
+	rc = _find_match_in_tcp_file(conn, &inode, AF_INET6, PATH_PROCNET_TCP6,
+			_match_inode);
+	if (rc == SLURM_SUCCESS)
+		return SLURM_SUCCESS;
+
+	/* Add new protocols here if needed, such as UDP */
+
+	return SLURM_FAILURE;
+}
+
+
+/* Read through /proc then read each proc's fd/ directory.
+ *
+ * Most errors in this function should be silently ignored. Processes appear and
+ * disappear all the time. It is natural for processes to disappear in between
+ * operations such as readdir_r, stat, and others. We should detect errors but
+ * not log them.
+ */
+extern int find_pid_by_inode (pid_t *pid_result, ino_t inode)
+{
+	DIR *dirp;
+	struct dirent *entryp;
+	struct dirent *result;
+	char *dirpath = "/proc";
+	int name_max, len, rc = SLURM_FAILURE;
+	pid_t pid;
+
+	if ((dirp = opendir(dirpath)) == NULL) {
+		/* Houston, we have a problem: /proc is inaccessible */
+		error("find_pid_by_inode: unable to open %s: %m",
+				dirpath);
+		return SLURM_FAILURE;
+	}
+
+	/* Thus saith the man page readdir_r(3) */
+	name_max = pathconf(dirpath, _PC_NAME_MAX);
+	if (name_max == -1)	/* Limit not defined, or error */
+		name_max = 255;	/* Take a guess */
+	len = offsetof(struct dirent, d_name) + name_max + 1;
+	entryp = xmalloc(len);
+
+	while (1) {
+		readdir_r(dirp, entryp, &result);
+		if (!result)
+			break;
+		/* This check is probably unnecessary due to the !result check
+		 * but better safe than sorry */
+		else if (!entryp->d_name)
+			continue;
+		/* We're only looking for /proc/[0-9]*  */
+		else if (!isdigit(entryp->d_name[0]))
+			continue;
+
+		/* More sanity checks can be performed but there isn't much
+		 * point. The fd/ directory will exist inside the directory and
+		 * we'll find the specified inode or we won't. Failures are
+		 * silent so it won't clutter logs. The above checks are
+		 * currently sufficient for Linux. */
+
+		pid = (int)atoi(entryp->d_name);
+		rc = _find_inode_in_fddir(pid, inode);
+		if (rc == SLURM_SUCCESS) {
+			*pid_result = pid;
+			break;
+		}
+	}
+
+	closedir(dirp);
+	xfree(entryp);
+	return rc;
+}
+
+
+extern int callerid_get_own_netinfo (callerid_conn_t *conn)
+{
+	DIR *dirp;
+	struct dirent *entryp;
+	struct dirent *result;
+	char *dirpath = "/proc/self/fd";
+	char fdpath[1024];
+	int name_max, len, rc = SLURM_FAILURE;
+	struct stat statbuf;
+
+	if ((dirp = opendir(dirpath)) == NULL) {
+		error("callerid_get_own_netinfo: opendir failed for %s: %m",
+				dirpath);
+		return rc;
+	}
+
+	/* thus saith the man page readdir_r(3) */
+	name_max = pathconf(dirpath, _PC_NAME_MAX);
+	if (name_max == -1)	/* Limit not defined, or error */
+		name_max = 255;	/* Take a guess */
+	len = offsetof(struct dirent, d_name) + name_max + 1;
+	entryp = xmalloc(len);
+
+	while (1) {
+		readdir_r(dirp, entryp, &result);
+		if (!result)
+			break;
+
+		/* Ignore . and .. */
+		if (strncmp(entryp->d_name, ".", 1)==0)
+			continue;
+
+		snprintf(fdpath, 1024, "%s/%s", dirpath, entryp->d_name);
+		debug3("callerid_get_own_netinfo: checking %s", fdpath);
+		/* This is a symlink. Follow it to get destination's inode. */
+		if (stat(fdpath, &statbuf) != 0) {
+			debug3("stat failed for %s: %m", fdpath);
+			continue;
+		}
+
+		/* We are only interested in sockets */
+		if (S_ISSOCK(statbuf.st_mode)) {
+			debug3("callerid_get_own_netinfo: checking socket %s",
+					fdpath);
+			rc = callerid_find_conn_by_inode(conn, statbuf.st_ino);
+			if (rc == SLURM_SUCCESS) {
+				break;
+			}
+		}
+	}
+
+	closedir(dirp);
+	xfree(entryp);
+	return rc;
+}
diff --git a/src/common/callerid.h b/src/common/callerid.h
new file mode 100644
index 000000000..1f264e519
--- /dev/null
+++ b/src/common/callerid.h
@@ -0,0 +1,55 @@
+/*****************************************************************************\
+ *  callerid.h - Identify initiator of ssh connections, etc
+ *****************************************************************************
+ *  Copyright (C) 2015, Brigham Young University
+ *  Author:  Ryan Cox <ryan_cox@byu.edu>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _SLURM_CALLERID_H
+#define _SLURM_CALLERID_H
+
+#include <arpa/inet.h>
+
+typedef struct callerid_conn {
+	uint32_t port_dst;
+	uint32_t port_src;
+	struct in6_addr ip_dst;
+	struct in6_addr ip_src;
+	int af;
+} callerid_conn_t;
+
+extern int callerid_get_own_netinfo(callerid_conn_t *conn);
+extern int callerid_find_inode_by_conn(callerid_conn_t conn, ino_t *inode);
+extern int callerid_find_conn_by_inode(callerid_conn_t *conn, ino_t inode);
+extern int find_pid_by_inode (pid_t *pid_result, ino_t inode);
+
+#endif /* _SLURM_CALLERID_H */
diff --git a/src/common/cpu_frequency.c b/src/common/cpu_frequency.c
index c73690597..8ff335b8b 100644
--- a/src/common/cpu_frequency.c
+++ b/src/common/cpu_frequency.c
@@ -3,6 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2012 Bull
  *  Written by Don Albert, <don.albert@bull.com>
+ *  Modified by Rod Schultz, <rod.schultz@bull.com> for min-max:gov
  *
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://slurm.schedmd.com/>.
@@ -49,6 +50,7 @@
 #include "slurm/slurm.h"
 
 #include "src/common/cpu_frequency.h"
+#include "src/common/env.h"
 #include "src/common/fd.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_protocol_defs.h"
@@ -70,18 +72,29 @@
 #define GOV_USERSPACE		0x10
 
 static uint16_t cpu_freq_count = 0;
+static uint32_t cpu_freq_govs = 0; /* Governors allowed. */
+static uint64_t debug_flags = NO_VAL; /* init value for slurmd, slurmstepd */
 static struct cpu_freq_data {
 	uint8_t  avail_governors;
-	uint32_t orig_frequency;
-	char     orig_governor[GOV_NAME_LEN];
-	uint32_t new_frequency;
+	uint8_t  nfreq;
+	uint32_t avail_freq[FREQ_LIST_MAX];
+	char     org_governor[GOV_NAME_LEN];
 	char     new_governor[GOV_NAME_LEN];
+	uint32_t org_frequency;
+	uint32_t new_frequency;
+	uint32_t org_min_freq;
+	uint32_t new_min_freq;
+	uint32_t org_max_freq;
+	uint32_t new_max_freq;
 } * cpufreq = NULL;
 static char *slurmd_spooldir = NULL;
 
-static void	_cpu_freq_find_valid(uint32_t cpu_freq, int cpuidx);
-static uint16_t	_cpu_freq_next_cpu(char **core_range, uint16_t *cpuidx,
+static int      _cpu_freq_cpu_avail(int cpx);
+static int      _cpu_freq_current_state(int cpx);
+static uint16_t	_cpu_freq_next_cpu(char **core_range, uint16_t *cpx,
 				   uint16_t *start, uint16_t *end);
+static void     _cpu_freq_init_data(int cpx);
+static void     _cpu_freq_setup_data(stepd_step_rec_t *job, int cpx);
 static int	_fd_lock_retry(int fd);
 
 static int _fd_lock_retry(int fd)
@@ -116,18 +129,21 @@ static int _set_cpu_owner_lock(int cpu_id, uint32_t job_id)
 	int fd, sz;
 
 	snprintf(tmp, sizeof(tmp), "%s/cpu", slurmd_spooldir);
-	(void) mkdir(tmp, 0700);
+	if ((mkdir(tmp, 0700) != 0) && (errno != EEXIST)) {
+		error("mkdir failed: %m %s",tmp);
+		return -1;
+	}
 	snprintf(tmp, sizeof(tmp), "%s/cpu/%d", slurmd_spooldir, cpu_id);
-	fd = open(tmp, O_CREAT | O_RDWR, 0500);
+	fd = open(tmp, O_CREAT | O_RDWR, 0600);
 	if (fd < 0) {
-		error("%s: open: %m", __func__);
+		error("%s: open: %m %s", __func__, tmp);
 		return fd;
 	}
 	if (_fd_lock_retry(fd) < 0)
-		error("%s: fd_get_write_lock: %m", __func__);
+		error("%s: fd_get_write_lock: %m %s", __func__, tmp);
 	sz = sizeof(uint32_t);
 	if (fd_write_n(fd, (void *) &job_id, sz) != sz)
-		error("%s: write: %m", __func__);
+		error("%s: write: %m %s", __func__, tmp);
 
 	return fd;
 }
@@ -139,21 +155,25 @@ static int _test_cpu_owner_lock(int cpu_id, uint32_t job_id)
 	int fd, sz;
 
 	snprintf(tmp, sizeof(tmp), "%s/cpu", slurmd_spooldir);
-	(void) mkdir(tmp, 0700);
+	if ((mkdir(tmp, 0700) != 0) && (errno != EEXIST)) {
+		error("%s: mkdir failed: %m %s", __func__, tmp);
+		return -1;
+	}
 	snprintf(tmp, sizeof(tmp), "%s/cpu/%d", slurmd_spooldir, cpu_id);
-	fd = open(tmp, O_RDWR);
+	fd = open(tmp, O_RDWR, 0600);
 	if (fd < 0) {
-		error("%s: open: %m", __func__);
-		return fd;
+		if (errno != ENOENT)	/* Race condition */
+			error("%s: open: %m %s", __func__, tmp);
+		return -1;
 	}
 	if (_fd_lock_retry(fd) < 0) {
-		error("%s: fd_get_write_lock: %m", __func__);
+		error("%s: fd_get_write_lock: %m %s", __func__, tmp);
 		close(fd);
 		return -1;
 	}
 	sz = sizeof(uint32_t);
 	if (fd_read_n(fd, (void *) &in_job_id, sz) != sz) {
-		error("%s: read: %m", __func__);
+		error("%s: read: %m %s", __func__, tmp);
 		close(fd);
 		return -1;
 	}
@@ -170,6 +190,55 @@ static int _test_cpu_owner_lock(int cpu_id, uint32_t job_id)
 	return fd;
 }
 
+/*
+ * Find available frequencies on this cpu
+ * IN      cpuidx     - cpu to query
+ * Return: SLURM_SUCCESS or SLURM_FAILURE
+ *         avail_freq array will be in strictly ascending order
+ */
+static int
+_cpu_freq_cpu_avail(int cpuidx)
+{
+	FILE *fp = NULL;
+	char path[SYSFS_PATH_MAX];
+	int i, j, k;
+	uint32_t freq;
+	bool all_avail = false;
+
+	snprintf(path, sizeof(path),  PATH_TO_CPU
+		 "cpu%u/cpufreq/scaling_available_frequencies", cpuidx);
+	if ( ( fp = fopen(path, "r") ) == NULL ) {
+		static bool open_err_log = true;	/* Log once */
+		if (open_err_log) {
+			error("%s: Could not open %s", __func__, path);
+			open_err_log = false;
+		}
+		return SLURM_FAILURE;
+	}
+	for (i = 0; i < (FREQ_LIST_MAX-1); i++) {
+		if ( fscanf(fp, "%u", &freq) == EOF) {
+			all_avail = true;
+			break;
+		}
+		/* make sure list is sorted */
+		for (j = 0; j < i; j++) {
+			if (freq < cpufreq[cpuidx].avail_freq[j]) {
+				for (k = i; k >= j; k--) {
+					cpufreq[cpuidx].avail_freq[k+1] =
+						cpufreq[cpuidx].avail_freq[k];
+				}
+				break;
+			}
+		}
+		cpufreq[cpuidx].avail_freq[j] = freq;
+	}
+	cpufreq[cpuidx].nfreq = i;
+	fclose(fp);
+	if (!all_avail)
+		error("all available frequencies not scanned");
+	return SLURM_SUCCESS;
+}
+
 /*
  * called to check if the node supports setting CPU frequency
  * if so, initialize fields in cpu_freq_data structure
@@ -183,6 +252,8 @@ cpu_freq_init(slurmd_conf_t *conf)
 	char value[LINE_LEN];
 	unsigned int i, j;
 
+	debug_flags = slurm_get_debug_flags(); /* init for slurmd */
+
 	xfree(slurmd_spooldir);
 	slurmd_spooldir = xstrdup(conf->spooldir);
 
@@ -203,9 +274,13 @@ cpu_freq_init(slurmd_conf_t *conf)
 	/* get the cpu frequency info into the cpu_freq_data structure */
 	cpu_freq_count = conf->block_map_size;
 	if (!cpufreq) {
+		int cpuidx;
 		cpufreq = (struct cpu_freq_data *)
 			  xmalloc(cpu_freq_count *
 				  sizeof(struct cpu_freq_data));
+
+		for (cpuidx = 0; cpuidx < cpu_freq_count; cpuidx++)
+			_cpu_freq_init_data(cpuidx);
 	}
 
 	debug2("Gathering cpu frequency information for %u cpus",
@@ -215,58 +290,55 @@ cpu_freq_init(slurmd_conf_t *conf)
 			 PATH_TO_CPU
 			 "cpu%u/cpufreq/scaling_available_governors", i);
 		if ((fp = fopen(path, "r")) == NULL)
-			goto log_it;
+			continue;
 		if (fgets(value, LINE_LEN, fp) == NULL) {
 			fclose(fp);
-			goto log_it;
+			continue;
 		}
-		if (strstr(value, "conservative"))
+		if (strstr(value, "conservative")) {
 			cpufreq[i].avail_governors |= GOV_CONSERVATIVE;
-		if (strstr(value, "ondemand"))
+			if ((i == 0) && (debug_flags & DEBUG_FLAG_CPU_FREQ)) {
+				info("cpu_freq: Conservative governor "
+				     "defined on cpu 0");
+			}
+		}
+		if (strstr(value, "ondemand")) {
 			cpufreq[i].avail_governors |= GOV_ONDEMAND;
-		if (strstr(value, "performance"))
+			if ((i == 0) && (debug_flags & DEBUG_FLAG_CPU_FREQ)) {
+				info("cpu_freq: OnDemand governor "
+				     "defined on cpu 0");
+			}
+		}
+		if (strstr(value, "performance")) {
 			cpufreq[i].avail_governors |= GOV_PERFORMANCE;
-		if (strstr(value, "powersave"))
+			if ((i == 0) && (debug_flags & DEBUG_FLAG_CPU_FREQ)) {
+				info("cpu_freq: Performance governor "
+				     "defined on cpu 0");
+			}
+		}
+		if (strstr(value, "powersave")) {
 			cpufreq[i].avail_governors |= GOV_POWERSAVE;
-		if (strstr(value, "userspace"))
-			cpufreq[i].avail_governors |= GOV_USERSPACE;
-		fclose(fp);
-
-		if (!(cpufreq[i].avail_governors & GOV_USERSPACE))
-			goto log_it;
-
-		snprintf(path, sizeof(path),
-			 PATH_TO_CPU "cpu%u/cpufreq/scaling_governor", i);
-		if ((fp = fopen(path, "r")) == NULL)
-			goto log_it;
-		if (fgets(value, LINE_LEN, fp) == NULL) {
-			fclose(fp);
-			goto log_it;
+			if ((i == 0) && (debug_flags & DEBUG_FLAG_CPU_FREQ)) {
+				info("cpu_freq: PowerSave governor "
+				     "defined on cpu 0");
+			}
 		}
-		if (strlen(value) >= GOV_NAME_LEN) {
-			fclose(fp);
-			goto log_it;
+		if (strstr(value, "userspace")) {
+			cpufreq[i].avail_governors |= GOV_USERSPACE;
+			if ((i == 0) && (debug_flags & DEBUG_FLAG_CPU_FREQ)) {
+				info("cpu_freq: UserSpace governor "
+				     "defined on cpu 0");
+			}
 		}
-		strcpy(cpufreq[i].orig_governor, value);
 		fclose(fp);
-		j = strlen(cpufreq[i].orig_governor);
-		if ((j > 0) && (cpufreq[i].orig_governor[j - 1] == '\n'))
-			cpufreq[i].orig_governor[j - 1] = '\0';
-
-		snprintf(path, sizeof(path),
-			 PATH_TO_CPU "cpu%u/cpufreq/scaling_min_freq", i);
-		if ((fp = fopen(path, "r")) == NULL)
+		if (_cpu_freq_cpu_avail(i) == SLURM_FAILURE)
 			continue;
-		if (fscanf (fp, "%u", &cpufreq[i].orig_frequency) < 0) {
-			error("cpu_freq_cgroup_valid: Could not read "
-			      "scaling_min_freq");
+		if ((i == 0) && (debug_flags & DEBUG_FLAG_CPU_FREQ)) {
+			for (j = 0; j < cpufreq[i].nfreq; j++) {
+				info("cpu_freq: frequency %u defined on cpu 0",
+				     cpufreq[i].avail_freq[j]);
+			}
 		}
-		fclose(fp);
-
-log_it:		debug("cpu_freq_init: CPU:%u reset_freq:%u avail_gov:%x "
-		      "orig_governor:%s",
-		      i, cpufreq[i].orig_frequency, cpufreq[i].avail_governors,
-		      cpufreq[i].orig_governor);
 	}
 	return;
 }
@@ -278,10 +350,21 @@ cpu_freq_fini(void)
 	xfree(slurmd_spooldir);
 }
 
+/*
+ * reset local static state (allowed governors, debug flags) on slurmd reconfig
+ */
+extern void
+cpu_freq_reconfig(void)
+{
+	/* reset local static variables */
+	cpu_freq_govs = 0;
+	debug_flags = slurm_get_debug_flags();
+}
+
 /*
  * Send the cpu_frequency table info to slurmstepd
  */
-void
+extern void
 cpu_freq_send_info(int fd)
 {
 	if (cpu_freq_count) {
@@ -302,7 +385,7 @@ rwfail:
 /*
  * Receive the cpu_frequency table info from slurmd
  */
-void
+extern void
 cpu_freq_recv_info(int fd)
 {
 	safe_read(fd, &cpu_freq_count, sizeof(uint16_t));
@@ -326,13 +409,12 @@ rwfail:
 	return;
 }
 
-
 /*
  * Validate the cpus and select the frequency to set
  * Called from task cpuset code with task launch request containing
  *  a pointer to a hex map string of the cpus to be used by this step
  */
-void
+extern void
 cpu_freq_cpuset_validate(stepd_step_rec_t *job)
 {
 	int cpuidx, cpu_num;
@@ -342,13 +424,19 @@ cpu_freq_cpuset_validate(stepd_step_rec_t *job)
 	char *cpu_str;
 	char *savestr = NULL;
 
-	debug2("cpu_freq_cpuset_validate: request = %12d  %8x",
-	       job->cpu_freq, job->cpu_freq);
-	debug2("  jobid=%u, stepid=%u, tasks=%u cpu/task=%u, cpus=%u",
-	     job->jobid, job->stepid, job->node_tasks,
-	       job->cpus_per_task, job->cpus);
-	debug2("  cpu_bind_type=%4x, cpu_bind map=%s",
-	       job->cpu_bind_type, job->cpu_bind);
+	debug_flags = slurm_get_debug_flags(); /* init for slurmstepd */
+	if (debug_flags & DEBUG_FLAG_CPU_FREQ) {
+		info("cpu_freq_cpuset_validate: request: min=(%12d  %8x) "
+		      "max=(%12d %8x) governor=%8x",
+		      job->cpu_freq_min, job->cpu_freq_min,
+		      job->cpu_freq_max, job->cpu_freq_max,
+		      job->cpu_freq_gov);
+		info("  jobid=%u, stepid=%u, tasks=%u cpu/task=%u, cpus=%u",
+		     job->jobid, job->stepid, job->node_tasks,
+		     job->cpus_per_task, job->cpus);
+		info("  cpu_bind_type=%4x, cpu_bind map=%s",
+		     job->cpu_bind_type, job->cpu_bind);
+	}
 
 	if (!cpu_freq_count)
 		return;
@@ -396,8 +484,9 @@ cpu_freq_cpuset_validate(stepd_step_rec_t *job)
 	} while ( (cpu_str = strtok_r(NULL, ",", &savestr) ) != NULL);
 
 	for (cpuidx = 0; cpuidx < cpu_freq_count; cpuidx++) {
+		_cpu_freq_init_data(cpuidx);
 		if (bit_test(cpus_to_set, cpuidx)) {
-			_cpu_freq_find_valid(job->cpu_freq, cpuidx);
+			_cpu_freq_setup_data(job, cpuidx);
 		}
 	}
 	cpu_freq_set(job);
@@ -408,13 +497,12 @@ cpu_freq_cpuset_validate(stepd_step_rec_t *job)
 	return;
 }
 
-
 /*
  * Validate the cpus and select the frequency to set
  * Called from task cgroup cpuset code with string containing
  *  the list of cpus to be used by this step
  */
-void
+extern void
 cpu_freq_cgroup_validate(stepd_step_rec_t *job, char *step_alloc_cores)
 {
 	uint16_t start  = USHRT_MAX;
@@ -422,19 +510,28 @@ cpu_freq_cgroup_validate(stepd_step_rec_t *job, char *step_alloc_cores)
 	uint16_t cpuidx =  0;
 	char *core_range;
 
-	debug2("cpu_freq_cgroup_validate: request value = %12d  %8x",
-	       job->cpu_freq, job->cpu_freq);
-	debug2("  jobid=%u, stepid=%u, tasks=%u cpu/task=%u, cpus=%u",
-	       job->jobid,job->stepid,job->node_tasks,
-	       job->cpus_per_task,job->cpus);
-	debug2("  cpu_bind_type=%4x, cpu_bind map=%s",
-	       job->cpu_bind_type, job->cpu_bind);
-	debug2("  step logical cores = %s, step physical cores = %s",
-	       job->step_alloc_cores, step_alloc_cores);
-
+	debug_flags = slurm_get_debug_flags(); /* init for slurmstepd */
+	if (debug_flags & DEBUG_FLAG_CPU_FREQ) {
+		info("cpu_freq_cgroup_validate: request: min=(%12d  %8x) "
+				"max=(%12d %8x) governor=%8x",
+		       job->cpu_freq_min, job->cpu_freq_min,
+		       job->cpu_freq_max, job->cpu_freq_max,
+		       job->cpu_freq_gov);
+		info("  jobid=%u, stepid=%u, tasks=%u cpu/task=%u, cpus=%u",
+		       job->jobid,job->stepid,job->node_tasks,
+		       job->cpus_per_task,job->cpus);
+		info("  cpu_bind_type=%4x, cpu_bind map=%s",
+		       job->cpu_bind_type, job->cpu_bind);
+		info("  step logical cores = %s, step physical cores = %s",
+		       job->step_alloc_cores, step_alloc_cores);
+	}
 	if (!cpu_freq_count)
 		return;
 
+	for (cpuidx = 0; cpuidx < cpu_freq_count; cpuidx++) {
+		_cpu_freq_init_data(cpuidx);
+	}
+
 	/* set entries in cpu frequency table for this step's cpus */
 	core_range = step_alloc_cores;
 	while ( (cpuidx = _cpu_freq_next_cpu(&core_range, &cpuidx,
@@ -444,13 +541,12 @@ cpu_freq_cgroup_validate(stepd_step_rec_t *job, char *step_alloc_cores)
 			  cpuidx, cpu_freq_count);
 		    return;
 		}
-		_cpu_freq_find_valid(job->cpu_freq, cpuidx);
+		_cpu_freq_setup_data(job, cpuidx);
 	}
 	cpu_freq_set(job);
 	return;
 }
 
-
 /*
  * get the next number in a range
  * assumes range is well-formed, i.e., monotonically increasing,
@@ -525,258 +621,586 @@ _cpu_freq_next_cpu(char **core_range, uint16_t *cpuidx,
 }
 
 /*
- * Compute the right frequency value to set, based on request
+ * Find current governor on this cpu
  *
- * input: job record containing cpu frequency parameter
- * input: index to current cpu entry in cpu_freq_data table
- *
- * sets "new_frequency" table entry if valid value found
+ * Return: SLURM_SUCCESS or SLURM_FAILURE
  */
-void
-_cpu_freq_find_valid(uint32_t cpu_freq, int cpuidx)
+static int
+_cpu_freq_get_cur_gov(int cpuidx)
+{
+	FILE *fp = NULL;
+	char path[SYSFS_PATH_MAX], gov_value[LINE_LEN];
+	int j;
+
+	snprintf(path, sizeof(path),
+		 PATH_TO_CPU "cpu%u/cpufreq/scaling_governor", cpuidx);
+	if ((fp = fopen(path, "r")) == NULL) {
+		error("%s: Could not open scaling_governor", __func__);
+		return SLURM_FAILURE;
+	}
+	if (fgets(gov_value, LINE_LEN, fp) == NULL) {
+		error("%s: Could not read scaling_governor", __func__);
+		fclose(fp);
+		return SLURM_FAILURE;
+	}
+	if (strlen(gov_value) >= GOV_NAME_LEN) {
+		error("%s: scaling_governor is to long", __func__);
+		fclose(fp);
+		return SLURM_FAILURE;
+	}
+	strcpy(cpufreq[cpuidx].org_governor, gov_value);
+	fclose(fp);
+	j = strlen(cpufreq[cpuidx].org_governor);
+	if ((j > 0) && (cpufreq[cpuidx].org_governor[j - 1] == '\n'))
+		cpufreq[cpuidx].org_governor[j - 1] = '\0';
+	return SLURM_SUCCESS;
+}
+
+/*
+ * set cpu governor
+ */
+static int
+_cpu_freq_set_gov(stepd_step_rec_t *job, int cpuidx, char* gov )
 {
-	unsigned int j, freq_med = 0;
-	uint32_t  freq_list[FREQ_LIST_MAX] =  { 0 };
 	char path[SYSFS_PATH_MAX];
+	FILE *fp;
+	int fd, rc;
+
+	rc = SLURM_SUCCESS;
+	snprintf(path, sizeof(path), PATH_TO_CPU
+		 "cpu%u/cpufreq/scaling_governor", cpuidx);
+	fd = _set_cpu_owner_lock(cpuidx, job->jobid);
+	if ((fp = fopen(path, "w"))) {
+		fputs(gov, fp);
+		fputc('\n', fp);
+		fclose(fp);
+	} else {
+		error("%s: Can not set CPU governor: %m", __func__);
+		rc = SLURM_FAILURE;
+	}
+	(void) close(fd);
+	return rc;
+}
+
+/*
+ * get one of scaling_min_freq, scaling_max_freq, cpuinfo_cur_freq
+ *
+ * Return: value of the requested option, or 0 on error
+ */
+static uint32_t
+_cpu_freq_get_scaling_freq(int cpuidx, char* option)
+{
 	FILE *fp = NULL;
+	char path[SYSFS_PATH_MAX];
+	uint32_t freq;
+	/* get the value from 'option' */
+	snprintf(path, sizeof(path), PATH_TO_CPU
+		"cpu%u/cpufreq/%s", cpuidx, option);
+	if ( ( fp = fopen(path, "r") ) == NULL ) {
+		error("%s: Could not open %s", __func__, option);
+		return 0;
+	}
+	if (fscanf (fp, "%u", &freq) < 1) {
+		error("%s: Could not read %s", __func__, option);
+		fclose(fp);
+		return 0;
+	}
+	fclose(fp);
+	return freq;
+}
+
+/*
+ * set one of scaling_min_freq, scaling_max_freq, scaling_setspeed
+ * -- assume governor already set to userspace ---
+ *
+ */
+static int
+_cpu_freq_set_scaling_freq(stepd_step_rec_t *job, int cpx, uint32_t freq,
+		char* option)
+{
+	char path[SYSFS_PATH_MAX];
+	FILE *fp;
+	int fd, rc;
+	uint32_t newfreq;
+
+	rc = SLURM_SUCCESS;
+	snprintf(path, sizeof(path), PATH_TO_CPU
+		 "cpu%u/cpufreq/%s", cpx, option);
+	fd = _set_cpu_owner_lock(cpx, job->jobid);
+	if ((fp = fopen(path, "w"))) {
+		fprintf(fp, "%u\n", freq);
+		fclose(fp);
+	} else {
+		error("%s: Can not set %s: %m", __func__, option);
+		rc = SLURM_FAILURE;
+	}
+	(void) close(fd);
+	if (debug_flags & DEBUG_FLAG_CPU_FREQ) {
+		newfreq = _cpu_freq_get_scaling_freq(cpx, option);
+		if (newfreq != freq) {
+			error("Failed to set freq_scaling %s to %u (org=%u)",
+			      option, freq, newfreq);
+		}
+	}
+	return rc;
+
+}
 
-	if ((cpu_freq == NO_VAL) || (cpu_freq == 0)) {	/* Default config */
-		;
-	} else if (cpu_freq & CPU_FREQ_RANGE_FLAG) {	/* Named values */
+/*
+ * Get current state
+ *
+ * IN:     cpuidx        - cpu to query
+ * Return: SLURM_SUCCESS or SLURM_FAILURE
+ */
+static int
+_cpu_freq_current_state(int cpuidx)
+{
+	uint32_t freq;
+	/*
+	 * Getting 'previous' values using the 'scaling' values rather
+	 * than the 'cpuinfo' values.
+	 * The 'cpuinfo' values are read only. min/max seem to be raw
+	 * hardware capability.
+	 * The 'scaling' values are set by the governor
+	 */
+	freq = _cpu_freq_get_scaling_freq(cpuidx, "scaling_cur_freq");
+	if (freq == 0)
+		return SLURM_FAILURE;
+	cpufreq[cpuidx].org_frequency = freq;
+	freq = _cpu_freq_get_scaling_freq(cpuidx, "scaling_min_freq");
+	if (freq == 0)
+		return SLURM_FAILURE;
+	cpufreq[cpuidx].org_min_freq = freq;
+	freq = _cpu_freq_get_scaling_freq(cpuidx, "scaling_max_freq");
+	if (freq == 0)
+		return SLURM_FAILURE;
+	cpufreq[cpuidx].org_max_freq = freq;
+
+	return _cpu_freq_get_cur_gov(cpuidx);
+}
+
+
+/*
+ * Copy string representation of a governor into cpufreq structure for a cpu.
+ */
+static int
+_cpu_freq_govspec_string(uint32_t cpu_freq, int cpuidx)
+{
+
+	if ((cpu_freq & CPU_FREQ_RANGE_FLAG) == 0)
+		return SLURM_FAILURE;
+		
+	switch(cpu_freq)
+	{
+	case CPU_FREQ_CONSERVATIVE:
+		if (cpufreq[cpuidx].avail_governors & GOV_CONSERVATIVE)
+			strcpy(cpufreq[cpuidx].new_governor, "conservative");
+		return SLURM_SUCCESS;
+	case CPU_FREQ_ONDEMAND:
+		if (cpufreq[cpuidx].avail_governors & GOV_ONDEMAND)
+			strcpy(cpufreq[cpuidx].new_governor,"ondemand");
+		return SLURM_SUCCESS;
+	case CPU_FREQ_PERFORMANCE:
+		if (cpufreq[cpuidx].avail_governors & GOV_PERFORMANCE)
+			strcpy(cpufreq[cpuidx].new_governor, "performance");
+		return SLURM_SUCCESS;
+	case CPU_FREQ_POWERSAVE:
+		if (cpufreq[cpuidx].avail_governors & GOV_POWERSAVE)
+			strcpy(cpufreq[cpuidx].new_governor, "powersave");
+		return SLURM_SUCCESS;
+	case CPU_FREQ_USERSPACE:
+		if (cpufreq[cpuidx].avail_governors & GOV_USERSPACE)
+			strcpy(cpufreq[cpuidx].new_governor, "userspace");
+		return SLURM_SUCCESS;
+	default:
+		return SLURM_FAILURE;
+	}
+}
+
+/*
+ * Convert frequency_spec into an actual frequency
+ * Returns -- frequency from avail frequency list, or NO_VAL
+ */
+uint32_t
+_cpu_freq_freqspec_num(uint32_t cpu_freq, int cpuidx)
+{
+	int fx, j;
+	if (!cpufreq || (cpufreq[cpuidx].nfreq == (uint8_t) NO_VAL))
+		return NO_VAL;
+	/* assume the frequency list is in ascending order */
+	if (cpu_freq & CPU_FREQ_RANGE_FLAG) {	/* Named values */
 		switch(cpu_freq)
 		{
 		case CPU_FREQ_LOW :
-			/* get the value from scale min freq */
-			snprintf(path, sizeof(path),
-				 PATH_TO_CPU
-				 "cpu%u/cpufreq/scaling_min_freq", cpuidx);
-			if ( ( fp = fopen(path, "r") ) == NULL ) {
-				error("%s: Could not open scaling_min_freq",
-				      __func__);
-				return;
-			}
-			if (fscanf (fp, "%u",
-				    &cpufreq[cpuidx].new_frequency) < 1) {
-				error("%s: Could not read scaling_min_freq",
-				      __func__);
-				return;
-			}
-			if (cpufreq[cpuidx].avail_governors & GOV_USERSPACE)
-				strcpy(cpufreq[cpuidx].new_governor,
-				       "userspace");
-			break;
-
+			return cpufreq[cpuidx].avail_freq[0];
+	
 		case CPU_FREQ_MEDIUM :
+			if (cpufreq[cpuidx].nfreq == 1)
+				return cpufreq[cpuidx].avail_freq[0];
+			fx = (cpufreq[cpuidx].nfreq - 1) / 2;
+			return cpufreq[cpuidx].avail_freq[fx];
+	
 		case CPU_FREQ_HIGHM1 :
-			snprintf(path, sizeof(path),
-				 PATH_TO_CPU
-				 "cpu%u/cpufreq/scaling_available_frequencies",
-				 cpuidx);
-			if ( ( fp = fopen(path, "r") ) == NULL ) {
-				error("%s: Could not open "
-				      "scaling_available_frequencies",
-				      __func__);
-				return;
-			}
-			for (j = 0; j < FREQ_LIST_MAX; j++) {
-				if ( fscanf(fp, "%u", &freq_list[j]) == EOF)
-					break;
-				freq_med = (j + 1) / 2;
-			}
-			if (cpu_freq == CPU_FREQ_MEDIUM) {
-				cpufreq[cpuidx].new_frequency =
-					freq_list[freq_med];
-			} else if (j > 0) {	/* Find second highest freq */
-				int high_loc = 0, m1_loc = -1;
-				for (j = 1; j < FREQ_LIST_MAX; j++) {
-					if (freq_list[j] == 0)
-						break;
-					if (freq_list[j] > freq_list[high_loc])
-						high_loc = j;
-				}
-				for (j = 0; j < FREQ_LIST_MAX; j++) {
-					if (freq_list[j] == 0)
-						break;
-					if (freq_list[j] == freq_list[high_loc])
-						continue;
-					if ((m1_loc == -1) ||
-					    (freq_list[j] > freq_list[m1_loc]))
-						m1_loc = j;
-				}
-				cpufreq[cpuidx].new_frequency =
-					freq_list[m1_loc];
-			}
-			if (cpufreq[cpuidx].avail_governors & GOV_USERSPACE)
-				strcpy(cpufreq[cpuidx].new_governor,
-				       "userspace");
-			break;
-
+			if (cpufreq[cpuidx].nfreq == 1)
+				return cpufreq[cpuidx].avail_freq[0];
+			fx = cpufreq[cpuidx].nfreq - 2;
+			return cpufreq[cpuidx].avail_freq[fx];
+	
 		case CPU_FREQ_HIGH :
-			/* get the value from scale max freq */
-			snprintf(path, sizeof(path),
-				 PATH_TO_CPU "cpu%u/cpufreq/scaling_max_freq",
-				 cpuidx);
-			if ( ( fp = fopen(path, "r") ) == NULL ) {
-				error("%s: Could not open scaling_max_freq",
-				      __func__);
-				return;
-			}
-			if (fscanf (fp, "%u",
-				    &cpufreq[cpuidx].new_frequency) < 1) {
-				error("%s: Could not read scaling_max_freq",
-				      __func__);
+			fx = cpufreq[cpuidx].nfreq - 1;
+			return cpufreq[cpuidx].avail_freq[fx];
+		
+		default:
+			return NO_VAL;
+		}
+	}		
+	for (j = 0; j < cpufreq[cpuidx].nfreq; j++) {
+		if (cpu_freq == cpufreq[cpuidx].avail_freq[j]) {
+			return cpufreq[cpuidx].avail_freq[j];
+		}
+		if (j > 0) {
+			if ((cpu_freq > cpufreq[cpuidx].avail_freq[j-1]) &&
+			    (cpu_freq < cpufreq[cpuidx].avail_freq[j])) {
+				return cpufreq[cpuidx].avail_freq[j];
 			}
-			if (cpufreq[cpuidx].avail_governors & GOV_USERSPACE)
-				strcpy(cpufreq[cpuidx].new_governor,
-				       "userspace");
-			break;
-
-		case CPU_FREQ_CONSERVATIVE:
-			if (cpufreq[cpuidx].avail_governors & GOV_CONSERVATIVE)
-				strcpy(cpufreq[cpuidx].new_governor,
-				       "conservative");
-			break;
-
-		case CPU_FREQ_ONDEMAND:
-			if (cpufreq[cpuidx].avail_governors & GOV_ONDEMAND)
-				strcpy(cpufreq[cpuidx].new_governor,"ondemand");
-			break;
+		}
+	}
 
-		case CPU_FREQ_PERFORMANCE:
-			if (cpufreq[cpuidx].avail_governors & GOV_PERFORMANCE)
-				strcpy(cpufreq[cpuidx].new_governor,
-				       "performance");
-			break;
+	error("failed to find frequency %d on cpu=%d", cpu_freq, cpuidx);
+	return NO_VAL;
+}
 
-		case CPU_FREQ_POWERSAVE:
-			if (cpufreq[cpuidx].avail_governors & GOV_POWERSAVE)
-				strcpy(cpufreq[cpuidx].new_governor,
-				       "powersave");
-			break;
+/*
+ * Initialize data structure
+ */
+static void
+_cpu_freq_init_data(int cpx)
+{
+	/* avail_governors -- set at initialization */
+	cpufreq[cpx].org_governor[0] = '\0';
+	cpufreq[cpx].new_governor[0] = '\0';
+	cpufreq[cpx].org_frequency = NO_VAL;
+	cpufreq[cpx].new_frequency = NO_VAL;
+	cpufreq[cpx].org_min_freq = NO_VAL;
+	cpufreq[cpx].new_min_freq = NO_VAL;
+	cpufreq[cpx].org_max_freq = NO_VAL;
+	cpufreq[cpx].new_max_freq = NO_VAL;
+}
+/*
+ * Set either the current frequency (speed), or the min/max frequency
+ * and governor, based on the --cpu-freq parameter
+ */
+static void
+_cpu_freq_setup_data(stepd_step_rec_t *job, int cpx)
+{
+	uint32_t freq;
 
-		case CPU_FREQ_USERSPACE:
-			if (cpufreq[cpuidx].avail_governors & GOV_USERSPACE)
-				strcpy(cpufreq[cpuidx].new_governor,
-				       "userspace");
-			break;
+	if (   (job->cpu_freq_min == NO_VAL || job->cpu_freq_min==0)
+	    && (job->cpu_freq_max == NO_VAL || job->cpu_freq_max==0)
+	    && (job->cpu_freq_gov == NO_VAL || job->cpu_freq_gov==0)) {
+		return; /* No --cpu-freq */
+	}
 
-		default :
-			error("%s: invalid cpu_freq value %u",
-			      __func__, cpu_freq);
-			return;
+	/* Get current state */
+	if (_cpu_freq_current_state(cpx) == SLURM_FAILURE)
+		return;
+	
+	if (job->cpu_freq_min == NO_VAL &&
+	    job->cpu_freq_max != NO_VAL &&
+	    job->cpu_freq_gov == NO_VAL) {
+		/* Pre version 15.08 behavior */
+		freq = _cpu_freq_freqspec_num(job->cpu_freq_max, cpx);
+		cpufreq[cpx].new_frequency = freq;
+		goto newfreq;
+	}
+	if (job->cpu_freq_gov == CPU_FREQ_USERSPACE) {
+		_cpu_freq_govspec_string(job->cpu_freq_gov, cpx);
+		if (job->cpu_freq_max == NO_VAL) {
+			return; /* pre version 15.08 behavior. */
 		}
+		/* Power capping */
+		freq = _cpu_freq_freqspec_num(job->cpu_freq_max, cpx);
+		cpufreq[cpx].new_frequency = freq;
+		freq = _cpu_freq_freqspec_num(job->cpu_freq_min, cpx);
+		cpufreq[cpx].new_min_freq = freq;
+		goto newfreq;
+	}
+	if (job->cpu_freq_min != NO_VAL && job->cpu_freq_max != NO_VAL) {
+		freq = _cpu_freq_freqspec_num(job->cpu_freq_min, cpx);
+		cpufreq[cpx].new_min_freq = freq;
+		freq = _cpu_freq_freqspec_num(job->cpu_freq_max, cpx);
+		cpufreq[cpx].new_max_freq = freq;
+	}
 
-		if (fp)
-			fclose(fp);
-
-	} else {
-		/* find legal value close to requested value */
-		snprintf(path, sizeof(path),
-			 PATH_TO_CPU
-			 "cpu%u/cpufreq/scaling_available_frequencies", cpuidx);
-		if ( ( fp = fopen(path, "r") ) == NULL )
-			return;
-		for (j = 0; j < FREQ_LIST_MAX; j++) {
-
-			if ( fscanf(fp, "%u", &freq_list[j]) == EOF)
-				break;
-			if (cpu_freq == freq_list[j]) {
-				cpufreq[cpuidx].new_frequency = freq_list[j];
-				break;
-			}
-			if (j > 0) {
-				if (freq_list[j] > freq_list[j-1] ) {
-					/* ascending order */
-					if ((cpu_freq > freq_list[j-1]) &&
-					    (cpu_freq < freq_list[j])) {
-						cpufreq[cpuidx].new_frequency =
-							freq_list[j];
-						break;
-					}
-				} else {
-					/* descending order */
-					if ((cpu_freq > freq_list[j]) &&
-					    (cpu_freq < freq_list[j-1])) {
-						cpufreq[cpuidx].new_frequency =
-							freq_list[j];
-						break;
-					}
-				}
-			}
+	if (job->cpu_freq_gov != NO_VAL) {
+		_cpu_freq_govspec_string(job->cpu_freq_gov, cpx);
+	}
+newfreq:
+	/* Make sure a 'new' frequency is within scaling min/max */
+	if (cpufreq[cpx].new_frequency != NO_VAL) {
+		if (cpufreq[cpx].new_frequency < cpufreq[cpx].org_min_freq) {
+			cpufreq[cpx].new_min_freq = cpufreq[cpx].new_frequency;
+		}
+		if (cpufreq[cpx].new_frequency > cpufreq[cpx].org_max_freq) {
+			cpufreq[cpx].new_max_freq = cpufreq[cpx].new_frequency;
 		}
-		fclose(fp);
-		if (cpufreq[cpuidx].avail_governors & GOV_USERSPACE)
-			strcpy(cpufreq[cpuidx].new_governor, "userspace");
 	}
-
-	debug3("%s: CPU:%u, frequency:%u governor:%s",
-	       __func__, cpuidx, cpufreq[cpuidx].new_frequency,
-	       cpufreq[cpuidx].new_governor);
-
-	return;
 }
 
+/*
+ * check an argument against valid governors.
+ *
+ * Input:  - arg     - string value of governor
+ *         - illegal - combination of enums for governors not allowed.
+ * Returns - enum of governor found
+ * 	   - or 0 if not found
+ */
+static uint32_t
+_cpu_freq_check_gov(const char* arg, uint32_t illegal)
+{
+	uint32_t rc = 0;
+	if (strncasecmp(arg, "co", 2) == 0) {
+		rc = CPU_FREQ_CONSERVATIVE;
+	} else if (strncasecmp(arg, "perf", 4) == 0) {
+		rc = CPU_FREQ_PERFORMANCE;
+	} else if (strncasecmp(arg, "pow", 3) == 0) {
+		rc = CPU_FREQ_POWERSAVE;
+	} else if (strncasecmp(arg, "user", 4) == 0) {
+		rc = CPU_FREQ_USERSPACE;
+	} else if (strncasecmp(arg, "onde", 4) == 0) {
+		rc = CPU_FREQ_ONDEMAND;
+	}
+	rc &= (~illegal);
+	if (rc == 0)
+		return 0;
+	return (rc | CPU_FREQ_RANGE_FLAG);
+}
 
 /*
- * Verify cpu_freq parameter
+ * check an argument for a frequency or frequency synonym.
  *
- * In addition to a numeric frequency value, we allow the user to specify
- * "low", "medium", "highm1", or "high" frequency plus "performance",
- * "powersave", "userspace" and "ondemand" governor
+ * Input:  - arg - string value of frequency
  *
- * returns -1 on error, 0 otherwise
+ * Returns - frequency
+ *         - enum for synonym
+ *         0 on error.
  */
-int
-cpu_freq_verify_param(const char *arg, uint32_t *cpu_freq)
+static uint32_t
+_cpu_freq_check_freq(const char* arg)
 {
 	char *end;
 	uint32_t frequency;
 
-	if (arg == NULL) {
-		return 0;
-	}
-
-	if ( (frequency = strtoul(arg, &end, 10) )) {
-		*cpu_freq = frequency;
-		return 0;
-	}
-
 	if (strncasecmp(arg, "lo", 2) == 0) {
-		*cpu_freq = CPU_FREQ_LOW;
-		return 0;
-	} else if (strncasecmp(arg, "co", 2) == 0) {
-		*cpu_freq = CPU_FREQ_CONSERVATIVE;
-		return 0;
+		return CPU_FREQ_LOW;
 	} else if (strncasecmp(arg, "him1", 4) == 0 ||
 		   strncasecmp(arg, "highm1", 6) == 0) {
-		*cpu_freq = CPU_FREQ_HIGHM1;
-		return 0;
+		return CPU_FREQ_HIGHM1;
 	} else if (strncasecmp(arg, "hi", 2) == 0) {
-		*cpu_freq = CPU_FREQ_HIGH;
-		return 0;
+		return CPU_FREQ_HIGH;
 	} else if (strncasecmp(arg, "med", 3) == 0) {
-		*cpu_freq = CPU_FREQ_MEDIUM;
-		return 0;
-	} else if (strncasecmp(arg, "perf", 4) == 0) {
-		*cpu_freq = CPU_FREQ_PERFORMANCE;
-		return 0;
-	} else if (strncasecmp(arg, "pow", 3) == 0) {
-		*cpu_freq = CPU_FREQ_POWERSAVE;
-		return 0;
-	} else if (strncasecmp(arg, "user", 4) == 0) {
-		*cpu_freq = CPU_FREQ_USERSPACE;
-		return 0;
-	} else if (strncasecmp(arg, "onde", 4) == 0) {
-		*cpu_freq = CPU_FREQ_ONDEMAND;
-		return 0;
+		return CPU_FREQ_MEDIUM;
+	}
+	if ( (frequency = strtoul(arg, &end, 10) )) {
+		return frequency;
 	}
-
 	error("unrecognized --cpu-freq argument \"%s\"", arg);
-	return -1;
+	return 0;
+}
+
+/*
+ * set cpu frequency if possible for each cpu of the job step
+ */
+extern void
+cpu_freq_set(stepd_step_rec_t *job)
+{
+	char freq_detail[100];
+	uint32_t freq;
+	int i, rc;
+
+	if ((!cpu_freq_count) || (!cpufreq))
+		return;
+	for (i = 0; i < cpu_freq_count; i++) {
+		if (cpufreq[i].new_frequency == NO_VAL
+		    && cpufreq[i].new_min_freq == NO_VAL
+	            && cpufreq[i].new_max_freq == NO_VAL
+		    && cpufreq[i].new_governor[0] == '\0')
+			continue; /* Nothing to set on this CPU */
+		if (debug_flags & DEBUG_FLAG_CPU_FREQ) {
+			info("cpu_freq: current_state cpu=%d org_min=%u "
+			     "org_freq=%u org_max=%u org_gpv=%s", i,
+			     cpufreq[i].org_min_freq,
+			     cpufreq[i].org_frequency,
+			     cpufreq[i].org_max_freq,
+			     cpufreq[i].org_governor);
+		}
+
+		/* Max must be set before min, per
+		 * www.kernel.org/doc/Documentation/cpu-freq/user-guide.txt
+		 */
+		if (cpufreq[i].new_max_freq != NO_VAL ) {
+			freq = cpufreq[i].new_max_freq;
+			if (cpufreq[i].org_frequency > freq) {
+				/* The current frequency is > requested max,
+				 * Set it so it is in range
+				 * have to go to UserSpace to do it. */
+				rc = _cpu_freq_set_gov(job, i, "userspace");
+				if (rc == SLURM_FAILURE)
+					return;
+				rc = _cpu_freq_set_scaling_freq(job, i, freq,
+						         "scaling_setspeed");
+				if (rc == SLURM_FAILURE)
+					continue;
+				if (cpufreq[i].new_governor[0] == '\0') {
+					/* Not requesting new gov, so restore */
+					rc = _cpu_freq_set_gov(job, i,
+						cpufreq[i].org_governor);
+					if (rc == SLURM_FAILURE)
+						continue;
+				}
+			}
+			rc = _cpu_freq_set_scaling_freq(job, i, freq,
+							"scaling_max_freq");
+			if (rc == SLURM_FAILURE)
+				continue;
+		}
+		if (cpufreq[i].new_min_freq != NO_VAL) {
+			freq = cpufreq[i].new_min_freq;
+			if (cpufreq[i].org_frequency < freq) {
+				/* The current frequency is < requested min,
+				 * Set it so it is in range
+				 * have to go to UserSpace to do it. */
+				rc = _cpu_freq_set_gov(job, i, "userspace");
+				if (rc == SLURM_FAILURE)
+					continue;
+				rc = _cpu_freq_set_scaling_freq(job, i, freq,
+						         "scaling_setspeed");
+				if (rc == SLURM_FAILURE)
+					continue;
+				if (cpufreq[i].new_governor[0] == '\0') {
+					/* Not requesting new gov, so restore */
+					rc= _cpu_freq_set_gov(job, i,
+						cpufreq[i].org_governor);
+					if (rc == SLURM_FAILURE)
+						continue;
+				}
+			}
+			rc= _cpu_freq_set_scaling_freq(job, i, freq,
+						       "scaling_min_freq");
+			if (rc == SLURM_FAILURE)
+				continue;
+		}
+		if (cpufreq[i].new_frequency != NO_VAL) {
+			if (strcmp(cpufreq[i].org_governor,"userspace")) {
+				rc = _cpu_freq_set_gov(job, i, "userspace");
+				if (rc == SLURM_FAILURE)
+					continue;
+			}
+			rc = _cpu_freq_set_scaling_freq(job, i,
+					cpufreq[i].new_frequency,
+					"scaling_setspeed");
+			if (rc == SLURM_FAILURE)
+				continue;
+		}
+		if (cpufreq[i].new_governor[0] != '\0') {
+			rc = _cpu_freq_set_gov(job, i, cpufreq[i].new_governor);
+			if (rc == SLURM_FAILURE)
+				continue;
+		}
+		if (debug_flags & DEBUG_FLAG_CPU_FREQ) {
+			cpu_freq_debug(NULL, NULL,
+					freq_detail, sizeof(freq_detail),
+					NO_VAL, cpufreq[i].new_min_freq,
+					cpufreq[i].new_max_freq,
+					cpufreq[i].new_frequency);
+			if (cpufreq[i].new_governor[0] != '\0') {
+				info("cpu_freq: set cpu=%d %s Governor=%s",
+				     i, freq_detail, cpufreq[i].new_governor);
+			} else {
+				info("cpu_freq: reset cpu=%d %s", i,
+				     freq_detail);
+			}
+		}
+	}
+}
+
+/*
+ * reset the cpus used by the process to their
+ * default frequency and governor type
+ */
+extern void
+cpu_freq_reset(stepd_step_rec_t *job)
+{
+	int i, rc, fd;
+	char freq_detail[100];
+
+	if ((!cpu_freq_count) || (!cpufreq))
+		return;
+
+	for (i = 0; i < cpu_freq_count; i++) {
+		if (cpufreq[i].new_frequency == NO_VAL
+		    && cpufreq[i].new_min_freq == NO_VAL
+		    && cpufreq[i].new_max_freq == NO_VAL
+		    && cpufreq[i].new_governor[0] == '\0')
+			continue; /* Nothing to reset on this CPU */
+
+		fd = _test_cpu_owner_lock(i, job->jobid);
+		if (fd < 0)
+			continue;
+
+		if (cpufreq[i].new_frequency != NO_VAL) {
+			rc = _cpu_freq_set_gov(job, i, "userspace");
+			if (rc == SLURM_FAILURE)
+				continue;
+			rc = _cpu_freq_set_scaling_freq(job, i,
+					cpufreq[i].org_frequency,
+					"scaling_setspeed");
+			if (rc == SLURM_FAILURE)
+				continue;
+			cpufreq[i].new_governor[0] = 'u'; /* force gov reset */
+		}
+		/* Max must be set before min, per
+		 * www.kernel.org/doc/Documentation/cpu-freq/user-guide.txt
+		 */
+		if (cpufreq[i].new_max_freq != NO_VAL) {
+			rc = _cpu_freq_set_scaling_freq(job, i,
+					cpufreq[i].org_max_freq,
+					"scaling_max_freq");
+			if (rc == SLURM_FAILURE)
+				continue;
+		}
+		if (cpufreq[i].new_min_freq != NO_VAL) {
+			rc = _cpu_freq_set_scaling_freq(job, i,
+					cpufreq[i].org_min_freq,
+					"scaling_min_freq");
+			if (rc == SLURM_FAILURE)
+				continue;
+		}
+		if (cpufreq[i].new_governor[0] != '\0') {
+			rc = _cpu_freq_set_gov(job, i, cpufreq[i].org_governor);
+			if (rc == SLURM_FAILURE)
+				continue;
+		}
+
+		if (debug_flags & DEBUG_FLAG_CPU_FREQ) {
+			cpu_freq_debug(NULL, NULL,
+					freq_detail, sizeof(freq_detail),
+					NO_VAL, cpufreq[i].org_min_freq,
+					cpufreq[i].org_max_freq,
+					cpufreq[i].org_frequency);
+			if (cpufreq[i].new_governor[0] != '\0') {
+				info("cpu_freq: reset cpu=%d %s Governor=%s",
+				     i, freq_detail, cpufreq[i].org_governor);
+			} else {
+				info("cpu_freq: reset cpu=%d %s", i,
+				     freq_detail);
+			}
+		}
+	}
 }
 
 /* Convert a cpu_freq number to its equivalent string */
-void
+extern void
 cpu_freq_to_string(char *buf, int buf_size, uint32_t cpu_freq)
 {
 	if (cpu_freq == CPU_FREQ_LOW)
@@ -804,155 +1228,419 @@ cpu_freq_to_string(char *buf, int buf_size, uint32_t cpu_freq)
 			buf[0] = '\0';
 	} else
 		convert_num_unit2((double)cpu_freq, buf, buf_size,
-				  UNIT_KILO, 1000, false);
+				  UNIT_KILO, 1000, 0);
 }
 
 /*
- * set cpu frequency if possible for each cpu of the job step
+ * Set environment variables associated with the frequency variables.
  */
-void
-cpu_freq_set(stepd_step_rec_t *job)
+extern int
+cpu_freq_set_env(char* var, uint32_t argmin, uint32_t argmax, uint32_t arggov)
 {
-	char path[SYSFS_PATH_MAX];
-	FILE *fp;
-	char freq_value[LINE_LEN], gov_value[LINE_LEN];
-	unsigned int i, j;
-	int fd;
+	uint32_t min, max, gov;
+	char bfgov[32], bfmin[32], bfmax[32], bfall[96];
+	bfgov[0] = '\0';
+	bfmin[0] = '\0';
+	bfmax[0] = '\0';
+
+	/*
+	 * Default value from command line is NO_VAL,
+	 * Default value from slurmstepd for batch jobs is 0
+	 * Convert slurmstepd values to command line ones.
+	 */
+	min = argmin;
+	if (min == 0)
+		min = NO_VAL;
+	max = argmax;
+	if (max == 0)
+		max = NO_VAL;
+	gov = arggov;
+	if (gov == 0)
+		gov = NO_VAL;
+
+	if ((min == NO_VAL) && (max == NO_VAL) && (gov == NO_VAL))
+		return SLURM_SUCCESS;
+
+	if (min != NO_VAL) {
+		if (min & CPU_FREQ_RANGE_FLAG) {
+			cpu_freq_to_string(bfmin, sizeof(bfmin), min);
+		} else {
+			sprintf(bfmin, "%u", min);
+		}
+	}
+	if (max != NO_VAL) {
+		if (max & CPU_FREQ_RANGE_FLAG) {
+			cpu_freq_to_string(bfmax, sizeof(bfmax), max);
+		} else {
+			sprintf(bfmax, "%u", max);
+		}
+	}
+	if (gov != NO_VAL) {
+		cpu_freq_to_string(bfgov, sizeof(bfgov), gov);
+	}
+	if ((min != NO_VAL) && (max != NO_VAL) && (gov != NO_VAL)) {
+		sprintf(bfall, "%s-%s:%s", bfmin, bfmax, bfgov);
+	} else if ((min != NO_VAL) && (max != NO_VAL)) {
+		sprintf(bfall, "%s-%s", bfmin, bfmax);
+	} else if (max != NO_VAL) {
+		sprintf(bfall, "%s", bfmax);
+	} else if (gov != NO_VAL) {
+		sprintf(bfall, "%s", bfgov);
+	}
+	if (setenvf(NULL, var, "%s", bfall)) {
+		error("Unable to set %s", var);
+		return SLURM_FAILURE;
+	}
+	return SLURM_SUCCESS;
+}
 
-	if ((!cpu_freq_count) || (!cpufreq))
-		return;
+/* Convert a composite cpu governor enum to its equivalent string
+ *
+ * Input:  - buf   - buffer to contain string
+ *         - bufsz - size of buffer
+ *         - gpvs  - composite enum of governors
+ */
+extern void
+cpu_freq_govlist_to_string(char* buf, uint16_t bufsz, uint32_t govs)
+{
+	char *list = NULL;
+
+	if ((govs & CPU_FREQ_CONSERVATIVE) == CPU_FREQ_CONSERVATIVE) {
+		if (list == NULL)
+			list = xstrdup("Conservative");
+		else {
+			xstrcatchar(list,',');
+			xstrcat(list,"Conservative");
+		}
+	}
+	if ((govs & CPU_FREQ_PERFORMANCE) == CPU_FREQ_PERFORMANCE) {
+		if (list == NULL)
+			list = xstrdup("Performance");
+		else {
+			xstrcatchar(list,',');
+			xstrcat(list,"Performance");
+		}
+	}
+	if ((govs & CPU_FREQ_POWERSAVE) == CPU_FREQ_POWERSAVE) {
+		if (list == NULL)
+			list = xstrdup("PowerSave");
+		else {
+			xstrcatchar(list,',');
+			xstrcat(list,"PowerSave");
+		}
+	}
+	if ((govs & CPU_FREQ_ONDEMAND) == CPU_FREQ_ONDEMAND) {
+		if (list == NULL)
+			list = xstrdup("OnDemand");
+		else {
+			xstrcatchar(list,',');
+			xstrcat(list,"OnDemand");
+		}
+	}
+	if ((govs & CPU_FREQ_USERSPACE) == CPU_FREQ_USERSPACE) {
+		if (list == NULL)
+			list = xstrdup("UserSpace");
+		else {
+			xstrcatchar(list,',');
+			xstrcat(list,"UserSpace");
+		}
+	}
+	if (list) {
+		if (strlen(list) < bufsz)
+			strcpy(buf, list);
+		else
+			strncpy(buf, list, bufsz-1);
 
-	j = 0;
-	for (i = 0; i < cpu_freq_count; i++) {
-		bool reset_freq = false;
-		bool reset_gov = false;
-
-		if (cpufreq[i].new_frequency != 0)
-			reset_freq = true;
-		if (cpufreq[i].new_governor[0] != '\0')
-			reset_gov = true;
-		if (!reset_freq && !reset_gov)
-			continue;
+		xfree(list);
+	} else {
+		strncpy(buf,"No Governors defined", bufsz-1);
+	}
+}
 
-		fd = _set_cpu_owner_lock(i, job->jobid);
-		if (reset_gov) {
-			snprintf(gov_value, LINE_LEN, "%s",
-				 cpufreq[i].new_governor);
-			snprintf(path, sizeof(path),
-				 PATH_TO_CPU "cpu%u/cpufreq/scaling_governor",
-				 i);
-			if ((fp = fopen(path, "w"))) {
-				fputs(gov_value, fp);
-				fputc('\n', fp);
-				fclose(fp);
-			} else {
-				error("%s: Can not set CPU governor: %m",
-				      __func__);
-			}
-		}
+/*
+ * Verify slurm.conf CpuFreqDef option
+ *
+ * Input:  - arg  - frequency value to check
+ * 		    valid governor, low, medium, highm1, high,
+ * 		    or numeric frequency
+ *	   - freq - pointer to corresponging enum or numberic value
+ * Returns - -1 on error, else 0
+ */
+extern int
+cpu_freq_verify_def(const char *arg, uint32_t *freq)
+{
+	uint32_t cpufreq = 0;
 
-		if (reset_freq) {
-			snprintf(path, sizeof(path),
-				 PATH_TO_CPU "cpu%u/cpufreq/scaling_setspeed",
-				 i);
-			snprintf(freq_value, LINE_LEN, "%u",
-				 cpufreq[i].new_frequency);
-			if ((fp = fopen(path, "w"))) {
-				fputs(freq_value, fp);
-				fclose(fp);
-			} else {
-				error("%s: Can not set CPU frequency: %m",
-				      __func__);
-			}
-		} else {
-			strcpy(freq_value, "N/A");
-		}
-		(void) close(fd);
+	cpufreq = _cpu_freq_check_gov(arg, CPU_FREQ_USERSPACE);
+	if (cpufreq) {
+		debug3("cpu_freq_verify_def: %s set", arg);
+		*freq = cpufreq;
+		return 0;
+	}
+	cpufreq = _cpu_freq_check_freq(arg);
+	if (cpufreq == 0) {
+		error("cpu_freq_verify_def: CpuFreqDef=%s invalid", arg);
+		return -1;
+	}
+	debug3("cpu_freq_verify_def: %s set", arg);
+	*freq = cpufreq;
+	return 0;
+}
 
-		j++;
-		debug3("%s: CPU:%u frequency:%s governor:%s",
-		       __func__, i, freq_value, gov_value);
+/*
+ * Verify slurm.conf CpuFreqGovernors list
+ *
+ * Input:  - arg  - string list of governors
+ *	   - govs - pointer to composite of enum for each governor in list
+ * Returns - -1 on error, else 0
+ */
+extern int
+cpu_freq_verify_govlist(const char *arg, uint32_t *govs)
+{
+	char *list, *gov, *savestr;
+	uint32_t agov;
+
+	*govs = 0;
+	if (arg == NULL) {
+		error("cpu_freq_verify_govlist: governor list is empty");
+		return -1;
 	}
-	debug("%s: #cpus set = %u", __func__, j);
+
+	list = xstrdup(arg);
+	if ( (gov = strtok_r(list, ",", &savestr) ) == NULL) {
+		error("cpu_freq_verify_govlist: governor list '%s' invalid",
+				arg);
+		return -1;
+	}
+	do {
+		debug3("cpu_freq_verify_govlist: gov = %s", gov);
+		agov = _cpu_freq_check_gov(gov, 0);
+		if (agov == 0) {
+			error("cpu_freq_verify_govlist: governor '%s' invalid",
+				gov);
+			return -1;
+		}
+		*govs |= agov;
+	} while ( (gov = strtok_r(NULL, ",", &savestr) ) != NULL);
+	xfree(list);
+	return 0;
 }
 
 /*
- * reset the cpus used by the process to their
- * default frequency and governor type
+ * Verify cpu_freq command line option
+ *
+ * --cpu-freq=arg
+ *   where arg is p1{-p2{:p3}}
+ *
+ * - p1 can be  [#### | low | medium | high | highm1]
+ * 	which will set the current frequency, and set the governor to
+ * 	UserSpace.
+ * - p1 can be [Conservative | OnDemand | Performance | PowerSave | UserSpace]
+ *      which will set the governor to the corresponding value.
+ * - When p2 is present, p1 will be the minimum frequency and p2 will be
+ *   the maximum. The governor will not be changed.
+ * - p2 can be  [#### | medium | high | highm1] p2 must be greater than p1.
+ * - If the current frequency is < min, it will be set to min.
+ *   Likewise, if the current frequency is > max, it will be set to max.
+ * - p3 can be [Conservative | OnDemand | Performance | PowerSave | UserSpace]
+ *   which will set the governor to the corresponding value.
+ *   When p3 is UserSpace, the current frequency is set to p2.
+ *   p2 will have been set by PowerCapping.
+ *
+ * returns -1 on error, 0 otherwise
  */
-void
-cpu_freq_reset(stepd_step_rec_t *job)
+extern int
+cpu_freq_verify_cmdline(const char *arg,
+			uint32_t *cpu_freq_min,
+			uint32_t *cpu_freq_max,
+			uint32_t *cpu_freq_gov)
 {
-	char path[SYSFS_PATH_MAX];
-	FILE *fp;
-	char value[LINE_LEN];
-	unsigned int i, j;
-	uint32_t def_cpu_freq;
-	int fd;
+	char *poscolon, *posdash;
+	char *p1=NULL, *p2=NULL, *p3=NULL;
+	uint32_t frequency;
+	int rc = 0;
 
-	if ((!cpu_freq_count) || (!cpufreq))
-		return;
+	if (cpu_freq_govs == 0)
+		cpu_freq_govs = slurm_get_cpu_freq_govs();
 
-	def_cpu_freq = slurm_get_cpu_freq_def();
-	j = 0;
-	for (i = 0; i < cpu_freq_count; i++) {
-		bool reset_freq = false;
-		bool reset_gov = false;
-
-		if (cpufreq[i].new_frequency != 0)
-			reset_freq = true;
-		if (cpufreq[i].new_governor[0] != '\0')
-			reset_gov = true;
-		if (!reset_freq && !reset_gov)
-			continue;
-		fd = _test_cpu_owner_lock(i, job->jobid);
-		if (fd < 0)
-			continue;
 
-		cpufreq[i].new_frequency = 0;
-		cpufreq[i].new_governor[0] = '\0';
-		_cpu_freq_find_valid(def_cpu_freq, i);
-		if (cpufreq[i].new_frequency == 0)
-			cpufreq[i].new_frequency = cpufreq[i].orig_frequency;
-		if (cpufreq[i].new_governor[0] == '\0') {
-			strcpy(cpufreq[i].new_governor,
-			       cpufreq[i].orig_governor);
-		}
-
-		if (reset_freq) {
-			snprintf(path, sizeof(path),
-				 PATH_TO_CPU "cpu%u/cpufreq/scaling_setspeed",
-				 i);
-			snprintf(value, LINE_LEN, "%u",
-				 cpufreq[i].new_frequency);
-			if ((fp = fopen(path, "w"))) {
-				fputs(value, fp);
-				fclose(fp);
-			} else {
-				error("%s: Can not set CPU frequency: %m",
-				      __func__);
-			}
+	if (arg == NULL || cpu_freq_min == NULL || cpu_freq_max == NULL
+			|| cpu_freq_gov == NULL) {
+		return -1;
+	}
+	*cpu_freq_min = NO_VAL;
+	*cpu_freq_max = NO_VAL;
+	*cpu_freq_gov = NO_VAL;
+	poscolon = strchr(arg,':');
+	if (poscolon) {
+		p3 = xstrdup((poscolon+1));
+	}
+	posdash = strchr(arg,'-');
+	if (posdash) {
+		p1 = xstrndup(arg, (posdash-arg));
+		if (poscolon) {
+			p2 = xstrndup((posdash+1), ((poscolon-posdash)-1));
+		} else {
+			p2 = xstrdup((posdash+1));
+		}
+	} else {
+		if (poscolon) {
+			p1 = xstrndup(arg, (poscolon-arg));
+		} else {
+			p1 = xstrdup(arg);
+		}
+	}
+
+	frequency = _cpu_freq_check_gov(p1, 0);
+	if (frequency != 0) {
+		if (p3) {
+			error("governor cannot be specified twice "
+			      "%s{-}:%s in --cpu-freq", p1, p3);
+			rc = -1;
+			goto clean;
+		}
+		*cpu_freq_gov = frequency;
+	} else {
+		frequency = _cpu_freq_check_freq(p1);
+		if (frequency == 0) {
+			rc = -1;
+			goto clean;
+		}
+		*cpu_freq_max = frequency;
+	}
+	if (p2) {
+		frequency = _cpu_freq_check_freq(p2);
+		if (frequency == 0) {
+			rc = -1;
+			goto clean;
+		}
+		*cpu_freq_min = *cpu_freq_max;
+		*cpu_freq_max = frequency;
+		if (*cpu_freq_max < *cpu_freq_min) {
+			error("min cpu-frec (%s) must be < max cpu-freq (%s)",
+			      p1, p2);
+			rc = -1;
+			goto clean;
 		}
+	}
 
-		if (reset_gov) {
-			snprintf(path, sizeof(path),
-				 PATH_TO_CPU "cpu%u/cpufreq/scaling_governor",
-				 i);
-			if ((fp = fopen(path, "w"))) {
-				fputs(cpufreq[i].new_governor, fp);
-				fputc('\n', fp);
-				fclose(fp);
-			} else {
-				error("%s: Can not set CPU governor: %m",
-				      __func__);
-			}
+	if (p3) {
+		if (!p2) {
+			error("gov on cpu-frec (%s) illegal without max", p3);
+			rc = -1;
+			goto clean;
+		}
+		frequency = _cpu_freq_check_gov(p3, 0);
+		if (frequency == 0) {
+			error("illegal governor: %s on --cpu-freq", p3);
+			rc = -1;
+			goto clean;
+		}
+		*cpu_freq_gov = frequency;
+	}
+
+clean:
+	if (*cpu_freq_gov != NO_VAL) {
+		if (((*cpu_freq_gov & cpu_freq_govs)
+		    & ~CPU_FREQ_RANGE_FLAG) == 0) {
+			error("governor of %s is not allowed in slurm.conf",
+			      arg);
+			*cpu_freq_gov = NO_VAL;
+			rc = -1;
 		}
-		(void) close(fd);
+	}
+	if (debug_flags & DEBUG_FLAG_CPU_FREQ) {
+		cpu_freq_debug("command", "NO_VAL", NULL, 0,
+			       *cpu_freq_gov, *cpu_freq_min,
+			       *cpu_freq_max, NO_VAL);
+	}
+	xfree(p1);
+	xfree(p2);
+	xfree(p3);
+	return rc;
 
-		j++;
-		debug3("%s: CPU:%u frequency:%u governor:%s",
-		       __func__, i, cpufreq[i].new_frequency,
-		       cpufreq[i].new_governor);
+}
+
+/*
+ * Convert frequency parameters to strings
+ * Typically called to produce string for a log or reporting utility.
+ *
+ * When label!=NULL, info message is put to log. This is convenient for
+ *      inserting debug calls to verify values in structures or messages.
+ * noval_str==NULL allows missing parameters not to be reported.
+ * freq_str is a buffer to hold the composite string for all input values.
+ * freq_len is length of freq_str
+ * gov is a governor value
+ * min is a minumum value
+ * max is a maximum value
+ * freq is a (current) frequency value.
+ *
+ * Returns 0 if all parameters are NO_VAL (or 0)
+ */
+extern int
+cpu_freq_debug(char* label, char* noval_str, char* freq_str, int freq_len,
+		  uint32_t gov, uint32_t min, uint32_t max, uint32_t freq)
+{
+	int rc = 0;
+	char bfgov[64], bfmin[32], bfmax[32], bffreq[32];
+	char *sep1 = " ", *sep2 = " ", *sep3 = " ";
+
+	bfgov[0] = '\0';
+	bfmin[0] = '\0';
+	bfmax[0] = '\0';
+	bffreq[0] = '\0';
+
+	if (freq != NO_VAL && freq != 0) {
+		rc = 1;
+		sprintf(bffreq, "cur_freq=%u", freq);
+	} else {
+		sep1 = "";
+	}
+	if (min != NO_VAL && min != 0) {
+		rc = 1;
+		if (min & CPU_FREQ_RANGE_FLAG) {
+			strcpy(bfmin, "CPU_min_freq=");
+			cpu_freq_to_string(&bfmin[13], (sizeof(bfmin)-13), min);
+		} else {
+			sprintf(bfmin, "CPU_min_freq=%u", min);
+		}
+	} else if (noval_str) {
+		strcpy(bfmin, noval_str);
+	} else {
+		sep2 = "";
+	}
+	if (max != NO_VAL && max != 0) {
+		rc = 1;
+		if (max & CPU_FREQ_RANGE_FLAG) {
+			strcpy(bfmax, "CPU_max_freq=");
+			cpu_freq_to_string(&bfmax[13], (sizeof(bfmax)-13), max);
+		} else {
+			sprintf(bfmax, "CPU_max_freq=%u", max);
+		}
+	} else if (noval_str) {
+		strcpy(bfmax, noval_str);
+	} else {
+		sep3 = "";
 	}
-	debug("%s: #cpus reset = %u", __func__, j);
+	if ((gov != NO_VAL) && (gov != 0)) {
+		rc = 1;
+		strcpy(bfgov, "Governor=");
+		cpu_freq_to_string(&bfgov[9], (sizeof(bfgov)-9), gov);
+	} else if (noval_str) {
+		strcpy(bfgov, noval_str);
+	}
+	if (rc) {
+		if (freq_str) {
+			snprintf(freq_str, freq_len, "%s%s%s%s%s%s%s",
+				 bffreq, sep1, bfmin, sep2, bfmax, sep3, bfgov);
+		}
+	} else {
+		if (freq_str)
+			freq_str[0] = '\0';
+	}
+	if (label) {
+		info("cpu-freq: %s :: %s%s%s%s%s%s%s", label,
+		     bffreq, sep1, bfmin, sep2, bfmax, sep3, bfgov);
+	}
+	return rc;
 }
diff --git a/src/common/cpu_frequency.h b/src/common/cpu_frequency.h
index 7edc80037..6c1e0bc23 100644
--- a/src/common/cpu_frequency.h
+++ b/src/common/cpu_frequency.h
@@ -44,7 +44,7 @@
  * check if cpu frequency setting is allowed on this node
  * if so, create and initialize the cpu frequency table entry for each cpu
  */
-void
+extern void
 cpu_freq_init(slurmd_conf_t *conf);
 
 /*
@@ -53,16 +53,22 @@ cpu_freq_init(slurmd_conf_t *conf);
 extern void
 cpu_freq_fini(void);
 
+/*
+ * reset debug flag (slurmd)
+ */
+extern void
+cpu_freq_reconfig(void);
+
 /*
  * Send the cpu_frequency table info to slurmstepd
  */
-void
+extern void
 cpu_freq_send_info(int fd);
 
 /*
  * Receive the cpu_frequency table info from slurmd
  */
-void
+extern void
 cpu_freq_recv_info(int fd);
 
 /*
@@ -70,7 +76,7 @@ cpu_freq_recv_info(int fd);
  * Called from task cpuset code with job record containing
  *  a pointer to a hex map of the cpus to be used by this step
  */
-void
+extern void
 cpu_freq_cpuset_validate(stepd_step_rec_t *job);
 
 /*
@@ -78,35 +84,110 @@ cpu_freq_cpuset_validate(stepd_step_rec_t *job);
  * Called from task cgroup cpuset code with string containing
  *  the list of cpus to be used by this step
  */
-void
+extern void
 cpu_freq_cgroup_validate(stepd_step_rec_t *job, char *step_alloc_cores);
 
 /*
- * Verify cpu_freq parameter
+ * Verify slurm.conf CpuFreqGovernors list
  *
- * In addition to a numeric frequency value, we allow the user to specify
- * "low", "medium", "highm1", or "high" frequency plus "performance",
- * "powersave", "userspace" and "ondemand" governor
+ * Input:  - arg  - string list of governors
+ *	   - govs - pointer to composite of enum for each governor in list
+ * Returns - -1 on error, else 0
+ */
+extern int
+cpu_freq_verify_govlist(const char *arg, uint32_t *govs);
+
+/*
+ * Verify slurm.conf CpuFreqDef option
+ *
+ * Input:  - arg  - frequency value to check
+ * 		    valid governor, low, medium, highm1, high,
+ * 		    or numeric frequency
+ *	   - freq - pointer to corresponging enum or numberic value
+ * Returns - -1 on error, else 0
+ */
+extern int
+cpu_freq_verify_def(const char *arg, uint32_t *freq);
+
+/*
+ * Verify cpu_freq command line option
+ *
+ * --cpu-freq=arg
+ *   where arg is p1{-p2}{:p3}
+ *
+ * - p1 can be  [#### | low | medium | high | highm1]
+ * 	which will set the current and max cpu frequency, but not the governor.
+ * - p1 can be [Conservative | OnDemand | Performance | PowerSave]
+ *      which will set the governor to the corresponding value.
+ * - If p1 is the first case and is preceded with <, then the value of p1
+ *   becomes the max frequency and min is set to "low".
+ *   Similarly, if p1 is followed by > then p1 becomes the minimum frequency
+ *   and max is set to "high".
+ * - When p2 is present, p1 will be the minimum frequency and p2 will be
+ *   the maximum.
+ * - p2 can be  [#### | medium | high | highm1] p2 must be greater than p1.
+ * - If the current frequency is < min, it will be set to min.
+ *   Likewise, if the current frequency is > max, it will be set to max.
+ * - p3 can be [Conservative | OnDemand | Performance | PowerSave]
+ *   which will set the governor to the corresponding value.
  *
  * returns -1 on error, 0 otherwise
  */
-int
-cpu_freq_verify_param(const char *arg, uint32_t *cpu_freq);
+extern int
+cpu_freq_verify_cmdline(const char *arg,
+		uint32_t *cpu_freq_min,
+		uint32_t *cpu_freq_max,
+		uint32_t *cpu_freq_gov);
+
+/* Convert a composite cpu governor enum to its equivalent string
+ *
+ * Input:  - buf   - buffer to contain string
+ *         - bufsz - size of buffer
+ *         - gpvs  - composite enum of governors
+ */
+extern void
+cpu_freq_govlist_to_string(char* buf, uint16_t bufsz, uint32_t govs);
+
+/*
+ * Set environment variables associated with the frequency variables.
+ */
+extern int
+cpu_freq_set_env(char* var, uint32_t min, uint32_t max, uint32_t gov);
 
 /* Convert a cpu_freq number to its equivalent string */
-void
+extern void
 cpu_freq_to_string(char *buf, int buf_size, uint32_t cpu_freq);
 
+
 /*
  * set the userspace governor and the new frequency value
  */
-void
+extern void
 cpu_freq_set(stepd_step_rec_t *job);
 
 /*
  * reset the governor and cpu frequency to the configured values
  */
-void
+extern void
 cpu_freq_reset(stepd_step_rec_t *job);
 
+/*
+ * Convert frequency parameters to strings
+ * Typically called to produce string for a log or reporting utility.
+ *
+ *
+ * When label!=NULL, info message is put to log. This is convienient for
+ *      inserting debug calls to verify values in structures or messages.
+ * noval_str==NULL allows missing parameters not to be reported.
+ * freq_str is a buffer to hold the composite string for all input values.
+ * freq_len is length of freq_str
+ *
+ * Returns 0 if all parameters are NO_VAL (or 0)
+ */
+extern int
+cpu_freq_debug(char* label, char* noval_str, char* freq_str, int freq_len,
+		  uint32_t gov, uint32_t min, uint32_t max, uint32_t freq);
+
+
+
 #endif /* _CPU_FREQUENCY_H_ */
diff --git a/src/common/eio.c b/src/common/eio.c
index 76a9a684b..ba6a7efe7 100644
--- a/src/common/eio.c
+++ b/src/common/eio.c
@@ -71,10 +71,6 @@ strong_alias(eio_remove_obj,		slurm_eio_remove_obj);
 strong_alias(eio_signal_shutdown,	slurm_eio_signal_shutdown);
 strong_alias(eio_signal_wakeup,		slurm_eio_signal_wakeup);
 
-/* How many seconds to wait after eio_signal_shutdown() is called before
- * terminating the job and abandoning any I/O remaining to be processed */
-#define EIO_SHUTDOWN_WAIT 180
-
 /*
  * outside threads can stick new objects on the new_objs List and
  * the eio thread will move them to the main obj_list the next time
@@ -87,6 +83,7 @@ struct eio_handle_components {
 #endif
 	int  fds[2];
 	time_t shutdown_time;
+	uint16_t shutdown_wait;
 	List obj_list;
 	List new_objs;
 };
@@ -104,7 +101,7 @@ static void         _poll_handle_event(short revents, eio_obj_t *obj,
 		                       List objList);
 
 
-eio_handle_t *eio_handle_create(void)
+eio_handle_t *eio_handle_create(uint16_t shutdown_wait)
 {
 	eio_handle_t *eio = xmalloc(sizeof(*eio));
 
@@ -123,6 +120,10 @@ eio_handle_t *eio_handle_create(void)
 	eio->obj_list = list_create(eio_obj_destroy);
 	eio->new_objs = list_create(eio_obj_destroy);
 
+	eio->shutdown_wait = DEFAULT_EIO_SHUTDOWN_WAIT;
+	if (shutdown_wait > 0)
+		eio->shutdown_wait = shutdown_wait;
+
 	return eio;
 }
 
@@ -132,11 +133,8 @@ void eio_handle_destroy(eio_handle_t *eio)
 	xassert(eio->magic == EIO_MAGIC);
 	close(eio->fds[0]);
 	close(eio->fds[1]);
-	if (eio->obj_list)
-		list_destroy(eio->obj_list);
-
-	if (eio->new_objs)
-		list_destroy(eio->new_objs);
+	FREE_NULL_LIST(eio->obj_list);
+	FREE_NULL_LIST(eio->new_objs);
 
 	xassert(eio->magic = ~EIO_MAGIC);
 	xfree(eio);
@@ -215,7 +213,7 @@ again:
 	(*obj->ops->handle_msg)(obj->arg, msg); /* handle_msg should free
 					      * msg->data */
 cleanup:
-	if ((msg->conn_fd >= 0) && slurm_close_accepted_conn(msg->conn_fd) < 0)
+	if ((msg->conn_fd >= 0) && slurm_close(msg->conn_fd) < 0)
 		error ("close(%d): %m", msg->conn_fd);
 	slurm_free_msg(msg);
 
@@ -289,7 +287,7 @@ int eio_handle_mainloop(eio_handle_t *eio)
 		if (maxnfds < n) {
 			maxnfds = n;
 			xrealloc(pollfds, (maxnfds+1) * sizeof(struct pollfd));
-			xrealloc(map,     maxnfds     * sizeof(eio_obj_t *  ));
+			xrealloc(map, maxnfds * sizeof(eio_obj_t *));
 			/*
 			 * Note: xrealloc() also handles initial malloc
 			 */
@@ -317,13 +315,13 @@ int eio_handle_mainloop(eio_handle_t *eio)
 		if (pollfds[nfds-1].revents & POLLIN)
 			_eio_wakeup_handler(eio);
 
-		_poll_dispatch(pollfds, nfds-1, map, eio->obj_list);
+		_poll_dispatch(pollfds, nfds - 1, map, eio->obj_list);
 
-		if (eio->shutdown_time &&
-		    (difftime(time(NULL), eio->shutdown_time) >=
-		     EIO_SHUTDOWN_WAIT)) {
-			error("Abandoning IO %d secs after job shutdown "
-			      "initiated", EIO_SHUTDOWN_WAIT);
+		if (eio->shutdown_time
+		    && difftime(time(NULL), eio->shutdown_time)
+		    >= eio->shutdown_wait) {
+			error("%s: Abandoning IO %d secs after job shutdown "
+			      "initiated", __func__, eio->shutdown_wait);
 			break;
 		}
 	}
diff --git a/src/common/eio.h b/src/common/eio.h
index c019bfa61..d5e266ea9 100644
--- a/src/common/eio.h
+++ b/src/common/eio.h
@@ -69,7 +69,7 @@ struct eio_obj {
 	bool shutdown;
 };
 
-eio_handle_t *eio_handle_create(void);
+eio_handle_t *eio_handle_create(uint16_t);
 void eio_handle_destroy(eio_handle_t *eio);
 
 /*
diff --git a/src/common/entity.c b/src/common/entity.c
index d8587a078..c7e31ddfb 100644
--- a/src/common/entity.c
+++ b/src/common/entity.c
@@ -35,6 +35,11 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
+#include "string.h"
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+
 #include "src/common/entity.h"
 #include "src/common/layout.h"
 #include "src/common/xmalloc.h"
@@ -43,7 +48,7 @@
 
 
 /*****************************************************************************\
- *                                 FUNCTIONS                                 *
+ *                                 HELPERS                                   *
 \*****************************************************************************/
 
 static const char* _entity_data_identify(void* item)
@@ -52,6 +57,15 @@ static const char* _entity_data_identify(void* item)
 	return data_item->key;
 }
 
+static void _entity_data_destroy(void* x)
+{
+	entity_data_t* entity_data = (entity_data_t*)x;
+	if (entity_data) {
+		xfree(entity_data->value);
+		xfree(entity_data);
+	}
+}
+
 static void _entity_node_destroy(void* x)
 {
 	entity_node_t* entity_node = (entity_node_t*)x;
@@ -61,11 +75,58 @@ static void _entity_node_destroy(void* x)
 	}
 }
 
+static int _entity_add_data(const entity_t* entity, const char* key,
+			    void* value, size_t size,
+			    void (*_free)(void*), bool byreference)
+{
+	entity_data_t* result;
+	entity_data_t* new_data_item;
+
+	if (!key || !*key || !value)
+		return SLURM_ERROR;
+
+	result = (entity_data_t*)xhash_get(entity->data, key);
+	if (result != NULL) {
+		/* update existing value by ref or by override */
+		if (byreference) {
+			if (_free)
+				_free(result->value);
+			result->value = value;
+		} else {
+			memcpy(result->value, value, size);
+		}
+		return SLURM_SUCCESS;
+	}
+
+	/* add a new KV if not already existing, by ref or allocating
+	 * a new buffer and dumping the provided input */
+	new_data_item = (entity_data_t*)xmalloc(sizeof(entity_data_t));
+	new_data_item->key = key;
+	if (byreference) {
+		new_data_item->value = value;
+	} else {
+		new_data_item->value = (void*) xmalloc(size);
+		memcpy(new_data_item->value, value, size);
+	}
+	result = xhash_add(entity->data, new_data_item);
+	if (result == NULL) {
+		xfree(new_data_item);
+		return SLURM_ERROR;
+	}
+	return SLURM_SUCCESS;
+}
+
+/*****************************************************************************\
+ *                                 FUNCTIONS                                 *
+\*****************************************************************************/
+
 void entity_init(entity_t* entity, const char* name, const char* type)
 {
 	entity->name = xstrdup(name);
 	entity->type = xstrdup(type);
-	entity->data = xhash_init(_entity_data_identify, NULL, NULL, 0);
+	entity->data = xhash_init(_entity_data_identify,
+				  (xhash_freefunc_t)_entity_data_destroy,
+				  NULL, 0);
 	entity->nodes = list_create(_entity_node_destroy);
 	entity->ptr = NULL;
 }
@@ -90,38 +151,37 @@ const char* entity_get_type(const entity_t* entity)
 	return entity->type;
 }
 
-void** entity_get_data(const entity_t* entity, const char* key)
+int entity_get_data(const entity_t* entity, const char* key,
+		    void* value, size_t size)
+{
+	void* data = NULL;
+	data = entity_get_data_ref(entity, key);
+	if (data != NULL) {
+		memcpy(value, data, size);
+		return SLURM_SUCCESS;
+	}
+	return SLURM_ERROR;
+}
+
+void* entity_get_data_ref(const entity_t* entity, const char* key)
 {
 	entity_data_t* data = (entity_data_t*)xhash_get(entity->data, key);
 	if (data) {
-		return &data->value;
+		return data->value;
 	}
 	return NULL;
 }
 
-int entity_add_data(entity_t* entity, const char* key, void* value,
-		    void (*_free)(void*))
+int entity_set_data(const entity_t* entity, const char* key,
+		    void* value, size_t size)
 {
-	entity_data_t* result;
-	entity_data_t* new_data_item;
-	if (!key || !*key || !value)
-		return 0;
-	result = (entity_data_t*)xhash_get(entity->data, key);
-	if (result != NULL) {
-		if (_free)
-			_free(result->value);
-		result->value = value;
-		return 1;
-	}
-	new_data_item = (entity_data_t*)xmalloc(sizeof(entity_data_t));
-	new_data_item->key = key;
-	new_data_item->value = value;
-	result = xhash_add(entity->data, new_data_item);
-	if (result == NULL) {
-		xfree(new_data_item);
-		return 0;
-	}
-	return 1;
+	return _entity_add_data(entity, key, value, size, NULL, false);
+}
+
+int entity_set_data_ref(const entity_t* entity, const char* key, void* value,
+			void (*_free)(void*))
+{
+	return _entity_add_data(entity, key, value, 0, _free, true);
 }
 
 void entity_delete_data(entity_t* entity, const char* key)
@@ -134,47 +194,76 @@ void entity_clear_data(entity_t* entity)
 	xhash_clear(entity->data);
 }
 
-void entity_add_node(entity_t* entity, layout_t* layout, void* node)
+entity_node_t* entity_add_node(entity_t* entity, layout_t* layout)
 {
 
 	entity_node_t* entity_node = (entity_node_t*)xmalloc(
 		sizeof(entity_node_t));
 	entity_node->layout = layout;
-	entity_node->node = node;
-	list_append(entity->nodes, entity_node);
+	entity_node->entity = entity;
+	entity_node->node = NULL;
+	entity_node = list_append(entity->nodes, entity_node);
+	return entity_node;
 }
 
-static int _entity_node_find(void* x, void* key)
+typedef struct _entity_get_node_walk_st {
+	layout_t* layout;
+	entity_node_t* node;
+} _entity_get_node_walk_t;
+
+static void _entity_get_node_walkfunc(layout_t* layout,
+				      entity_node_t* node, void* arg)
 {
-	entity_node_t* entity_node = (entity_node_t*)x;
-	return entity_node->node == key;
+	_entity_get_node_walk_t* real_arg =
+		(_entity_get_node_walk_t*) arg;
+	/* Note that if multiple nodes of the same layout are added
+	 * to a single entity, the last one will be returned.
+	 * An entity MUST NOT be added more than once /!\ */
+	if (layout == real_arg->layout) {
+		real_arg->node = node;
+	}
 }
 
-void entity_delete_node(entity_t* entity, void* node)
+entity_node_t* entity_get_node(entity_t* entity, layout_t* layout)
 {
-	ListIterator i = list_iterator_create(entity->nodes);
-	if (list_find(i, _entity_node_find, node))
-		list_delete_item(i);
-	list_iterator_destroy(i);
+	_entity_get_node_walk_t arg;
+	arg.layout = layout;
+	arg.node = NULL;
+	entity_nodes_walk(entity, _entity_get_node_walkfunc, (void*) &arg);
+	return arg.node;
 }
 
-void entity_clear_nodes(entity_t* entity)
+static int _entity_node_find(void* x, void* key)
 {
-	list_flush(entity->nodes);
+	entity_node_t* entity_node = (entity_node_t*)x;
+	return entity_node->node == key;
 }
 
-int entity_has_node(entity_t* entity, void* node)
+int entity_delete_node(entity_t* entity, layout_t* layout)
 {
+	int rc = SLURM_ERROR;
+	entity_node_t* node;
 	ListIterator i;
-	void* result;
+	node = entity_get_node(entity, layout);
+	if (node == NULL)
+		return rc;
 	i = list_iterator_create(entity->nodes);
-	result = list_find(i, _entity_node_find, node);
+	if (list_find(i, _entity_node_find, node)) {
+		list_delete_item(i);
+		rc = SLURM_SUCCESS;
+	}
 	list_iterator_destroy(i);
-	return result != NULL;
+	return rc;
+}
+
+int entity_clear_nodes(entity_t* entity)
+{
+	list_flush(entity->nodes);
+	return SLURM_SUCCESS;
 }
 
 typedef struct _entity_nodes_walkstruct_st {
-	void (*callback)(layout_t* layout, void* node, void* arg);
+	void (*callback)(layout_t* layout, entity_node_t* node, void* arg);
 	void* arg;
 } _entity_nodes_walkstruct_t;
 
@@ -184,14 +273,14 @@ static int _entity_nodes_walkfunc(void* x, void* arg)
 	_entity_nodes_walkstruct_t* real_arg =
 		(_entity_nodes_walkstruct_t*)arg;
 	real_arg->callback(entity_node->layout,
-			   entity_node->node,
+			   entity_node,
 			   real_arg->arg);
 	return 0;
 }
 
 void entity_nodes_walk(entity_t* entity,
 		       void (*callback)(layout_t* layout,
-					void* node,
+					entity_node_t* node,
 					void* arg),
 		       void* arg)
 {
diff --git a/src/common/entity.h b/src/common/entity.h
index 03650a6af..c43c10be1 100644
--- a/src/common/entity.h
+++ b/src/common/entity.h
@@ -67,9 +67,10 @@ typedef struct entity_data_st {
  * to represent the layout nodes that are linked to them */
 typedef struct entity_node_st {
 	layout_t* layout; /* layout containing a relationnal structure holding
-			   * a reference to this entity */
+			   * a reference to the entity */
+	entity_t* entity; /* pointer to the associated entity */
 	void* node;       /* pointer to the relational node referencing
-			     this entity */
+			     this entity node */
 } entity_node_t;
 
 /*****************************************************************************\
@@ -111,7 +112,21 @@ const char* entity_get_name(const entity_t* entity);
 const char* entity_get_type(const entity_t* entity);
 
 /*
- * entity_get_data - get the address of the pointer to the data associated
+ * entity_get_data - copy the content of the data associated to a particular key
+ *       of an entity into a buffer up to the requested size
+ *
+ * IN entity - the entity struct to use
+ * IN key - the targeted key
+ * IN value - ponter to the mem area to fill
+ * IN size - size of the mem area to copy
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR if no element found
+ */
+int entity_get_data(const entity_t* entity, const char* key,
+		    void* value, size_t size);
+
+/*
+ * entity_get_data_ref - get the address of the pointer to the data associated
  *       with a particular key of an entity
  *
  * IN entity - the entity struct to use
@@ -120,10 +135,28 @@ const char* entity_get_type(const entity_t* entity);
  * Return value is the address of the (void*) pointer to the data associated to
  *       the key or NULL in case of error
  */
-void** entity_get_data(const entity_t* entity, const char* key);
+void* entity_get_data_ref(const entity_t* entity, const char* key);
 
 /*
- * entity_add_data - associate data to a particular key of an entity
+ * entity_set_data - copy the content of the input buffer up to the requested
+ *       size into the the buffer associated to a particular key of an entity
+ *       (note that the entity key value's buffer is allocated internally if
+ *       necessary)
+ *
+ * IN entity - the entity struct to use
+ * IN key - the targeted key
+ * IN value - ponter to the mem area to fill with
+ * IN size - size of the mem area to copy
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR if no element found
+ */
+int entity_set_data(const entity_t* entity, const char* key,
+		    void* value, size_t size);
+
+/*
+ * entity_set_data_ref - associate a particular key of an entity with the
+ *       input buffer, 
+ *       with a particular key of an entity
  *
  * IN entity - the entity struct to use
  * IN key - the key the data must be associated to
@@ -132,10 +165,10 @@ void** entity_get_data(const entity_t* entity, const char* key);
  * IN _free - a function to apply on the former value in case it exists
  *       before overriding
  *
- * Return 1 if the value was successfully associated or 0 otherwise
+ * Return SLURM_SUCCESS or SLURM_ERROR in case of error
  */
-int entity_add_data(entity_t* entity, const char* key, void* value,
-		    void (*_free)(void*));
+int entity_set_data_ref(const entity_t* entity, const char* key, void* value,
+			void (*_free)(void*));
 
 /*
  * entity_delete_data - delete the data associated with a particular key
@@ -157,48 +190,49 @@ void entity_delete_data(entity_t* entity, const char* key);
 void entity_clear_data(entity_t* entity);
 
 /*
- * entity_add_node - add a relational node to the list of nodes referring to
- *       this entity
+ * entity_add_node - add a per layout entity node to the list of nodes referring
+ *       to this entity
  *
  * IN entity - the entity struct to use
- * IN layout - the layout having a node referring to this entity
- * IN node - the node referring to it
+ * IN layout - the layout to create an entity node referring to this entity
  *
- * Notes: the entity does not own the memory of the relationnal nodes.
+ * Notes: - the returned entity_node does not point to anything at that point.
+ *          it will be added to a relational structure and will then have to 
+ *          be associated to the underlying relational node afterwards.
+ *        - the entity node will not own the memory of the relationnal node.
  */
-void entity_add_node(entity_t* entity, layout_t* layout, void* node);
+entity_node_t* entity_add_node(entity_t* entity, layout_t* layout);
 
 /*
- * entity_delete_node - remove a relational node from the list of nodes
- *       referring to this entity
+ * entity_get_node - get the entity node referring to a particular layout in
+ *       the list of entity nodes associated to an entity.
  *
  * IN entity - the entity struct to use
- * IN node - the node referring to it
+ * IN layout - the layout having an entity node referring to this entity
  *
- * Notes: the memory of the node data is not freed.
+ * Return value is the entity node of the layout or NULL if not found
  */
-void entity_delete_node(entity_t* entity, void* node);
+entity_node_t* entity_get_node(entity_t* entity, layout_t* layout);
 
 /*
- * entity_clear_nodes - remove all the relational node from the list of nodes
- *       referring to this entity
+ * entity_delete_node - remove the entity node referring to a particular layout
+ *       from the list of entity nodes associated to an entity
  *
  * IN entity - the entity struct to use
+ * IN layout - the layout having an entity node referring to this entity
  *
- * Notes: the memory of the nodes data is not freed.
+ * Return SLURM_SUCCESS or SLURM_ERROR
  */
-void entity_clear_nodes(entity_t* entity);
+int entity_delete_node(entity_t* entity, layout_t* layout);
 
 /*
- * entity_has_node - check wether or not a relational node is in the
- *       list of nodes referring to this entity
+ * entity_clear_nodes - remove all the entity node associated to an entity
  *
  * IN entity - the entity struct to use
- * IN node - the node to check for
  *
- * Return 1 if found, 0 otherwise
+ * Return SLURM_SUCCESS or SLURM_ERROR
  */
-int entity_has_node(entity_t* entity, void* node);
+int entity_clear_nodes(entity_t* entity);
 
 /*
  * entity_nodes_walk - iterate over the nodes referring to this entity
@@ -213,7 +247,7 @@ int entity_has_node(entity_t* entity, void* node);
  * IN arg - the arg to pass to the callback function for every node.
  */
 void entity_nodes_walk(entity_t* entity,
-		       void (*callback)(layout_t*, void*, void*),
+		       void (*callback)(layout_t*, entity_node_t*, void*),
 		       void* arg);
 
 /*
diff --git a/src/common/env.c b/src/common/env.c
index 6b2a0814f..744f25ddb 100644
--- a/src/common/env.c
+++ b/src/common/env.c
@@ -417,7 +417,7 @@ int setup_env(env_t *env, bool preserve_env)
 			rc = SLURM_FAILURE;
 		}
 
-	if (env->distribution == SLURM_DIST_PLANE)
+	if ((env->distribution & SLURM_DIST_STATE_BASE) == SLURM_DIST_PLANE)
 		if (setenvf(&env->env, "SLURM_DIST_PLANESIZE", "%u",
 			    env->plane_size)) {
 			error("Can't set SLURM_DIST_PLANESIZE "
@@ -631,24 +631,9 @@ int setup_env(env_t *env, bool preserve_env)
 		}
 	}
 
-	if ((env->cpu_freq != NO_VAL) && /* Default value from srun */
-	    (env->cpu_freq != 0)) {      /* Default value from slurmstepd
-					  * for batch jobs */
-		int sts;
-
-		if (env->cpu_freq & CPU_FREQ_RANGE_FLAG) {
-			char buf[32];
-			cpu_freq_to_string(buf, sizeof(buf), env->cpu_freq);
-			sts = setenvf(&env->env, "SLURM_CPU_FREQ_REQ", buf);
-		} else {
-			sts = setenvf(&env->env, "SLURM_CPU_FREQ_REQ", "%u",
-				      env->cpu_freq);
-		}
-		if (sts) {
-			error("Unable to set SLURM_CPU_FREQ_REQ");
-			rc = SLURM_FAILURE;
-		}
-	}
+	if (cpu_freq_set_env("SLURM_CPU_FREQ_REQ", env->cpu_freq_min,
+			env->cpu_freq_max, env->cpu_freq_gov) != SLURM_SUCCESS)
+		rc = SLURM_FAILURE;
 
 	if (env->overcommit
 	    && (setenvf(&env->env, "SLURM_OVERCOMMIT", "1"))) {
@@ -856,6 +841,37 @@ int setup_env(env_t *env, bool preserve_env)
 		}
 	}
 
+	if (env->account) {
+		if (setenvf(&env->env,
+			    "SLURM_JOB_ACCOUNT",
+			    "%s",
+			    env->account)) {
+			error("%s: can't set SLURM_JOB_ACCOUNT env variable",
+			      __func__);
+			rc = SLURM_FAILURE;
+		}
+	}
+	if (env->qos) {
+		if (setenvf(&env->env,
+			    "SLURM_JOB_QOS",
+			    "%s",
+			    env->qos)) {
+			error("%s: can't set SLURM_JOB_QOS env variable",
+				__func__);
+			rc = SLURM_FAILURE;
+		}
+	}
+	if (env->resv_name) {
+		if (setenvf(&env->env,
+			    "SLURM_JOB_RESERVATION",
+			    "%s",
+			    env->resv_name)) {
+			error("%s: can't set SLURM_JOB_RESERVATION env variable",
+				__func__);
+			rc = SLURM_FAILURE;
+		}
+	}
+
 	return rc;
 }
 
@@ -972,9 +988,10 @@ env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc,
 {
 	char *tmp = NULL;
 	char *dist = NULL, *lllp_dist = NULL;
+	char *key, *value;
 	slurm_step_layout_t *step_layout = NULL;
 	uint32_t num_tasks = desc->num_tasks;
-	int rc = SLURM_SUCCESS;
+	int i, rc = SLURM_SUCCESS;
 	uint32_t node_cnt = alloc->node_cnt;
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 
@@ -1006,7 +1023,7 @@ env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc,
 		env_array_overwrite_fmt(dest, "SLURM_DISTRIBUTION", "%s",
 					dist);
 
-	if (desc->task_dist == SLURM_DIST_PLANE)
+	if ((desc->task_dist & SLURM_DIST_STATE_BASE) == SLURM_DIST_PLANE)
 		env_array_overwrite_fmt(dest, "SLURM_DIST_PLANESIZE",
 					"%u", desc->plane_size);
 
@@ -1054,7 +1071,7 @@ env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc,
 		//num_tasks = desc->min_cpus;
 	}
 
-	if (desc->task_dist == SLURM_DIST_ARBITRARY) {
+	if ((desc->task_dist & SLURM_DIST_STATE_BASE) == SLURM_DIST_ARBITRARY) {
 		tmp = desc->req_nodes;
 		env_array_overwrite_fmt(dest, "SLURM_ARBITRARY_NODELIST",
 					"%s", tmp);
@@ -1076,6 +1093,21 @@ env_array_for_job(char ***dest, const resource_allocation_response_msg_t *alloc,
 	slurm_step_layout_destroy(step_layout);
 	env_array_overwrite_fmt(dest, "SLURM_TASKS_PER_NODE", "%s", tmp);
 	xfree(tmp);
+
+	if (alloc->env_size) {	/* Used to set Burst Buffer environment */
+		for (i = 0; i < alloc->env_size; i++) {
+			tmp = xstrdup(alloc->environment[i]);
+			key = tmp;
+			value = strchr(tmp, '=');
+			if (value) {
+				value[0] = '\0';
+				value++;
+				env_array_overwrite(dest, key, value);
+			}
+			xfree(tmp);
+		}
+	}
+
 	return rc;
 }
 
@@ -1115,7 +1147,7 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
 	slurm_step_layout_t *step_layout = NULL;
 	uint32_t num_tasks = batch->ntasks;
 	uint16_t cpus_per_task;
-	uint16_t task_dist;
+	uint32_t task_dist;
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 
 	_setup_particulars(cluster_flags, dest, batch->select_jobinfo);
@@ -1215,8 +1247,32 @@ env_array_for_batch_job(char ***dest, const batch_job_launch_msg_t *batch,
 					tmp_mem);
 	}
 
-	return SLURM_SUCCESS;
+	/* Set the SLURM_JOB_ACCOUNT,  SLURM_JOB_QOS
+	 * and SLURM_JOB_RESERVATION if set by
+	 * the controller.
+	 */
+	if (batch->account) {
+		env_array_overwrite_fmt(dest,
+					"SLURM_JOB_ACCOUNT",
+					"%s",
+					batch->account);
+	}
+
+	if (batch->qos) {
+		env_array_overwrite_fmt(dest,
+					"SLURM_JOB_QOS",
+					"%s",
+					batch->qos);
+	}
+
+	if (batch->resv_name) {
+		env_array_overwrite_fmt(dest,
+					"SLURM_JOB_RESERVATION",
+					"%s",
+					batch->resv_name);
+	}
 
+	return SLURM_SUCCESS;
 }
 
 /*
@@ -1634,6 +1690,32 @@ void env_array_merge_slurm(char ***dest_array, const char **src_array)
 	xfree(value);
 }
 
+/*
+ * Merge all of the environment variables in src_array into the array
+ * dest_array and strip any header names of "SPANK_".  Any variables already
+ * found in dest_array will be overwritten with the value from src_array.
+ */
+void env_array_merge_spank(char ***dest_array, const char **src_array)
+{
+	char **ptr;
+	char name[256], *value;
+
+	if (src_array == NULL)
+		return;
+
+	value = xmalloc(ENV_BUFSIZE);
+	for (ptr = (char **)src_array; *ptr != NULL; ptr++) {
+		if (_env_array_entry_splitter(*ptr, name, sizeof(name),
+					      value, ENV_BUFSIZE)) {
+			if (strncmp(name, "SPANK_" ,6))
+				env_array_overwrite(dest_array, name, value);
+			else
+				env_array_overwrite(dest_array, name+6, value);
+		}
+	}
+	xfree(value);
+}
+
 /*
  * Strip out trailing carriage returns and newlines
  */
diff --git a/src/common/env.h b/src/common/env.h
index 140fb0bde..642f38e70 100644
--- a/src/common/env.h
+++ b/src/common/env.h
@@ -45,7 +45,9 @@ typedef struct env_options {
 	cpu_bind_type_t
 		cpu_bind_type;	/* --cpu_bind=			*/
 	char *cpu_bind;		/* binding map for map/mask_cpu	*/
-	uint32_t cpu_freq;	/* cpu_frequency requested	*/
+	uint32_t cpu_freq_min;  /* Minimum cpu frequency  */
+	uint32_t cpu_freq_max;  /* Maximum cpu frequency  */
+	uint32_t cpu_freq_gov;  /* cpu frequency governor */
 	mem_bind_type_t
 		mem_bind_type;	/* --mem_bind=			*/
 	char *mem_bind;		/* binding map for tasks to memory	*/
@@ -81,6 +83,9 @@ typedef struct env_options {
 	uint16_t batch_flag;	/* 1 if batch: queued job with script */
 	uint32_t uid;		/* user ID */
 	char *user_name;	/* user name */
+	char *account;          /* job's account */
+	char *qos;              /* job's qos */
+	char *resv_name;        /* job's reservation */
 } env_t;
 
 
@@ -205,6 +210,13 @@ void env_array_merge(char ***dest_array, const char **src_array);
  */
 void env_array_merge_slurm(char ***dest_array, const char **src_array);
 
+/*
+ * Merge all of the environment variables in src_array into the array
+ * dest_array and strip any header names of "SPANK_".  Any variables already
+ * found in dest_array will be overwritten with the value from src_array.
+ */
+void env_array_merge_spank(char ***dest_array, const char **src_array);
+
 /*
  * Copy env_array must be freed by env_array_free
  */
diff --git a/src/common/forward.c b/src/common/forward.c
index 88b91f8e4..6bd2f2f2b 100644
--- a/src/common/forward.c
+++ b/src/common/forward.c
@@ -73,6 +73,14 @@ typedef struct {
 	pthread_mutex_t *tree_mutex;
 } fwd_tree_t;
 
+static void _start_msg_tree_internal(hostlist_t hl, hostlist_t* sp_hl,
+				     fwd_tree_t *fwd_tree_in,
+				     int hl_count);
+static void _forward_msg_internal(hostlist_t hl, hostlist_t* sp_hl,
+				  forward_struct_t *fwd_struct,
+				  header_t *header, int timeout,
+				  int hl_count);
+
 void _destroy_tree_fwd(fwd_tree_t *fwd_tree)
 {
 	if (fwd_tree) {
@@ -95,6 +103,7 @@ void _destroy_tree_fwd(fwd_tree_t *fwd_tree)
 void *_forward_thread(void *arg)
 {
 	forward_msg_t *fwd_msg = (forward_msg_t *)arg;
+	forward_struct_t *fwd_struct = fwd_msg->fwd_struct;
 	Buf buffer = init_buf(BUF_SIZE);	/* probably enough for header */
 	List ret_list = NULL;
 	slurm_fd_t fd = -1;
@@ -111,12 +120,12 @@ void *_forward_thread(void *arg)
 		if (slurm_conf_get_addr(name, &addr) == SLURM_ERROR) {
 			error("forward_thread: can't find address for host "
 			      "%s, check slurm.conf", name);
-			slurm_mutex_lock(fwd_msg->forward_mutex);
-			mark_as_failed_forward(&fwd_msg->ret_list, name,
+			slurm_mutex_lock(&fwd_struct->forward_mutex);
+			mark_as_failed_forward(&fwd_struct->ret_list, name,
 					       SLURM_UNKNOWN_FORWARD_ADDR);
  			free(name);
 			if (hostlist_count(hl) > 0) {
-				slurm_mutex_unlock(fwd_msg->forward_mutex);
+				slurm_mutex_unlock(&fwd_struct->forward_mutex);
 				continue;
 			}
 			goto cleanup;
@@ -124,13 +133,21 @@ void *_forward_thread(void *arg)
 		if ((fd = slurm_open_msg_conn(&addr)) < 0) {
 			error("forward_thread to %s: %m", name);
 
-			slurm_mutex_lock(fwd_msg->forward_mutex);
+			slurm_mutex_lock(&fwd_struct->forward_mutex);
 			mark_as_failed_forward(
-				&fwd_msg->ret_list, name,
+				&fwd_struct->ret_list, name,
 				SLURM_COMMUNICATIONS_CONNECTION_ERROR);
 			free(name);
 			if (hostlist_count(hl) > 0) {
-				slurm_mutex_unlock(fwd_msg->forward_mutex);
+				slurm_mutex_unlock(&fwd_struct->forward_mutex);
+				/* Abandon tree. This way if all the
+				 * nodes in the branch are down we
+				 * don't have to time out for each
+				 * node serially.
+				 */
+				_forward_msg_internal(hl, NULL, fwd_struct,
+						      &fwd_msg->header, 0,
+						      hostlist_count(hl));
 				continue;
 			}
 			goto cleanup;
@@ -154,37 +171,45 @@ void *_forward_thread(void *arg)
 		pack_header(&fwd_msg->header, buffer);
 
 		/* add forward data to buffer */
-		if (remaining_buf(buffer) < fwd_msg->buf_len) {
-			int new_size = buffer->processed + fwd_msg->buf_len;
+		if (remaining_buf(buffer) < fwd_struct->buf_len) {
+			int new_size = buffer->processed + fwd_struct->buf_len;
 			new_size += 1024; /* padded for paranoia */
 			xrealloc_nz(buffer->head, new_size);
 			buffer->size = new_size;
 		}
-		if (fwd_msg->buf_len) {
+		if (fwd_struct->buf_len) {
 			memcpy(&buffer->head[buffer->processed],
-			       fwd_msg->buf, fwd_msg->buf_len);
-			buffer->processed += fwd_msg->buf_len;
+			       fwd_struct->buf, fwd_struct->buf_len);
+			buffer->processed += fwd_struct->buf_len;
 		}
 
 		/*
 		 * forward message
 		 */
-		if (_slurm_msg_sendto(fd,
+		if (slurm_msg_sendto(fd,
 				     get_buf_data(buffer),
 				     get_buf_offset(buffer),
 				     SLURM_PROTOCOL_NO_SEND_RECV_FLAGS ) < 0) {
 			error("forward_thread: slurm_msg_sendto: %m");
 
-			slurm_mutex_lock(fwd_msg->forward_mutex);
-			mark_as_failed_forward(&fwd_msg->ret_list, name,
+			slurm_mutex_lock(&fwd_struct->forward_mutex);
+			mark_as_failed_forward(&fwd_struct->ret_list, name,
 					       errno);
 			free(name);
 			if (hostlist_count(hl) > 0) {
 				free_buf(buffer);
-				buffer = init_buf(fwd_msg->buf_len);
-				slurm_mutex_unlock(fwd_msg->forward_mutex);
-				slurm_close_accepted_conn(fd);
+				buffer = init_buf(fwd_struct->buf_len);
+				slurm_mutex_unlock(&fwd_struct->forward_mutex);
+				slurm_close(fd);
 				fd = -1;
+				/* Abandon tree. This way if all the
+				 * nodes in the branch are down we
+				 * don't have to time out for each
+				 * node serially.
+				 */
+				_forward_msg_internal(hl, NULL, fwd_struct,
+						      &fwd_msg->header, 0,
+						      hostlist_count(hl));
 				continue;
 			}
 			goto cleanup;
@@ -193,15 +218,15 @@ void *_forward_thread(void *arg)
 		if ((fwd_msg->header.msg_type == REQUEST_SHUTDOWN) ||
 		    (fwd_msg->header.msg_type == REQUEST_RECONFIGURE) ||
 		    (fwd_msg->header.msg_type == REQUEST_REBOOT_NODES)) {
-			slurm_mutex_lock(fwd_msg->forward_mutex);
+			slurm_mutex_lock(&fwd_struct->forward_mutex);
 			ret_data_info = xmalloc(sizeof(ret_data_info_t));
-			list_push(fwd_msg->ret_list, ret_data_info);
+			list_push(fwd_struct->ret_list, ret_data_info);
 			ret_data_info->node_name = xstrdup(name);
 			free(name);
 			while ((name = hostlist_shift(hl))) {
 				ret_data_info =
 					xmalloc(sizeof(ret_data_info_t));
-				list_push(fwd_msg->ret_list, ret_data_info);
+				list_push(fwd_struct->ret_list, ret_data_info);
 				ret_data_info->node_name = xstrdup(name);
 				free(name);
 			}
@@ -216,29 +241,30 @@ void *_forward_thread(void *arg)
 			steps = (fwd_msg->header.forward.cnt+1) /
 				slurm_get_tree_width();
 			fwd_msg->timeout = (message_timeout*steps);
-/* 			info("got %d * %d = %d", message_timeout, steps, fwd_msg->timeout); */
+			/* info("got %d * %d = %d", message_timeout, */
+			/*      steps, fwd_msg->timeout); */
 			steps++;
 			fwd_msg->timeout += (start_timeout*steps);
-/* 			info("now  + %d*%d = %d", start_timeout, steps, fwd_msg->timeout); */
+			/* info("now  + %d*%d = %d", start_timeout, */
+			/*      steps, fwd_msg->timeout); */
 		}
 
 		ret_list = slurm_receive_msgs(fd, steps, fwd_msg->timeout);
 		/* info("sent %d forwards got %d back", */
-/* 		     fwd_msg->header.forward.cnt, list_count(ret_list)); */
+		/*      fwd_msg->header.forward.cnt, list_count(ret_list)); */
 
 		if (!ret_list || (fwd_msg->header.forward.cnt != 0
-				 && list_count(ret_list) <= 1)) {
-			slurm_mutex_lock(fwd_msg->forward_mutex);
-			mark_as_failed_forward(&fwd_msg->ret_list, name,
+				  && list_count(ret_list) <= 1)) {
+			slurm_mutex_lock(&fwd_struct->forward_mutex);
+			mark_as_failed_forward(&fwd_struct->ret_list, name,
 					       errno);
 			free(name);
-			if (ret_list)
-				list_destroy(ret_list);
+			FREE_NULL_LIST(ret_list);
 			if (hostlist_count(hl) > 0) {
 				free_buf(buffer);
-				buffer = init_buf(fwd_msg->buf_len);
-				slurm_mutex_unlock(fwd_msg->forward_mutex);
-				slurm_close_accepted_conn(fd);
+				buffer = init_buf(fwd_struct->buf_len);
+				slurm_mutex_unlock(&fwd_struct->forward_mutex);
+				slurm_close(fd);
 				fd = -1;
 				continue;
 			}
@@ -277,7 +303,7 @@ void *_forward_thread(void *arg)
 				list_iterator_destroy(itr);
 				if (!node_found) {
 					mark_as_failed_forward(
-						&fwd_msg->ret_list,
+						&fwd_struct->ret_list,
 						tmp,
 						SLURM_COMMUNICATIONS_CONNECTION_ERROR);
 				}
@@ -285,34 +311,36 @@ void *_forward_thread(void *arg)
 			}
 			hostlist_iterator_destroy(host_itr);
 			if (!first_node_found) {
-				mark_as_failed_forward(&fwd_msg->ret_list,
-						       name,
-						       SLURM_COMMUNICATIONS_CONNECTION_ERROR);
+				mark_as_failed_forward(
+					&fwd_struct->ret_list,
+					name,
+					SLURM_COMMUNICATIONS_CONNECTION_ERROR);
 			}
 		}
 		break;
 	}
-	slurm_mutex_lock(fwd_msg->forward_mutex);
+	slurm_mutex_lock(&fwd_struct->forward_mutex);
 	if (ret_list) {
 		while ((ret_data_info = list_pop(ret_list)) != NULL) {
 			if (!ret_data_info->node_name) {
 				ret_data_info->node_name = xstrdup(name);
 			}
-			list_push(fwd_msg->ret_list, ret_data_info);
+			list_push(fwd_struct->ret_list, ret_data_info);
 			debug3("got response from %s",
 			       ret_data_info->node_name);
 		}
-		list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 	}
 	free(name);
 cleanup:
-	if ((fd >= 0) && slurm_close_accepted_conn(fd) < 0)
+	if ((fd >= 0) && slurm_close(fd) < 0)
 		error ("close(%d): %m", fd);
 	hostlist_destroy(hl);
 	destroy_forward(&fwd_msg->header.forward);
 	free_buf(buffer);
-	pthread_cond_signal(fwd_msg->notify);
-	slurm_mutex_unlock(fwd_msg->forward_mutex);
+	pthread_cond_signal(&fwd_struct->notify);
+	slurm_mutex_unlock(&fwd_struct->forward_mutex);
+	xfree(fwd_msg);
 
 	return (NULL);
 }
@@ -399,10 +427,19 @@ void *_fwd_tree_thread(void *arg)
 			list_transfer(fwd_tree->ret_list, ret_list);
 			pthread_cond_signal(fwd_tree->notify);
 			slurm_mutex_unlock(fwd_tree->tree_mutex);
-			list_destroy(ret_list);
+			FREE_NULL_LIST(ret_list);
 			/* try next node */
 			if (ret_cnt <= send_msg.forward.cnt) {
 				free(name);
+				/* Abandon tree. This way if all the
+				 * nodes in the branch are down we
+				 * don't have to time out for each
+				 * node serially.
+				 */
+				_start_msg_tree_internal(
+					fwd_tree->tree_hl, NULL,
+					fwd_tree,
+					hostlist_count(fwd_tree->tree_hl));
 				continue;
 			}
 		} else {
@@ -437,6 +474,135 @@ void *_fwd_tree_thread(void *arg)
 	return NULL;
 }
 
+static void _start_msg_tree_internal(hostlist_t hl, hostlist_t* sp_hl,
+				     fwd_tree_t *fwd_tree_in,
+				     int hl_count)
+{
+	int j;
+	fwd_tree_t *fwd_tree;
+
+	xassert((hl || sp_hl) && !(hl && sp_hl));
+	xassert(fwd_tree_in);
+	xassert(fwd_tree_in->p_thr_count);
+	xassert(fwd_tree_in->tree_mutex);
+	xassert(fwd_tree_in->notify);
+	xassert(fwd_tree_in->ret_list);
+
+	if (hl)
+		xassert(hl_count == hostlist_count(hl));
+
+	if (fwd_tree_in->timeout <= 0)
+		/* convert secs to msec */
+		fwd_tree_in->timeout  = slurm_get_msg_timeout() * 1000;
+
+	for (j = 0; j < hl_count; j++) {
+		pthread_attr_t attr_agent;
+		pthread_t thread_agent;
+		int retries = 0;
+
+		slurm_attr_init(&attr_agent);
+		if (pthread_attr_setdetachstate
+		    (&attr_agent, PTHREAD_CREATE_DETACHED))
+			error("pthread_attr_setdetachstate error %m");
+
+		fwd_tree = xmalloc(sizeof(fwd_tree_t));
+		memcpy(fwd_tree, fwd_tree_in, sizeof(fwd_tree_t));
+
+		if (sp_hl) {
+			fwd_tree->tree_hl = sp_hl[j];
+			sp_hl[j] = NULL;
+		} else if (hl) {
+			char *name = hostlist_shift(hl);
+			fwd_tree->tree_hl = hostlist_create(name);
+			free(name);
+		}
+
+		/*
+		 * Lock and increase thread counter, we need that to protect
+		 * the start_msg_tree waiting loop that was originally designed
+		 * around a "while ((count < host_count))" loop. In case where a
+		 * fwd thread was not able to get all the return codes from
+		 * children, the waiting loop was deadlocked.
+		 */
+		slurm_mutex_lock(fwd_tree->tree_mutex);
+		(*fwd_tree->p_thr_count)++;
+		slurm_mutex_unlock(fwd_tree->tree_mutex);
+
+		while (pthread_create(&thread_agent, &attr_agent,
+				      _fwd_tree_thread, (void *)fwd_tree)) {
+			error("pthread_create error %m");
+			if (++retries > MAX_RETRIES)
+				fatal("Can't create pthread");
+			usleep(100000);	/* sleep and try again */
+		}
+		slurm_attr_destroy(&attr_agent);
+
+	}
+}
+
+static void _forward_msg_internal(hostlist_t hl, hostlist_t* sp_hl,
+				  forward_struct_t *fwd_struct,
+				  header_t *header, int timeout,
+				  int hl_count)
+{
+	int j;
+	forward_msg_t *fwd_msg = NULL;
+	char *buf = NULL, *tmp_char = NULL;
+	pthread_attr_t attr_agent;
+	pthread_t thread_agent;
+
+	if (timeout <= 0)
+		/* convert secs to msec */
+		timeout  = slurm_get_msg_timeout() * 1000;
+
+	for (j = 0; j < hl_count; j++) {
+		int retries = 0;
+
+		slurm_attr_init(&attr_agent);
+		if (pthread_attr_setdetachstate
+		    (&attr_agent, PTHREAD_CREATE_DETACHED))
+			error("pthread_attr_setdetachstate error %m");
+
+		fwd_msg = xmalloc(sizeof(forward_msg_t));
+
+		fwd_msg->fwd_struct = fwd_struct;
+
+		fwd_msg->timeout = timeout;
+
+		memcpy(&fwd_msg->header.orig_addr,
+		       &header->orig_addr,
+		       sizeof(slurm_addr_t));
+
+		fwd_msg->header.version = header->version;
+		fwd_msg->header.flags = header->flags;
+		fwd_msg->header.msg_type = header->msg_type;
+		fwd_msg->header.body_length = header->body_length;
+		fwd_msg->header.ret_list = NULL;
+		fwd_msg->header.ret_cnt = 0;
+
+		if (sp_hl) {
+			buf = hostlist_ranged_string_xmalloc(sp_hl[j]);
+			hostlist_destroy(sp_hl[j]);
+		} else {
+			tmp_char = hostlist_shift(hl);
+			buf = xstrdup(tmp_char);
+			free(tmp_char);
+		}
+
+		forward_init(&fwd_msg->header.forward, NULL);
+		fwd_msg->header.forward.nodelist = buf;
+		while (pthread_create(&thread_agent, &attr_agent,
+				     _forward_thread,
+				     (void *)fwd_msg)) {
+			error("pthread_create error %m");
+			if (++retries > MAX_RETRIES)
+				fatal("Can't create pthread");
+			usleep(100000);	/* sleep and try again */
+		}
+		slurm_attr_destroy(&attr_agent);
+	}
+}
+
 /*
  * forward_init    - initilize forward structure
  * IN: forward     - forward_t *   - struct to store forward info
@@ -471,13 +637,8 @@ extern void forward_init(forward_t *forward, forward_t *from)
  *                                             needing to be forwarded.
  * RET: SLURM_SUCCESS - int
  */
-extern int forward_msg(forward_struct_t *forward_struct,
-		       header_t *header)
+extern int forward_msg(forward_struct_t *forward_struct, header_t *header)
 {
-	int j = 0;
-	int retries = 0;
-	forward_msg_t *forward_msg = NULL;
-	int thr_count = 0;
 	hostlist_t hl = NULL;
 	hostlist_t* sp_hl;
 	int hl_count = 0;
@@ -494,58 +655,10 @@ extern int forward_msg(forward_struct_t *forward_struct,
 		hostlist_destroy(hl);
 		return SLURM_ERROR;
 	}
-	for (j = 0; j < hl_count; j++) {
-
-		pthread_attr_t attr_agent;
-		pthread_t thread_agent;
-		char *buf = NULL;
 
-		slurm_attr_init(&attr_agent);
-		if (pthread_attr_setdetachstate
-		    (&attr_agent, PTHREAD_CREATE_DETACHED))
-			error("pthread_attr_setdetachstate error %m");
+	_forward_msg_internal(NULL, sp_hl, forward_struct, header,
+			      forward_struct->timeout, hl_count);
 
-		forward_msg = &forward_struct->forward_msg[thr_count];
-		forward_msg->ret_list = forward_struct->ret_list;
-
-		forward_msg->timeout = forward_struct->timeout;
-
-		if (forward_msg->timeout <= 0) {
-			/* convert secs to msec */
-			forward_msg->timeout  = slurm_get_msg_timeout() * 1000;
-		}
-
-		forward_msg->notify = &forward_struct->notify;
-		forward_msg->forward_mutex = &forward_struct->forward_mutex;
-		forward_msg->buf_len = forward_struct->buf_len;
-		forward_msg->buf = forward_struct->buf;
-
-		memcpy(&forward_msg->header.orig_addr,
-		       &header->orig_addr,
-		       sizeof(slurm_addr_t));
-
-		forward_msg->header.version = header->version;
-		forward_msg->header.flags = header->flags;
-		forward_msg->header.msg_type = header->msg_type;
-		forward_msg->header.body_length = header->body_length;
-		forward_msg->header.ret_list = NULL;
-		forward_msg->header.ret_cnt = 0;
-
-		buf = hostlist_ranged_string_xmalloc(sp_hl[j]);
-		hostlist_destroy(sp_hl[j]);
-		forward_init(&forward_msg->header.forward, NULL);
-		forward_msg->header.forward.nodelist = buf;
-		while (pthread_create(&thread_agent, &attr_agent,
-				     _forward_thread,
-				     (void *)forward_msg)) {
-			error("pthread_create error %m");
-			if (++retries > MAX_RETRIES)
-				fatal("Can't create pthread");
-			usleep(100000);	/* sleep and try again */
-		}
-		slurm_attr_destroy(&attr_agent);
-		thr_count++;
-	}
 	xfree(sp_hl);
 	hostlist_destroy(hl);
 	return SLURM_SUCCESS;
@@ -565,10 +678,10 @@ extern int forward_msg(forward_struct_t *forward_struct,
  */
 extern List start_msg_tree(hostlist_t hl, slurm_msg_t *msg, int timeout)
 {
-	fwd_tree_t *fwd_tree = NULL;
+	fwd_tree_t fwd_tree;
 	pthread_mutex_t tree_mutex;
 	pthread_cond_t notify;
-	int j = 0, count = 0;
+	int count = 0;
 	List ret_list = NULL;
 	int thr_count = 0;
 	int host_count = 0;
@@ -590,53 +703,16 @@ extern List start_msg_tree(hostlist_t hl, slurm_msg_t *msg, int timeout)
 
 	ret_list = list_create(destroy_data_info);
 
-	for (j = 0; j < hl_count; j++) {
-		pthread_attr_t attr_agent;
-		pthread_t thread_agent;
-		int retries = 0;
+	memset(&fwd_tree, 0, sizeof(fwd_tree));
+	fwd_tree.orig_msg = msg;
+	fwd_tree.ret_list = ret_list;
+	fwd_tree.timeout = timeout;
+	fwd_tree.notify = &notify;
+	fwd_tree.p_thr_count = &thr_count;
+	fwd_tree.tree_mutex = &tree_mutex;
 
-		slurm_attr_init(&attr_agent);
-		if (pthread_attr_setdetachstate
-		    (&attr_agent, PTHREAD_CREATE_DETACHED))
-			error("pthread_attr_setdetachstate error %m");
-
-		fwd_tree = xmalloc(sizeof(fwd_tree_t));
-		fwd_tree->orig_msg = msg;
-		fwd_tree->ret_list = ret_list;
-		fwd_tree->timeout = timeout;
-		fwd_tree->notify = &notify;
-		fwd_tree->p_thr_count = &thr_count;
-		fwd_tree->tree_mutex = &tree_mutex;
-
-		if (fwd_tree->timeout <= 0) {
-			/* convert secs to msec */
-			fwd_tree->timeout  = slurm_get_msg_timeout() * 1000;
-		}
+	_start_msg_tree_internal(NULL, sp_hl, &fwd_tree, hl_count);
 
-		fwd_tree->tree_hl = sp_hl[j];
-		sp_hl[j] = NULL;
-
-		/*
-		 * Lock and increase thread counter, we need that to protect
-		 * the start_msg_tree waiting loop that was originally designed
-		 * around a "while ((count < host_count))" loop. In case where a
-		 * fwd thread was not able to get all the return codes from
-		 * children, the waiting loop was deadlocked.
-		 */
-		slurm_mutex_lock(&tree_mutex);
-		thr_count++;
-		slurm_mutex_unlock(&tree_mutex);
-
-		while (pthread_create(&thread_agent, &attr_agent,
-				      _fwd_tree_thread, (void *)fwd_tree)) {
-			error("pthread_create error %m");
-			if (++retries > MAX_RETRIES)
-				fatal("Can't create pthread");
-			usleep(100000);	/* sleep and try again */
-		}
-		slurm_attr_destroy(&attr_agent);
-
-	}
 	xfree(sp_hl);
 
 	slurm_mutex_lock(&tree_mutex);
@@ -704,7 +780,6 @@ extern void forward_wait(slurm_msg_t * msg)
 				count = list_count(msg->ret_list);
 			}
 			debug2("Got back %d", count);
-
 		}
 		debug2("Got them all");
 		slurm_mutex_unlock(&msg->forward_struct->forward_mutex);
@@ -739,7 +814,6 @@ void destroy_forward_struct(forward_struct_t *forward_struct)
 {
 	if (forward_struct) {
 		xfree(forward_struct->buf);
-		xfree(forward_struct->forward_msg);
 		slurm_mutex_destroy(&forward_struct->forward_mutex);
 		pthread_cond_destroy(&forward_struct->notify);
 		xfree(forward_struct);
diff --git a/src/common/gres.c b/src/common/gres.c
index 305991875..cb544f539 100644
--- a/src/common/gres.c
+++ b/src/common/gres.c
@@ -37,6 +37,7 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
+#define _GNU_SOURCE
 #if HAVE_CONFIG_H
 #  include "config.h"
 #  if STDC_HEADERS
@@ -66,12 +67,15 @@
 #  include <string.h>
 #endif /* HAVE_CONFIG_H */
 
+#include <sched.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <sys/stat.h>
+#include <math.h>
 
 #include "slurm/slurm.h"
 #include "slurm/slurm_errno.h"
+#include "src/common/bitstring.h"
 #include "src/common/gres.h"
 #include "src/common/list.h"
 #include "src/common/log.h"
@@ -84,6 +88,7 @@
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
+#include "src/common/assoc_mgr.h"
 
 #define GRES_MAGIC 0x438a34d4
 #define MAX_GRES_BITMAP 1024
@@ -95,6 +100,9 @@ typedef struct slurm_gres_ops {
 						  void *gres_ptr );
 	void		(*step_set_env)		( char ***job_env_ptr,
 						  void *gres_ptr );
+	void		(*step_reset_env)	( char ***job_env_ptr,
+						  void *gres_ptr,
+						  bitstr_t *usable_gres );
 	void		(*send_stepd)		( int fd );
 	void		(*recv_stepd)		( int fd );
 	int		(*job_info)		( gres_job_state_t *job_gres_data,
@@ -118,6 +126,7 @@ typedef struct slurm_gres_context {
 	slurm_gres_ops_t ops;			/* pointers to plugin symbols */
 	uint32_t	plugin_id;		/* key for searches */
 	plugrack_t	plugin_list;		/* plugrack info */
+	uint64_t        total_cnt;
 } slurm_gres_context_t;
 
 /* Generic gres data structure for adding to a list. Depending upon the
@@ -128,6 +137,12 @@ typedef struct gres_state {
 	void		*gres_data;
 } gres_state_t;
 
+/* Pointers to functions in src/slurmd/common/xcpuinfo.h that we may use */
+typedef struct xcpuinfo_funcs {
+	int (*xcpuinfo_abs_to_mac) (char *abs, char **mac);
+} xcpuinfo_funcs_t;
+xcpuinfo_funcs_t xcpuinfo_ops;
+
 /* Local variables */
 static int gres_context_cnt = -1;
 static uint32_t gres_cpu_cnt = 0;
@@ -148,7 +163,7 @@ static void	_destroy_gres_slurmd_conf(void *x);
 static void	_get_gres_cnt(gres_node_state_t *gres_data, char *orig_config,
 			      char *gres_name, char *gres_name_colon,
 			      int gres_name_colon_len);
-static uint32_t	_get_tot_gres_cnt(uint32_t plugin_id, uint32_t *set_cnt);
+static uint64_t	_get_tot_gres_cnt(uint32_t plugin_id, uint64_t *set_cnt);
 static int	_gres_find_id(void *x, void *key);
 static void	_gres_job_list_delete(void *list_element);
 static bool	_is_gres_cnt_zero(char *config);
@@ -168,7 +183,7 @@ static void *	_job_state_dup(void *gres_data);
 static void *	_job_state_dup2(void *gres_data, int node_index);
 static void	_job_state_log(void *gres_data, uint32_t job_id,
 			       char *gres_name);
-static int	_job_state_validate(char *config, void **gres_data,
+static int	_job_state_validate(char *config, gres_job_state_t **gres_data,
 				    slurm_gres_context_t *gres_name);
 static uint32_t	_job_test(void *job_gres_data, void *node_gres_data,
 			  bool use_total_gres, bitstr_t *cpu_bitmap,
@@ -197,7 +212,7 @@ static int	_parse_gres_config2(void **dest, slurm_parser_enum_t type,
 				    const char *key, const char *value,
 				    const char *line, char **leftover);
 static void	_set_gres_cnt(char *orig_config, char **new_config,
-			      uint32_t new_cnt, char *gres_name,
+			      uint64_t new_cnt, char *gres_name,
 			      char *gres_name_colon, int gres_name_colon_len);
 static int	_step_alloc(void *step_gres_data, void *job_gres_data,
 			    int node_offset, int cpu_cnt, char *gres_name,
@@ -209,9 +224,10 @@ static void *	_step_state_dup(void *gres_data);
 static void *	_step_state_dup2(void *gres_data, int node_index);
 static void	_step_state_log(void *gres_data, uint32_t job_id,
 				uint32_t step_id, char *gres_name);
-static int	_step_state_validate(char *config, void **gres_data,
+static int	_step_state_validate(char *config,
+				     gres_step_state_t **gres_data,
 				     slurm_gres_context_t *context_ptr);
-static uint32_t	_step_test(void *step_gres_data, void *job_gres_data,
+static uint64_t	_step_test(void *step_gres_data, void *job_gres_data,
 			   int node_offset, bool ignore_alloc, char *gres_name,
 			   uint32_t job_id, uint32_t step_id);
 static int	_unload_gres_plugin(slurm_gres_context_t *plugin_context);
@@ -245,6 +261,60 @@ static int _gres_find_id(void *x, void *key)
 	return 0;
 }
 
+static int _gres_job_find_name(void *x, void *key)
+{
+	gres_state_t *state_ptr = (gres_state_t *) x;
+	gres_job_state_t *gres_data_ptr =
+		(gres_job_state_t *)state_ptr->gres_data;
+	char *name = gres_data_ptr->type_model;
+
+	if (!name) {
+		int i;
+		for (i=0; i < gres_context_cnt; i++) {
+			if (gres_context[i].plugin_id == state_ptr->plugin_id) {
+				name = gres_context[i].gres_name;
+				break;
+			}
+		}
+
+		if (!name) {
+			debug("_gres_job_find_name: couldn't find name");
+			return 0;
+		}
+	}
+
+	if (!xstrcmp(name, (char *)key))
+		return 1;
+	return 0;
+}
+
+static int _gres_step_find_name(void *x, void *key)
+{
+	gres_state_t *state_ptr = (gres_state_t *) x;
+	gres_step_state_t *gres_data_ptr =
+		(gres_step_state_t *)state_ptr->gres_data;
+	char *name = gres_data_ptr->type_model;
+
+	if (!name) {
+		int i;
+		for (i=0; i < gres_context_cnt; i++) {
+			if (gres_context[i].plugin_id == state_ptr->plugin_id) {
+				name = gres_context[i].gres_name;
+				break;
+			}
+		}
+
+		if (!name) {
+			debug("_gres_job_find_name: couldn't find name");
+			return 0;
+		}
+	}
+
+	if (!xstrcmp(name, (char *)key))
+		return 1;
+	return 0;
+}
+
 static int _load_gres_plugin(char *plugin_name,
 			     slurm_gres_context_t *plugin_context)
 {
@@ -255,6 +325,7 @@ static int _load_gres_plugin(char *plugin_name,
 		"node_config_load",
 		"job_set_env",
 		"step_set_env",
+		"step_reset_env",
 		"send_stepd",
 		"recv_stepd",
 		"job_info",
@@ -505,6 +576,7 @@ extern int gres_plugin_reconfig(bool *did_change)
 	int rc = SLURM_SUCCESS;
 	char *plugin_names = slurm_get_gres_plugins();
 	bool plugin_change;
+	int i;
 
 	if (did_change)
 		*did_change = false;
@@ -518,6 +590,10 @@ extern int gres_plugin_reconfig(bool *did_change)
 		plugin_change = true;
 	else
 		plugin_change = false;
+
+	for (i=0; i < gres_context_cnt; i++)
+		gres_context[i].total_cnt = 0;
+
 	slurm_mutex_unlock(&gres_context_lock);
 
 	if (plugin_change) {
@@ -549,6 +625,7 @@ static void _destroy_gres_slurmd_conf(void *x)
 
 	xassert(p);
 	xfree(p->cpus);
+	FREE_NULL_BITMAP(p->cpus_bitmap);
 	xfree(p->file);		/* Only used by slurmd */
 	xfree(p->name);
 	xfree(p->type);
@@ -566,22 +643,22 @@ static int _log_gres_slurmd_conf(void *x, void *arg)
 	xassert(p);
 
 	if (!gres_debug) {
-		verbose("Gres Name=%s Type=%s Count=%u",
+		verbose("Gres Name=%s Type=%s Count=%"PRIu64,
 			p->name, p->type, p->count);
 		return 0;
 	}
 
 	if (p->cpus) {
-		info("Gres Name=%s Type=%s Count=%u ID=%u File=%s CPUs=%s "
-		     "CpuCnt=%u",
+		info("Gres Name=%s Type=%s Count=%"PRIu64" ID=%u File=%s "
+		     "CPUs=%s CpuCnt=%u",
 		     p->name, p->type, p->count, p->plugin_id, p->file, p->cpus,
 		     p->cpu_cnt);
 	} else if (p->file) {
-		info("Gres Name=%s Type=%s Count=%u ID=%u File=%s",
+		info("Gres Name=%s Type=%s Count=%"PRIu64" ID=%u File=%s",
 		     p->name, p->type, p->count, p->plugin_id, p->file);
 	} else {
-		info("Gres Name=%s Type=%s Count=%u ID=%u", p->name, p->type,
-		     p->count, p->plugin_id);
+		info("Gres Name=%s Type=%s Count=%"PRIu64" ID=%u", p->name,
+		     p->type, p->count, p->plugin_id);
 	}
 
 	return 0;
@@ -680,7 +757,7 @@ static int _parse_gres_config(void **dest, slurm_parser_enum_t type,
 	int i;
 	s_p_hashtbl_t *tbl;
 	gres_slurmd_conf_t *p;
-	long tmp_long;
+	uint64_t tmp_uint64;
 	char *tmp_str, *last;
 
 	tbl = s_p_hashtbl_create(_gres_options);
@@ -700,15 +777,23 @@ static int _parse_gres_config(void **dest, slurm_parser_enum_t type,
 
 	p->cpu_cnt = gres_cpu_cnt;
 	if (s_p_get_string(&p->cpus, "CPUs", tbl)) {
-		bitstr_t *cpu_bitmap;	/* Just use to validate config */
-		cpu_bitmap = bit_alloc(gres_cpu_cnt);
-		i = bit_unfmt(cpu_bitmap, p->cpus);
-		if (i != 0) {
+		char *local_cpus = NULL;
+		p->cpus_bitmap = bit_alloc(gres_cpu_cnt);
+		if (xcpuinfo_ops.xcpuinfo_abs_to_mac) {
+			i = (xcpuinfo_ops.xcpuinfo_abs_to_mac)
+				(p->cpus, &local_cpus);
+			if (i != SLURM_SUCCESS) {
+				fatal("Invalid gres data for %s, CPUs=%s",
+				      p->name, p->cpus);
+			}
+		} else
+			local_cpus = xstrdup(p->cpus);
+		if (bit_unfmt(p->cpus_bitmap, local_cpus) != 0) {
 			fatal("Invalid gres data for %s, CPUs=%s (only %u CPUs"
 			      " are available)",
 			      p->name, p->cpus, gres_cpu_cnt);
 		}
-		FREE_NULL_BITMAP(cpu_bitmap);
+		xfree(local_cpus);
 	}
 
 	if (s_p_get_string(&p->file, "File", tbl)) {
@@ -722,30 +807,34 @@ static int _parse_gres_config(void **dest, slurm_parser_enum_t type,
 	}
 
 	if (s_p_get_string(&tmp_str, "Count", tbl)) {
-		tmp_long = strtol(tmp_str, &last, 10);
-		if ((tmp_long == LONG_MIN) || (tmp_long == LONG_MAX)) {
+		tmp_uint64 = strtoll(tmp_str, &last, 10);
+		if ((tmp_uint64 == LONG_MIN) || (tmp_uint64 == LONG_MAX)) {
 			fatal("Invalid gres data for %s, Count=%s", p->name,
 			      tmp_str);
 		}
 		if ((last[0] == 'k') || (last[0] == 'K'))
-			tmp_long *= 1024;
+			tmp_uint64 *= 1024;
 		else if ((last[0] == 'm') || (last[0] == 'M'))
-			tmp_long *= (1024 * 1024);
+			tmp_uint64 *= (1024 * 1024);
 		else if ((last[0] == 'g') || (last[0] == 'G'))
-			tmp_long *= (1024 * 1024 * 1024);
+			tmp_uint64 *= (1024 * 1024 * 1024);
+		else if ((last[0] == 't') || (last[0] == 'T'))
+			tmp_uint64 *= (uint64_t)pow(1024, 4);
+		else if ((last[0] == 'p') || (last[0] == 'P'))
+			tmp_uint64 *= (uint64_t)pow(1024, 5);
 		else if (last[0] != '\0') {
 			fatal("Invalid gres data for %s, Count=%s", p->name,
 			      tmp_str);
 		}
-		if (p->count && (p->count != tmp_long)) {
+		if (p->count && (p->count != tmp_uint64)) {
 			fatal("Invalid gres data for %s, Count does not match "
 			      "File value", p->name);
 		}
-		if ((tmp_long < 0) || (tmp_long >= NO_VAL)) {
-			fatal("Gres %s has invalid count value %ld",
-			      p->name, tmp_long);
+		if ((tmp_uint64 < 0) || (tmp_uint64 >= NO_VAL)) {
+			fatal("Gres %s has invalid count value %"PRIu64,
+			      p->name, tmp_uint64);
 		}
-		p->count = tmp_long;
+		p->count = tmp_uint64;
 		xfree(tmp_str);
 	} else if (p->count == 0)
 		p->count = 1;
@@ -935,8 +1024,10 @@ static int _no_gres_conf(uint32_t cpu_cnt)
  * Load this node's configuration (how many resources it has, topology, etc.)
  * IN cpu_cnt - Number of CPUs on configured on this node
  * IN node_name - Name of this node
+ * IN xcpuinfo_abs_to_mac - Pointer to xcpuinfo_abs_to_mac() funct, if available
  */
-extern int gres_plugin_node_config_load(uint32_t cpu_cnt, char *node_name)
+extern int gres_plugin_node_config_load(uint32_t cpu_cnt, char *node_name,
+					void *xcpuinfo_abs_to_mac)
 {
 	static s_p_options_t _gres_options[] = {
 		{"Name",     S_P_ARRAY, _parse_gres_config,  NULL},
@@ -950,6 +1041,9 @@ extern int gres_plugin_node_config_load(uint32_t cpu_cnt, char *node_name)
 	gres_slurmd_conf_t **gres_array;
 	char *gres_conf_file;
 
+	if (xcpuinfo_abs_to_mac)
+		xcpuinfo_ops.xcpuinfo_abs_to_mac = xcpuinfo_abs_to_mac;
+
 	rc = gres_plugin_init();
 	if (gres_context_cnt == 0)
 		return SLURM_SUCCESS;
@@ -1022,7 +1116,7 @@ extern int gres_plugin_node_config_pack(Buf buffer)
 		while ((gres_slurmd_conf =
 			(gres_slurmd_conf_t *) list_next(iter))) {
 			pack32(magic, buffer);
-			pack32(gres_slurmd_conf->count, buffer);
+			pack64(gres_slurmd_conf->count, buffer);
 			pack32(gres_slurmd_conf->cpu_cnt, buffer);
 			pack8(gres_slurmd_conf->has_file, buffer);
 			pack32(gres_slurmd_conf->plugin_id, buffer);
@@ -1046,6 +1140,7 @@ extern int gres_plugin_node_config_unpack(Buf buffer, char* node_name)
 {
 	int i, j, rc;
 	uint32_t count, cpu_cnt, magic, plugin_id, utmp32;
+	uint64_t count64;
 	uint16_t rec_cnt, version;
 	uint8_t has_file;
 	char *tmp_cpus, *tmp_name, *tmp_type;
@@ -1063,7 +1158,80 @@ extern int gres_plugin_node_config_unpack(Buf buffer, char* node_name)
 		return SLURM_SUCCESS;
 
 	slurm_mutex_lock(&gres_context_lock);
-	if (version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (version >= SLURM_15_08_PROTOCOL_VERSION) {
+		for (i = 0; i < rec_cnt; i++) {
+			safe_unpack32(&magic, buffer);
+			if (magic != GRES_MAGIC)
+				goto unpack_error;
+
+			safe_unpack64(&count64, buffer);
+			safe_unpack32(&cpu_cnt, buffer);
+			safe_unpack8(&has_file, buffer);
+			safe_unpack32(&plugin_id, buffer);
+			safe_unpackstr_xmalloc(&tmp_cpus, &utmp32, buffer);
+			safe_unpackstr_xmalloc(&tmp_name, &utmp32, buffer);
+			safe_unpackstr_xmalloc(&tmp_type, &utmp32, buffer);
+
+	 		for (j = 0; j < gres_context_cnt; j++) {
+	 			if (gres_context[j].plugin_id != plugin_id)
+					continue;
+				if (xstrcmp(gres_context[j].gres_name,
+					    tmp_name)) {
+					/* Should have beeen caught in
+					 * gres_plugin_init() */
+					error("gres_plugin_node_config_unpack: "
+					      "gres/%s duplicate plugin ID with"
+					      " %s, unable to process",
+					      tmp_name,
+					      gres_context[j].gres_name);
+					continue;
+				}
+				if (gres_context[j].has_file &&
+				    !has_file && count64) {
+					error("gres_plugin_node_config_unpack: "
+					      "gres/%s lacks File parameter "
+					      "for node %s",
+					      tmp_name, node_name);
+					has_file = 1;
+				}
+				if (has_file && (count64 > MAX_GRES_BITMAP)) {
+					/* Avoid over-subscribing memory with
+					 * huge bitmaps */
+					error("%s: gres/%s has File plus very "
+					      "large Count (%"PRIu64") for "
+					      "node %s, resetting value to %d",
+					      __func__, tmp_name, count64,
+					      node_name, MAX_GRES_BITMAP);
+					count64 = MAX_GRES_BITMAP;
+				}
+				if (has_file)	/* Don't clear if already set */
+					gres_context[j].has_file = has_file;
+				break;
+	 		}
+			if (j >= gres_context_cnt) {
+				/* GresPlugins is inconsistently configured.
+				 * Not a fatal error. Skip this data. */
+				error("gres_plugin_node_config_unpack: no "
+				      "plugin configured to unpack data "
+				      "type %s from node %s",
+				      tmp_name, node_name);
+				xfree(tmp_cpus);
+				xfree(tmp_name);
+				continue;
+			}
+			p = xmalloc(sizeof(gres_slurmd_conf_t));
+			p->count = count64;
+			p->cpu_cnt = cpu_cnt;
+			p->has_file = has_file;
+			p->cpus = tmp_cpus;
+			tmp_cpus = NULL;	/* Nothing left to xfree */
+			p->name = tmp_name;     /* Preserve for accounting! */
+			p->type = tmp_type;
+			tmp_type = NULL;	/* Nothing left to xfree */
+			p->plugin_id = plugin_id;
+			list_append(gres_conf_list, p);
+		}
+	} else if (version >= SLURM_14_11_PROTOCOL_VERSION) {
 		for (i = 0; i < rec_cnt; i++) {
 			safe_unpack32(&magic, buffer);
 			if (magic != GRES_MAGIC)
@@ -1256,7 +1424,7 @@ static void _gres_node_list_delete(void *list_element)
 }
 
 static void _add_gres_type(char *type, gres_node_state_t *gres_data,
-			   uint32_t tmp_gres_cnt)
+			   uint64_t tmp_gres_cnt)
 {
 	int i;
 
@@ -1276,10 +1444,10 @@ static void _add_gres_type(char *type, gres_node_state_t *gres_data,
 		gres_data->type_cnt++;
 		gres_data->type_cnt_alloc =
 			xrealloc(gres_data->type_cnt_alloc,
-				 sizeof(uint32_t) * gres_data->type_cnt);
+				 sizeof(uint64_t) * gres_data->type_cnt);
 		gres_data->type_cnt_avail =
 			xrealloc(gres_data->type_cnt_avail,
-				 sizeof(uint32_t) * gres_data->type_cnt);
+				 sizeof(uint64_t) * gres_data->type_cnt);
 		gres_data->type_model =
 			xrealloc(gres_data->type_model,
 				 sizeof(char *) * gres_data->type_cnt);
@@ -1306,7 +1474,7 @@ static void _get_gres_cnt(gres_node_state_t *gres_data, char *orig_config,
 	char *node_gres_config, *tok, *last_tok = NULL;
 	char *sub_tok, *last_sub_tok = NULL;
 	char *num, *last_num = NULL;
-	uint32_t gres_config_cnt = 0, tmp_gres_cnt = 0;
+	uint64_t gres_config_cnt = 0, tmp_gres_cnt = 0;
 	int i;
 
 	xassert(gres_data);
@@ -1332,7 +1500,7 @@ static void _get_gres_cnt(gres_node_state_t *gres_data, char *orig_config,
 				error("Bad GRES configuration: %s", tok);
 				break;
 			}
-			tmp_gres_cnt = strtol(num + 1, &last_num, 10);
+			tmp_gres_cnt = strtoll(num + 1, &last_num, 10);
 			if (last_num[0] == '\0')
 				;
 			else if ((last_num[0] == 'k') || (last_num[0] == 'K'))
@@ -1341,6 +1509,10 @@ static void _get_gres_cnt(gres_node_state_t *gres_data, char *orig_config,
 				tmp_gres_cnt *= (1024 * 1024);
 			else if ((last_num[0] == 'g') || (last_num[0] == 'G'))
 				tmp_gres_cnt *= (1024 * 1024 * 1024);
+			else if ((last_num[0] == 't') || (last_num[0] == 'T'))
+				tmp_gres_cnt *= (uint64_t)pow(1024, 4);
+			else if ((last_num[0] == 'p') || (last_num[0] == 'P'))
+				tmp_gres_cnt *= (uint64_t)pow(1024, 5);
 			else {
 				error("Bad GRES configuration: %s", tok);
 				break;
@@ -1368,7 +1540,7 @@ static int _valid_gres_type(char *gres_name, gres_node_state_t *gres_data,
 			    uint16_t fast_schedule, char **reason_down)
 {
 	int i, j;
-	uint32_t model_cnt;
+	uint64_t model_cnt;
 
 	if (gres_data->type_cnt == 0)
 		return 0;
@@ -1384,7 +1556,8 @@ static int _valid_gres_type(char *gres_name, gres_node_state_t *gres_data,
 			gres_data->type_cnt_avail[i] = model_cnt;
 		} else if (model_cnt < gres_data->type_cnt_avail[i]) {
 			xstrfmtcat(*reason_down,
-				   "%s:%s count too low (%u < %u)",
+				   "%s:%s count too low "
+				   "(%"PRIu64" < %"PRIu64")",
 				   gres_name, gres_data->type_model[i],
 				   model_cnt, gres_data->type_cnt_avail[i]);
 			return -1;
@@ -1394,7 +1567,7 @@ static int _valid_gres_type(char *gres_name, gres_node_state_t *gres_data,
 }
 
 static void _set_gres_cnt(char *orig_config, char **new_config,
-			  uint32_t new_cnt, char *gres_name,
+			  uint64_t new_cnt, char *gres_name,
 			  char *gres_name_colon, int gres_name_colon_len)
 {
 	char *new_configured_res = NULL, *node_gres_config;
@@ -1416,18 +1589,18 @@ static void _set_gres_cnt(char *orig_config, char **new_config,
 			xstrcat(new_configured_res, tok);
 		} else if ((new_cnt % (1024 * 1024 * 1024)) == 0) {
 			new_cnt /= (1024 * 1024 * 1024);
-			xstrfmtcat(new_configured_res, "%s:%uG",
+			xstrfmtcat(new_configured_res, "%s:%"PRIu64"G",
 				   gres_name, new_cnt);
 		} else if ((new_cnt % (1024 * 1024)) == 0) {
 			new_cnt /= (1024 * 1024);
-			xstrfmtcat(new_configured_res, "%s:%uM",
+			xstrfmtcat(new_configured_res, "%s:%"PRIu64"M",
 				   gres_name, new_cnt);
 		} else if ((new_cnt % 1024) == 0) {
 			new_cnt /= 1024;
-			xstrfmtcat(new_configured_res, "%s:%uK",
+			xstrfmtcat(new_configured_res, "%s:%"PRIu64"K",
 				   gres_name, new_cnt);
 		} else {
-			xstrfmtcat(new_configured_res, "%s:%u",
+			xstrfmtcat(new_configured_res, "%s:%"PRIu64"",
 				   gres_name, new_cnt);
 		}
 		tok = strtok_r(NULL, ",", &last_tok);
@@ -1442,8 +1615,8 @@ static gres_node_state_t *_build_gres_node_state(void)
 	gres_node_state_t *gres_data;
 
 	gres_data = xmalloc(sizeof(gres_node_state_t));
-	gres_data->gres_cnt_config = NO_VAL;
-	gres_data->gres_cnt_found  = NO_VAL;
+	gres_data->gres_cnt_config = NO_VAL64;
+	gres_data->gres_cnt_found  = NO_VAL64;
 
 	return gres_data;
 }
@@ -1476,6 +1649,9 @@ static int _node_config_init(char *node_name, char *orig_config,
 		      context_ptr->gres_name,
 		      context_ptr->gres_name_colon,
 		      context_ptr->gres_name_colon_len);
+
+	context_ptr->total_cnt += gres_data->gres_cnt_config;
+
 	/* Use count from recovered state, if higher */
 	gres_data->gres_cnt_avail  = MAX(gres_data->gres_cnt_avail,
 					 gres_data->gres_cnt_config);
@@ -1539,11 +1715,12 @@ extern int gres_plugin_init_node_config(char *node_name, char *orig_config,
  * RET - total number of gres available of this ID on this node in (sum
  *	 across all records of this ID)
  */
-static uint32_t _get_tot_gres_cnt(uint32_t plugin_id, uint32_t *set_cnt)
+static uint64_t _get_tot_gres_cnt(uint32_t plugin_id, uint64_t *set_cnt)
 {
 	ListIterator iter;
 	gres_slurmd_conf_t *gres_slurmd_conf;
-	uint32_t gres_cnt = 0, cpu_set_cnt = 0, rec_cnt = 0;
+	uint32_t cpu_set_cnt = 0, rec_cnt = 0;
+	uint64_t gres_cnt = 0;
 
 	xassert(set_cnt);
 	*set_cnt = 0;
@@ -1611,7 +1788,7 @@ extern int _node_config_validate(char *node_name, char *orig_config,
 				 slurm_gres_context_t *context_ptr)
 {
 	int i, j, gres_inx, rc = SLURM_SUCCESS;
-	uint32_t gres_cnt, set_cnt = 0;
+	uint64_t gres_cnt, set_cnt = 0;
 	bool cpus_config = false, updated_config = false;
 	gres_node_state_t *gres_data;
 	ListIterator iter;
@@ -1623,17 +1800,18 @@ extern int _node_config_validate(char *node_name, char *orig_config,
 
 	gres_cnt = _get_tot_gres_cnt(context_ptr->plugin_id, &set_cnt);
 	if (gres_data->gres_cnt_found != gres_cnt) {
-		if (gres_data->gres_cnt_found != NO_VAL) {
-			info("%s: count changed for node %s from %u to %u",
+		if (gres_data->gres_cnt_found != NO_VAL64) {
+			info("%s: count changed for node %s from %"PRIu64" "
+			     "to %"PRIu64"",
 			     context_ptr->gres_type, node_name,
 			     gres_data->gres_cnt_found, gres_cnt);
 		}
-		if ((gres_data->gres_cnt_found != NO_VAL) &&
+		if ((gres_data->gres_cnt_found != NO_VAL64) &&
 		    (gres_data->gres_cnt_alloc != 0)) {
 			if (reason_down && (*reason_down == NULL)) {
 				xstrfmtcat(*reason_down,
 					   "%s count changed and jobs are "
-					   "using them (%u != %u)",
+					   "using them (%"PRIu64" != %"PRIu64")",
 					   context_ptr->gres_type,
 					   gres_data->gres_cnt_found, gres_cnt);
 			}
@@ -1672,10 +1850,10 @@ extern int _node_config_validate(char *node_name, char *orig_config,
 		/* Resize the data structures here */
 		gres_data->topo_gres_cnt_alloc =
 			xrealloc(gres_data->topo_gres_cnt_alloc,
-				 set_cnt * sizeof(uint32_t));
+				 set_cnt * sizeof(uint64_t));
 		gres_data->topo_gres_cnt_avail =
 			xrealloc(gres_data->topo_gres_cnt_avail,
-				 set_cnt * sizeof(uint32_t));
+				 set_cnt * sizeof(uint64_t));
 		for (i = 0; i < gres_data->topo_cnt; i++) {
 			if (gres_data->topo_gres_bitmap) {
 				FREE_NULL_BITMAP(gres_data->
@@ -1731,7 +1909,7 @@ extern int _node_config_validate(char *node_name, char *orig_config,
 
 	if ((orig_config == NULL) || (orig_config[0] == '\0'))
 		gres_data->gres_cnt_config = 0;
-	else if (gres_data->gres_cnt_config == NO_VAL) {
+	else if (gres_data->gres_cnt_config == NO_VAL64) {
 		/* This should have been filled in by _node_config_init() */
 		_get_gres_cnt(gres_data, orig_config,
 			      context_ptr->gres_name,
@@ -1741,15 +1919,15 @@ extern int _node_config_validate(char *node_name, char *orig_config,
 
 	if ((gres_data->gres_cnt_config == 0) || (fast_schedule > 0))
 		gres_data->gres_cnt_avail = gres_data->gres_cnt_config;
-	else if (gres_data->gres_cnt_found != NO_VAL)
+	else if (gres_data->gres_cnt_found != NO_VAL64)
 		gres_data->gres_cnt_avail = gres_data->gres_cnt_found;
-	else if (gres_data->gres_cnt_avail == NO_VAL)
+	else if (gres_data->gres_cnt_avail == NO_VAL64)
 		gres_data->gres_cnt_avail = 0;
 
 	if (context_ptr->has_file) {
 		if (gres_data->gres_cnt_avail > MAX_GRES_BITMAP) {
-			error("%s: gres/%s has File plus very large Count (%u) "
-			      "for node %s, resetting value to %u",
+			error("%s: gres/%s has File plus very large Count "
+			      "(%"PRIu64") for node %s, resetting value to %u",
 			      __func__, context_ptr->gres_type,
 			      gres_data->gres_cnt_avail, node_name,
 			      MAX_GRES_BITMAP);
@@ -1769,7 +1947,8 @@ extern int _node_config_validate(char *node_name, char *orig_config,
 	if ((fast_schedule < 2) &&
 	    (gres_data->gres_cnt_found < gres_data->gres_cnt_config)) {
 		if (reason_down && (*reason_down == NULL)) {
-			xstrfmtcat(*reason_down, "%s count too low (%u < %u)",
+			xstrfmtcat(*reason_down,
+				   "%s count too low (%"PRIu64" < %"PRIu64")",
 				   context_ptr->gres_type,
 				   gres_data->gres_cnt_found,
 				   gres_data->gres_cnt_config);
@@ -1780,8 +1959,8 @@ extern int _node_config_validate(char *node_name, char *orig_config,
 		rc = EINVAL;
 	} else if ((fast_schedule == 2) && gres_data->topo_cnt &&
 		   (gres_data->gres_cnt_found != gres_data->gres_cnt_config)) {
-		error("%s on node %s configured for %u resources but %u found,"
-		      " ignoring topology support",
+		error("%s on node %s configured for %"PRIu64" resources but "
+		      "%"PRIu64" found, ignoring topology support",
 		      context_ptr->gres_type, node_name,
 		      gres_data->gres_cnt_config, gres_data->gres_cnt_found);
 		if (gres_data->topo_cpus_bitmap) {
@@ -1883,11 +2062,14 @@ static int _node_reconfig(char *node_name, char *orig_config, char **new_config,
 		      context_ptr->gres_name,
 		      context_ptr->gres_name_colon,
 		      context_ptr->gres_name_colon_len);
+
+	context_ptr->total_cnt += gres_data->gres_cnt_config;
+
 	if ((gres_data->gres_cnt_config == 0) || (fast_schedule > 0))
 		gres_data->gres_cnt_avail = gres_data->gres_cnt_config;
-	else if (gres_data->gres_cnt_found != NO_VAL)
+	else if (gres_data->gres_cnt_found != NO_VAL64)
 		gres_data->gres_cnt_avail = gres_data->gres_cnt_found;
-	else if (gres_data->gres_cnt_avail == NO_VAL)
+	else if (gres_data->gres_cnt_avail == NO_VAL64)
 		gres_data->gres_cnt_avail = 0;
 
 	if (context_ptr->has_file) {
@@ -1903,13 +2085,13 @@ static int _node_reconfig(char *node_name, char *orig_config, char **new_config,
 	}
 
 	if ((fast_schedule < 2) &&
-	    (gres_data->gres_cnt_found != NO_VAL) &&
+	    (gres_data->gres_cnt_found != NO_VAL64) &&
 	    (gres_data->gres_cnt_found <  gres_data->gres_cnt_config)) {
 		/* Do not set node DOWN, but give the node
 		 * a chance to register with more resources */
-		gres_data->gres_cnt_found = NO_VAL;
+		gres_data->gres_cnt_found = NO_VAL64;
 	} else if ((fast_schedule == 0) &&
-		   (gres_data->gres_cnt_found != NO_VAL) &&
+		   (gres_data->gres_cnt_found != NO_VAL64) &&
 		   (gres_data->gres_cnt_found >  gres_data->gres_cnt_config)) {
 		_set_gres_cnt(orig_config, new_config,
 			      gres_data->gres_cnt_found,
@@ -2003,7 +2185,7 @@ extern int gres_plugin_node_state_pack(List gres_list, Buf buffer,
 		gres_node_ptr = (gres_node_state_t *) gres_ptr->gres_data;
 		pack32(magic, buffer);
 		pack32(gres_ptr->plugin_id, buffer);
-		pack32(gres_node_ptr->gres_cnt_avail, buffer);
+		pack64(gres_node_ptr->gres_cnt_avail, buffer);
 		/* Just note if gres_bit_alloc exists.
 		 * Rebuild it based upon the state of recovered jobs */
 		if (gres_node_ptr->gres_bit_alloc)
@@ -2036,7 +2218,8 @@ extern int gres_plugin_node_state_unpack(List *gres_list, Buf buffer,
 					 uint16_t protocol_version)
 {
 	int i, rc;
-	uint32_t gres_cnt_avail, magic, plugin_id;
+	uint32_t magic, plugin_id, utmp32;
+	uint64_t gres_cnt_avail;
 	uint16_t rec_cnt;
 	uint8_t  has_bitmap;
 	gres_state_t *gres_ptr;
@@ -2056,12 +2239,20 @@ extern int gres_plugin_node_state_unpack(List *gres_list, Buf buffer,
 		if ((buffer == NULL) || (remaining_buf(buffer) == 0))
 			break;
 		rec_cnt--;
-		if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 			safe_unpack32(&magic, buffer);
 			if (magic != GRES_MAGIC)
 				goto unpack_error;
 			safe_unpack32(&plugin_id, buffer);
-			safe_unpack32(&gres_cnt_avail, buffer);
+			safe_unpack64(&gres_cnt_avail, buffer);
+			safe_unpack8(&has_bitmap, buffer);
+		} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+			safe_unpack32(&magic, buffer);
+			if (magic != GRES_MAGIC)
+				goto unpack_error;
+			safe_unpack32(&plugin_id, buffer);
+			safe_unpack32(&utmp32, buffer);
+			gres_cnt_avail = utmp32;
 			safe_unpack8(&has_bitmap, buffer);
 		} else {
 			error("gres_plugin_node_state_unpack: protocol_version"
@@ -2127,9 +2318,9 @@ static void *_node_state_dup(void *gres_data)
 	new_gres->topo_gres_bitmap = xmalloc(gres_ptr->topo_cnt *
 					     sizeof(bitstr_t *));
 	new_gres->topo_gres_cnt_alloc = xmalloc(gres_ptr->topo_cnt *
-						sizeof(uint32_t));
+						sizeof(uint64_t));
 	new_gres->topo_gres_cnt_avail = xmalloc(gres_ptr->topo_cnt *
-						sizeof(uint32_t));
+						sizeof(uint64_t));
 	new_gres->topo_model = xmalloc(gres_ptr->topo_cnt * sizeof(char *));
 	for (i = 0; i < gres_ptr->topo_cnt; i++) {
 		if (gres_ptr->topo_cpus_bitmap[i]) {
@@ -2147,9 +2338,9 @@ static void *_node_state_dup(void *gres_data)
 
 	new_gres->type_cnt       = gres_ptr->type_cnt;
 	new_gres->type_cnt_alloc = xmalloc(gres_ptr->type_cnt *
-					   sizeof(uint32_t));
+					   sizeof(uint64_t));
 	new_gres->type_cnt_avail = xmalloc(gres_ptr->type_cnt *
-					   sizeof(uint32_t));
+					   sizeof(uint64_t));
 	new_gres->type_model = xmalloc(gres_ptr->type_cnt * sizeof(char *));
 	for (i = 0; i < gres_ptr->type_cnt; i++) {
 		new_gres->type_cnt_alloc[i] = gres_ptr->type_cnt_alloc[i];
@@ -2286,7 +2477,7 @@ static char *_node_gres_used(void *gres_data, char *gres_name)
 		if (gres_node_ptr->no_consume) {
 			xstrfmtcat(gres_node_ptr->gres_used, "%s:0", gres_name);
 		} else {
-			xstrfmtcat(gres_node_ptr->gres_used, "%s:%u",
+			xstrfmtcat(gres_node_ptr->gres_used, "%s:%"PRIu64"",
 				   gres_name, gres_node_ptr->gres_cnt_alloc);
 		}
 	} else {
@@ -2297,7 +2488,7 @@ static char *_node_gres_used(void *gres_data, char *gres_name)
 					   gres_node_ptr->type_model[i]);
 			} else {
 				xstrfmtcat(gres_node_ptr->gres_used,
-					   "%s%s:%s:%u", sep, gres_name,
+					   "%s%s:%s:%"PRIu64"", sep, gres_name,
 					   gres_node_ptr->type_model[i],
 					   gres_node_ptr->type_cnt_alloc[i]);
 			}
@@ -2318,19 +2509,21 @@ static void _node_state_log(void *gres_data, char *node_name, char *gres_name)
 	gres_node_ptr = (gres_node_state_t *) gres_data;
 
 	info("gres/%s: state for %s", gres_name, node_name);
-	if (gres_node_ptr->gres_cnt_found == NO_VAL) {
+	if (gres_node_ptr->gres_cnt_found == NO_VAL64) {
 		snprintf(tmp_str, sizeof(tmp_str), "TBD");
 	} else {
-		snprintf(tmp_str, sizeof(tmp_str), "%u",
+		snprintf(tmp_str, sizeof(tmp_str), "%"PRIu64"",
 			 gres_node_ptr->gres_cnt_found);
 	}
 
 	if (gres_node_ptr->no_consume) {
-		info("  gres_cnt found:%s configured:%u avail:%u no_consume",
+		info("  gres_cnt found:%s configured:%"PRIu64" "
+		     "avail:%"PRIu64" no_consume",
 		     tmp_str, gres_node_ptr->gres_cnt_config,
 		     gres_node_ptr->gres_cnt_avail);
 	} else {
-		info("  gres_cnt found:%s configured:%u avail:%u alloc:%u",
+		info("  gres_cnt found:%s configured:%"PRIu64" "
+		     "avail:%"PRIu64" alloc:%"PRIu64"",
 		     tmp_str, gres_node_ptr->gres_cnt_config,
 		     gres_node_ptr->gres_cnt_avail,
 		     gres_node_ptr->gres_cnt_alloc);
@@ -2358,17 +2551,17 @@ static void _node_state_log(void *gres_data, char *node_name, char *gres_name)
 			info("  topo_gres_bitmap[%d]:%s", i, tmp_str);
 		} else
 			info("  topo_gres_bitmap[%d]:NULL", i);
-		info("  topo_gres_cnt_alloc[%d]:%u", i,
+		info("  topo_gres_cnt_alloc[%d]:%"PRIu64"", i,
 		     gres_node_ptr->topo_gres_cnt_alloc[i]);
-		info("  topo_gres_cnt_avail[%d]:%u", i,
+		info("  topo_gres_cnt_avail[%d]:%"PRIu64"", i,
 		     gres_node_ptr->topo_gres_cnt_avail[i]);
 		info("  type[%d]:%s", i, gres_node_ptr->topo_model[i]);
 	}
 
 	for (i = 0; i < gres_node_ptr->type_cnt; i++) {
-		info("  type_cnt_alloc[%d]:%u", i,
+		info("  type_cnt_alloc[%d]:%"PRIu64"", i,
 		     gres_node_ptr->type_cnt_alloc[i]);
-		info("  type_cnt_avail[%d]:%u", i,
+		info("  type_cnt_avail[%d]:%"PRIu64"", i,
 		     gres_node_ptr->type_cnt_avail[i]);
 		info("  type[%d]:%s", i, gres_node_ptr->type_model[i]);
 	}
@@ -2458,6 +2651,66 @@ extern char *gres_get_node_used(List gres_list)
 	return gres_used;
 }
 
+extern uint64_t gres_get_system_cnt(char *name)
+{
+	uint64_t count = 0;
+	int i;
+
+	if (!name)
+		return 0;
+
+	(void) gres_plugin_init();
+
+	slurm_mutex_lock(&gres_context_lock);
+	for (i=0; i < gres_context_cnt; i++) {
+		if (!strcmp(gres_context[i].gres_name, name)) {
+			count = gres_context[i].total_cnt;
+			break;
+		}
+	}
+	slurm_mutex_unlock(&gres_context_lock);
+	return count;
+}
+
+
+/*
+ * Get the count of a node's GRES
+ * IN gres_list - List of Gres records for this node to track usage
+ * IN name - name of gres
+ */
+extern uint64_t gres_plugin_node_config_cnt(List gres_list, char *name)
+{
+	int i;
+	ListIterator gres_iter;
+	gres_state_t *gres_ptr;
+	uint64_t count = 0;
+
+	if (!gres_list || !name || !list_count(gres_list))
+		return count;
+
+	(void) gres_plugin_init();
+
+	slurm_mutex_lock(&gres_context_lock);
+	for (i=0; i < gres_context_cnt; i++) {
+		if (strcmp(gres_context[i].gres_name, name))
+			continue;
+		/* Find or create gres_state entry on the list */
+		gres_iter = list_iterator_create(gres_list);
+		while ((gres_ptr = list_next(gres_iter))) {
+			if (gres_ptr->plugin_id == gres_context[i].plugin_id)
+				break;
+		}
+		list_iterator_destroy(gres_iter);
+		if (gres_ptr && gres_ptr->gres_data)
+			count = ((gres_node_state_t *)(gres_ptr->gres_data))->
+				gres_cnt_config;
+		break;
+	}
+	slurm_mutex_unlock(&gres_context_lock);
+
+	return count;
+}
+
 static void _job_state_delete(void *gres_data)
 {
 	int i;
@@ -2493,12 +2746,12 @@ static void _gres_job_list_delete(void *list_element)
 	slurm_mutex_unlock(&gres_context_lock);
 }
 
-static int _job_state_validate(char *config, void **gres_data,
+static int _job_state_validate(char *config, gres_job_state_t **gres_data,
 			       slurm_gres_context_t *context_ptr)
 {
 	gres_job_state_t *gres_ptr;
 	char *type = NULL, *num = NULL, *last_num = NULL;
-	long cnt;
+	uint64_t cnt;
 
 	if (!xstrcmp(config, context_ptr->gres_name)) {
 		cnt = 1;
@@ -2508,7 +2761,11 @@ static int _job_state_validate(char *config, void **gres_data,
 		num = strrchr(config, ':');
 		if (!num)
 			return SLURM_ERROR;
-		cnt = strtol(num + 1, &last_num, 10);
+		errno = 0;
+		cnt = strtoll(num + 1, &last_num, 10);
+		if (errno != 0)
+			return SLURM_ERROR;
+
 		if (last_num[0] == '\0')
 			;
 		else if ((last_num[0] == 'k') || (last_num[0] == 'K'))
@@ -2519,8 +2776,7 @@ static int _job_state_validate(char *config, void **gres_data,
 			cnt *= (1024 * 1024 * 1024);
 		else
 			return SLURM_ERROR;
-		if ((cnt < 0) || (cnt > 0xffffffff))
-			return SLURM_ERROR;
+
 	} else {
 		/* Did not find this GRES name, check for zero value */
 		num = strrchr(config, ':');
@@ -2536,12 +2792,13 @@ static int _job_state_validate(char *config, void **gres_data,
 		*gres_data = NULL;
 	} else {
 		gres_ptr = xmalloc(sizeof(gres_job_state_t));
-		gres_ptr->gres_cnt_alloc = (uint32_t) cnt;
+		gres_ptr->gres_cnt_alloc = cnt;
 		if (type && num && (type != num)) {
 			type++;
 			num[0] = '\0';
 			gres_ptr->type_model = xstrdup(type);
 		}
+
 		*gres_data = gres_ptr;
 	}
 
@@ -2574,7 +2831,7 @@ extern int gres_plugin_job_state_validate(char *req_config, List *gres_list)
 	char *tmp_str, *tok, *last = NULL;
 	int i, rc, rc2;
 	gres_state_t *gres_ptr;
-	void *job_gres_data;
+	gres_job_state_t *job_gres_data;
 
 	if ((req_config == NULL) || (req_config[0] == '\0')) {
 		*gres_list = NULL;
@@ -2761,7 +3018,46 @@ extern int gres_plugin_job_state_pack(List gres_list, Buf buffer,
 	gres_iter = list_iterator_create(gres_list);
 	while ((gres_ptr = (gres_state_t *) list_next(gres_iter))) {
 		gres_job_ptr = (gres_job_state_t *) gres_ptr->gres_data;
-		if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+
+		if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+			pack32(magic, buffer);
+			pack32(gres_ptr->plugin_id, buffer);
+			pack64(gres_job_ptr->gres_cnt_alloc, buffer);
+			packstr(gres_job_ptr->type_model, buffer);
+			pack32(gres_job_ptr->node_cnt, buffer);
+
+			if (gres_job_ptr->gres_bit_alloc) {
+				pack8((uint8_t) 1, buffer);
+				for (i = 0; i < gres_job_ptr->node_cnt; i++) {
+					pack_bit_str_hex(gres_job_ptr->
+							 gres_bit_alloc[i],
+							 buffer);
+				}
+			} else {
+				pack8((uint8_t) 0, buffer);
+			}
+			if (details && gres_job_ptr->gres_bit_step_alloc) {
+				pack8((uint8_t) 1, buffer);
+				for (i = 0; i < gres_job_ptr->node_cnt; i++) {
+					pack_bit_str_hex(gres_job_ptr->
+							 gres_bit_step_alloc[i],
+							 buffer);
+				}
+			} else {
+				pack8((uint8_t) 0, buffer);
+			}
+			if (details && gres_job_ptr->gres_cnt_step_alloc) {
+				pack8((uint8_t) 1, buffer);
+				for (i = 0; i < gres_job_ptr->node_cnt; i++) {
+					pack64(gres_job_ptr->
+					       gres_cnt_step_alloc[i],
+					       buffer);
+				}
+			} else {
+				pack8((uint8_t) 0, buffer);
+			}
+			rec_cnt++;
+		} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
 			pack32(magic, buffer);
 			pack32(gres_ptr->plugin_id, buffer);
 			pack32(gres_job_ptr->gres_cnt_alloc, buffer);
@@ -2884,17 +3180,18 @@ extern int gres_plugin_job_state_unpack(List *gres_list, Buf buffer,
 			break;
 		rec_cnt--;
 
-		if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+		if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 			safe_unpack32(&magic, buffer);
 			if (magic != GRES_MAGIC)
 				goto unpack_error;
 			safe_unpack32(&plugin_id, buffer);
 			gres_job_ptr = xmalloc(sizeof(gres_job_state_t));
-			safe_unpack32(&gres_job_ptr->gres_cnt_alloc, buffer);
+			safe_unpack64(&gres_job_ptr->gres_cnt_alloc, buffer);
 			safe_unpackstr_xmalloc(&gres_job_ptr->type_model,
 					       &utmp32, buffer);
 			safe_unpack32(&gres_job_ptr->node_cnt, buffer);
 			safe_unpack8(&has_more, buffer);
+
 			if (has_more) {
 				gres_job_ptr->gres_bit_alloc =
 					xmalloc(sizeof(bitstr_t *) *
@@ -2919,21 +3216,67 @@ extern int gres_plugin_job_state_unpack(List *gres_list, Buf buffer,
 			safe_unpack8(&has_more, buffer);
 			if (has_more) {
 				gres_job_ptr->gres_cnt_step_alloc =
-					xmalloc(sizeof(uint32_t) *
+					xmalloc(sizeof(uint64_t) *
 						gres_job_ptr->node_cnt);
 				for (i=0; i<gres_job_ptr->node_cnt; i++) {
-					safe_unpack32(&gres_job_ptr->
+					safe_unpack64(&gres_job_ptr->
 						      gres_cnt_step_alloc[i],
 						      buffer);
 				}
 			}
+
+		} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+			safe_unpack32(&magic, buffer);
+			if (magic != GRES_MAGIC)
+				goto unpack_error;
+			safe_unpack32(&plugin_id, buffer);
+			gres_job_ptr = xmalloc(sizeof(gres_job_state_t));
+			safe_unpack32(&utmp32, buffer);
+			gres_job_ptr->gres_cnt_alloc = utmp32;
+			safe_unpackstr_xmalloc(&gres_job_ptr->type_model,
+					       &utmp32, buffer);
+			safe_unpack32(&gres_job_ptr->node_cnt, buffer);
+			safe_unpack8(&has_more, buffer);
+			if (has_more) {
+				gres_job_ptr->gres_bit_alloc =
+					xmalloc(sizeof(bitstr_t *) *
+						gres_job_ptr->node_cnt);
+				for (i = 0; i < gres_job_ptr->node_cnt; i++) {
+					unpack_bit_str_hex(&gres_job_ptr->
+							   gres_bit_alloc[i],
+							   buffer);
+				}
+			}
+			safe_unpack8(&has_more, buffer);
+			if (has_more) {
+				gres_job_ptr->gres_bit_step_alloc =
+					xmalloc(sizeof(bitstr_t *) *
+						gres_job_ptr->node_cnt);
+				for (i = 0; i < gres_job_ptr->node_cnt; i++) {
+					unpack_bit_str_hex(&gres_job_ptr->
+							   gres_bit_step_alloc[i],
+							   buffer);
+				}
+			}
+			safe_unpack8(&has_more, buffer);
+			if (has_more) {
+				gres_job_ptr->gres_cnt_step_alloc =
+					xmalloc(sizeof(uint64_t) *
+						gres_job_ptr->node_cnt);
+				for (i=0; i<gres_job_ptr->node_cnt; i++) {
+					safe_unpack32(&utmp32, buffer);
+					gres_job_ptr->gres_cnt_step_alloc[i] =
+						utmp32;
+				}
+			}
 		} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
 			safe_unpack32(&magic, buffer);
 			if (magic != GRES_MAGIC)
 				goto unpack_error;
 			safe_unpack32(&plugin_id, buffer);
 			gres_job_ptr = xmalloc(sizeof(gres_job_state_t));
-			safe_unpack32(&gres_job_ptr->gres_cnt_alloc, buffer);
+			safe_unpack32(&utmp32, buffer);
+			gres_job_ptr->gres_cnt_alloc = utmp32;
 			safe_unpack32(&gres_job_ptr->node_cnt, buffer);
 			safe_unpack8(&has_more, buffer);
 			if (has_more) {
@@ -2960,12 +3303,12 @@ extern int gres_plugin_job_state_unpack(List *gres_list, Buf buffer,
 			safe_unpack8(&has_more, buffer);
 			if (has_more) {
 				gres_job_ptr->gres_cnt_step_alloc =
-					xmalloc(sizeof(uint32_t) *
+					xmalloc(sizeof(uint64_t) *
 						gres_job_ptr->node_cnt);
 				for (i=0; i<gres_job_ptr->node_cnt; i++) {
-					safe_unpack32(&gres_job_ptr->
-						      gres_cnt_step_alloc[i],
-						      buffer);
+					safe_unpack32(&utmp32, buffer);
+					gres_job_ptr->gres_cnt_step_alloc[i] =
+						utmp32;
 				}
 			}
 		} else {
@@ -3124,7 +3467,8 @@ static uint32_t _job_test(void *job_gres_data, void *node_gres_data,
 			  int cpu_start_bit, int cpu_end_bit, bool *topo_set,
 			  uint32_t job_id, char *node_name, char *gres_name)
 {
-	int i, j, cpu_size, cpus_ctld, gres_avail = 0, gres_total, top_inx;
+	int i, j, cpu_size, cpus_ctld, top_inx;
+	uint64_t gres_avail = 0, gres_total;
 	gres_job_state_t  *job_gres_ptr  = (gres_job_state_t *)  job_gres_data;
 	gres_node_state_t *node_gres_ptr = (gres_node_state_t *) node_gres_data;
 	uint32_t *cpus_addnt = NULL;  /* Additional CPUs avail from this GRES */
@@ -3636,7 +3980,7 @@ static int _job_alloc(void *job_gres_data, void *node_gres_data,
 		xfree(job_gres_ptr->gres_cnt_step_alloc);
 	if (job_gres_ptr->gres_cnt_step_alloc == NULL) {
 		job_gres_ptr->gres_cnt_step_alloc =
-			xmalloc(sizeof(uint32_t) * node_cnt);
+			xmalloc(sizeof(uint64_t) * node_cnt);
 	}
 
 	/*
@@ -3668,7 +4012,8 @@ static int _job_alloc(void *job_gres_data, void *node_gres_data,
 				bit_alloc(node_gres_ptr->gres_cnt_avail);
 		i = bit_size(node_gres_ptr->gres_bit_alloc);
 		if (i < node_gres_ptr->gres_cnt_avail) {
-			error("gres/%s: node %s gres bitmap size bad (%d < %u)",
+			error("gres/%s: node %s gres bitmap size bad "
+			      "(%d < %"PRIu64")",
 			      gres_name, node_name,
 			      i, node_gres_ptr->gres_cnt_avail);
 			node_gres_ptr->gres_bit_alloc =
@@ -3769,7 +4114,7 @@ static int _job_alloc(void *job_gres_data, void *node_gres_data,
 		len = bit_size(job_gres_ptr->gres_bit_alloc[node_offset]);
 		if (!node_gres_ptr->topo_gres_cnt_alloc) {
 			node_gres_ptr->topo_gres_cnt_alloc =
-				xmalloc(sizeof(uint32_t) * len);
+				xmalloc(sizeof(uint64_t) * len);
 		} else {
 			len = MIN(len, node_gres_ptr->gres_cnt_config);
 		}
@@ -4139,7 +4484,7 @@ extern void gres_plugin_job_merge(List from_job_gres_list,
 	int i_first, i_last, i;
 	int from_inx, to_inx, new_inx;
 	bitstr_t **new_gres_bit_alloc, **new_gres_bit_step_alloc;
-	uint32_t *new_gres_cnt_step_alloc;
+	uint64_t *new_gres_cnt_step_alloc;
 
 	(void) gres_plugin_init();
 	new_node_cnt = bit_set_count(from_job_node_bitmap) +
@@ -4167,7 +4512,7 @@ extern void gres_plugin_job_merge(List from_job_gres_list,
 					     new_node_cnt);
 		new_gres_bit_step_alloc = xmalloc(sizeof(bitstr_t *) *
 						  new_node_cnt);
-		new_gres_cnt_step_alloc = xmalloc(sizeof(uint32_t) *
+		new_gres_cnt_step_alloc = xmalloc(sizeof(uint64_t) *
 						  new_node_cnt);
 
 		from_inx = to_inx = new_inx = -1;
@@ -4238,7 +4583,7 @@ step2:	if (!from_job_gres_list)
 			gres_job_ptr2->gres_bit_step_alloc =
 				xmalloc(sizeof(bitstr_t *) * new_node_cnt);
 			gres_job_ptr2->gres_cnt_step_alloc =
-				xmalloc(sizeof(uint32_t) * new_node_cnt);
+				xmalloc(sizeof(uint64_t) * new_node_cnt);
 			list_append(to_job_gres_list, gres_ptr2);
 		}
 		from_inx = to_inx = new_inx = -1;
@@ -4337,7 +4682,8 @@ static void _job_state_log(void *gres_data, uint32_t job_id, char *gres_name)
 	xassert(gres_data);
 	gres_ptr = (gres_job_state_t *) gres_data;
 	info("gres: %s state for job %u", gres_name, job_id);
-	info("  gres_cnt:%u node_cnt:%u type:%s", gres_ptr->gres_cnt_alloc,
+	info("  gres_cnt:%"PRIu64" node_cnt:%u type:%s",
+	     gres_ptr->gres_cnt_alloc,
 	     gres_ptr->node_cnt, gres_ptr->type_model);
 	if (gres_ptr->node_cnt == 0)
 		return;
@@ -4349,7 +4695,7 @@ static void _job_state_log(void *gres_data, uint32_t job_id, char *gres_name)
 	if (gres_ptr->gres_cnt_step_alloc == NULL)
 		info("  gres_cnt_step_alloc:NULL");
 
-	for (i=0; i<gres_ptr->node_cnt; i++) {
+	for (i = 0; i < gres_ptr->node_cnt; i++) {
 		if (gres_ptr->gres_bit_alloc && gres_ptr->gres_bit_alloc[i]) {
 			bit_fmt(tmp_str, sizeof(tmp_str),
 				gres_ptr->gres_bit_alloc[i]);
@@ -4366,7 +4712,7 @@ static void _job_state_log(void *gres_data, uint32_t job_id, char *gres_name)
 			info("  gres_bit_step_alloc[%d]:NULL", i);
 
 		if (gres_ptr->gres_cnt_step_alloc) {
-			info("  gres_cnt_step_alloc[%d]:%u", i,
+			info("  gres_cnt_step_alloc[%d]:%"PRIu64"", i,
 			     gres_ptr->gres_cnt_step_alloc[i]);
 		}
 	}
@@ -4380,18 +4726,19 @@ static void _job_state_log(void *gres_data, uint32_t job_id, char *gres_name)
  *	value from.
  * RET The value associated with the gres type or NO_VAL if not found.
  */
-extern uint32_t gres_plugin_get_job_value_by_type(List job_gres_list,
+extern uint64_t gres_plugin_get_job_value_by_type(List job_gres_list,
 						  char *gres_name_type)
 {
-	uint32_t gres_val, gres_name_type_id;
+	uint64_t gres_val;
+	uint32_t gres_name_type_id;
 	ListIterator  job_gres_iter;
 	gres_state_t *job_gres_ptr;
 
 	if (job_gres_list == NULL)
-		return NO_VAL;
+		return NO_VAL64;
 
 	gres_name_type_id = _build_id(gres_name_type);
-	gres_val = NO_VAL;
+	gres_val = NO_VAL64;
 
 	job_gres_iter = list_iterator_create(job_gres_list);
 	while ((job_gres_ptr = (gres_state_t *) list_next(job_gres_iter))) {
@@ -4517,7 +4864,7 @@ static void _gres_step_list_delete(void *list_element)
 	xfree(gres_ptr);
 }
 
-static int _step_state_validate(char *config, void **gres_data,
+static int _step_state_validate(char *config, gres_step_state_t **gres_data,
 				slurm_gres_context_t *context_ptr)
 {
 	gres_step_state_t *gres_ptr;
@@ -4571,13 +4918,13 @@ static int _step_state_validate(char *config, void **gres_data,
 	return SLURM_SUCCESS;
 }
 
-static uint32_t _step_test(void *step_gres_data, void *job_gres_data,
+static uint64_t _step_test(void *step_gres_data, void *job_gres_data,
 			   int node_offset, bool ignore_alloc, char *gres_name,
 			   uint32_t job_id, uint32_t step_id)
 {
 	gres_job_state_t  *job_gres_ptr  = (gres_job_state_t *)  job_gres_data;
 	gres_step_state_t *step_gres_ptr = (gres_step_state_t *) step_gres_data;
-	uint32_t gres_cnt;
+	uint64_t gres_cnt;
 
 	xassert(job_gres_ptr);
 	xassert(step_gres_ptr);
@@ -4587,7 +4934,7 @@ static uint32_t _step_test(void *step_gres_data, void *job_gres_data,
 		if (step_gres_ptr->gres_cnt_alloc >
 		    job_gres_ptr->gres_cnt_alloc)
 			return 0;
-		return NO_VAL;
+		return NO_VAL64;
 	}
 
 	if (node_offset >= job_gres_ptr->node_cnt) {
@@ -4598,7 +4945,7 @@ static uint32_t _step_test(void *step_gres_data, void *job_gres_data,
 	}
 
 	if (job_gres_ptr->gres_cnt_step_alloc) {
-		uint32_t job_gres_avail = job_gres_ptr->gres_cnt_alloc;
+		uint64_t job_gres_avail = job_gres_ptr->gres_cnt_alloc;
 		if (!ignore_alloc) {
 			job_gres_avail -= job_gres_ptr->
 					  gres_cnt_step_alloc[node_offset];
@@ -4625,7 +4972,7 @@ static uint32_t _step_test(void *step_gres_data, void *job_gres_data,
 		if (step_gres_ptr->gres_cnt_alloc > gres_cnt)
 			gres_cnt = 0;
 		else
-			gres_cnt = NO_VAL;
+			gres_cnt = NO_VAL64;
 	} else if (job_gres_ptr->gres_cnt_step_alloc &&
 		   job_gres_ptr->gres_cnt_step_alloc[node_offset]) {
 		gres_cnt = job_gres_ptr->gres_cnt_alloc -
@@ -4633,12 +4980,12 @@ static uint32_t _step_test(void *step_gres_data, void *job_gres_data,
 		if (step_gres_ptr->gres_cnt_alloc > gres_cnt)
 			gres_cnt = 0;
 		else
-			gres_cnt = NO_VAL;
+			gres_cnt = NO_VAL64;
 	} else {
 		/* Note: We already validated the gres count above */
 		debug("gres/%s: %s %u.%u gres_bit_alloc is NULL",
 		      gres_name, __func__, job_id, step_id);
-		gres_cnt = NO_VAL;
+		gres_cnt = NO_VAL64;
 	}
 
 	return gres_cnt;
@@ -4658,9 +5005,10 @@ extern int gres_plugin_step_state_validate(char *req_config,
 					   uint32_t step_id)
 {
 	char *tmp_str, *tok, *last = NULL;
-	int i, rc, rc2, rc3;
+	int i, rc, rc2;
 	gres_state_t *step_gres_ptr, *job_gres_ptr;
-	void *step_gres_data, *job_gres_data;
+	gres_step_state_t *step_gres_data;
+	gres_job_state_t *job_gres_data;
 	ListIterator job_gres_iter;
 	gres_step_state_t *step_gres_state;
 	gres_job_state_t *job_gres_state;
@@ -4719,10 +5067,9 @@ extern int gres_plugin_step_state_validate(char *req_config,
 				break;
 			}
 			job_gres_data = job_gres_ptr->gres_data;
-			rc3 = _step_test(step_gres_data, job_gres_data, NO_VAL,
-					 true, gres_context[i].gres_name,
-					 job_id, step_id);
-			if (rc3 == 0) {
+			if (!_step_test(step_gres_data, job_gres_data, NO_VAL,
+					true, gres_context[i].gres_name,
+					job_id, step_id)) {
 				info("Step %u.%u gres higher than in job "
 				     "allocation %s", job_id, step_id, tok);
 				rc = ESLURM_INVALID_GRES;
@@ -4981,7 +5328,24 @@ extern int gres_plugin_step_state_pack(List gres_list, Buf buffer,
 	gres_iter = list_iterator_create(gres_list);
 	while ((gres_ptr = (gres_state_t *) list_next(gres_iter))) {
 		gres_step_ptr = (gres_step_state_t *) gres_ptr->gres_data;
-		if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+
+		if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+			pack32(magic, buffer);
+			pack32(gres_ptr->plugin_id, buffer);
+			pack64(gres_step_ptr->gres_cnt_alloc, buffer);
+			pack32(gres_step_ptr->node_cnt, buffer);
+			pack_bit_str_hex(gres_step_ptr->node_in_use, buffer);
+			if (gres_step_ptr->gres_bit_alloc) {
+				pack8((uint8_t) 1, buffer);
+				for (i = 0; i < gres_step_ptr->node_cnt; i++)
+					pack_bit_str_hex(gres_step_ptr->
+							 gres_bit_alloc[i],
+							 buffer);
+			} else {
+				pack8((uint8_t) 0, buffer);
+			}
+			rec_cnt++;
+		} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
 			pack32(magic, buffer);
 			pack32(gres_ptr->plugin_id, buffer);
 			pack32(gres_step_ptr->gres_cnt_alloc, buffer);
@@ -5041,7 +5405,7 @@ extern int gres_plugin_step_state_unpack(List *gres_list, Buf buffer,
 					 uint16_t protocol_version)
 {
 	int i, rc;
-	uint32_t magic, plugin_id;
+	uint32_t magic, plugin_id, utmp32;
 	uint16_t rec_cnt;
 	uint8_t has_file;
 	gres_state_t *gres_ptr;
@@ -5062,14 +5426,34 @@ extern int gres_plugin_step_state_unpack(List *gres_list, Buf buffer,
 		if ((buffer == NULL) || (remaining_buf(buffer) == 0))
 			break;
 		rec_cnt--;
-
-		if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+		if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 			safe_unpack32(&magic, buffer);
 			if (magic != GRES_MAGIC)
 				goto unpack_error;
 			safe_unpack32(&plugin_id, buffer);
 			gres_step_ptr = xmalloc(sizeof(gres_step_state_t));
-			safe_unpack32(&gres_step_ptr->gres_cnt_alloc, buffer);
+			safe_unpack64(&gres_step_ptr->gres_cnt_alloc, buffer);
+			safe_unpack32(&gres_step_ptr->node_cnt, buffer);
+			unpack_bit_str_hex(&gres_step_ptr->node_in_use, buffer);
+			safe_unpack8(&has_file, buffer);
+			if (has_file) {
+				gres_step_ptr->gres_bit_alloc =
+					xmalloc(sizeof(bitstr_t *) *
+						gres_step_ptr->node_cnt);
+				for (i = 0; i < gres_step_ptr->node_cnt; i++) {
+					unpack_bit_str_hex(&gres_step_ptr->
+							   gres_bit_alloc[i],
+							   buffer);
+				}
+			}
+		} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+			safe_unpack32(&magic, buffer);
+			if (magic != GRES_MAGIC)
+				goto unpack_error;
+			safe_unpack32(&plugin_id, buffer);
+			gres_step_ptr = xmalloc(sizeof(gres_step_state_t));
+			safe_unpack32(&utmp32, buffer);
+			gres_step_ptr->gres_cnt_alloc = utmp32;
 			safe_unpack32(&gres_step_ptr->node_cnt, buffer);
 			unpack_bit_str_hex(&gres_step_ptr->node_in_use, buffer);
 			safe_unpack8(&has_file, buffer);
@@ -5089,7 +5473,8 @@ extern int gres_plugin_step_state_unpack(List *gres_list, Buf buffer,
 				goto unpack_error;
 			safe_unpack32(&plugin_id, buffer);
 			gres_step_ptr = xmalloc(sizeof(gres_step_state_t));
-			safe_unpack32(&gres_step_ptr->gres_cnt_alloc, buffer);
+			safe_unpack32(&utmp32, buffer);
+			gres_step_ptr->gres_cnt_alloc = utmp32;
 			safe_unpack32(&gres_step_ptr->node_cnt, buffer);
 			unpack_bit_str(&gres_step_ptr->node_in_use, buffer);
 			safe_unpack8(&has_file, buffer);
@@ -5142,23 +5527,136 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+/* Return the count of GRES of a specific name on this machine
+ * IN step_gres_list - generated by gres_plugin_step_allocate()
+ * IN gres_name - name of the GRES to match
+ * RET count of GRES of this specific name available to the job or NO_VAL64
+ */
+extern uint64_t gres_plugin_step_count(List step_gres_list, char *gres_name)
+{
+	uint64_t gres_cnt = NO_VAL64;
+	gres_state_t *gres_ptr = NULL;
+	gres_step_state_t *gres_step_ptr = NULL;
+	ListIterator gres_iter;
+	int i;
+
+	slurm_mutex_lock(&gres_context_lock);
+	for (i = 0; i < gres_context_cnt; i++) {
+		if (strcmp(gres_context[i].gres_name, gres_name))
+			continue;
+		gres_iter = list_iterator_create(step_gres_list);
+		while ((gres_ptr = (gres_state_t *)list_next(gres_iter))) {
+			if (gres_ptr->plugin_id != gres_context[i].plugin_id)
+				continue;
+			gres_step_ptr = (gres_step_state_t*)gres_ptr->gres_data;
+			if (gres_cnt == NO_VAL64)
+				gres_cnt = gres_step_ptr->gres_cnt_alloc;
+			else
+				gres_cnt += gres_step_ptr->gres_cnt_alloc;
+		}
+		list_iterator_destroy(gres_iter);
+		break;
+	}
+	slurm_mutex_unlock(&gres_context_lock);
+
+	return gres_cnt;
+}
+
+/* Given a GRES context index, return a bitmap representing those GRES
+ * which are available from the CPUs current allocated to this process */
+static bitstr_t * _get_usable_gres(int context_inx)
+{
+	cpu_set_t mask;
+	bitstr_t *usable_gres = NULL;
+	int i, i_last, rc;
+	ListIterator iter;
+	gres_slurmd_conf_t *gres_slurmd_conf;
+	int gres_inx = 0;
+
+
+	CPU_ZERO(&mask);
+#ifdef SCHED_GETAFFINITY_THREE_ARGS
+	rc = sched_getaffinity(0, sizeof(mask), &mask);
+#else
+	rc = sched_getaffinity(0, &mask);
+#endif
+	if (rc) {
+		error("sched_getaffinity error: %m");
+		return usable_gres;
+	}
+
+	usable_gres = bit_alloc(MAX_GRES_BITMAP);
+	iter = list_iterator_create(gres_conf_list);
+	while ((gres_slurmd_conf = (gres_slurmd_conf_t *) list_next(iter))) {
+		if (gres_slurmd_conf->plugin_id !=
+		    gres_context[context_inx].plugin_id)
+			continue;
+		if (gres_inx + gres_slurmd_conf->count >= MAX_GRES_BITMAP) {
+			error("GRES %s bitmap overflow",gres_slurmd_conf->name);
+			continue;
+		}
+		if (!gres_slurmd_conf->cpus_bitmap) {
+			bit_nset(usable_gres, gres_inx,
+				 gres_inx + gres_slurmd_conf->count - 1);
+		} else {
+			i_last = bit_fls(gres_slurmd_conf->cpus_bitmap);
+			for (i = 0; i <= i_last; i++) {
+				if (!bit_test(gres_slurmd_conf->cpus_bitmap,i))
+					continue;
+				if (!CPU_ISSET(i, &mask))
+					continue;
+				bit_nset(usable_gres, gres_inx,
+					 gres_inx + gres_slurmd_conf->count-1);
+				break;
+			}
+		}
+		gres_inx += gres_slurmd_conf->count;
+	}
+	list_iterator_destroy(iter);
+
+	return usable_gres;
+}
+
 /*
  * Set environment variables as required for all tasks of a job step
  * IN/OUT job_env_ptr - environment variable array
- * IN gres_list - generated by gres_plugin_step_allocate()
-  */
-extern void gres_plugin_step_set_env(char ***job_env_ptr, List step_gres_list)
+ * IN step_gres_list - generated by gres_plugin_step_allocate()
+ * IN accel_bind_type - GRES binding options
+ */
+extern void gres_plugin_step_set_env(char ***job_env_ptr, List step_gres_list,
+				     uint16_t accel_bind_type)
 {
 	int i;
 	ListIterator gres_iter;
 	gres_state_t *gres_ptr = NULL;
+	bool bind_gpu = accel_bind_type & ACCEL_BIND_CLOSEST_GPU;
+	bool bind_nic = accel_bind_type & ACCEL_BIND_CLOSEST_NIC;
+	bool bind_mic = accel_bind_type & ACCEL_BIND_CLOSEST_MIC;
+	bitstr_t *usable_gres = NULL;
 
 	(void) gres_plugin_init();
 
 	slurm_mutex_lock(&gres_context_lock);
-	for (i=0; i<gres_context_cnt; i++) {
+	for (i = 0; i < gres_context_cnt; i++) {
 		if (gres_context[i].ops.step_set_env == NULL)
 			continue;	/* No plugin to call */
+		if (bind_gpu || bind_mic || bind_nic) {
+			if (!strcmp(gres_context[i].gres_name, "gpu")) {
+				if (!bind_gpu)
+					continue;
+				usable_gres = _get_usable_gres(i);
+			} else if (!strcmp(gres_context[i].gres_name, "mic")) {
+				if (!bind_mic)
+					continue;
+				usable_gres = _get_usable_gres(i);
+			} else if (!strcmp(gres_context[i].gres_name, "nic")) {
+				if (!bind_nic)
+					continue;
+				usable_gres = _get_usable_gres(i);
+			} else {
+				continue;
+			}
+		}
 		if (step_gres_list) {
 			gres_iter = list_iterator_create(step_gres_list);
 			while ((gres_ptr = (gres_state_t *)
@@ -5166,18 +5664,33 @@ extern void gres_plugin_step_set_env(char ***job_env_ptr, List step_gres_list)
 				if (gres_ptr->plugin_id !=
 				    gres_context[i].plugin_id)
 					continue;
-				(*(gres_context[i].ops.step_set_env))
-					(job_env_ptr, gres_ptr->gres_data);
+				if (accel_bind_type) {
+					(*(gres_context[i].ops.step_reset_env))
+						(job_env_ptr,
+						 gres_ptr->gres_data,
+						 usable_gres);
+				} else {
+					(*(gres_context[i].ops.step_set_env))
+						(job_env_ptr,
+						 gres_ptr->gres_data);
+				}
 				break;
 			}
 			list_iterator_destroy(gres_iter);
 		}
 		if (gres_ptr == NULL) { /* No data found */
-			(*(gres_context[i].ops.step_set_env))
+			if (accel_bind_type) {
+				(*(gres_context[i].ops.step_reset_env))
+					(job_env_ptr, NULL, NULL);
+			} else {
+				(*(gres_context[i].ops.step_set_env))
 					(job_env_ptr, NULL);
+			}
 		}
+		FREE_NULL_BITMAP(usable_gres);
 	}
 	slurm_mutex_unlock(&gres_context_lock);
+	FREE_NULL_BITMAP(usable_gres);
 }
 
 static void _step_state_log(void *gres_data, uint32_t job_id, uint32_t step_id,
@@ -5189,15 +5702,16 @@ static void _step_state_log(void *gres_data, uint32_t job_id, uint32_t step_id,
 
 	xassert(gres_ptr);
 	info("gres/%s state for step %u.%u", gres_name, job_id, step_id);
-	info("  gres_cnt:%u node_cnt:%u type:%s", gres_ptr->gres_cnt_alloc,
-	     gres_ptr->node_cnt, gres_ptr->type_model);
+	info("  gres_cnt:%"PRIu64" node_cnt:%u type:%s",
+	     gres_ptr->gres_cnt_alloc, gres_ptr->node_cnt,
+	     gres_ptr->type_model);
 
 	if (gres_ptr->node_in_use == NULL)
 		info("  node_in_use:NULL");
 	else if (gres_ptr->gres_bit_alloc == NULL)
 		info("  gres_bit_alloc:NULL");
 	else {
-		for (i=0; i<gres_ptr->node_cnt; i++) {
+		for (i = 0; i < gres_ptr->node_cnt; i++) {
 			if (!bit_test(gres_ptr->node_in_use, i))
 				continue;
 			if (gres_ptr->gres_bit_alloc[i]) {
@@ -5250,23 +5764,23 @@ extern void gres_plugin_step_state_log(List gres_list, uint32_t job_id,
  * IN node_offset - index into the job's node allocation
  * IN ignore_alloc - if set ignore resources already allocated to running steps
  * IN job_id, step_id - ID of the step being allocated.
- * RET Count of available CPUs on this node, NO_VAL if no limit
+ * RET Count of available CPUs on this node, NO_VAL64 if no limit
  */
-extern uint32_t gres_plugin_step_test(List step_gres_list, List job_gres_list,
+extern uint64_t gres_plugin_step_test(List step_gres_list, List job_gres_list,
 				      int node_offset, bool ignore_alloc,
 				      uint32_t job_id, uint32_t step_id)
 {
 	int i;
-	uint32_t cpu_cnt, tmp_cnt;
+	uint64_t cpu_cnt, tmp_cnt;
 	ListIterator  job_gres_iter, step_gres_iter;
 	gres_state_t *job_gres_ptr, *step_gres_ptr;
 
 	if (step_gres_list == NULL)
-		return NO_VAL;
+		return NO_VAL64;
 	if (job_gres_list == NULL)
 		return 0;
 
-	cpu_cnt = NO_VAL;
+	cpu_cnt = NO_VAL64;
 	(void) gres_plugin_init();
 
 	slurm_mutex_lock(&gres_context_lock);
@@ -5294,12 +5808,8 @@ extern uint32_t gres_plugin_step_test(List step_gres_list, List job_gres_list,
 					     node_offset, ignore_alloc,
 					     gres_context[i].gres_name,
 					     job_id, step_id);
-			if (tmp_cnt != NO_VAL) {
-				if (cpu_cnt == NO_VAL)
-					cpu_cnt = tmp_cnt;
-				else
-					cpu_cnt = MIN(tmp_cnt, cpu_cnt);
-			}
+			if (tmp_cnt != NO_VAL64)
+				cpu_cnt = MIN(tmp_cnt, cpu_cnt);
 			break;
 		}
 		if (cpu_cnt == 0)
@@ -5317,7 +5827,7 @@ static int _step_alloc(void *step_gres_data, void *job_gres_data,
 {
 	gres_job_state_t  *job_gres_ptr  = (gres_job_state_t *)  job_gres_data;
 	gres_step_state_t *step_gres_ptr = (gres_step_state_t *) step_gres_data;
-	uint32_t gres_needed;
+	uint64_t gres_needed;
 	bitstr_t *gres_bit_alloc;
 	int i, len;
 
@@ -5337,7 +5847,7 @@ static int _step_alloc(void *step_gres_data, void *job_gres_data,
 
 	if (step_gres_ptr->gres_cnt_alloc > job_gres_ptr->gres_cnt_alloc) {
 		error("gres/%s: %s for %u.%u, step's > job's "
-		      "for node %d (%d > %u)",
+		      "for node %d (%"PRIu64" > %"PRIu64")",
 		      gres_name, __func__, job_id, step_id, node_offset,
 		      step_gres_ptr->gres_cnt_alloc,
 		      job_gres_ptr->gres_cnt_alloc);
@@ -5346,14 +5856,15 @@ static int _step_alloc(void *step_gres_data, void *job_gres_data,
 
 	if (job_gres_ptr->gres_cnt_step_alloc == NULL) {
 		job_gres_ptr->gres_cnt_step_alloc =
-			xmalloc(sizeof(uint32_t) * job_gres_ptr->node_cnt);
+			xmalloc(sizeof(uint64_t) * job_gres_ptr->node_cnt);
 	}
 
 	if (step_gres_ptr->gres_cnt_alloc >
 	    (job_gres_ptr->gres_cnt_alloc -
 	     job_gres_ptr->gres_cnt_step_alloc[node_offset])) {
 		error("gres/%s: %s for %u.%u, step's > job's "
-		      "remaining for node %d (%d > (%u - %u))",
+		      "remaining for node %d (%"PRIu64" > "
+		      "(%"PRIu64" - %"PRIu64"))",
 		      gres_name, __func__, job_id, step_id, node_offset,
 		      step_gres_ptr->gres_cnt_alloc,
 		      job_gres_ptr->gres_cnt_alloc,
@@ -5637,18 +6148,19 @@ extern int gres_plugin_step_dealloc(List step_gres_list, List job_gres_list,
  * IN gres_name - name of a GRES type
  * RET count of this GRES allocated to this job
  */
-extern uint32_t gres_get_value_by_type(List job_gres_list, char* gres_name)
+extern uint64_t gres_get_value_by_type(List job_gres_list, char* gres_name)
 {
 	int i;
-	uint32_t gres_cnt = 0, plugin_id;
+	uint32_t plugin_id;
+	uint64_t gres_cnt = 0;
 	ListIterator job_gres_iter;
 	gres_state_t *job_gres_ptr;
 	gres_job_state_t *job_gres_data;
 
 	if (job_gres_list == NULL)
-		return NO_VAL;
+		return NO_VAL64;
 
-	gres_cnt = NO_VAL;
+	gres_cnt = NO_VAL64;
 	(void) gres_plugin_init();
 	plugin_id = _build_id(gres_name);
 
@@ -6017,3 +6529,183 @@ extern int gres_get_step_info(List step_gres_list, char *gres_name,
 
 	return rc;
 }
+
+extern gres_step_state_t *gres_get_step_state(List gres_list, char *name)
+{
+	gres_state_t *gres_state_ptr;
+
+	if (!gres_list || !name || !list_count(gres_list))
+		return NULL;
+
+	slurm_mutex_lock(&gres_context_lock);
+	gres_state_ptr = list_find_first(gres_list, _gres_step_find_name, name);
+	slurm_mutex_unlock(&gres_context_lock);
+
+	if (!gres_state_ptr)
+		return NULL;
+
+	return (gres_step_state_t *)gres_state_ptr->gres_data;
+}
+
+extern gres_job_state_t *gres_get_job_state(List gres_list, char *name)
+{
+	gres_state_t *gres_state_ptr;
+
+	if (!gres_list || !name || !list_count(gres_list))
+		return NULL;
+
+	slurm_mutex_lock(&gres_context_lock);
+	gres_state_ptr = list_find_first(gres_list, _gres_job_find_name, name);
+	slurm_mutex_unlock(&gres_context_lock);
+
+	if (!gres_state_ptr)
+		return NULL;
+
+	return (gres_job_state_t *)gres_state_ptr->gres_data;
+}
+
+extern char *gres_2_tres_str(List gres_list, bool is_job, bool locked)
+{
+	ListIterator itr;
+	slurmdb_tres_rec_t *tres_rec;
+	gres_state_t *gres_state_ptr;
+	char *name;
+	uint64_t count;
+	char *tres_str = NULL;
+	static bool first_run = 1;
+	static slurmdb_tres_rec_t tres_req;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+
+	/* we only need to init this once */
+	if (first_run) {
+		first_run = 0;
+		memset(&tres_req, 0, sizeof(slurmdb_tres_rec_t));
+		tres_req.type = "gres";
+	}
+
+	if (!gres_list)
+		return NULL;
+
+	/* assoc_mgr locks must be acquired before gres_context_lock!!! */
+	if (!locked)
+		assoc_mgr_lock(&locks);
+
+	slurm_mutex_lock(&gres_context_lock);
+	itr = list_iterator_create(gres_list);
+	while ((gres_state_ptr = list_next(itr))) {
+		if (is_job) {
+			gres_job_state_t *gres_data_ptr = (gres_job_state_t *)
+				gres_state_ptr->gres_data;
+			name = gres_data_ptr->type_model;
+			count = gres_data_ptr->gres_cnt_alloc
+				* (uint64_t)gres_data_ptr->node_cnt;
+		} else {
+			gres_step_state_t *gres_data_ptr = (gres_step_state_t *)
+				gres_state_ptr->gres_data;
+			name = gres_data_ptr->type_model;
+			count = gres_data_ptr->gres_cnt_alloc
+				* (uint64_t)gres_data_ptr->node_cnt;
+		}
+
+		if (!name) {
+			int i;
+			for (i=0; i < gres_context_cnt; i++) {
+				if (gres_context[i].plugin_id ==
+				    gres_state_ptr->plugin_id) {
+					name = gres_context[i].gres_name;
+					break;
+				}
+			}
+
+			if (!name) {
+				debug("gres_add_tres: couldn't find name");
+				continue;
+			}
+		}
+
+		if (!(tres_rec = assoc_mgr_find_tres_rec(&tres_req)))
+			continue; /* not tracked */
+
+		if (slurmdb_find_tres_count_in_string(
+			    tres_str, tres_rec->id))
+			continue; /* already handled */
+
+		/* New gres */
+		xstrfmtcat(tres_str, "%s%u=%"PRIu64,
+			   tres_str ? "," : "",
+			   tres_rec->id, count);
+	}
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&gres_context_lock);
+
+	if (!locked)
+		assoc_mgr_unlock(&locks);
+
+	return tres_str;
+}
+
+extern void gres_set_job_tres_cnt(List gres_list,
+				  uint32_t node_cnt,
+				  uint64_t *tres_cnt,
+				  bool locked)
+{
+	ListIterator itr;
+	gres_state_t *gres_state_ptr;
+	static bool first_run = 1;
+	static slurmdb_tres_rec_t tres_rec;
+	uint64_t count;
+	int i, tres_pos;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+
+	/* we only need to init this once */
+	if (first_run) {
+		first_run = 0;
+		memset(&tres_rec, 0, sizeof(slurmdb_tres_rec_t));
+		tres_rec.type = "gres";
+	}
+
+	if (!gres_list || !tres_cnt || !node_cnt || (node_cnt == NO_VAL))
+		return;
+
+	/* assoc_mgr locks must be acquired before gres_context_lock!!! */
+	if (!locked)
+		assoc_mgr_lock(&locks);
+
+	slurm_mutex_lock(&gres_context_lock);
+	itr = list_iterator_create(gres_list);
+	while ((gres_state_ptr = list_next(itr))) {
+		gres_job_state_t *gres_data_ptr = (gres_job_state_t *)
+			gres_state_ptr->gres_data;
+		tres_rec.name = gres_data_ptr->type_model;
+		count = gres_data_ptr->gres_cnt_alloc * (uint64_t)node_cnt;
+
+		if (!tres_rec.name) {
+			for (i=0; i < gres_context_cnt; i++) {
+				if (gres_context[i].plugin_id ==
+				    gres_state_ptr->plugin_id) {
+					tres_rec.name =
+						gres_context[i].gres_name;
+					break;
+				}
+			}
+
+			if (!tres_rec.name) {
+				debug("gres_add_tres: couldn't find name");
+				continue;
+			}
+		}
+
+		if ((tres_pos = assoc_mgr_find_tres_pos(
+			     &tres_rec, false)) != -1)
+			tres_cnt[tres_pos] = count;
+	}
+	list_iterator_destroy(itr);
+	slurm_mutex_unlock(&gres_context_lock);
+
+	if (!locked)
+		assoc_mgr_unlock(&locks);
+
+	return;
+}
diff --git a/src/common/gres.h b/src/common/gres.h
index e26ed7ce7..d87ec31c0 100644
--- a/src/common/gres.h
+++ b/src/common/gres.h
@@ -40,6 +40,7 @@
 #define _GRES_H
 
 #include "slurm/slurm.h"
+#include "slurm/slurmdb.h"
 #include "src/common/bitstring.h"
 #include "src/common/pack.h"
 
@@ -56,11 +57,12 @@ enum {
 /* Gres state information gathered by slurmd daemon */
 typedef struct gres_slurmd_conf {
 	/* Count of gres available in this configuration record */
-	uint32_t count;
+	uint64_t count;
 
 	/* Specific CPUs associated with this configuration record */
 	uint16_t cpu_cnt;
 	char *cpus;
+	bitstr_t *cpus_bitmap;	/* Using LOCAL mapping */
 
 	/* Device file associated with this configuration record */
 	char *file;
@@ -79,37 +81,37 @@ typedef struct gres_slurmd_conf {
 /* Current gres state information managed by slurmctld daemon */
 typedef struct gres_node_state {
 	/* Actual hardware found */
-	uint32_t gres_cnt_found;
+	uint64_t gres_cnt_found;
 
 	/* Configured resources via Gres parameter */
-	uint32_t gres_cnt_config;
+	uint64_t gres_cnt_config;
 
 	/* Non-consumable: Do not track resources allocated to jobs */
 	bool no_consume;
 
 	/* Total resources available for allocation to jobs.
 	 * gres_cnt_found or gres_cnt_config, depending upon FastSchedule */
-	uint32_t gres_cnt_avail;
+	uint64_t gres_cnt_avail;
 
 	/* List of GRES in current use. Set NULL if needs to be rebuilt. */
 	char *gres_used;
 
 	/* Resources currently allocated to jobs */
-	uint32_t  gres_cnt_alloc;
+	uint64_t  gres_cnt_alloc;
 	bitstr_t *gres_bit_alloc;	/* If gres.conf contains File field */
 
 	/* Topology specific information (if gres.conf contains CPUs option) */
 	uint16_t topo_cnt;		/* Size of topo_ arrays */
 	bitstr_t **topo_cpus_bitmap;
 	bitstr_t **topo_gres_bitmap;
-	uint32_t *topo_gres_cnt_alloc;
-	uint32_t *topo_gres_cnt_avail;
+	uint64_t *topo_gres_cnt_alloc;
+	uint64_t *topo_gres_cnt_avail;
 	char **topo_model;		/* Type of this gres (e.g. model name) */
 
 	/* Gres type specific information (if gres.conf contains type option) */
 	uint16_t type_cnt;		/* Size of type_ arrays */
-	uint32_t *type_cnt_alloc;
-	uint32_t *type_cnt_avail;
+	uint64_t *type_cnt_alloc;
+	uint64_t *type_cnt_avail;
 	char **type_model;		/* Type of this gres (e.g. model name) */
 } gres_node_state_t;
 
@@ -118,7 +120,7 @@ typedef struct gres_job_state {
 	char *type_model;		/* Type of this gres (e.g. model name) */
 
 	/* Count of resources needed per node */
-	uint32_t gres_cnt_alloc;
+	uint64_t gres_cnt_alloc;
 
 	/* Resources currently allocated to job on each node */
 	uint32_t node_cnt;		/* 0 if no_consume */
@@ -128,7 +130,7 @@ typedef struct gres_job_state {
 	 * This will be a subset of resources allocated to the job.
 	 * gres_bit_step_alloc is a subset of gres_bit_alloc */
 	bitstr_t **gres_bit_step_alloc;
-	uint32_t  *gres_cnt_step_alloc;
+	uint64_t  *gres_cnt_step_alloc;
 } gres_job_state_t;
 
 /* Gres job step state as used by slurmctld daemon */
@@ -136,13 +138,13 @@ typedef struct gres_step_state {
 	char *type_model;		/* Type of this gres (e.g. model name) */
 
 	/* Count of resources needed per node */
-	uint32_t gres_cnt_alloc;
+	uint64_t gres_cnt_alloc;
 
 	/* Resources currently allocated to the job step on each node
 	 *
 	 * NOTE: node_cnt and the size of node_in_use and gres_bit_alloc are
 	 * identical to that of the job for simplicity. Bits in node_in_use
-	 * are set for those node of the job that are used by this step and 
+	 * are set for those node of the job that are used by this step and
 	 * gres_bit_alloc are also set if the job's gres_bit_alloc is set */
 	uint32_t node_cnt;
 	bitstr_t *node_in_use;
@@ -190,9 +192,11 @@ extern int gres_plugin_help_msg(char *msg, int msg_size);
 /*
  * Load this node's configuration (how many resources it has, topology, etc.)
  * IN cpu_cnt - Number of CPUs on configured on this node
- * IN array_len - count of elements in dev_path and gres_name
+ * IN node_name - Name of this node
+ * IN xcpuinfo_abs_to_mac - Pointer to xcpuinfo_abs_to_mac() funct, if available
  */
-extern int gres_plugin_node_config_load(uint32_t cpu_cnt, char *node_name);
+extern int gres_plugin_node_config_load(uint32_t cpu_cnt, char *node_name,
+					void *xcpuinfo_abs_to_mac);
 
 /*
  * Pack this node's gres configuration into a buffer
@@ -348,6 +352,18 @@ extern char *gres_get_node_drain(List gres_list);
  */
 extern char *gres_get_node_used(List gres_list);
 
+/*
+ * Return the total system count of the given GRES type
+ */
+extern uint64_t gres_get_system_cnt(char *name);
+
+/*
+ * Get the count of a node's GRES
+ * IN gres_list - List of Gres records for this node to track usage
+ * IN name - name of gres
+ */
+extern uint64_t gres_plugin_node_config_cnt(List gres_list, char *name);
+
 /*
  * Fill in an array of GRES type ids contained within the given node gres_list
  *		and an array of corresponding counts of those GRES types.
@@ -478,7 +494,7 @@ extern uint32_t gres_plugin_job_test(List job_gres_list, List node_gres_list,
  *                  available)
  * RET SLURM_SUCCESS or error code
  */
-extern int gres_plugin_job_alloc(List job_gres_list, List node_gres_list, 
+extern int gres_plugin_job_alloc(List job_gres_list, List node_gres_list,
 				 int node_cnt, int node_offset,
 				 uint32_t cpu_cnt, uint32_t job_id,
 				 char *node_name, bitstr_t *core_bitmap);
@@ -496,7 +512,7 @@ extern void gres_plugin_job_clear(List job_gres_list);
  * IN node_name   - name of the node (for logging)
  * RET SLURM_SUCCESS or error code
  */
-extern int gres_plugin_job_dealloc(List job_gres_list, List node_gres_list, 
+extern int gres_plugin_job_dealloc(List job_gres_list, List node_gres_list,
 				   int node_offset, uint32_t job_id,
 				   char *node_name);
 
@@ -531,7 +547,7 @@ extern void gres_plugin_job_set_env(char ***job_env_ptr, List job_gres_list);
  *	value from.
  * RET The value associated with the gres type or NO_VAL if not found.
  */
-extern uint32_t gres_plugin_get_job_value_by_type(List job_gres_list,
+extern uint64_t gres_plugin_get_job_value_by_type(List job_gres_list,
 						  char *gres_name_type);
 
 /*
@@ -600,12 +616,21 @@ extern int gres_plugin_step_state_unpack(List *gres_list, Buf buffer,
 					 uint32_t job_id, uint32_t step_id,
 					 uint16_t protocol_version);
 
+/* Return the count of GRES of a specific name on this machine
+ * IN step_gres_list - generated by gres_plugin_step_allocate()
+ * IN gres_name - name of the GRES to match
+ * RET count of GRES of this specific name available to the job or NO_VAL64
+ */
+extern uint64_t gres_plugin_step_count(List step_gres_list, char *gres_name);
+
 /*
  * Set environment variables as required for all tasks of a job step
  * IN/OUT job_env_ptr - environment variable array
- * IN gres_list - generated by gres_plugin_step_allocate()
-  */
-extern void gres_plugin_step_set_env(char ***job_env_ptr, List step_gres_list);
+ * IN step_gres_list - generated by gres_plugin_step_allocate()
+ * IN accel_bind_type - GRES binding options
+ */
+extern void gres_plugin_step_set_env(char ***job_env_ptr, List step_gres_list,
+				     uint16_t accel_bind_type);
 
 /*
  * Log a step's current gres state
@@ -625,7 +650,7 @@ extern void gres_plugin_step_state_log(List gres_list, uint32_t job_id,
  * IN job_id, step_id - ID of the step being allocated.
  * RET Count of available CPUs on this node, NO_VAL if no limit
  */
-extern uint32_t gres_plugin_step_test(List step_gres_list, List job_gres_list,
+extern uint64_t gres_plugin_step_test(List step_gres_list, List job_gres_list,
 				      int node_offset, bool ignore_alloc,
 				      uint32_t job_id, uint32_t step_id);
 
@@ -670,7 +695,7 @@ extern int gres_gresid_to_gresname(uint32_t gres_id, char* gres_name,
  * IN gres_name - name of a GRES type
  * RET count of this GRES allocated to this job
  */
-extern uint32_t gres_get_value_by_type(List job_gres_list, char* gres_name);
+extern uint64_t gres_get_value_by_type(List job_gres_list, char* gres_name);
 
 enum gres_job_data_type {
 	GRES_JOB_DATA_COUNT,	/* data-> uint32_t  */
@@ -713,4 +738,28 @@ extern int gres_get_step_info(List step_gres_list, char *gres_name,
 			      uint32_t node_inx,
 			      enum gres_step_data_type data_type, void *data);
 
+extern gres_job_state_t *gres_get_job_state(List gres_list, char *name);
+extern gres_step_state_t *gres_get_step_state(List gres_list, char *name);
+
+/* Translate a gres_list into a tres_str
+ * IN gres_list - filled in with gres_job_state_t or gres_step_state_t's
+ * IN is_job - if is job function expects gres_job_state_t's else
+ *             gres_step_state_t's
+ * IN locked - whether the assoc_mgr tres read lock is already held
+ * RET char * in a simple TRES format
+ */
+extern char *gres_2_tres_str(List gres_list, bool is_job, bool locked);
+
+/* Fill in the tres_cnt based off the gres_list and node_cnt
+ * IN gres_list - filled in with gres_job_state_t's
+ * IN node_cnt - number of nodes in the job
+ * OUT tres_cnt - gres spots filled in with total number of TRES
+ *                requested for job that are requested in gres_list
+ * IN locked - whether the assoc_mgr tres read lock is already held
+ */
+extern void gres_set_job_tres_cnt(List gres_list,
+				  uint32_t node_cnt,
+				  uint64_t *tres_cnt,
+				  bool locked);
+
 #endif /* !_GRES_H */
diff --git a/src/common/hostlist.h b/src/common/hostlist.h
index 6f7be2593..cea134bf2 100644
--- a/src/common/hostlist.h
+++ b/src/common/hostlist.h
@@ -50,7 +50,7 @@
  * expressions that can consume all of the memory on a system and crash the
  * daemons (e.g. "a[0-999999999].b[0-9]", which generates 1 billion distinct
  * prefix records in the hostlist) */
-#define MAX_PREFIX_CNT 1024
+#define MAX_PREFIX_CNT 64*1024
 
 #if (SYSTEM_DIMENSIONS > 1)
 #define HOSTLIST_BASE 36
diff --git a/src/common/job_options.c b/src/common/job_options.c
index 8cde7b588..76d37987f 100644
--- a/src/common/job_options.c
+++ b/src/common/job_options.c
@@ -132,8 +132,7 @@ void job_options_destroy (job_options_t opts)
 	xassert (opts != NULL);
 	xassert (opts->magic == JOB_OPTIONS_MAGIC);
 
-	if (opts->options)
-		list_destroy (opts->options);
+	FREE_NULL_LIST (opts->options);
 
 	xassert (opts->magic = ~JOB_OPTIONS_MAGIC);
 	xfree (opts);
diff --git a/src/common/job_resources.c b/src/common/job_resources.c
index 03d0def05..b8b1c4aab 100644
--- a/src/common/job_resources.c
+++ b/src/common/job_resources.c
@@ -43,6 +43,7 @@
 
 #include "src/common/hostlist.h"
 #include "src/common/job_resources.h"
+#include "src/common/layouts_mgr.h"
 #include "src/common/log.h"
 #include "src/common/pack.h"
 #include "src/common/xassert.h"
@@ -624,84 +625,6 @@ extern void pack_job_resources(job_resources_t *job_resrcs_ptr, Buf buffer,
 		packstr(job_resrcs_ptr->nodes, buffer);
 		pack8(job_resrcs_ptr->whole_node, buffer);
 
-		if (job_resrcs_ptr->cpu_array_reps)
-			pack32_array(job_resrcs_ptr->cpu_array_reps,
-				     job_resrcs_ptr->cpu_array_cnt, buffer);
-		else
-			pack32_array(job_resrcs_ptr->cpu_array_reps, 0, buffer);
-
-		if (job_resrcs_ptr->cpu_array_value)
-			pack16_array(job_resrcs_ptr->cpu_array_value,
-				     job_resrcs_ptr->cpu_array_cnt, buffer);
-		else
-			pack16_array(job_resrcs_ptr->cpu_array_value,
-				     0, buffer);
-
-		if (job_resrcs_ptr->cpus)
-			pack16_array(job_resrcs_ptr->cpus,
-				     job_resrcs_ptr->nhosts, buffer);
-		else
-			pack16_array(job_resrcs_ptr->cpus, 0, buffer);
-
-		if (job_resrcs_ptr->cpus_used)
-			pack16_array(job_resrcs_ptr->cpus_used,
-				     job_resrcs_ptr->nhosts, buffer);
-		else
-			pack16_array(job_resrcs_ptr->cpus_used, 0, buffer);
-
-		if (job_resrcs_ptr->memory_allocated)
-			pack32_array(job_resrcs_ptr->memory_allocated,
-				     job_resrcs_ptr->nhosts, buffer);
-		else
-			pack32_array(job_resrcs_ptr->memory_allocated,
-				     0, buffer);
-
-		if (job_resrcs_ptr->memory_used)
-			pack32_array(job_resrcs_ptr->memory_used,
-				     job_resrcs_ptr->nhosts, buffer);
-		else
-			pack32_array(job_resrcs_ptr->memory_used, 0, buffer);
-		if (!(cluster_flags & CLUSTER_FLAG_BG)) {
-			int i;
-			uint32_t core_cnt = 0, sock_recs = 0;
-			xassert(job_resrcs_ptr->cores_per_socket);
-			xassert(job_resrcs_ptr->sock_core_rep_count);
-			xassert(job_resrcs_ptr->sockets_per_node);
-
-			for (i=0; i<job_resrcs_ptr->nhosts; i++) {
-				core_cnt += job_resrcs_ptr->sockets_per_node[i]
-					* job_resrcs_ptr->cores_per_socket[i] *
-					job_resrcs_ptr->sock_core_rep_count[i];
-				sock_recs += job_resrcs_ptr->
-					     sock_core_rep_count[i];
-				if (sock_recs >= job_resrcs_ptr->nhosts)
-					break;
-			}
-			i++;
-			pack16_array(job_resrcs_ptr->sockets_per_node,
-				     (uint32_t) i, buffer);
-			pack16_array(job_resrcs_ptr->cores_per_socket,
-				     (uint32_t) i, buffer);
-			pack32_array(job_resrcs_ptr->sock_core_rep_count,
-				     (uint32_t) i, buffer);
-
-			xassert(job_resrcs_ptr->core_bitmap);
-			xassert(job_resrcs_ptr->core_bitmap_used);
-			pack_bit_str(job_resrcs_ptr->core_bitmap, buffer);
-			pack_bit_str(job_resrcs_ptr->core_bitmap_used, buffer);
-		}
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		if (job_resrcs_ptr == NULL) {
-			uint32_t empty = NO_VAL;
-			pack32(empty, buffer);
-			return;
-		}
-
-		pack32(job_resrcs_ptr->nhosts, buffer);
-		pack32(job_resrcs_ptr->ncpus, buffer);
-		pack32(job_resrcs_ptr->node_req, buffer);
-		packstr(job_resrcs_ptr->nodes, buffer);
-
 		if (job_resrcs_ptr->cpu_array_reps)
 			pack32_array(job_resrcs_ptr->cpu_array_reps,
 				     job_resrcs_ptr->cpu_array_cnt, buffer);
@@ -866,67 +789,6 @@ extern int unpack_job_resources(job_resources_t **job_resrcs_pptr,
 			xfree(job_resrcs->cpu_array_reps);
 		job_resrcs->cpu_array_cnt = tmp32;
 
-		safe_unpack16_array(&job_resrcs->cpu_array_value,
-				    &tmp32, buffer);
-		if (tmp32 == 0)
-			xfree(job_resrcs->cpu_array_value);
-
-		if (tmp32 != job_resrcs->cpu_array_cnt)
-			goto unpack_error;
-
-		safe_unpack16_array(&job_resrcs->cpus, &tmp32, buffer);
-		if (tmp32 == 0)
-			xfree(job_resrcs->cpus);
-		if (tmp32 != job_resrcs->nhosts)
-			goto unpack_error;
-		safe_unpack16_array(&job_resrcs->cpus_used, &tmp32, buffer);
-		if (tmp32 == 0)
-			xfree(job_resrcs->cpus_used);
-
-		safe_unpack32_array(&job_resrcs->memory_allocated,
-				    &tmp32, buffer);
-		if (tmp32 == 0)
-			xfree(job_resrcs->memory_allocated);
-		safe_unpack32_array(&job_resrcs->memory_used, &tmp32, buffer);
-		if (tmp32 == 0)
-			xfree(job_resrcs->memory_used);
-
-		if (!(cluster_flags & CLUSTER_FLAG_BG)) {
-			safe_unpack16_array(&job_resrcs->sockets_per_node,
-					    &tmp32, buffer);
-			if (tmp32 == 0)
-				xfree(job_resrcs->sockets_per_node);
-			safe_unpack16_array(&job_resrcs->cores_per_socket,
-					    &tmp32, buffer);
-			if (tmp32 == 0)
-				xfree(job_resrcs->cores_per_socket);
-			safe_unpack32_array(&job_resrcs->sock_core_rep_count,
-					    &tmp32, buffer);
-			if (tmp32 == 0)
-				xfree(job_resrcs->sock_core_rep_count);
-
-			unpack_bit_str(&job_resrcs->core_bitmap, buffer);
-			unpack_bit_str(&job_resrcs->core_bitmap_used, buffer);
-		}
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		safe_unpack32(&empty, buffer);
-		if (empty == NO_VAL) {
-			*job_resrcs_pptr = NULL;
-			return SLURM_SUCCESS;
-		}
-
-		job_resrcs = xmalloc(sizeof(struct job_resources));
-		job_resrcs->nhosts = empty;
-		safe_unpack32(&job_resrcs->ncpus, buffer);
-		safe_unpack32(&job_resrcs->node_req, buffer);
-		safe_unpackstr_xmalloc(&job_resrcs->nodes, &tmp32, buffer);
-
-		safe_unpack32_array(&job_resrcs->cpu_array_reps,
-				    &tmp32, buffer);
-		if (tmp32 == 0)
-			xfree(job_resrcs->cpu_array_reps);
-		job_resrcs->cpu_array_cnt = tmp32;
-
 		safe_unpack16_array(&job_resrcs->cpu_array_value,
 				    &tmp32, buffer);
 		if (tmp32 == 0)
@@ -1377,7 +1239,7 @@ extern int job_fits_into_cores(job_resources_t *job_resrcs_ptr,
 			for (i = 0; i < bits_per_node[full_node_inx]; i++) {
 				if (!bit_test(full_bitmap, full_bit_inx + i))
 					continue;
-				if (job_resrcs_ptr->whole_node ||
+				if ((job_resrcs_ptr->whole_node == 1) ||
 				    bit_test(job_resrcs_ptr->core_bitmap,
 					     job_bit_inx + i)) {
 					return 0;
@@ -1421,7 +1283,7 @@ extern void add_job_to_cores(job_resources_t *job_resrcs_ptr,
 		if (bit_test(job_resrcs_ptr->node_bitmap, full_node_inx)) {
 			full_bit_inx = cr_node_cores_offset[full_node_inx];
 			for (i = 0; i < bits_per_node[full_node_inx]; i++) {
-				if (!job_resrcs_ptr->whole_node &&
+				if ((job_resrcs_ptr->whole_node != 1) &&
 				    !bit_test(job_resrcs_ptr->core_bitmap,
 					      job_bit_inx + i))
 					continue;
@@ -1464,7 +1326,7 @@ extern void remove_job_from_cores(job_resources_t *job_resrcs_ptr,
 		if (bit_test(job_resrcs_ptr->node_bitmap, full_node_inx)) {
 			full_bit_inx = cr_node_cores_offset[full_node_inx];
 			for (i = 0; i < bits_per_node[full_node_inx]; i++) {
-				if (!job_resrcs_ptr->whole_node &&
+				if ((job_resrcs_ptr->whole_node != 1) &&
 				    !bit_test(job_resrcs_ptr->core_bitmap,
 					      job_bit_inx + i))
 					continue;
@@ -1520,3 +1382,136 @@ extern int job_resources_node_inx_to_cpu_inx(job_resources_t *job_resrcs_ptr,
 
 	return node_offset;
 }
+
+extern int adapt_layouts(job_resources_t *job_resrcs_ptr, uint32_t cpu_freq_max,
+			 uint32_t node_id, char* node_name, bool new_value)
+{
+	int i, k = 0, bit_inx = 0, core_cnt = 0;
+	uint32_t max_watts, zero = 0, der;
+	uint32_t core_num,val = 0;
+	uint16_t num_freq;
+	char temp[128], ename[128], keyname[128];
+	uint32_t data[2], vals[2];
+	int num_counts = 0, activate = 0;
+	int *desalloc_cores;
+
+	xassert(job_resrcs_ptr);
+
+	for (i = 0; i < job_resrcs_ptr->nhosts; i++) {
+		if (job_resrcs_ptr->sock_core_rep_count[i] <= node_id) {
+			bit_inx += job_resrcs_ptr->sockets_per_node[i] *
+				   job_resrcs_ptr->cores_per_socket[i] *
+				   job_resrcs_ptr->sock_core_rep_count[i];
+			node_id -= job_resrcs_ptr->sock_core_rep_count[i];
+		} else {
+			bit_inx += job_resrcs_ptr->sockets_per_node[i] *
+				   job_resrcs_ptr->cores_per_socket[i] *
+				   node_id;
+			core_cnt = job_resrcs_ptr->sockets_per_node[i] *
+				   job_resrcs_ptr->cores_per_socket[i];
+			break;
+		}
+	}
+	if (core_cnt < 1) {
+		error("count_job_resources_node: core_cnt=0");
+		return 0;
+	}
+
+	i = bit_size(job_resrcs_ptr->core_bitmap);
+	if ((bit_inx + core_cnt) > i) {
+		error("count_job_resources_node: offset > bitmap size "
+		      "(%d >= %d)", (bit_inx + core_cnt), i);
+		return 0;
+	}
+
+	layouts_entity_get_kv("power", node_name, "NumFreqChoices",
+			      &num_freq, L_T_UINT16);
+	layouts_entity_get_mkv("power", node_name,
+			       "CoresCount,LastCore", data,
+			       (sizeof(uint32_t)*2),L_T_UINT32);
+	if (cpu_freq_max != 0) {
+		for (i = 1; i < num_freq + 1; i++) {
+			sprintf(temp, "Cpufreq%d", i);
+			layouts_entity_pullget_kv("power", node_name,
+						  temp, &val, L_T_UINT32);
+			if (val == cpu_freq_max) {
+				k = i;
+				break;
+			}
+		}
+	}
+
+	desalloc_cores = xmalloc ( sizeof (int) * (core_cnt));	
+	for (i = 0; i < core_cnt; i++) {
+		/*core_num=LastCore+1-CoresCount*/
+		core_num = data[1] + 1 - data[0] + i;
+		sprintf(ename, "virtualcore%u", core_num);
+		if (bit_test(job_resrcs_ptr->core_bitmap, bit_inx++)) {
+			if (new_value) {
+				if (cpu_freq_max != 0 && k != 0) {
+					sprintf(keyname, 
+						"Cpufreq%dWatts", k);
+					layouts_entity_get_kv("power",
+							ename, keyname,
+							&max_watts, L_T_UINT32);
+				} else {
+					layouts_entity_get_kv("power",
+							ename, "MaxCoreWatts",
+							&max_watts, L_T_UINT32);
+				}
+				layouts_entity_setpush_kv("power",
+						    ename, "CurrentCorePower",
+						    &max_watts, L_T_UINT32);
+			} else {
+				layouts_entity_setpush_kv("power",
+						    ename, "CurrentCorePower",
+						    &zero, L_T_UINT32);
+				desalloc_cores[num_counts] = i;
+				num_counts++;
+			}
+		} else {
+			layouts_entity_get_mkv("power", ename,
+					  "CurrentCorePower,IdleCoreWatts",
+					  vals, 
+					  (sizeof(uint32_t)*2) ,L_T_UINT32);
+			if (new_value) {
+				if (vals[0] == 0) {
+					layouts_entity_setpush_kv(
+							  "power",
+							  ename,
+							  "CurrentCorePower",
+							  &vals[1],
+							  L_T_UINT32);
+				}
+			} else {
+				if (vals[1] != vals[0]) {
+					activate = 1;
+				} else {
+					desalloc_cores[num_counts] = i;
+					num_counts++;
+					layouts_entity_setpush_kv(
+							  "power",
+							  ename,
+							  "CurrentCorePower",
+							  &zero, L_T_UINT32);		
+					layouts_entity_get_kv("power",
+							  ename,
+							  "CurrentCorePower",
+							  &der, L_T_UINT32);
+				}	
+			}
+		}
+	}
+
+	if (activate == 1) {
+		for (i = 0; i < num_counts; i++) {
+			core_num = data[1] + 1- data[0] + desalloc_cores[i];
+			sprintf(ename, "virtualcore%u", core_num);
+			layouts_entity_setpush_kv("power", ename,
+						  "CurrentCorePower", &vals[1],
+						  L_T_UINT32);	
+		}
+	}
+
+	return 1;
+}
diff --git a/src/common/job_resources.h b/src/common/job_resources.h
index 1d60e0ead..5872c54cf 100644
--- a/src/common/job_resources.h
+++ b/src/common/job_resources.h
@@ -297,5 +297,13 @@ extern void remove_job_from_cores(job_resources_t *job_resrcs_ptr,
  * node in the job_resrcs_ptr->cpus. Return -1 if invalid */
 extern int job_resources_node_inx_to_cpu_inx(job_resources_t *job_resrcs_ptr, 
 					     int node_inx);
+/*
+ * Adapt the power_cpufreq layout and set the CurrentCorePower value of the
+ * cores based on the selection of the resources and the choice of CPU
+ * frequency. CurrentCorePower is set to IdleCoreWatts when one or more jobs
+ * occupy other resources of the node, and set to 0 when the node is released.
+ */
+extern int adapt_layouts(job_resources_t *job_resrcs_ptr, uint32_t cpu_freq_max,
+                         uint32_t node_id, char* node_name, bool new_value);
 
 #endif /* !_JOB_RESOURCES_H */
diff --git a/src/common/layouts_mgr.c b/src/common/layouts_mgr.c
index 3fffa6459..f44e8b474 100644
--- a/src/common/layouts_mgr.c
+++ b/src/common/layouts_mgr.c
@@ -4,6 +4,7 @@
  *  Initially written by Francois Chevallier <chevallierfrancois@free.fr>
  *  at Bull for slurm-2.6.
  *  Adapted by Matthieu Hautreux <matthieu.hautreux@cea.fr> for slurm-14.11.
+ *  Enhanced by Matthieu Hautreux <matthieu.hautreux@cea.fr> for slurm-15.x.
  *
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://slurm.schedmd.com/>.
@@ -39,6 +40,7 @@
 #include <string.h>
 #include <strings.h>
 #include <ctype.h>
+#include <sys/stat.h>
 
 #include "layouts_mgr.h"
 
@@ -50,15 +52,24 @@
 #include "src/common/hostlist.h"
 #include "src/common/list.h"
 #include "src/common/node_conf.h"
+#include "src/common/pack.h"
 #include "src/common/plugin.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_protocol_api.h"
+#include "src/common/timers.h"
 #include "src/common/xstring.h"
 #include "src/common/xtree.h"
 #include "src/common/xmalloc.h"
 
 #define PATHLEN 256
 
+/* use to specify which layout callbacks to perform while loading data
+ * from conf files, state files or input buffers */
+#define CONF_DONE       0x00000001
+#define PARSE_ENTITY    0x00000002
+#define UPDATE_DONE     0x00000004
+#define PARSE_RELATIONS 0x00000008
+
 /*****************************************************************************\
  *                            STRUCTURES AND TYPES                           *
 \*****************************************************************************/
@@ -101,6 +112,8 @@ typedef struct layout_ops_st {
 			  s_p_hashtbl_t* tbl);
 	void (*entity_parsing) (entity_t* e, s_p_hashtbl_t* etbl,
 				layout_t* layout);
+	int (*update_done) (layout_t* layout, entity_t** e_array,
+			    int e_cnt);
 } layout_ops_t;
 
 /*
@@ -112,6 +125,7 @@ const char *layout_syms[] = {
 	"plugin_spec",             /* holds constants, definitions, ... */
 	"layouts_p_conf_done",     /* */
 	"layouts_p_entity_parsing",
+	"layouts_p_update_done",
 };
 
 /*
@@ -127,13 +141,12 @@ typedef struct layout_plugin_st {
 	layout_ops_t* ops;
 } layout_plugin_t;
 
-static void _layout_plugins_destroy(layout_plugin_t *lp) {
+static void _layout_plugins_destroy(layout_plugin_t *lp)
+{
 	plugin_context_destroy(lp->context);
 	/* it might be interesting to also dlclose the ops here */
-	layout_free(lp->layout);
 	xfree(lp->name);
 	xfree(lp->ops);
-	xfree(lp->layout);
 }
 /*
  * layouts_keydef_t - entities similar keys share a same key definition
@@ -148,11 +161,22 @@ static void _layout_plugins_destroy(layout_plugin_t *lp) {
  * L_T_CUSTOM
  */
 typedef struct layouts_keydef_st {
-	char*			key; /* normalize to lower or upper case */
+	char*			key; /* lower case key prefixed by the
+					"%layout_type%." string */
+	char*			shortkey; /* original key as defined in
+					     the layout keys definition */
 	layouts_keydef_types_t	type;
+	uint32_t                flags;
 	void			(*custom_destroy)(void* value);
 	char*			(*custom_dump)(void* value);
 	layout_plugin_t*	plugin;
+	char*			ref_key; /* lower case reference key prefixed by
+					    the "%layout_type%." might be NULL 
+					    if not defined. */
+	char*			ref_shortkey; /* original ref key as defined in
+						 the layout keys definition,
+						 might be null too. */
+
 } layouts_keydef_t;
 
 /*
@@ -215,6 +239,9 @@ static void _layouts_keydef_free(void* x)
 {
 	layouts_keydef_t* keydef = (layouts_keydef_t*)x;
 	xfree(keydef->key);
+	xfree(keydef->shortkey);
+	xfree(keydef->ref_key);
+	xfree(keydef->ref_shortkey);
 	xfree(keydef);
 }
 
@@ -318,25 +345,455 @@ static void _normalize_keydef_mgrkey(char* buffer, uint32_t size,
 
 static void _entity_add_data(entity_t* e, const char* key, void* data)
 {
-	int rc;
 	layouts_keydef_t* hkey = xhash_get(mgr->keydefs, key);
 	xassert(hkey);
 	void (*freefunc)(void* p) = xfree_as_callback;
-	if (hkey->type == L_T_CUSTOM) {
+	if (hkey && hkey->type == L_T_CUSTOM) {
 		freefunc = hkey->custom_destroy;
 	}
-	rc = entity_add_data(e, hkey->key, data, freefunc);
-	if (rc)
-		xassert(rc);
+	entity_set_data_ref(e, hkey->key, data, freefunc);
+}
+
+/*
+ * used in both automerge and autoupdate calls when dealing with
+ * advanced operations (SUM,MIN,MAX,AVG,...) while setting new key values
+ */
+#define _entity_update_kv_helper(type_t, operator)			\
+	type_t* lvalue = (type_t*) oldvalue;				\
+	type_t* rvalue = (type_t*) value;				\
+	uint32_t* divider;						\
+	switch (operator) {						\
+	case S_P_OPERATOR_SET:						\
+		*lvalue = *rvalue;					\
+		break;							\
+	case S_P_OPERATOR_ADD:						\
+		*lvalue += *rvalue;					\
+		break;							\
+	case S_P_OPERATOR_SUB:						\
+		*lvalue -= *rvalue;					\
+		break;							\
+	case S_P_OPERATOR_MUL:						\
+		*lvalue *= *rvalue;					\
+		break;							\
+	case S_P_OPERATOR_DIV:						\
+		if (*rvalue != (type_t) 0)				\
+			*lvalue /= *rvalue;				\
+		else {							\
+			error("layouts: entity_update: "		\
+			      "key=%s val=0 operator="			\
+			      "DIV !! skipping !!",			\
+			      keydef->key);				\
+		}							\
+		break;							\
+	case S_P_OPERATOR_AVG:						\
+		divider = (uint32_t*) value;				\
+		if (*divider != (uint32_t) 0)				\
+			*lvalue /= (type_t) *divider;			\
+		else {							\
+			error("layouts: entity_update: "		\
+			      "key=%s val=0 operator="			\
+			      "AVG !! skipping !!",			\
+			      keydef->key);				\
+		}							\
+		break;							\
+	case S_P_OPERATOR_SET_IF_MIN:					\
+		if (*rvalue < *lvalue)					\
+			*lvalue = *rvalue;				\
+		break;							\
+	case S_P_OPERATOR_SET_IF_MAX:					\
+		if (*rvalue > *lvalue)					\
+			*lvalue = *rvalue;				\
+		break;							\
+	default:							\
+		break;							\
+	}
+
+static int _layouts_autoupdate_layout(layout_t* layout);
+static int _layouts_autoupdate_layout_if_allowed(layout_t* layout);
+
+/*****************************************************************************\
+ *                       LAYOUTS INTERNAL LOCKLESS API                       *
+\*****************************************************************************/
+
+layouts_keydef_t* _layouts_entity_get_kv_keydef(layout_t* l, entity_t* e,
+						char* key)
+{
+	char keytmp[PATHLEN];
+	if (l == NULL || e == NULL || key == NULL)
+		return NULL;
+	_normalize_keydef_key(keytmp, PATHLEN, key, l->type);
+	return xhash_get(mgr->keydefs, keytmp);
+}
+
+int _layouts_entity_get_kv_type(layout_t* l, entity_t* e, char* key)
+{
+	layouts_keydef_t* keydef;
+	keydef = _layouts_entity_get_kv_keydef(l, e, key);
+	if (keydef != NULL) {
+		return keydef->type;
+	}
+	return SLURM_ERROR;
+}
+
+int _layouts_entity_get_kv_flags(layout_t* l, entity_t* e, char* key)
+{
+	layouts_keydef_t* keydef;
+	keydef = _layouts_entity_get_kv_keydef(l, e, key);
+	if (keydef != NULL) {
+		return keydef->flags;
+	}
+	return SLURM_ERROR;
+}
+
+int _layouts_entity_get_kv_size(layout_t* l, entity_t* e, char* key, size_t *size)
+{
+	layouts_keydef_t* keydef;
+	keydef = _layouts_entity_get_kv_keydef(l, e, key);
+	if (keydef != NULL) {
+		switch(keydef->type) {
+		case L_T_ERROR:
+			return SLURM_ERROR;
+		case L_T_STRING:
+			*size = sizeof(void*);
+			break;
+		case L_T_CUSTOM:
+			*size = sizeof(void*);
+			break;
+		case L_T_LONG:
+			*size = sizeof(long);
+			break;
+		case L_T_UINT16:
+			*size = sizeof(uint16_t);
+			break;
+		case L_T_UINT32:
+			*size = sizeof(uint32_t);
+			break;
+		case L_T_BOOLEAN:
+			*size = sizeof(bool);
+			break;
+		case L_T_FLOAT:
+			*size = sizeof(float);
+			break;
+		case L_T_DOUBLE:
+			*size = sizeof(double);
+			break;
+		case L_T_LONG_DOUBLE:
+			*size = sizeof(long double);
+			break;
+		}
+	} else
+		return SLURM_ERROR;
+	return SLURM_SUCCESS;
+}
+
+bool _layouts_entity_check_kv_keytype(layout_t* l, entity_t* e, char* key,
+				      layouts_keydef_types_t key_type)
+{
+	layouts_keydef_types_t real_type;
+	if (l == NULL || e == NULL || key == NULL)
+		return SLURM_ERROR;
+	if (key_type) {
+		real_type = _layouts_entity_get_kv_type(l, e, key);
+		return (real_type == key_type);
+	}
+	/* no key type provided, consider that as a no-check request */
+	return true;
+}
+
+int _layouts_entity_push_kv(layout_t* l, entity_t* e, char* key)
+{
+	/* a more advanced implementation should only pull what is necessary
+	 * instead of forcing a full autoupdate */
+	return _layouts_autoupdate_layout_if_allowed(l);
+}
+
+int _layouts_entity_pull_kv(layout_t* l, entity_t* e, char* key)
+{
+	/* a more advanced implementation should only pull what is necessary
+	 * instead of forcing a full autoupdate */
+	return _layouts_autoupdate_layout_if_allowed(l);
+}
+
+int _layouts_entity_set_kv(layout_t* l, entity_t* e, char* key, void* value,
+			   layouts_keydef_types_t key_type)
+{
+	void* data;
+	size_t size;
+	layouts_keydef_types_t real_type;
+	char key_keydef[PATHLEN];
+
+	if (l == NULL || e == NULL || key == NULL || value == NULL)
+		return SLURM_ERROR;
+
+	real_type = _layouts_entity_get_kv_type(l, e, key);
+	if (key_type > 0 && real_type != key_type)
+		return SLURM_ERROR;
+
+	_normalize_keydef_key(key_keydef, PATHLEN, key, l->type);
+
+	switch(real_type) {
+	case L_T_ERROR:
+		return SLURM_ERROR;
+	case L_T_STRING:
+		data = xstrdup(value);
+		return entity_set_data_ref(e, key_keydef, data,
+					   xfree_as_callback);
+	case L_T_CUSTOM:
+		/* TBD : add a custom_set call */
+		value = NULL;
+		return SLURM_ERROR;
+	case L_T_LONG:
+		size = sizeof(long);
+		break;
+	case L_T_UINT16:
+		size = sizeof(uint16_t);
+		break;
+	case L_T_UINT32:
+		size = sizeof(uint32_t);
+		break;
+	case L_T_BOOLEAN:
+		size = sizeof(bool);
+		break;
+	case L_T_FLOAT:
+		size = sizeof(float);
+		break;
+	case L_T_DOUBLE:
+		size = sizeof(double);
+		break;
+	case L_T_LONG_DOUBLE:
+		size = sizeof(long double);
+		break;
+	}
+	return entity_set_data(e, key_keydef, value, size);
+}
+
+int _layouts_entity_set_kv_ref(layout_t* l, entity_t* e, char* key, void* value,
+			       layouts_keydef_types_t key_type)
+{
+	int rc = SLURM_ERROR;
+	char key_keydef[PATHLEN];
+
+	if (l == NULL || e == NULL || key == NULL || value == NULL)
+		return rc;
+
+	if (!_layouts_entity_check_kv_keytype(l, e, key, key_type))
+		return rc;
+
+	_normalize_keydef_key(key_keydef, PATHLEN, key, l->type);
+	return entity_set_data_ref(e, key_keydef, value, xfree_as_callback);
+}
+
+int _layouts_entity_setpush_kv(layout_t* l, entity_t* e, char* key, void* value,
+			       layouts_keydef_types_t key_type)
+{
+	int rc = SLURM_ERROR;
+	if (_layouts_entity_set_kv(l, e, key, value, key_type) == SLURM_SUCCESS)
+		rc = _layouts_entity_push_kv(l, e, key);
+	return rc;
+}
+
+int _layouts_entity_setpush_kv_ref(layout_t* l, entity_t* e, char* key,
+				   void* value, layouts_keydef_types_t key_type)
+{
+	int rc = SLURM_ERROR;
+	if (_layouts_entity_set_kv_ref(l, e, key, value, key_type) ==
+	    SLURM_SUCCESS)
+		rc = _layouts_entity_push_kv(l, e, key);
+	return rc;
+}
+
+int _layouts_entity_get_kv(layout_t* l, entity_t* e, char* key, void* value,
+			   layouts_keydef_types_t key_type)
+{
+	void* data;
+	size_t size;
+	layouts_keydef_types_t real_type;
+	char key_keydef[PATHLEN];
+	char ** pstr;
+
+	if (l == NULL || e == NULL || key == NULL || value == NULL)
+		return SLURM_ERROR;
+
+	real_type = _layouts_entity_get_kv_type(l, e, key);
+	if (key_type > 0 && real_type != key_type)
+		return SLURM_ERROR;
+
+	_normalize_keydef_key(key_keydef, PATHLEN, key, l->type);
+
+	data = entity_get_data_ref(e, key_keydef);
+	if (data == NULL) {
+		return SLURM_ERROR;
+	}
+
+	switch(real_type) {
+	case L_T_ERROR:
+		return SLURM_ERROR;
+	case L_T_STRING:
+		pstr = (char**) value;
+		if (data)
+			*pstr = xstrdup(data);
+		else
+			*pstr = NULL;
+		return SLURM_SUCCESS;
+	case L_T_CUSTOM:
+		/* TBD : add a custom_get call */
+		pstr = (char**) value;
+		*pstr = NULL;
+		return SLURM_ERROR;
+	case L_T_LONG:
+		size = sizeof(long);
+		break;
+	case L_T_UINT16:
+		size = sizeof(uint16_t);
+		break;
+	case L_T_UINT32:
+		size = sizeof(uint32_t);
+		break;
+	case L_T_BOOLEAN:
+		size = sizeof(bool);
+		break;
+	case L_T_FLOAT:
+		size = sizeof(float);
+		break;
+	case L_T_DOUBLE:
+		size = sizeof(double);
+		break;
+	case L_T_LONG_DOUBLE:
+		size = sizeof(long double);
+		break;
+	}
+	memcpy(value, data, size);
+	return SLURM_SUCCESS;
+}
+
+int _layouts_entity_get_mkv(layout_t* l, entity_t* e, char* keys, void* value,
+			    size_t length, layouts_keydef_types_t key_type)
+{
+	char *key = NULL;
+	hostlist_t kl;
+	size_t processed = 0;
+	size_t elt_size = sizeof(void*);;
+	int rc = 0;
+
+	/* expand in order the requested keys (in hostlist format)
+	 * and iterate over each one of them, collecting the different
+	 * values into the provided buffer.
+	 * if no more space is available in the buffer, then just count
+	 * the missing elements for the exit code.
+	 * the first error encountered fakes a full buffer to just add
+	 * the remaining keys to the missing elements count before
+	 * exiting. */
+	kl = hostlist_create(keys);
+	while ((key = hostlist_shift(kl))) {
+		if (processed >= length) {
+			rc++;
+			continue;
+		}
+		if (_layouts_entity_get_kv_size(l, e, key, &elt_size) ||
+		    (processed + elt_size) > length ||
+		    _layouts_entity_get_kv(l, e, key, value, key_type)) {
+			rc++;
+			processed = length;
+			continue;
+		}
+		value += elt_size;
+		processed += elt_size;
+	}
+	hostlist_destroy(kl);
+
+	return rc;
+}
+
+int _layouts_entity_get_kv_ref(layout_t* l, entity_t* e,
+			       char* key, void** value,
+			       layouts_keydef_types_t key_type)
+{
+	int rc = SLURM_ERROR;
+	char key_keydef[PATHLEN];
+	void* data;
+
+	if (l == NULL || e == NULL || key == NULL || value == NULL)
+		return rc;
+
+	if (!_layouts_entity_check_kv_keytype(l, e, key, key_type))
+		return rc;
+
+	_normalize_keydef_key(key_keydef, PATHLEN, key, l->type);
+	data = entity_get_data_ref(e, key_keydef);
+	if (data != NULL) {
+		*value = data;
+		rc = SLURM_SUCCESS;
+	}
+	return rc;
+}
+
+int _layouts_entity_get_mkv_ref(layout_t* l, entity_t* e, char* keys,
+				void* value, size_t length,
+				layouts_keydef_types_t key_type)
+{
+	char *key = NULL;
+	hostlist_t kl;
+	size_t processed = 0;
+	size_t elt_size = sizeof(void*);
+	int rc = 0;
+
+	/* expand in order the requested keys (in hostlist format)
+	 * and iterate over each one of them, collecting the different
+	 * references into the provided buffer.
+	 * if no more space is available in the buffer, then just count
+	 * the missing elements for the exit code.
+	 * the first error encountered fakes a full buffer to just add
+	 * the remaining keys to the missing elements count before
+	 * exiting. */
+	kl = hostlist_create(keys);
+	while ((key = hostlist_shift(kl))) {
+		if (processed >= length) {
+			rc++;
+			continue;
+		}
+		if (_layouts_entity_get_kv_ref(l, e, key, value, key_type)) {
+			rc++;
+			processed = length;
+			continue;
+		}
+		value += elt_size;
+		processed += elt_size;
+	}
+	hostlist_destroy(kl);
+
+	return rc;
+}
+
+int _layouts_entity_pullget_kv(layout_t* l, entity_t* e, char* key, void* value,
+			       layouts_keydef_types_t key_type)
+{
+	int rc = SLURM_ERROR;
+	if (!_layouts_entity_check_kv_keytype(l, e, key, key_type))
+		return rc;
+	if (_layouts_entity_pull_kv(l, e, key) == SLURM_SUCCESS)
+		rc = _layouts_entity_get_kv(l, e, key, value, key_type);
+	return rc;
+}
+
+int _layouts_entity_pullget_kv_ref(layout_t* l, entity_t* e,
+				   char* key, void** value,
+				   layouts_keydef_types_t key_type)
+{
+	int rc = SLURM_ERROR;
+	if (!_layouts_entity_check_kv_keytype(l, e, key, key_type))
+		return rc;
+	if (_layouts_entity_pull_kv(l, e, key) == SLURM_SUCCESS)
+		rc = _layouts_entity_get_kv_ref(l, e, key, value, key_type);
+	return rc;
 }
 
 /*****************************************************************************\
  *                                MANAGER INIT                               *
 \*****************************************************************************/
 
-static void _slurm_layouts_init_keydef(xhash_t* keydefs,
-				       const layouts_keyspec_t* plugin_keyspec,
-				       layout_plugin_t* plugin)
+static void _layouts_init_keydef(xhash_t* keydefs,
+				 const layouts_keyspec_t* plugin_keyspec,
+				 layout_plugin_t* plugin)
 {
 	char keytmp[PATHLEN];
 
@@ -357,10 +814,21 @@ static void _slurm_layouts_init_keydef(xhash_t* keydefs,
 		nkeydef = (layouts_keydef_t*)
 			xmalloc(sizeof(layouts_keydef_t));
 		nkeydef->key = xstrdup(keytmp);
+		nkeydef->shortkey = xstrdup(current->key);
 		nkeydef->type = current->type;
+		nkeydef->flags = current->flags;
 		nkeydef->custom_destroy = current->custom_destroy;
 		nkeydef->custom_dump = current->custom_dump;
 		nkeydef->plugin = plugin;
+		if (current->ref_key != NULL) {
+			_normalize_keydef_key(keytmp, PATHLEN, current->ref_key,
+					      plugin->layout->type);
+			nkeydef->ref_key = xstrdup(keytmp);
+			nkeydef->ref_shortkey = xstrdup(current->ref_key);
+		} else {
+			nkeydef->ref_key = NULL;
+			nkeydef->ref_shortkey = NULL;
+		}
 		xhash_add(keydefs, nkeydef);
 	}
 
@@ -373,6 +841,7 @@ static void _slurm_layouts_init_keydef(xhash_t* keydefs,
 		nkeydef = (layouts_keydef_t*)
 			xmalloc(sizeof(layouts_keydef_t));
 		nkeydef->key = xstrdup(keytmp);
+		nkeydef->shortkey = xstrdup("Enclosed");
 		nkeydef->type = L_T_STRING;
 		nkeydef->plugin = plugin;
 		xhash_add(keydefs, nkeydef);
@@ -380,7 +849,7 @@ static void _slurm_layouts_init_keydef(xhash_t* keydefs,
 	}
 }
 
-static int _slurm_layouts_init_layouts_walk_helper(void* x, void* arg)
+static int _layouts_init_layouts_walk_helper(void* x, void* arg)
 {
 	layouts_conf_spec_t* spec = (layouts_conf_spec_t*)x;
 	int* i = (int*)arg;
@@ -389,6 +858,7 @@ static int _slurm_layouts_init_layouts_walk_helper(void* x, void* arg)
 	char plugin_name[PATHLEN];
 	void* inserted_item;
 	plugin_context_t* plugin_context;
+
 	snprintf(plugin_name, PATHLEN,
 		 "layouts/%s_%s", spec->type, spec->name);
 	plugin->ops = (layout_ops_t*)xmalloc(sizeof(layout_ops_t));
@@ -413,12 +883,11 @@ static int _slurm_layouts_init_layouts_walk_helper(void* x, void* arg)
 	plugin->layout = (layout_t*)xmalloc(sizeof(layout_t));
 	layout_init(plugin->layout, spec->name, spec->type, 0,
 		    plugin->ops->spec->struct_type);
-	inserted_item = xhash_add(mgr->layouts, plugin->layout);
-	if (inserted_item)
+	if ((inserted_item = xhash_add(mgr->layouts, plugin->layout)))
 		xassert(inserted_item == plugin->layout);
-	_slurm_layouts_init_keydef(mgr->keydefs,
-				   plugin->ops->spec->keyspec,
-				   plugin);
+	_layouts_init_keydef(mgr->keydefs,
+			     plugin->ops->spec->keyspec,
+			     plugin);
 	++*i;
 	return SLURM_SUCCESS;
 }
@@ -493,6 +962,13 @@ static char* _conf_get_filename(const char* type)
 	return final_path;
 }
 
+static char* _state_get_filename(const char* type)
+{
+	return xstrdup_printf("%s/layouts_state_%s",
+			      slurmctld_conf.state_save_location,
+			      type);
+}
+
 static s_p_hashtbl_t* _conf_make_hashtbl(int struct_type,
 					 const s_p_options_t* layout_options)
 {
@@ -531,7 +1007,7 @@ static s_p_hashtbl_t* _conf_make_hashtbl(int struct_type,
 		relational_options = tree_options;
 		break;
 	default:
-		fatal("layouts: does not know what relation structure to"
+		fatal("layouts: does not know what relation structure to "
 		      "use for type %d", struct_type);
 	}
 
@@ -548,28 +1024,32 @@ static s_p_hashtbl_t* _conf_make_hashtbl(int struct_type,
 	return tbl;
 }
 
-#define _layouts_load_merge(type_t, s_p_get_type) { \
-	type_t newvalue; \
-	type_t** oldvalue; \
-	if (!s_p_get_type(&newvalue, option_key, etbl)) { \
-		/* no value to merge/create */ \
-		continue; \
-	} \
-	oldvalue = (type_t**)entity_get_data(e, key_keydef); \
-	if (oldvalue) { \
-		**oldvalue = newvalue; \
-	} else { \
-		type_t* newalloc = (type_t*)xmalloc(sizeof(type_t)); \
-		*newalloc = newvalue; \
-		_entity_add_data(e, key_keydef, newalloc); \
-	} \
-}
-
-#define _layouts_merge_check(type1, type2) \
+#define _layouts_load_merge(type_t, s_p_get_type) {			\
+		type_t  rvalue;						\
+		type_t* value = &rvalue;				\
+		type_t* oldvalue;					\
+		slurm_parser_operator_t operator = S_P_OPERATOR_SET;	\
+		if (!s_p_get_type(&rvalue, option_key, etbl)) {		\
+			/* no value to merge/create */			\
+			continue;					\
+		}							\
+		s_p_get_operator(&operator, option_key, etbl);		\
+		oldvalue = (type_t*)entity_get_data_ref(e, key_keydef); \
+		if (oldvalue) {						\
+			_entity_update_kv_helper(type_t, operator);	\
+		} else {						\
+			type_t* newalloc = (type_t*)			\
+				xmalloc(sizeof(type_t));		\
+			*newalloc = *value;				\
+			_entity_add_data(e, key_keydef, newalloc);	\
+		}							\
+	}								\
+
+#define _layouts_merge_check(type1, type2)			\
 	(entity_option->type == type1 && keydef->type == type2)
 
 static void _layouts_load_automerge(layout_plugin_t* plugin, entity_t* e,
-		s_p_hashtbl_t* etbl)
+				    s_p_hashtbl_t* etbl, uint32_t flags)
 {
 	const s_p_options_t* layout_option;
 	const s_p_options_t* entity_option;
@@ -578,22 +1058,29 @@ static void _layouts_load_automerge(layout_plugin_t* plugin, entity_t* e,
 	char* option_key;
 
 	for (layout_option = plugin->ops->spec->options;
-		layout_option && strcasecmp("Entity", layout_option->key);
-		++layout_option);
+	     layout_option && strcasecmp("Entity", layout_option->key);
+	     ++layout_option);
 	xassert(layout_option);
 
 	for (entity_option = layout_option->line_options;
-			entity_option->key;
-			++entity_option) {
+	     entity_option->key;
+	     ++entity_option) {
 		option_key = entity_option->key;
 		_normalize_keydef_key(key_keydef, PATHLEN, option_key,
-				plugin->layout->type);
+				      plugin->layout->type);
 		keydef = xhash_get(mgr->keydefs, key_keydef);
 		if (!keydef) {
 			/* key is not meant to be automatically handled,
 			 * ignore it for this function */
 			continue;
 		}
+		/* do not perform automerge on updates for read-only keys */
+		if (flags & UPDATE_DONE &&
+		    keydef->flags & KEYSPEC_RDONLY) {
+			debug4("layouts: do not try to merge RDONLY key '%s'",
+			       keydef->key);
+			continue;
+		}
 		if (_layouts_merge_check(S_P_LONG, L_T_LONG)) {
 			_layouts_load_merge(long, s_p_get_long);
 		} else if (_layouts_merge_check(S_P_UINT16, L_T_UINT16)) {
@@ -604,6 +1091,13 @@ static void _layouts_load_automerge(layout_plugin_t* plugin, entity_t* e,
 			_layouts_load_merge(bool, s_p_get_boolean);
 		} else if (_layouts_merge_check(S_P_LONG, L_T_LONG)) {
 			_layouts_load_merge(long, s_p_get_long);
+		} else if (_layouts_merge_check(S_P_FLOAT, L_T_FLOAT)) {
+			_layouts_load_merge(float, s_p_get_float);
+		} else if (_layouts_merge_check(S_P_DOUBLE, L_T_DOUBLE)) {
+			_layouts_load_merge(double, s_p_get_double);
+		} else if (_layouts_merge_check(S_P_LONG_DOUBLE,
+						L_T_LONG_DOUBLE)) {
+			_layouts_load_merge(long double, s_p_get_long_double);
 		} else if (_layouts_merge_check(S_P_STRING, L_T_STRING)) {
 			char* newvalue;
 			if (s_p_get_string(&newvalue, option_key, etbl)) {
@@ -618,38 +1112,39 @@ static void _layouts_parse_relations(layout_plugin_t* plugin, entity_t* e,
 				     s_p_hashtbl_t* entity_tbl)
 {
 	char* e_enclosed;
-	char** e_already_enclosed;
+	char* e_already_enclosed;
+	char* e_new_enclosed;
 	char key[PATHLEN];
 	switch(plugin->layout->struct_type) {
 	case LAYOUT_STRUCT_TREE:
 		if (s_p_get_string(&e_enclosed, "Enclosed", entity_tbl)) {
 			_normalize_keydef_mgrkey(key, PATHLEN, "enclosed",
-					plugin->layout->type);
-			e_already_enclosed = (char**)entity_get_data(e, key);
+						 plugin->layout->type);
+			e_already_enclosed = (char*)
+				entity_get_data_ref(e, key);
 			if (e_already_enclosed) {
-				/* FC expressed warnings about that section,
-				 * should be checked more */
-				*e_already_enclosed = xrealloc(
-						*e_already_enclosed,
-						strlen(*e_already_enclosed) +
-						strlen(e_enclosed) + 2);
-				strcat(*e_already_enclosed, ",");
-				strcat(*e_already_enclosed, e_enclosed);
+				e_new_enclosed = (char*) xmalloc(
+					strlen(e_already_enclosed) +
+					strlen(e_enclosed) + 2);
+				strcat(e_new_enclosed, e_already_enclosed);
+				strcat(e_new_enclosed, ",");
+				strcat(e_new_enclosed, e_enclosed);
 				xfree(e_enclosed);
-			} else {
-				_entity_add_data(e, key, e_enclosed);
+				e_enclosed = e_new_enclosed;
 			}
+			_entity_add_data(e, key, e_enclosed);
 		}
 		break;
 	}
 }
 
 static int _layouts_read_config_post(layout_plugin_t* plugin,
-		s_p_hashtbl_t* tbl)
+				     s_p_hashtbl_t* tbl)
 {
 	char* root_nodename;
 	entity_t* e;
-	xtree_node_t* root_node,* inserted_node;
+	entity_node_t* enode;
+	xtree_node_t* root_node;
 	xtree_t* tree;
 	switch(plugin->layout->struct_type) {
 	case LAYOUT_STRUCT_TREE:
@@ -669,38 +1164,34 @@ static int _layouts_read_config_post(layout_plugin_t* plugin,
 			return SLURM_ERROR;
 		}
 		xfree(root_nodename);
-		root_node = xtree_add_child(tree, NULL, e, XTREE_APPEND);
-		if (root_node)
+
+		if (!(enode = entity_add_node(e, plugin->layout)))
+			xassert(enode);
+		if (!(root_node = xtree_add_child(
+			      tree, NULL, enode, XTREE_APPEND)))
 			xassert(root_node);
-		inserted_node = list_append(e->nodes, root_node);
-		if (inserted_node)
-			xassert(inserted_node == root_node);
+		enode->node = (void*) root_node;
 		break;
 	}
 	return SLURM_SUCCESS;
 }
 
 /*
- * _layouts_read_config - called after base entities are loaded successfully
- *
- * This function is the stage 1 of the layouts loading stage, where we collect
- * info on all the entities and store them in a global hash table.
- * Entities that do not already exist are created, otherwise updated.
- *
- * Information concerning the relations among entities provided by the
- * 'Enclosed' conf pragma are also extracted here for further usage in stage 2.
- *
- * When layout plugins callbacks are called, relational structures among
- * entities are not yet built.
+ * _layouts_load_config_common - called by layouts_read_config,
+ *       layouts_read_state or layouts_update_config with either a
+ *       filename or a buffer as well as a flag to indicate if it
+ *       is a full load or not (state save only)
  */
-static int _layouts_read_config(layout_plugin_t* plugin)
+static int _layouts_load_config_common(layout_plugin_t* plugin,
+				       char* filename, Buf buffer,
+				       uint32_t flags)
 {
 	s_p_hashtbl_t* tbl = NULL;
 	s_p_hashtbl_t** entities_tbl = NULL;
 	s_p_hashtbl_t* entity_tbl = NULL;
 	int entities_tbl_count = 0, i;
-	int rc = SLURM_ERROR;
-	char* filename = NULL;
+	entity_t** updated_entities = NULL;
+	int rc = SLURM_SUCCESS;
 
 	uint32_t l_priority;
 
@@ -715,16 +1206,27 @@ static int _layouts_read_config(layout_plugin_t* plugin)
 
 	tbl = _conf_make_hashtbl(plugin->layout->struct_type,
 				 plugin->ops->spec->options);
-	filename = _conf_get_filename(plugin->layout->type);
-	if (!filename) {
-		fatal("layouts: cannot find configuration file for "
-		      "required layout '%s'", plugin->name);
-	}
-	if (s_p_parse_file(tbl, NULL, filename, false) == SLURM_ERROR) {
-		fatal("layouts: something went wrong when opening/reading "
-		      "'%s': %m", filename);
+	if (filename) {
+		if (s_p_parse_file(tbl, NULL, filename, false) == SLURM_ERROR) {
+			info("layouts: something went wrong when opening/reading "
+			      "'%s': %m", filename);
+			rc = SLURM_ERROR;
+			goto cleanup;
+		}
+		debug3("layouts: configuration file '%s' is loaded", filename);
+	} else if (buffer) {
+		if (s_p_parse_buffer(tbl, NULL, buffer, false) == SLURM_ERROR) {
+			error("layouts: something went wrong when parsing "
+			      "buffer : %m");
+			rc = SLURM_ERROR;
+			goto cleanup;
+		}
+		debug3("layouts: buffer loaded");
+	} else {
+		error("layouts: invalid usage of _layouts_load_config_common");
+		rc = SLURM_ERROR;
+		goto cleanup;
 	}
-	debug3("layouts: configuration file '%s' is loaded", filename);
 
 	if (s_p_get_uint32(&l_priority, "Priority", tbl)) {
 		plugin->layout->priority = l_priority;
@@ -732,21 +1234,29 @@ static int _layouts_read_config(layout_plugin_t* plugin)
 
 	/* get the config hash tables of the defined entities */
 	if (!s_p_get_expline(&entities_tbl, &entities_tbl_count,
-				"Entity", tbl)) {
+			     "Entity", tbl)) {
 		error("layouts: no valid Entity found, can not append any "
 		      "information nor construct relations for %s/%s",
 		      plugin->layout->type, plugin->layout->name);
+		rc = SLURM_ERROR;
 		goto cleanup;
 	}
 
+	/* stage 0: xmalloc an array of entity_t* to save the updated entity_t
+	 * and give their references in the update_done layout callback */
+	updated_entities = (entity_t**)
+		xmalloc(entities_tbl_count*sizeof(entity_t*));
+
 	/* stage 1: create the described entities or update them */
 	for (i = 0; i < entities_tbl_count; ++i) {
+		updated_entities[i] = NULL;
 		entity_tbl = entities_tbl[i];
 		xfree(e_name);
 		xfree(e_type);
 		if (!s_p_get_string(&e_name, "Entity", entity_tbl)) {
-			error("layouts: no name associated to entity[%d], "
+			info("layouts: no name associated to entity[%d], "
 			      "skipping...", i);
+			rc = SLURM_ERROR;
 			continue;
 		}
 
@@ -755,15 +1265,17 @@ static int _layouts_read_config(layout_plugin_t* plugin)
 		if (!e) {
 			/* if the entity does not already exists, create it */
 			if (!s_p_get_string(&e_type, "Type", entity_tbl)) {
-				error("layouts: entity '%s' does not already "
-				      "exists and no type was specified, "
-				      "skipping", e_name);
+				info("layouts: entity '%s' does not already "
+				     "exists and no type was specified, "
+				     "skipping", e_name);
+				rc = SLURM_ERROR;
 				continue;
 			}
 			if (!_string_in_array(e_type,
 					      plugin->ops->spec->etypes)) {
-				error("layouts: entity '%s' type (%s) is "
-				      "invalid, skipping", e_name, e_type);
+				info("layouts: entity '%s' type (%s) is "
+				     "invalid, skipping", e_name, e_type);
+				rc = SLURM_ERROR;
 				continue;
 			}
 
@@ -775,22 +1287,26 @@ static int _layouts_read_config(layout_plugin_t* plugin)
 			/* if defined, check that the type is consistent */
 			if (!_string_in_array(e_type,
 					      plugin->ops->spec->etypes)) {
-				error("layouts: entity '%s' type (%s) is "
-				      "invalid, skipping", e_name, e_type);
+				info("layouts: entity '%s' type (%s) is "
+				     "invalid, skipping", e_name, e_type);
+				rc = SLURM_ERROR;
 				continue;
 			}
-			if (!strcmp(e_type, e->type)) {
-				error("layouts: entity '%s' type (%s) differs "
-				      "from already registered entity type (%s)"
-				      "skipping", e_name, e_type, e->type);
+			if (!e->type || strcmp(e_type, e->type)) {
+				info("layouts: entity '%s' type (%s) differs "
+				     "from already registered entity type (%s)"
+				     " skipping", e_name, e_type, e->type);
+				rc = SLURM_ERROR;
 				continue;
 			}
 		}
 
-		/* look for "Enclosed" pragmas identifying the relations
+		/* ** Full load config only (flags==0) **
+		 * look for "Enclosed" pragmas identifying the relations
 		 * among entities and kep that along with the entity for
 		 * stage 2 */
-		_layouts_parse_relations(plugin, e, entity_tbl);
+		if (flags & PARSE_RELATIONS)
+			_layouts_parse_relations(plugin, e, entity_tbl);
 
 		/*
 		 * if the layout plugin requests automerge, try to automatically
@@ -799,72 +1315,184 @@ static int _layouts_read_config(layout_plugin_t* plugin)
 		 * type and adding them to the entity key hash table.
 		 */
 		if (plugin->ops->spec->automerge) {
-			_layouts_load_automerge(plugin, e, entity_tbl);
+			_layouts_load_automerge(plugin, e, entity_tbl, flags);
 		}
 
 		/*
 		 * in case the automerge was not sufficient, the layout parsing
 		 * callback is called for further actions.
 		 */
-		if (plugin->ops->entity_parsing) {
+		if ((flags & PARSE_ENTITY) && plugin->ops->entity_parsing) {
 			plugin->ops->entity_parsing(e, entity_tbl,
 						    plugin->layout);
 		}
+
+		/* add the entity ref to the array for further usage when
+		 * calling the update_done layout callback */
+		updated_entities[i] = e;
 	}
+	xfree(e_name);
+	xfree(e_type);
 
-	/* post-read-and-build (post stage 1)
+	/* ** Full load config only (flags==0) **
+	 * post-read-and-build (post stage 1)
 	 * ensure that a Root entity was defined and set it as the root of
 	 * the relational structure of the layout.
 	 * fails in case of error as a root is mandatory to walk the relational
 	 * structure of the layout */
-	if (_layouts_read_config_post(plugin, tbl) != SLURM_SUCCESS) {
+	if ((flags & CONF_DONE) &&
+	    _layouts_read_config_post(plugin, tbl) != SLURM_SUCCESS) {
 		goto cleanup;
 	}
 
-	/*
+	/* ** Full load config only (flags==0) **
 	 * call the layout plugin conf_done callback for further
 	 * layout specific actions.
 	 */
-	if (plugin->ops->conf_done) {
+	if ((flags & CONF_DONE) && plugin->ops->conf_done) {
 		if (!plugin->ops->conf_done(mgr->entities, plugin->layout,
 					    tbl)) {
 			error("layouts: plugin %s/%s has an error parsing its"
 			      " configuration", plugin->layout->type,
 			      plugin->layout->name);
+			rc = SLURM_ERROR;
 			goto cleanup;
 		}
 	}
 
-	rc = SLURM_SUCCESS;
+	/*
+	 * In case we are processing an update (not a startup configuration)
+	 * if the layout plugin requests autoupdate, call the autoupdate
+	 * function on the current layout in order to set the inherited values
+	 * according to the newly modified ones.
+	 * (in startup configuration, the autoupdate is performed in stage 3
+	 *  when the relational structures are available)
+	 */
+	if ((flags & UPDATE_DONE) && plugin->ops->spec->autoupdate) {
+		_layouts_autoupdate_layout(plugin->layout);
+	}
+
+	/*
+	 * Call the layout plugin update_done callback for further
+	 * layout specific actions.
+	 * Note : some entries of the updated_entities array might be NULL
+	 * reflecting an issue while trying to analyze the corresponding
+	 * parsed hash table.
+	 */
+	if ((flags & UPDATE_DONE) && plugin->ops->update_done) {
+		if (!plugin->ops->update_done(plugin->layout, updated_entities,
+					      entities_tbl_count)) {
+			error("layouts: plugin %s/%s has an error reacting to"
+			      " entities update", plugin->layout->type,
+			      plugin->layout->name);
+			rc = SLURM_ERROR;
+			goto cleanup;
+		}
+	}
+	xfree(updated_entities);
 
 cleanup:
 	s_p_hashtbl_destroy(tbl);
-	xfree(filename);
 
 	return rc;
 }
 
-typedef struct _layouts_build_xtree_walk_st {
-	char* enclosed_key;
-	xtree_t* tree;
-} _layouts_build_xtree_walk_t;
-
-uint8_t _layouts_build_xtree_walk(xtree_node_t* node,
-					 uint8_t which,
-					 uint32_t level,
-					 void* arg)
-{
-	_layouts_build_xtree_walk_t* p = (_layouts_build_xtree_walk_t*)arg;
-	entity_t* e;
-	char** enclosed_str;
-	char* enclosed_name;
-	hostlist_t enclosed_hostlist;
+/*
+ * _layouts_read_config - called after base entities are loaded successfully
+ *
+ * This function is the stage 1 of the layouts loading stage, where we collect
+ * info on all the entities and store them in a global hash table.
+ * Entities that do not already exist are created, otherwise updated.
+ *
+ * Information concerning the relations among entities provided by the
+ * 'Enclosed' conf pragma are also extracted here for further usage in stage 2.
+ *
+ * When layout plugins callbacks are called, relational structures among
+ * entities are not yet built.
+ */
+static int _layouts_read_config(layout_plugin_t* plugin)
+{
+	int rc;
+	char* filename = _conf_get_filename(plugin->layout->type);
+	if (!filename) {
+		fatal("layouts: cannot find configuration file for "
+		      "required layout '%s'", plugin->name);
+	}
+	rc = _layouts_load_config_common(plugin, filename, NULL,
+					 CONF_DONE |
+					 PARSE_ENTITY | PARSE_RELATIONS);
+	xfree(filename);
+	return rc;
+}
+
+/*
+ * _layouts_read_state - called to restore saved state of layout entities
+ *
+ * This function is the stage 1.1 of the layouts loading stage, where we collect
+ * info on all the entities stored in the state of the layout and store/update
+ * them in the global hash table.
+ *
+ * Information concerning the relations among entities provided by the
+ * 'Enclosed' conf pragma are not taken into account for now and will be those
+ * loaded during stage 1.
+ *
+ * No layout plugins callbacks are called when doing that for now.
+ */
+static int _layouts_read_state(layout_plugin_t* plugin)
+{
+	int rc = SLURM_SUCCESS;
+	struct stat stat_buf;
+	char *filename = _state_get_filename(plugin->layout->type);
+	if (!filename) {
+		error("layouts: unable to build read state filename of layout"
+		      " '%s/%s'", plugin->layout->type, plugin->layout->name);
+		return SLURM_ERROR;
+	}
+	/* check availability of the file otherwise it will later block
+	 * waiting for a file to appear (in s_p_parse_file) */
+	if (stat(filename, &stat_buf) < 0) {
+		debug("layouts: skipping non existent state file for '%s/%s'",
+		      plugin->layout->type, plugin->layout->name);
+	} else {
+		rc = _layouts_load_config_common(plugin, filename, NULL,
+						 PARSE_ENTITY);
+	}
+	xfree(filename);
+	return rc;
+}
+
+static int _layouts_update_state(layout_plugin_t* plugin, Buf buffer)
+{
+	return _layouts_load_config_common(plugin, NULL, buffer,
+					   PARSE_ENTITY | UPDATE_DONE);
+}
+
+typedef struct _layouts_build_xtree_walk_st {
+	layout_t* layout;
+	char* enclosed_key;
+	xtree_t* tree;
+} _layouts_build_xtree_walk_t;
+
+uint8_t _layouts_build_xtree_walk(xtree_node_t* node,
+				  uint8_t which,
+				  uint32_t level,
+				  void* arg)
+{
+	_layouts_build_xtree_walk_t* p = (_layouts_build_xtree_walk_t*)arg;
+	entity_t* e;
+	entity_node_t* enode;
+	char* enclosed_str;
+	char* enclosed_name;
+	hostlist_t enclosed_hostlist;
 	entity_t* enclosed_e;
-	xtree_node_t* enclosed_node,* inserted_node;
+	xtree_node_t* enclosed_node;
 
 	xassert(arg);
 
-	e = xtree_node_get_data(node);
+	/* get the entity from the entity node associated with the tree node */
+	enode = (entity_node_t*) xtree_node_get_data(node);
+	xassert(enode);
+	e = enode->entity;
 	xassert(e);
 
 	/*
@@ -877,11 +1505,10 @@ uint8_t _layouts_build_xtree_walk(xtree_node_t* node,
 	if (which != XTREE_GROWING && which != XTREE_PREORDER)
 		return 1;
 
-	enclosed_str = (char**)entity_get_data(e, p->enclosed_key);
+	enclosed_str = (char*) entity_get_data_ref(e, p->enclosed_key);
 
 	if (enclosed_str) {
-		enclosed_hostlist = hostlist_create(*enclosed_str);
-		xfree(*enclosed_str);
+		enclosed_hostlist = hostlist_create(enclosed_str);
 		entity_delete_data(e, p->enclosed_key);
 		while ((enclosed_name = hostlist_shift(enclosed_hostlist))) {
 			enclosed_e = xhash_get(mgr->entities, enclosed_name);
@@ -894,15 +1521,18 @@ uint8_t _layouts_build_xtree_walk(xtree_node_t* node,
 				continue;
 			}
 			free(enclosed_name);
-			enclosed_node = xtree_add_child(p->tree, node,
-							enclosed_e,
-							XTREE_APPEND);
-			if (enclosed_node)
+			/* create an entity node associated to the entity
+			 * for this layout */
+			enode = entity_add_node(enclosed_e, p->layout);
+			xassert(enode);
+			/* add it to the tree, getting an xtree_node_t ref */
+			if (!(enclosed_node = xtree_add_child(
+				      p->tree, node, enode, XTREE_APPEND)))
 				xassert(enclosed_node);
-			inserted_node = list_append(enclosed_e->nodes,
-						    enclosed_node);
-			if (inserted_node)
-				xassert(inserted_node == enclosed_node);
+			/* store the xtree_node_t ref in the entity node. It
+			 * will be used to access this layout tree from the
+			 * entity when necessary through the entity node */
+			enode->node = enclosed_node;
 		}
 		hostlist_destroy(enclosed_hostlist);
 	}
@@ -934,6 +1564,7 @@ static int _layouts_build_relations(layout_plugin_t* plugin)
 		_normalize_keydef_mgrkey(key, PATHLEN, "enclosed",
 					 plugin->layout->type);
 		_layouts_build_xtree_walk_t p = {
+			plugin->layout,
 			key,
 			tree
 		};
@@ -948,6 +1579,570 @@ static int _layouts_build_relations(layout_plugin_t* plugin)
 	return SLURM_SUCCESS;
 }
 
+/*****************************************************************************\
+ *                                  STATE DUMP                               *
+\*****************************************************************************/
+
+/*
+ * _pack_args_t : helper struct/type used when passing args among the various
+ * functions used when packing layouts into a buffer as a set of strings.
+ */
+typedef struct _pack_args {
+	Buf        buffer;
+	char       *current_line;
+	layout_t   *layout;
+	hostlist_t list_entities;
+	char       *type;
+	uint32_t   all;
+	uint32_t   no_relation;
+	uint32_t   record_count;
+} _pack_args_t;
+
+/*
+ * _pack_data_key : internal function used to get the key=val
+ * string representation of a particular entity data value
+ */
+static char* _pack_data_key(layouts_keydef_t* keydef, void* value)
+{
+	char val;
+	if (!keydef) {
+		return NULL;
+	}
+	switch(keydef->type) {
+	case L_T_ERROR:
+		return NULL;
+	case L_T_STRING:
+		return xstrdup_printf("%s=%s", keydef->shortkey,
+				      (char*)value);
+	case L_T_LONG:
+		return xstrdup_printf("%s=%ld", keydef->shortkey,
+				      *(long*)value);
+	case L_T_UINT16:
+		return xstrdup_printf("%s=%u", keydef->shortkey,
+				      *(uint16_t*)value);
+	case L_T_UINT32:
+		return xstrdup_printf("%s=%"PRIu32, keydef->shortkey,
+				      *(uint32_t*)value);
+	case L_T_BOOLEAN:
+		val = *(bool*)value;
+		return xstrdup_printf("%s=%s", keydef->shortkey,
+				      val ? "true" : "false");
+	case L_T_FLOAT:
+		return xstrdup_printf("%s=%f", keydef->shortkey,
+				      *(float*)value);
+	case L_T_DOUBLE:
+		return xstrdup_printf("%s=%f", keydef->shortkey,
+				      *(double*)value);
+	case L_T_LONG_DOUBLE:
+		return xstrdup_printf("%s=%Lf", keydef->shortkey,
+				      *(long double*)value);
+	case L_T_CUSTOM:
+		if (keydef->custom_dump) {
+			return keydef->custom_dump(value);
+		} else
+			return NULL;
+	}
+	return NULL;
+}
+
+/*
+ * _pack_entity_layout_data : internal function used to append the
+ * key/value of an entity data element corresponding to the targeted
+ * layout when walking an entity list of entity data elements
+ *
+ * - appends " %key%=%val%" to the char* received as an input arg member
+ *
+ */
+static void _pack_entity_layout_data(void* item, void* arg)
+{
+	entity_data_t* data;
+	_pack_args_t *pargs;
+
+	layouts_keydef_t* keydef;
+	char *data_dump;
+
+	xassert(item);
+	xassert(arg);
+
+	data = (entity_data_t*) item;
+	pargs = (_pack_args_t *) arg;
+
+	/* the pack args must contain a valid char* to append to */
+	xassert(pargs->current_line);
+
+	/* we must be able to get the keydef associated to the data key */
+	xassert(data);
+	keydef = xhash_get(mgr->keydefs, data->key);
+	xassert(keydef);
+
+	/* only dump keys related to the targeted layout */
+	if (!strncmp(keydef->plugin->layout->type, pargs->layout->type, PATHLEN)) {
+		data_dump = _pack_data_key(keydef, data->value);
+		/* avoid printing any error in case of NULL pointer returned */
+		if (data_dump) {
+			xstrcat(pargs->current_line, " ");
+			xstrcat(pargs->current_line, data_dump);
+			xfree(data_dump);
+		}
+	}
+
+	return;
+}
+
+/*
+ * _pack_layout_tree : internal function used when walking a layout tree
+ *
+ * - print one line per entity with the following pattern :
+ *  Entity=%name% [Type=%type%] [Enclosed=%children%] [key1=val1 ...]
+ *
+ * - potentially print a header line if the entity is the root, like:
+ *  Root=%name%
+ *
+ */
+static uint8_t _pack_layout_tree(xtree_node_t* node, uint8_t which,
+				 uint32_t level, void* arg)
+{
+	_pack_args_t *pargs;
+	xtree_node_t* child;
+	entity_node_t* enode;
+	hostlist_t enclosed;
+	char *enclosed_str = NULL, *e_name = NULL, *e_type = NULL;
+	Buf buffer;
+	char *strdump, *str = NULL;
+
+	/* only need to work for preorder and leaf cases */
+	if (which != XTREE_PREORDER && which != XTREE_LEAF) {
+		return 1;
+	}
+
+	/* get the buffer we need to pack the data into */
+	pargs = (_pack_args_t *) arg;
+	buffer = pargs->buffer;
+
+	/* aggregate children names to build the Enclosed=.. value */
+	if (which == XTREE_PREORDER) {
+		enclosed = hostlist_create(NULL);
+		child = node->start;
+		while (child) {
+			enode = (entity_node_t*) xtree_node_get_data(child);
+			if (!enode || !enode->entity) {
+				hostlist_push(enclosed, "NULL");
+			} else {
+				hostlist_push(enclosed, enode->entity->name);
+			}
+			child = child->next;
+		}
+		hostlist_uniq(enclosed);
+		if (hostlist_count(enclosed) > 0) {
+			enclosed_str = hostlist_ranged_string_xmalloc(enclosed);
+		}
+		hostlist_destroy(enclosed);
+	}
+
+	/* get the entity associated to this xtree node */
+	enode = (entity_node_t*) xtree_node_get_data(node);
+	if (!enode || !enode->entity) {
+		e_name = (char*) "NULL";
+		e_type = NULL;
+	} else {
+		e_name = enode->entity->name;
+		e_type = enode->entity->type;
+	}
+
+	/* print this entity as root if necessary */
+	if (level == 0 && pargs->no_relation != 1 && pargs->type == NULL) {
+		if (pargs->all != 0 ||
+		    pargs->list_entities == NULL ||
+		    hostlist_find(pargs->list_entities, e_name) != -1) {
+			str = xstrdup_printf("Root=%s\n", e_name);
+			packstr(str, buffer);
+			pargs->record_count++;
+			xfree(str);
+		}
+	}
+
+	/* print entity name and type when possible */
+	str = xstrdup_printf("Entity=%s", e_name);
+	if (e_type) {
+		strdump = xstrdup_printf("%s Type=%s", str, e_type);
+		xfree(str);
+		str = strdump;
+	}
+
+	/* add entity keys matching the layout to the current str */
+	pargs->current_line = str;
+	if (enode && enode->entity) {
+		xhash_walk(enode->entity->data, _pack_entity_layout_data,
+			   pargs);
+	}
+	/* the current line might have been extended/remalloced, so
+	 * we need to sync it again in str for further actions */
+	str = pargs->current_line;
+	pargs->current_line = NULL;
+
+	/* don't print enclosed if no_relation option */
+	if (pargs->no_relation == 1
+	    && enclosed_str != NULL
+	    && pargs->list_entities == NULL) {
+		xfree(enclosed_str);
+		xfree(str);
+		return 1;
+	}
+
+	/* don't print non-enclosed if no "entities char*" option */
+	if (pargs->all == 0
+	    && pargs->list_entities == NULL
+	    && enclosed_str == NULL ) {
+		xfree(str);
+		return 1;
+	}
+
+	/* don't print entities if not named in "entities char*" */
+	if (pargs->all == 0
+	    && pargs->list_entities != NULL
+	    && hostlist_find(pargs->list_entities, e_name) == -1) {
+		xfree(str);
+		return 1;
+	}
+
+	/* don't print entities if not type of "type char*" */
+	if (pargs->type != NULL
+	    && (e_type == NULL || strcasecmp(e_type, pargs->type)!=0)) {
+		xfree(str);
+		return 1;
+	}
+
+	/* print enclosed entities if any */
+	if (!enclosed_str) {
+		xstrcat(str, "\n");
+	} else {
+		strdump = xstrdup_printf("%s Enclosed=%s\n", str, enclosed_str);
+		xfree(enclosed_str);
+		xfree(str);
+		str = strdump;
+	}
+
+	packstr(str, buffer);
+	pargs->record_count++;
+	xfree(str);
+
+	return 1;
+}
+
+/* helper function used by layouts_save_state when walking through
+ * the various layouts to save their state in Slurm state save location */
+static void _state_save_layout(void* item, void* arg)
+{
+	layout_t* layout = (layout_t*)item;
+	layouts_state_save_layout(layout->type);
+}
+
+/*****************************************************************************\
+ *                            ENTITIES KVs AUTOUPDATE                        *
+\*****************************************************************************/
+
+/*
+ * helper structure used when walking the tree of relational nodes in order
+ * to automatically update the entities KVs based on their inheritance
+ * relationships
+ */
+typedef struct _autoupdate_tree_args {
+	entity_node_t* enode;
+	uint8_t which;
+	uint32_t level;
+} _autoupdate_tree_args_t;
+
+/*
+ * helper function used to update a particular KV value of an entity according
+ * to a particular operator looking for the right type to apply during the
+ * operation
+ */
+static int _autoupdate_entity_kv(layouts_keydef_t* keydef,
+				 layouts_keydef_t* ref_keydef,
+				 slurm_parser_operator_t operator,
+				 void* oldvalue, void* value)
+{
+	int rc = SLURM_ERROR;
+
+	if (keydef->type != ref_keydef->type)
+		return rc;
+
+	if (keydef->type == L_T_LONG) {
+		_entity_update_kv_helper(long, operator);
+	} else if (keydef->type == L_T_UINT16) {
+		_entity_update_kv_helper(uint16_t, operator);
+	} else if (keydef->type == L_T_UINT32) {
+		_entity_update_kv_helper(uint32_t, operator);
+	} else if (keydef->type == L_T_FLOAT) {
+		_entity_update_kv_helper(float, operator);
+	} else if (keydef->type == L_T_DOUBLE) {
+		_entity_update_kv_helper(double, operator);
+	} else if (keydef->type == L_T_LONG_DOUBLE) {
+		_entity_update_kv_helper(long double, operator);
+	} else {
+		// L_T_BOOLEAN, L_T_STRING, L_T_CUSTOM not yet supported
+		return rc;
+	}
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * helper function used to update KVs of an entity using its xtree_node
+ * looking for known inheritance in the neighborhood (parents/children) */
+static void _tree_update_node_entity_data(void* item, void* arg) {
+
+	uint32_t action;
+	entity_data_t* data;
+	_autoupdate_tree_args_t *pargs;
+	layouts_keydef_t* keydef;
+	layouts_keydef_t* ref_keydef;
+	slurm_parser_operator_t operator;
+	xtree_node_t *node, *child;
+	entity_node_t *enode, *cnode;
+	void* oldvalue;
+	void* value;
+	uint32_t count;
+	int setter;
+
+	xassert(item);
+	xassert(arg);
+
+	data = (entity_data_t*) item;
+	pargs = (_autoupdate_tree_args_t *) arg;
+	cnode = pargs->enode;
+
+	/* we must be able to get the keydef associated to the data key */
+	xassert(data);
+	keydef = xhash_get(mgr->keydefs, data->key);
+	xassert(keydef);
+
+	/* only work on keys that depend on their neighborhood */
+	if (!(keydef->flags & KEYSPEC_UPDATE_CHILDREN_MASK) &&
+	    !(keydef->flags & KEYSPEC_UPDATE_PARENTS_MASK)) {
+		return;
+	}
+
+	/* if children dependent and we are at leaf level, nothing to do */
+	if (keydef->flags & KEYSPEC_UPDATE_CHILDREN_MASK &&
+	    pargs->which == XTREE_LEAF)
+		return;
+
+	/* only work on keys related to the targeted layout */
+	if (strncmp(keydef->plugin->layout->type, pargs->enode->layout->type,
+		    PATHLEN)) {
+		return;
+	}
+
+	/* get ref_key (identical if not defined) */
+	if (keydef->ref_key != NULL) {
+		ref_keydef = xhash_get(mgr->keydefs, keydef->ref_key);
+		if (!ref_keydef) {
+			debug2("layouts: autoupdate: key='%s': invalid "
+			       "ref_key='%s'", keydef->key, keydef->ref_key);
+			return;
+		}
+	} else {
+		ref_keydef = keydef;
+	}
+
+	/* process parents aggregation
+	 * for now, xtree only provides one parent so any update op
+	 * (MAX, MIN, FSHARE, ...) is a setter */
+	if ((action = keydef->flags & KEYSPEC_UPDATE_PARENTS_MASK) &&
+	    (pargs->which == XTREE_PREORDER || pargs->which == XTREE_LEAF) &&
+	    (node = ((xtree_node_t*)pargs->enode->node)->parent) != NULL ) {
+
+		/* get current node value reference */
+		oldvalue = entity_get_data_ref(cnode->entity, keydef->key);
+
+		/* get siblings count */
+		child = node->start;
+		count = 0;
+		while (child) {
+			count++;
+			child = child->next;
+		}
+
+		/* get parent node KV data ref */
+		enode = (entity_node_t*) xtree_node_get_data(node);
+		value = entity_get_data_ref(enode->entity, ref_keydef->key);
+		if (!value)
+			return;
+
+		/* only set operation currently provided for parents except
+		 * for fshare action */
+		_autoupdate_entity_kv(keydef, ref_keydef, S_P_OPERATOR_SET,
+				      oldvalue, value);
+		if (action == KEYSPEC_UPDATE_PARENTS_FSHARE) {
+			_autoupdate_entity_kv(keydef, ref_keydef,
+					      S_P_OPERATOR_AVG,
+					      oldvalue, (void*) &count);
+		}
+
+		return;
+	}
+
+	/* process children aggregation */
+	if ((action = keydef->flags & KEYSPEC_UPDATE_CHILDREN_MASK) &&
+	    pargs->which == XTREE_ENDORDER) {
+
+		/* get current node value reference */
+		oldvalue = entity_get_data_ref(cnode->entity, keydef->key);
+
+		/* get children count */
+		node = (xtree_node_t*)cnode->node;
+		child = node->start;
+		count = 0;
+		while (child) {
+			count++;
+			child = child->next;
+		}
+
+		/* no action if no children */
+		if (count == 0)
+			return;
+
+		/* if count action, do what is necessary and return */
+		if (action == KEYSPEC_UPDATE_CHILDREN_COUNT) {
+			_autoupdate_entity_kv(keydef, ref_keydef,
+					      S_P_OPERATOR_SET,
+					      oldvalue, (void*) &count);
+			return;
+		}
+
+		/* iterate on the children */
+		setter = 1;
+		child = node->start;
+		while (child) {
+			/* get child node KV data ref */
+			enode = (entity_node_t*) xtree_node_get_data(child);
+			value = entity_get_data_ref(enode->entity,
+						    ref_keydef->key);
+
+			if (!value) {
+				/* try next child */
+				child = child-> next;
+				continue;
+			}
+
+			switch (action) {
+			case KEYSPEC_UPDATE_CHILDREN_SUM:
+			case KEYSPEC_UPDATE_CHILDREN_AVG:
+				/* first child is a setter */
+				if (setter) {
+					operator = S_P_OPERATOR_SET;
+					setter = 0;
+				}
+				else
+					operator = S_P_OPERATOR_ADD;
+				break;
+			case KEYSPEC_UPDATE_CHILDREN_MIN:
+				operator = S_P_OPERATOR_SET_IF_MIN;
+				break;
+			case KEYSPEC_UPDATE_CHILDREN_MAX:
+				operator = S_P_OPERATOR_SET_IF_MAX;
+				break;
+			default:
+				/* should not be called! */
+				return;
+			}
+
+			/* update the value according to the operator */
+			_autoupdate_entity_kv(keydef, ref_keydef, operator,
+					      oldvalue, value);
+
+			/* then next child */
+			child = child-> next;
+		}
+
+		/* if average action, do what is necessary before return */
+		if (action == KEYSPEC_UPDATE_CHILDREN_AVG) {
+			_autoupdate_entity_kv(keydef, ref_keydef,
+					      S_P_OPERATOR_AVG,
+					      oldvalue, (void*) &count);
+			return;
+		}
+
+		return;
+	}
+
+}
+
+/*
+ * _autoupdate_layout_tree : internal function used when automatically
+ * updating elements of a layout tree using _layouts_autoupdate_layout */
+static uint8_t _autoupdate_layout_tree(xtree_node_t* node, uint8_t which,
+				       uint32_t level, void* arg)
+{
+	entity_node_t* cnode;
+	_autoupdate_tree_args_t sync_args;
+
+	/* only need to work for preorder, leaf and endorder cases */
+	if (which != XTREE_PREORDER &&
+	    which != XTREE_LEAF &&
+	    which != XTREE_ENDORDER) {
+		return 1;
+	}
+
+	/* extract current node's entity_node for further browsing */
+	cnode = (entity_node_t*) xtree_node_get_data(node);
+	if (!cnode)
+		return 1;
+
+	/* prepare downcall args */
+	sync_args.enode = cnode;
+	sync_args.which = which;
+	sync_args.level = level;
+
+	/* iterate over the K/V of the entity, syncing them according
+	 * to their autoupdate flags */
+	xhash_walk(cnode->entity->data, _tree_update_node_entity_data,
+		   &sync_args);
+
+	return 1;
+}
+
+/* helper function used to automatically update a layout internal
+ * entities KVs based on inheritance relations (parents/children) */
+static int _layouts_autoupdate_layout(layout_t* layout)
+{
+	/* autoupdate according to the layout struct type */
+	switch(layout->struct_type) {
+	case LAYOUT_STRUCT_TREE:
+		xtree_walk(layout->tree, NULL, 0,
+			   XTREE_LEVEL_MAX,
+			   _autoupdate_layout_tree, NULL);
+		break;
+	}
+
+	return SLURM_SUCCESS;
+}
+
+/* helper function used to automatically update a layout internal
+ * entities KVs based on inheritance relations (parents/children)
+ * only when allowed by the associated plugin */
+static int _layouts_autoupdate_layout_if_allowed(layout_t* layout)
+{
+	int i, rc = SLURM_ERROR;
+	/* look if the corresponding layout plugin enables autoupdate */
+	for (i = 0; i < mgr->plugins_count; i++) {
+		if (mgr->plugins[i].layout == layout) {
+			/* no autoupdate allowed, return success */
+			if (!mgr->plugins[i].ops->spec->autoupdate)
+				rc = SLURM_SUCCESS;
+			else
+				rc = _layouts_autoupdate_layout(layout);
+			break;
+		}
+	}
+	return rc;
+}
+
+/*****************************************************************************\
+ *                                   DEBUG DUMP                              *
+\*****************************************************************************/
+
 /*
  * For debug purposes, dump functions helping to print the layout mgr
  * internal states in a file after the load.
@@ -1021,15 +2216,16 @@ static uint8_t _dump_layout_tree(xtree_node_t* node, uint8_t which,
 {
 	FILE* fdump = (FILE*)arg;
 	entity_t* e;
+	entity_node_t* enode;
 	if (which != XTREE_PREORDER && which != XTREE_LEAF) {
 		return 1;
 	}
-	e = xtree_node_get_data(node);
-	if (!e) {
+	enode = (entity_node_t*) xtree_node_get_data(node);
+	if (!enode || !enode->entity) {
 		fprintf(fdump, "NULL_entity\n");
 	}
 	else {
-		fprintf(fdump, "%*s%s\n", level, " ", e->name);
+		fprintf(fdump, "%*s%s\n", level, " ", enode->entity->name);
 	}
 	return 1;
 }
@@ -1060,16 +2256,17 @@ static void _dump_layouts(void* item, void* arg)
 }
 #endif
 
+
 /*****************************************************************************\
  *                             SLURM LAYOUTS API                             *
 \*****************************************************************************/
 
-int slurm_layouts_init(void)
+int layouts_init(void)
 {
 	int i = 0;
 	uint32_t layouts_count;
 
-	debug3("layouts: slurm_layouts_init()...");
+	debug3("layouts: layouts_init()...");
 
 	if (mgr->plugins) {
 		return SLURM_SUCCESS;
@@ -1086,8 +2283,8 @@ int slurm_layouts_init(void)
 
 	mgr->plugins = xmalloc(sizeof(layout_plugin_t) * layouts_count);
 	list_for_each(layouts_mgr.layouts_desc,
-			_slurm_layouts_init_layouts_walk_helper,
-			&i);
+		      _layouts_init_layouts_walk_helper,
+		      &i);
 	mgr->plugins_count = i;
 
 	if (mgr->plugins_count != layouts_count) {
@@ -1099,7 +2296,7 @@ int slurm_layouts_init(void)
 		xfree(mgr->plugins);
 		mgr->plugins = NULL;
 	} else if (layouts_count > 0) {
-		info("layouts: slurm_layouts_init done : %d layout(s) "
+		info("layouts: layouts_init done : %d layout(s) "
 		     "initialized", layouts_count);
 	}
 
@@ -1109,14 +2306,22 @@ int slurm_layouts_init(void)
 		SLURM_SUCCESS : SLURM_ERROR;
 }
 
-int slurm_layouts_fini(void)
+int layouts_fini(void)
 {
 	int i;
 
-	debug3("layouts: slurm_layouts_fini()...");
+	debug3("layouts: layouts_fini()...");
+
+	/* push layouts states to the state save location */
+	layouts_state_save();
 
 	slurm_mutex_lock(&mgr->lock);
 
+	/* free the layouts before destroying the plugins,
+	 * otherwise we will get trouble xfreeing the layouts whose
+	 * memory is owned by the plugins structs */
+	layouts_mgr_free(mgr);
+
 	for (i = 0; i < mgr->plugins_count; i++) {
 		_layout_plugins_destroy(&mgr->plugins[i]);
 	}
@@ -1124,8 +2329,6 @@ int slurm_layouts_fini(void)
 	mgr->plugins = NULL;
 	mgr->plugins_count = 0;
 
-	layouts_mgr_free(mgr);
-
 	slurm_mutex_unlock(&mgr->lock);
 
 	info("layouts: all layouts are now unloaded.");
@@ -1133,13 +2336,14 @@ int slurm_layouts_fini(void)
 	return SLURM_SUCCESS;
 }
 
-int slurm_layouts_load_config(void)
+int layouts_load_config(int recover)
 {
 	int i, rc, inx;
 	struct node_record *node_ptr;
 	layout_t *layout;
 	uint32_t layouts_count;
 	entity_t *entity;
+	entity_node_t *enode;
 	void *ptr;
 
 	info("layouts: loading entities/relations information");
@@ -1170,12 +2374,13 @@ int slurm_layouts_load_config(void)
 	 */
 	for (inx = 0, node_ptr = node_record_table_ptr; inx < node_record_count;
 	     inx++, node_ptr++) {
+		debug3("layouts: loading node %s", node_ptr->name);
 		xassert (node_ptr->magic == NODE_MAGIC);
 		xassert (node_ptr->config_ptr->magic == CONFIG_MAGIC);
 
 		/* init entity structure on the heap */
 		entity = (entity_t*) xmalloc(sizeof(struct entity_st));
-		entity_init(entity, node_ptr->name, 0);
+		entity_init(entity, node_ptr->name, "Node");
 		entity->ptr = node_ptr;
 
 		/* add to mgr entity hashtable */
@@ -1190,8 +2395,9 @@ int slurm_layouts_load_config(void)
 
 		/* add to the base layout (storing a callback ref to the
 		 * layout node pointing to it) */
+		enode = entity_add_node(entity, layout);
 		ptr = xtree_add_child(layout->tree, layout->tree->root,
-				      (void*)entity, XTREE_APPEND);
+				      (void*)enode, XTREE_APPEND);
 		if (!ptr) {
 			error("layouts: unable to add entity of node %s"
 			      "in the hashtable, aborting", node_ptr->name);
@@ -1200,8 +2406,7 @@ int slurm_layouts_load_config(void)
 			rc = SLURM_ERROR;
 			break;
 		} else {
-			debug3("layouts: loading node %s", node_ptr->name);
-			entity_add_node(entity, layout, ptr);
+			enode->node = ptr;
 		}
 	}
 	debug("layouts: %d/%d nodes in hash table, rc=%d",
@@ -1247,6 +2452,14 @@ exit:
 				break;
 			}
 		}
+		if (recover) {
+			debug("layouts: loading stage 1.1 (restore state)");
+			for (i = 0; i < mgr->plugins_count; ++i) {
+				debug3("layouts: reading state of %s",
+				       mgr->plugins[i].name);
+				_layouts_read_state(&mgr->plugins[i]);
+			}
+		}
 		debug("layouts: loading stage 2");
 		for (i = 0; i < mgr->plugins_count; ++i) {
 			debug3("layouts: creating relations for %s",
@@ -1257,6 +2470,19 @@ exit:
 				break;
 			}
 		}
+		debug("layouts: loading stage 3");
+		for (i = 0; i < mgr->plugins_count; ++i) {
+			debug3("layouts: autoupdating %s",
+			       mgr->plugins[i].name);
+			if (mgr->plugins[i].ops->spec->autoupdate) {
+				if (_layouts_autoupdate_layout(mgr->plugins[i].
+							       layout) !=
+				    SLURM_SUCCESS) {
+					rc = SLURM_ERROR;
+					break;
+				}
+			}
+		}
 	}
 
 /*
@@ -1279,14 +2505,324 @@ exit:
 	return rc;
 }
 
-layout_t* slurm_layouts_get_layout(const char* type)
+layout_t* layouts_get_layout_nolock(const char* type)
+{
+	return (layout_t*)xhash_get(mgr->layouts, type);
+}
+
+layout_t* layouts_get_layout(const char* type)
 {
-	layout_t* layout = (layout_t*)xhash_get(mgr->layouts, type);
+	layout_t *layout = NULL;
+	slurm_mutex_lock(&mgr->lock);
+	layout = layouts_get_layout_nolock(type);
+	slurm_mutex_unlock(&mgr->lock);
 	return layout;
 }
 
-entity_t* slurm_layouts_get_entity(const char* name)
+entity_t* layouts_get_entity_nolock(const char* name)
+{
+	return (entity_t*)xhash_get(mgr->entities, name);
+}
+
+entity_t* layouts_get_entity(const char* name)
 {
-	entity_t* e = (entity_t*)xhash_get(mgr->entities, name);
+	entity_t* e;
+	slurm_mutex_lock(&mgr->lock);
+	e = layouts_get_entity_nolock(name);
+	slurm_mutex_unlock(&mgr->lock);
 	return e;
 }
+
+
+int layouts_pack_layout(char *l_type, char *char_entities, char *type,
+			uint32_t no_relation, Buf buffer)
+{
+	_pack_args_t pargs;
+	layout_t* layout;
+	int orig_offset, fini_offset;
+	char *str;
+
+	slurm_mutex_lock(&mgr->lock);
+
+	layout = layouts_get_layout_nolock(l_type);
+	if (layout == NULL) {
+		slurm_mutex_unlock(&mgr->lock);
+		info("unable to get layout of type '%s'", l_type);
+		return SLURM_ERROR;
+	}
+	/* initialize args for recursive packing */
+	pargs.buffer = buffer;
+	pargs.layout = layout;
+	pargs.current_line = NULL;
+	pargs.all = 0;
+	pargs.list_entities = NULL;
+	if (char_entities != NULL) {
+		if (strcmp(char_entities, "*") == 0)
+			pargs.all = 1;
+		else
+			pargs.list_entities = hostlist_create(char_entities);
+	}
+	pargs.type = type;
+	pargs.no_relation = no_relation;
+	pargs.record_count = 0;
+	orig_offset = get_buf_offset(buffer);
+	pack32(pargs.record_count, buffer);
+
+	if ( pargs.no_relation == 0
+	     && pargs.list_entities == NULL
+	     && pargs.type == NULL ) {
+		/* start by packing the layout priority */
+		str = xstrdup_printf("Priority=%u\n", layout->priority);
+		packstr(str, buffer);
+		pargs.record_count++;
+		xfree(str);
+	}
+
+	/* pack according to the layout struct type */
+	switch (layout->struct_type) {
+	case LAYOUT_STRUCT_TREE:
+		xtree_walk(layout->tree, NULL, 0, XTREE_LEVEL_MAX,
+			   _pack_layout_tree, &pargs);
+		break;
+	}
+
+	if (pargs.list_entities != NULL)
+		slurm_hostlist_destroy(pargs.list_entities);
+
+	fini_offset = get_buf_offset(buffer);
+	set_buf_offset(buffer, orig_offset);
+	pack32(pargs.record_count, buffer);
+	set_buf_offset(buffer, fini_offset);
+
+	slurm_mutex_unlock(&mgr->lock);
+
+	return SLURM_SUCCESS;
+}
+
+int layouts_update_layout(char *l_type, Buf buffer)
+{
+	int i, rc;
+	slurm_mutex_lock(&mgr->lock);
+	for (i = 0; i < mgr->plugins_count; i++) {
+		if (!strcmp(mgr->plugins[i].name, l_type)) {
+			rc = _layouts_update_state((layout_plugin_t*)
+						   &mgr->plugins[i],
+						   buffer);
+			slurm_mutex_unlock(&mgr->lock);
+			return rc;
+		}
+	}
+	info("%s: no plugin matching layout=%s, skipping", __func__, l_type);
+	slurm_mutex_unlock(&mgr->lock);
+	return SLURM_ERROR;
+}
+
+int layouts_autoupdate_layout(char *l_type)
+{
+	int rc = SLURM_ERROR;
+	layout_t* layout;
+
+	slurm_mutex_lock(&mgr->lock);
+	layout = layouts_get_layout_nolock(l_type);
+	if (layout == NULL) {
+		info("unable to get layout of type '%s'", l_type);
+	} else {
+		rc = _layouts_autoupdate_layout(layout);
+	}
+	slurm_mutex_unlock(&mgr->lock);
+
+	return rc;
+}
+
+int layouts_state_save_layout(char* l_type)
+{
+	int error_code = 0, log_fd, offset;
+	char *old_file = NULL, *new_file = NULL, *reg_file = NULL;
+	static int high_buffer_size = (16 * 1024);
+	Buf buffer = init_buf(high_buffer_size);
+	FILE* fdump;
+	uint32_t utmp32, record_count = 0;
+	char *tmp_str = NULL;
+
+	DEF_TIMERS;
+	START_TIMER;
+
+	/* pack the targeted layout into a tmp buffer */
+	error_code = layouts_pack_layout(l_type, "*", NULL, 0, buffer);
+
+	if (error_code != SLURM_SUCCESS) {
+		error("unable to save layout[%s] state", l_type);
+		return error_code;
+	}
+
+	/* rewind the freshly created buffer to unpack it into a file */
+	offset = get_buf_offset(buffer);
+	high_buffer_size = MAX(high_buffer_size, offset);
+	set_buf_offset(buffer, 0);
+
+	/* create working files */
+	reg_file = _state_get_filename(l_type);
+	old_file = xstrdup_printf("%s.old", reg_file);
+	new_file = xstrdup_printf("%s.new", reg_file);
+	log_fd = creat(new_file, 0600);
+	if (log_fd < 0 || !(fdump = fdopen(log_fd, "w"))) {
+		error("Can't save state, create file %s error %m",
+		      new_file);
+		error_code = errno;
+	} else {
+		/* extract the amount of records and then proceed
+		 * then dump packed strings into the temporary file */
+		safe_unpack32(&record_count, buffer);
+		debug("layouts/%s: dumping %u records into state file",
+		      l_type, record_count);
+		while (get_buf_offset(buffer) < offset) {
+			safe_unpackstr_xmalloc(&tmp_str, &utmp32, buffer);
+			if (tmp_str != NULL) {
+				if (*tmp_str == '\0') {
+					xfree(tmp_str);
+					break;
+				}
+				fprintf(fdump, "%s", tmp_str);
+				xfree(tmp_str);
+				continue;
+			}
+		unpack_error:
+			break;
+		}
+		fflush(fdump);
+		fsync(log_fd);
+		fclose(fdump);
+	}
+	if (error_code)
+		(void) unlink(new_file);
+	else {			/* file shuffle */
+		(void) unlink(old_file);
+		if (link(reg_file, old_file))
+			debug4("unable to create link for %s -> %s: %m",
+			       reg_file, old_file);
+		(void) unlink(reg_file);
+		if (link(new_file, reg_file))
+			debug4("unable to create link for %s -> %s: %m",
+			       new_file, reg_file);
+		(void) unlink(new_file);
+	}
+	xfree(old_file);
+	xfree(reg_file);
+	xfree(new_file);
+
+	free_buf(buffer);
+
+	END_TIMER2("layouts_state_save_layout");
+
+	return SLURM_SUCCESS;
+}
+
+int layouts_state_save(void)
+{
+	DEF_TIMERS;
+	START_TIMER;
+	xhash_walk(mgr->layouts,  _state_save_layout, NULL);
+	END_TIMER2("layouts_state_save");
+	return SLURM_SUCCESS;
+}
+
+#define _layouts_entity_wrapper(func, l, e, r...)			\
+	layout_t* layout;						\
+	entity_t* entity;						\
+	int rc;								\
+	slurm_mutex_lock(&mgr->lock);					\
+	layout = layouts_get_layout_nolock(l);				\
+	entity = layouts_get_entity_nolock(e);				\
+	rc = func(layout, entity, ##r);					\
+	slurm_mutex_unlock(&mgr->lock);					\
+	return rc;							\
+
+int layouts_entity_get_kv_type(char* l, char* e, char* key)
+{
+	_layouts_entity_wrapper(_layouts_entity_get_kv_type,l,e,key);
+}
+
+int layouts_entity_get_kv_flags(char* l, char* e, char* key)
+{
+	_layouts_entity_wrapper(_layouts_entity_get_kv_flags, l, e, key);
+}
+
+int layouts_entity_push_kv(char* l, char* e, char* key)
+{
+	_layouts_entity_wrapper(_layouts_entity_push_kv, l, e, key);
+}
+
+int layouts_entity_pull_kv(char* l, char* e, char* key)
+{
+	_layouts_entity_wrapper(_layouts_entity_pull_kv, l, e, key);
+}
+
+int layouts_entity_set_kv(char* l, char* e, char* key, void* value,
+			  layouts_keydef_types_t key_type)
+{
+	_layouts_entity_wrapper(_layouts_entity_set_kv, l, e,
+				key, value, key_type);
+}
+
+int layouts_entity_set_kv_ref(char* l, char* e, char* key, void* value,
+			      layouts_keydef_types_t key_type)
+{
+	_layouts_entity_wrapper(_layouts_entity_set_kv_ref, l, e,
+				key, value, key_type);
+}
+
+int layouts_entity_setpush_kv(char* l, char* e, char* key, void* value,
+			      layouts_keydef_types_t key_type)
+{
+	_layouts_entity_wrapper(_layouts_entity_setpush_kv, l, e,
+				key, value, key_type);
+}
+
+int layouts_entity_setpush_kv_ref(char* l, char* e, char* key, void* value,
+				  layouts_keydef_types_t key_type)
+{
+	_layouts_entity_wrapper(_layouts_entity_setpush_kv_ref, l, e,
+				key, value, key_type);
+}
+
+int layouts_entity_get_kv(char* l, char* e, char* key, void* value,
+			  layouts_keydef_types_t key_type)
+{
+	_layouts_entity_wrapper(_layouts_entity_get_kv, l, e,
+				key, value, key_type);
+}
+
+int layouts_entity_get_mkv(char* l, char* e, char* keys, void* value,
+			   size_t size, layouts_keydef_types_t key_type)
+{
+	_layouts_entity_wrapper(_layouts_entity_get_mkv, l, e,
+				keys, value, size, key_type);
+}
+
+int layouts_entity_get_kv_ref(char* l, char* e, char* key, void** value,
+			      layouts_keydef_types_t key_type)
+{
+	_layouts_entity_wrapper(_layouts_entity_get_kv_ref, l, e,
+				key, value, key_type);
+}
+
+int layouts_entity_get_mkv_ref(char* l, char* e, char* keys, void* value,
+			       size_t size, layouts_keydef_types_t key_type)
+{
+	_layouts_entity_wrapper(_layouts_entity_get_mkv_ref, l, e,
+				keys, value, size, key_type);
+}
+
+int layouts_entity_pullget_kv(char* l, char* e, char* key, void* value,
+			      layouts_keydef_types_t key_type)
+{
+	_layouts_entity_wrapper(_layouts_entity_pullget_kv, l, e,
+				key, value, key_type);
+}
+
+int layouts_entity_pullget_kv_ref(char* l, char* e, char* key, void** value,
+				  layouts_keydef_types_t key_type)
+{
+	_layouts_entity_wrapper(_layouts_entity_pullget_kv_ref, l, e,
+				key, value, key_type);
+}
diff --git a/src/common/layouts_mgr.h b/src/common/layouts_mgr.h
index 357ab5f19..40db1ee6e 100644
--- a/src/common/layouts_mgr.h
+++ b/src/common/layouts_mgr.h
@@ -4,6 +4,7 @@
  *  Initially written by Francois Chevallier <chevallierfrancois@free.fr>
  *  at Bull for slurm-2.6.
  *  Adapted by Matthieu Hautreux <matthieu.hautreux@cea.fr> for slurm-14.11.
+ *  Enhanced by Matthieu Hautreux <matthieu.hautreux@cea.fr> for slurm-15.x.
  *
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://slurm.schedmd.com/>.
@@ -41,6 +42,7 @@
 #include "src/common/list.h"
 #include "src/common/xhash.h"
 #include "src/common/xtree.h"
+#include "src/common/pack.h"
 #include "src/common/parse_config.h"
 
 #include "src/common/layout.h"
@@ -52,12 +54,12 @@
  * The layouts_mgr_t manages the layouts and entities loaded through the list
  * of layouts specified in the Slurm configuration file (slurm.conf)
  *
- * At startup, Slurm initialize one layouts_mgr_t using slurm_layouts_init()
+ * At startup, Slurm initializes one layouts_mgr_t using layouts_init()
  * and then load the required layouts defined in the configuration using
- * slurm_layouts_load_config().
+ * layouts_load_config().
  *
  * The different layouts and entities can then be queried using either
- * slurm_layouts_get_layout() and slurm_layouts_get_entity().
+ * layouts_get_layout() and layouts_get_entity().
  *
  * Note that each entity contains a list of nodes appearing inside the
  * associated layouts.
@@ -82,9 +84,30 @@ typedef enum layouts_keydef_types_en {
 	L_T_CUSTOM,
 } layouts_keydef_types_t;
 
+/* keyspec flags */
+#define KEYSPEC_RDONLY        0x00000001
+
+#define KEYSPEC_UPDATE_CHILDREN_SUM   0x00010000
+#define KEYSPEC_UPDATE_CHILDREN_AVG   0x00020000
+#define KEYSPEC_UPDATE_CHILDREN_MIN   0x00040000
+#define KEYSPEC_UPDATE_CHILDREN_MAX   0x00080000
+#define KEYSPEC_UPDATE_CHILDREN_COUNT 0x00110000
+#define KEYSPEC_UPDATE_CHILDREN_MASK  0x00FF0000
+
+#define KEYSPEC_UPDATE_PARENTS_SUM    0x01000000
+#define KEYSPEC_UPDATE_PARENTS_AVG    0x02000000
+#define KEYSPEC_UPDATE_PARENTS_MIN    0x04000000
+#define KEYSPEC_UPDATE_PARENTS_MAX    0x08000000
+#define KEYSPEC_UPDATE_PARENTS_FSHARE 0x11000000
+#define KEYSPEC_UPDATE_PARENTS_MASK   0xFF000000
+
 typedef struct layouts_keyspec_st {
 	char*			key;
 	layouts_keydef_types_t	type;
+	uint32_t                flags;
+	char*			ref_key; /* reference key to use for update
+					  * NULL means use the same key in my
+					  * neighborhood */
 	void			(*custom_destroy)(void*);
 	char*			(*custom_dump)(void*);
 } layouts_keyspec_t;
@@ -95,6 +118,7 @@ typedef struct layouts_plugin_spec_st {
 	int				struct_type;
 	const char**			etypes;
 	bool				automerge;
+	bool				autoupdate;
 } layouts_plugin_spec_t;
 
 /*****************************************************************************\
@@ -102,7 +126,7 @@ typedef struct layouts_plugin_spec_st {
 \*****************************************************************************/
 
 /*
- * slurm_layouts_init - intialize the layouts mgr, load the required plugins
+ * layouts_init - initialize the layouts mgr, load the required plugins
  *        and initialize the internal hash tables for entities, keydefs and
  *        layouts.
  *
@@ -112,40 +136,363 @@ typedef struct layouts_plugin_spec_st {
  * Notes: this call do not try to read and parse the layouts configuration
  * files. It only loads the layouts plugins, dlsym the layout API and conf
  * elements to prepare the reading and parsing performed in the adhoc call
- * slurm_layouts_load_config()
+ * layouts_load_config()
  *
  */
-int slurm_layouts_init(void);
+int layouts_init(void);
 
 /*
- * slurm_layouts_fini - uninitialize the layouts mgr and free the internal
+ * layouts_fini - uninitialize the layouts mgr and free the internal
  *        hash tables.
  */
-int slurm_layouts_fini(void);
+int layouts_fini(void);
 
 /*
- * slurm_layouts_load_config - use the layouts plugins details loaded during
- *        slurm_layouts_init() and read+parse the different layouts
+ * layouts_load_config - use the layouts plugins details loaded during
+ *        layouts_init() and read+parse the different layouts
  *        configuration files, creating the entities and the relational
  *        structures associated the eaf of them.
  *
+ * IN recover - update entities information with the latest available
+ *              information depending upon value
+ *              0 = use no saved state information, rebuild everything from
+ *		    layouts conf files contents
+ *              1 = recover saved entities information
+ *              2 = recover saved entities information
+ *
  * Return SLURM_SUCCESS or SLURM_ERROR if all the required layouts were not
  * loaded correctly.
  */
-int slurm_layouts_load_config(void);
+int layouts_load_config(int recover);
 
 /*
- * slurm_layouts_get_layout - return the layout from a given type
+ * layouts_get_layout - return the layout from a given type
  *
  * Return a pointer to the layout_t struct of the layout or NULL if not found
  */
-layout_t* slurm_layouts_get_layout(const char* type);
+layout_t* layouts_get_layout(const char* type);
 
 /*
- * slurm_layouts_get_entity - return the entity from a given name
+ * layouts_get_entity - return the entity from a given name
  *
  * Return a pointer to the entity_t struct of the entity or NULL if not found
  */
-entity_t* slurm_layouts_get_entity(const char* name);
+entity_t* layouts_get_entity(const char* name);
+
+/*
+ * layouts_pack_layout - pack the layout of the target type into the provided
+ *        buffer.
+ *
+ * The buffer will be appended with multiple strings representing an expanded
+ * form of its configuration element, terminated by a "\0" string.
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR in case of failure
+ */
+int layouts_pack_layout(char *l_type, char *entities, char *type,
+			uint32_t no_relation, Buf buffer);
+
+/*
+ * layouts_update_layout - update a particular layout loading the information
+ *        provided in the input buffer.
+ *
+ * The buffer must contain multiple strings corresponding to the different
+ * configuration lines similar to those that can be put in a configuration
+ * file that will be parsed and integrated.
+ *
+ * Note that the entities key/value entries will be updated only.
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR in case of failure
+ */
+int layouts_update_layout(char *l_type, Buf buffer);
+
+/*
+ * layouts_state_save_layout - save the state of a particular layout
+ *        in the adhoc file in slurm state save location.
+ *
+ * The file produced will be an ASCII file created from the configuration
+ * strings packed using layouts_pack_layout(). Thus it will be the expanded
+ * form of the current configuration of the layout that could be used as
+ * a perfect updated replacement of the layout configuration file.
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR in case of failure
+ */
+int layouts_state_save_layout(char* l_type);
+
+/*
+ * layouts_state_save - save the state of all the loaded layouts iterating
+ *        over each one of them and applying layouts_state_save_layout().
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR in case of failure
+ */
+int layouts_state_save(void);
+
+/*
+ * layouts_entity_get_kv_type - get the type of the value associated with a key
+ *        of an entity in a particular layout.
+ *
+ * The returned type is a member of the layouts_keydef_types_t enum :
+ * L_T_ERROR, L_T_STRING, L_T_LONG, L_T_UINT16, ...
+ *
+ * Return the requested type or SLURM_ERROR in case of failure
+ */
+int layouts_entity_get_kv_type(char* layout, char* entity,
+			       char* key);
+
+/*
+ * layouts_entity_get_kv_flags - get the keyspec flags associated with the
+ *        targeted key/value pair of an entity in a particular layout.
+ *
+ * Return the associated flags or SLURM_ERROR in case of failure
+ */
+int layouts_entity_get_kv_flags(char* layout, char* entity,
+				char* key);
+
+/*
+ * layouts_entity_push_kv - update the layout internal states to take into
+ *        account the current state of the targeted key/value pair.
+ *
+ * This ensures that the child and/or parents of the targeted entity in the
+ * targeted layout are synchronized with the current value associated with
+ * the key.
+ *
+ * Note: this call only makes sense when the targeted k/v is a k/v that helps
+ *       to dynamically compute its parents and/or children. It is a
+ *       no-op otherwise that just returns SLURM_SUCCESS.
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR in case of failure
+ */
+int layouts_entity_push_kv(char* layout, char* entity,
+			   char* key);
+
+/*
+ * layouts_entity_pull_kv - synchronize the targeted key/value pair based on
+ *        the states of their neighborhood in the targeted layout.
+ *
+ * This ensures that the K/V is up-to-date and correspond to the values that
+ *        its neighborhood in the layout think it should have.
+ *
+ * Note: this call only makes sense when the targeted k/v is a k/v that is
+ *       dynamically computed based on its parents and/or children. It is a
+ *       no-op otherwise that just returns SLURM_SUCCESS.
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR in case of failure
+ */
+int layouts_entity_pull_kv(char* layout, char* entity,
+			   char* key);
+
+/*
+ * layouts_entity_set_kv - update an entity with a new value for a particular
+ *        key in the targeted layout.
+ *
+ * The input key_type will force the call to check types consistency between
+ * the requester and the underlying keyspec associated with the key. To skip
+ * that check the caller will have to pass a 0 value.
+ *
+ * Note : in case the key/value is already set for the entity, the content of
+ * the provided buffer will override the current content. In case the key/value
+ * already exists, it will be xfree()'d and a new memory allocation will be
+ * performed and the content of the provided buffer dumped into it.
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR in case of failure
+ */
+int layouts_entity_set_kv(char* layout, char* entity,
+			  char* key, void* value,
+			  layouts_keydef_types_t key_type);
+
+/*
+ * layouts_entity_set_kv_ref - replace an entity key value with a new memory
+ *        area for a particular key in the targeted layout
+ *
+ * The input key_type will force the call to check types consistency between
+ * the requester and the underlying keyspec associated with the key. To skip
+ * that check the caller will have to pass a 0 value.
+ *
+ * Note : in case the key/value is already set for the entity, the older value
+ * will be freed and the provided buffer will be associated to the new value.
+ * Once done, the caller must not free the provided buffer as it will then
+ * be owned by the layout logic and will be freed automatically when the layout
+ * framework is unloaded or at the next call to that function.
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR in case of failure
+ */
+int layouts_entity_set_kv_ref(char* layout, char* entity,
+			      char* key, void* value,
+			      layouts_keydef_types_t key_type);
+
+/*
+ * layouts_entity_setpush_kv - combination of layouts_entity_set_kv and
+ *        layouts_entity_push_kv to update an entity with a new value and force
+ *        the synchronization of its neighborhood in the layout.
+ *
+ * The input key_type will force the call to check types consistency between
+ * the requester and the underlying keyspec associated with the key. To skip
+ * that check the caller will have to pass a 0 value.
+ *
+ * Note: see layouts_entity_push_kv.
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR in case of failure
+ */
+int layouts_entity_setpush_kv(char* layout, char* entity,
+			      char* key, void* value,
+			      layouts_keydef_types_t key_type);
+
+/*
+ * layouts_entity_setpush_kv_ref - combination of layouts_entity_set_kv_ref and
+ *        layouts_entity_push_kv to replace an entity key value with a new
+ *        memory area and force the synchronization of its neighborhood in
+ *        the layout.
+ *
+ * The input key_type will force the call to check types consistency between
+ * the requester and the underlying keyspec associated with the key. To skip
+ * that check the caller will have to pass a 0 value.
+ *
+ * Note: see layouts_entity_push_kv.
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR in case of failure
+ */
+int layouts_entity_setpush_kv_ref(char* layout, char* entity,
+				  char* key, void* value,
+				  layouts_keydef_types_t key_type);
+
+/*
+ * layouts_entity_get_kv - get the value associated with a key of an entity
+ *        in a particular layout.
+ *
+ * The input key_type will force the call to check types consistency between
+ * the requester and the underlying keyspec associated with the key. To skip
+ * that check the caller will have to pass a 0 value.
+ *
+ * Note : the destination buffer will be filled with the content of the
+ * value associated with the requested key in the entity except for these
+ * types for which :
+ *   L_T_STRING  : value must be the address of the char* that will be
+ *                 xstrduped with the key value. The char* will have to be
+ *                 xfree() after that.
+ *   L_T_CUSTOM : value must be the address of the char* that will result
+ *                of the custom_dump function. The char* will have to be
+ *                xfree() after that.
+ *   L_T_ERROR : will return SLURM_ERROR in all cases.
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR in case of failure
+ */
+int layouts_entity_get_kv(char* layout, char* entity,
+			  char* key, void* value,
+			  layouts_keydef_types_t key_type);
+
+/*
+ * layouts_entity_get_kv_ref - get a pointer to the value associated with a key
+ *        of an entity in a particular layout.
+ *
+ * The input key_type will force the call to check types consistency between
+ * the requester and the underlying keyspec associated with the key. To skip
+ * that check the caller will have to pass a 0 value.
+ *
+ * Note : this call must be used with caution as the pointer could be freed
+ * sooner or later by the underlying layout engine in reply to the execution
+ * of the layouts_entity_set_kv_ref().
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR in case of failure
+ */
+int layouts_entity_get_kv_ref(char* layout, char* entity,
+			      char* key, void** pvalue,
+			      layouts_keydef_types_t key_type);
+
+/*
+ * layouts_entity_pullget_kv - combination of layouts_entity_pull_kv and
+ *        layouts_entity_get_kv to retrieve the up-to-date value of a particular
+ *        entity key in the targeted layout.
+ *
+ * The input key_type will force the call to check types consistency between
+ * the requester and the underlying keyspec associated with the key. To skip
+ * that check the caller will have to pass a 0 value.
+ *
+ * Note: see layouts_entity_pull_kv.
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR in case of failure
+ */
+int layouts_entity_pullget_kv(char* layout, char* entity,
+			      char* key, void* value,
+			      layouts_keydef_types_t key_type);
+
+/*
+ * layouts_entity_pullget_kv_ref - combination of layouts_entity_pull_kv and
+ *        layouts_entity_get_kv_ref to retrieve a reference to the up-to-date
+ *        value of a particular entity key in the targeted layout.
+ *        of a particular entity key in the targeted layout.
+ *
+ * The input key_type will force the call to check types consistency between
+ * the requester and the underlying keyspec associated with the key. To skip
+ * that check the caller will have to pass a 0 value.
+ *
+ * Note: see layouts_entity_pull_kv.
+ *
+ * Return SLURM_SUCCESS or SLURM_ERROR in case of failure
+ */
+int layouts_entity_pullget_kv_ref(char* layout, char* entity,
+				  char* key, void** value,
+				  layouts_keydef_types_t key_type);
+
+/*
+ * layouts_entity_get_mkv - get the values associated with a set of keys of an
+ *        entity in a particular layout.
+ *
+ * The input key_type will force the call to check types consistency between
+ * the requester and the underlying keyspec associated with the key. To skip
+ * that check the caller will have to pass a 0 value. This is mandatory for
+ * cases where the keyspecs of the requested keys do not share the same type.
+ *
+ * Note : the destination buffer will be sequentially filled with the content of
+ * the values associated with the requested keys in the entity.
+ * If the length of the buffer is too small, the remaining references will not
+ * be added and the counter of missed keys incremented as necessary.
+ * The first encountered error terminates the logic and the missing elements
+ * counter will reflect all the unprocessed elements including the faulty one.
+ *
+ * Special care must be taken for the following types of key :
+ *   L_T_STRING  : a char* will be added to the buffer. It will be xstrduped
+ *                 with the associated key value. The char* will have to be
+ *                 xfree() after that.
+ *   L_T_CUSTOM : a char* will be added to the buffer. It will be xstrduped
+ *                with the result of the custom_dump function. It will have to
+ *                be xfree() after that.
+ *   L_T_ERROR : will generate an error that will force the function to return
+ *               the count of missing elements (at least 1, depending on where
+ *               this type first appeared in the ordered list of keys to get).
+ *
+ * Note: keys correspond to a list of keys that can be represented as
+ * an hostlist expression (i.e. keys[1-10]).
+ *
+ * Return SLURM_SUCCESS or the count of missed keys/references
+ */
+int layouts_entity_get_mkv(char* layout, char* entity,
+			   char* keys, void* value, size_t length,
+			   layouts_keydef_types_t key_type);
+
+/*
+ * layouts_entity_get_mkv_ref - get a set of pointers to the values associated
+ *        with a set of keys of an entity in a particular layout.
+ *
+ * The input key_type will force the call to check types consistency between
+ * the requester and the underlying keyspecs associated with the keys. To skip
+ * that check the caller will have to pass a 0 value. This is mandatory for cases
+ * where the keyspecs of the requested keys do not share the same type.
+ *
+ * The output buffer will be filled with the different references.
+ * If the length of the buffer is too small, the remaining references will not
+ * be added and the counter of missed keys incremented as necessary.
+ * The first encountered error terminates the logic and the missing elements
+ * counter will reflect all the unprocessed elements including the faulty one.
+ *
+ * Note: this call must be used with caution as the pointers could be freed
+ * sooner or later by the underlying layout engine in reply to the execution
+ * of the layouts_entity_set_kv_ref().
+ *
+ * Note: keys correspond to a list of keys that can be represented as
+ * an hostlist expression (i.e. keys[1-10]).
+ *
+ * Return SLURM_SUCCESS or the count of missed keys/references
+ */
+int layouts_entity_get_mkv_ref(char* layout, char* entity,
+			       char* keys, void* buffer, size_t length,
+			       layouts_keydef_types_t key_type);
 
 #endif /* end of include guard: __LAYOUTS_MGR_1NRINRSD__INC__ */
diff --git a/src/common/log.c b/src/common/log.c
index 22950d9f9..decae2441 100644
--- a/src/common/log.c
+++ b/src/common/log.c
@@ -78,14 +78,15 @@
 #include <sys/unistd.h>
 
 #include "slurm/slurm_errno.h"
-#include "src/common/log.h"
 #include "src/common/fd.h"
+#include "src/common/log.h"
 #include "src/common/macros.h"
 #include "src/common/safeopen.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/slurm_time.h"
 #include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
-#include "src/common/slurm_protocol_api.h"
 
 #ifndef LINEBUFSIZE
 #  define LINEBUFSIZE 256
@@ -188,7 +189,7 @@ static size_t _make_timestamp(char *timestamp_buf, size_t max,
 {
 	time_t timestamp_t = time(NULL);
 	struct tm timestamp_tm;
-	if (!localtime_r(&timestamp_t, &timestamp_tm)) {
+	if (!slurm_localtime_r(&timestamp_t, &timestamp_tm)) {
 		fprintf(stderr, "localtime_r() failed\n");
 		return 0;
 	}
@@ -678,7 +679,7 @@ set_idbuf(char *idbuf)
 
 	gettimeofday(&now, NULL);
 
-	sprintf(idbuf, "%.15s.%-6d %5d %p", ctime(&now.tv_sec) + 4,
+	sprintf(idbuf, "%.15s.%-6d %5d %p", slurm_ctime(&now.tv_sec) + 4,
 	        (int)now.tv_usec, (int)getpid(), (void *)pthread_self());
 
 }
diff --git a/src/common/mapping.c b/src/common/mapping.c
new file mode 100644
index 000000000..c81c53311
--- /dev/null
+++ b/src/common/mapping.c
@@ -0,0 +1,397 @@
+/*****************************************************************************\
+ *  src/common/mapping.c - routines for compact process mapping representation
+ *****************************************************************************
+ *  Copyright (C) 2014 Institute of Semiconductor Physics
+ *                     Siberian Branch of Russian Academy of Science
+ *  Written by Artem Polyakov <artpol84@gmail.com>.
+ *  All rights reserved.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#define _GNU_SOURCE
+
+#include "src/common/mapping.h"
+
+/* pack_process_mapping()
+ */
+char *
+pack_process_mapping(uint32_t node_cnt,
+		     uint32_t task_cnt,
+		     uint16_t *tasks,
+		     uint32_t **tids)
+{
+	int offset, i;
+	int start_node, end_node;
+	char *packing = NULL;
+
+	/* next_task[i] - next process for processing
+	 */
+	uint16_t *next_task = xmalloc(node_cnt * sizeof(uint16_t));
+
+	packing = xstrdup("(vector");
+	offset = 0;
+	while (offset < task_cnt) {
+		int mapped = 0;
+		int depth = -1;
+		int j;
+		start_node = end_node = 0;
+
+		/* find the task with id == offset
+		 */
+		for (i = 0; i < node_cnt; i++) {
+
+			if (next_task[i] < tasks[i]) {
+				/* if we didn't consume entire
+				 * quota on this node
+				 */
+				xassert(offset >= tids[i][next_task[i]]);
+				if (offset == tids[i][next_task[i]]) {
+					start_node = i;
+					break;
+				}
+			}
+		}
+
+		end_node = node_cnt;
+		for (i = start_node; i < end_node; i++) {
+			if (next_task[i] >= tasks[i] ) {
+				/* Save first non-matching node index
+				 * and interrupt loop
+				 */
+				end_node = i;
+				continue;
+			}
+
+			for (j = next_task[i]; ((j + 1) < tasks[i])
+				     && ((tids[i][j]+1) == tids[i][j+1]); j++);
+			j++;
+			/* First run determines the depth
+			 */
+			if (depth < 0) {
+				depth = j - next_task[i];
+			} else {
+				/* If this is not the first node in the bar
+				 * check that: 1. First tid on this node is
+				 * sequentially next after last tid
+				 *    on the previous node
+				 */
+				if (tids[i-1][next_task[i-1]-1] + 1
+				    != tids[i][next_task[i]]) {
+					end_node = i;
+					continue;
+				}
+			}
+
+			if (depth == (j - next_task[i])) {
+				mapped += depth;
+				next_task[i] = j;
+			} else {
+				/* Save first non-matching node index
+				 *
+				 * and interrupt loop
+				 */
+				end_node = i;
+			}
+		}
+		xstrfmtcat(packing,",(%u,%u,%u)",
+			   start_node, end_node - start_node, depth);
+		offset += mapped;
+	}
+	xstrcat(packing,")");
+	return packing;
+}
+
+uint32_t *
+unpack_process_mapping_flat(char *map,
+			    uint32_t node_cnt,
+			    uint32_t task_cnt,
+			    uint16_t *tasks)
+{
+	/* Start from the flat array. The i'th task is located
+	 * on the task_map[i]'th node
+	 */
+	uint32_t *task_map = xmalloc(sizeof(int) * task_cnt);
+	char *prefix = "(vector,", *p = NULL;
+	uint32_t taskid, i;
+
+	if (tasks) {
+		for (i = 0; i < node_cnt; i++) {
+			tasks[i] = 0;
+		}
+	}
+
+	if ((p = strstr(map, prefix)) == NULL) {
+		error("\
+unpack_process_mapping: The mapping string should start from %s", prefix);
+		goto err_exit;
+	}
+
+	/* Skip prefix
+	 */
+	p += strlen(prefix);
+	taskid = 0;
+	while ((p = strchr(p,'('))) {
+		int depth, node, end_node;
+		p++;
+		if (3!= sscanf(p,"%d,%d,%d", &node, &end_node, &depth)) {
+			goto err_exit;
+		}
+		end_node += node;
+		xassert(node < node_cnt && end_node <= node_cnt );
+		for (; node < end_node; node++) {
+			for (i = 0; i < depth; i++){
+				task_map[taskid++] = node;
+				if (tasks != NULL) {
+					/* Count tasks on each node if it was
+					 * requested
+					 */
+					tasks[node]++;
+				}
+			}
+		}
+	}
+	return task_map;
+err_exit:
+	xfree(task_map);
+	return NULL;
+}
+
+int
+unpack_process_mapping(char *map,
+		       uint32_t node_cnt,
+		       uint32_t task_cnt,
+		       uint16_t *tasks,
+		       uint32_t **tids)
+{
+	/* Start from the flat array. The i'th task is located
+	 * on the task_map[i]'th node
+	*/
+	uint32_t *task_map = NULL;
+	uint16_t *node_task_cnt = NULL;
+	uint32_t i;
+	int rc = 0;
+
+	task_map = unpack_process_mapping_flat(map, node_cnt, task_cnt, tasks);
+	if (task_map == NULL) {
+		rc = SLURM_ERROR;
+		goto err_exit;
+	}
+
+	node_task_cnt = xmalloc(sizeof(uint16_t) * node_cnt);
+	for (i = 0;  i < node_cnt; i++){
+		tids[i] = xmalloc(sizeof(uint32_t) * tasks[i]);
+		node_task_cnt[i] = 0;
+	}
+
+	for (i = 0; i < task_cnt; i++) {
+		uint32_t node = task_map[i];
+		tids[node][ node_task_cnt[node]++ ] = i;
+		xassert( node_task_cnt[node] <= tasks[node] );
+	}
+
+	goto exit;
+err_exit:
+	error("unpack_process_mapping: bad mapping format");
+exit:
+	if (task_map != NULL){
+		xfree(task_map);
+	}
+
+	if (node_task_cnt != NULL){
+		xfree(node_task_cnt);
+	}
+	return rc;
+}
+
+
+#if 0
+
+/*
+ * Mutual check for both routines
+ */
+
+/* Emulate 16-core nodes
+ */
+#define NCPUS 16
+#define NODES 200
+
+static
+void block_distr(uint32_t task_cnt,
+		 uint16_t *tasks,
+		 uint32_t **tids)
+{
+	int i, j, tnum = 0;
+
+	for (i = 0; i < NODES; i++) {
+		tasks[i] = 0;
+	}
+
+	/* BLOCK distribution
+	 */
+	for (i = 0; (i < NODES) && (tnum < task_cnt); i++) {
+		for (j = 0; j < NCPUS && (tnum < task_cnt); j++) {
+			tids[i][j] = tnum++;
+		}
+		tasks[i] = j;
+	}
+}
+
+static void
+cyclic_distr(uint32_t task_cnt,
+	     uint16_t *tasks,
+	     uint32_t **tids)
+{
+	int i, j, tnum = 0;
+	/* CYCLIC distribution
+	 */
+	tnum = 0;
+	for (i = 0; i < NODES; i++) {
+		tasks[i] = 0;
+	}
+	for (j = 0; j < NCPUS && (tnum < task_cnt); j++) {
+		for (i = 0; (i < NODES) && (tnum < task_cnt); i++ ) {
+			tids[i][j] = tnum++;
+			tasks[i]++;
+		}
+	}
+}
+
+
+static void
+plane_distr(uint32_t task_cnt,
+	    int plane_factor,
+	    uint16_t *tasks,
+	    uint32_t **tids)
+{
+	int i, j, tnum = 0;
+	/* PLANE distribution
+	 */
+	tnum = 0;
+	for (i = 0; i < NODES; i++) {
+		tasks[i] = 0;
+	}
+
+	while (tnum < task_cnt) {
+		for (i = 0; (i < NODES) && (tnum < task_cnt); i++) {
+			for (j = 0;
+			    (j < plane_factor)
+				    && (tasks[i] < NCPUS)
+				    && (tnum < task_cnt); j++) {
+				tids[i][tasks[i]++] = tnum++;
+			}
+		}
+	}
+}
+
+static void check(uint32_t node_cnt, uint32_t task_cnt,
+		  uint16_t *tasks, uint32_t **tids)
+{
+	uint16_t *new_tasks;
+	uint32_t **new_tids;
+	char *map = pack_process_mapping(node_cnt, task_cnt, tasks, tids);
+	int i,j;
+
+	printf("mapping: %s\n", map);
+
+	new_tasks = xmalloc(sizeof(uint16_t) * node_cnt);
+	new_tids = xmalloc(sizeof(uint32_t *) * node_cnt);
+	unpack_process_mapping(map,node_cnt,task_cnt,new_tasks,new_tids);
+
+	for (i = 0; i < node_cnt; i++) {
+
+		if (new_tasks[i] != tasks[i]) {
+			printf("Task count mismatch on node %d\n", i);
+			exit(0);
+		}
+
+		for (j = 0; j< tasks[i]; j++) {
+			if (new_tids[i][j] != tids[i][j]){
+				printf("\
+Task id mismatch on node %d, idx = %d\n", i, j);
+				exit(0);
+			}
+		}
+	}
+
+	for (i = 0; i< node_cnt; i++) {
+		xfree(new_tids[i]);
+	}
+	xfree(new_tasks);
+	xfree(new_tids);
+
+	xfree(map);
+
+}
+
+
+int
+main(int argc, char **argv)
+{
+	uint16_t  tasks[NODES] = { 0 };
+	uint32_t **tids = NULL;
+	int tnum = 0, i;
+
+	tids = xmalloc(sizeof(uint32_t*) * NODES);
+	for (i = 0; i< NODES; i++) {
+		tids[i] = xmalloc(sizeof(uint32_t) * NCPUS);
+	}
+
+	for (tnum = 1; tnum < NCPUS*NODES; tnum++) {
+
+		printf("Map %d tasks into cluster %dx%d\n", tnum, NODES, NCPUS);
+		block_distr(tnum, tasks, tids);
+		check(NODES,tnum, tasks, tids);
+
+		cyclic_distr(tnum, tasks, tids);
+		check(NODES,tnum, tasks, tids);
+
+		plane_distr(tnum,2,tasks, tids);
+		check(NODES,tnum, tasks, tids);
+
+		plane_distr(tnum,4,tasks, tids);
+		check(NODES,tnum, tasks, tids);
+
+		plane_distr(tnum,6,tasks, tids);
+		check(NODES,tnum, tasks, tids);
+
+		plane_distr(tnum,8,tasks, tids);
+		check(NODES,tnum, tasks, tids);
+	}
+
+	for (i = 0; i < NODES; i++){
+		xfree(tids[i]);
+	}
+	xfree(tids);
+
+	return 0;
+}
+
+#endif
diff --git a/src/plugins/slurmctld/dynalloc/allocator.h b/src/common/mapping.h
similarity index 67%
rename from src/plugins/slurmctld/dynalloc/allocator.h
rename to src/common/mapping.h
index 093424bb0..957f4804a 100644
--- a/src/plugins/slurmctld/dynalloc/allocator.h
+++ b/src/common/mapping.h
@@ -1,8 +1,9 @@
 /*****************************************************************************\
- *  allocator.h  - dynamic resource allocation
+ *  src/common/mapping.h - routines for compact process mapping representation
  *****************************************************************************
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Written by Jimmy Cao <Jimmy.Cao@emc.com>, Ralph Castain <rhc@open-mpi.org>
+ *  Copyright (C) 2014 Institute of Semiconductor Physics
+ *                     Siberian Branch of Russian Academy of Science
+ *  Written by Artem Polyakov <artpol84@gmail.com>.
  *  All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
@@ -35,36 +36,35 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#ifndef DYNALLOC_ALLOCATOR_H_
-#define DYNALLOC_ALLOCATOR_H_
+#ifndef MAPPING_H
+#define MAPPING_H
 
-#if HAVE_CONFIG_H
-#  include "config.h"
-#  if HAVE_INTTYPES_H
-#    include <inttypes.h>
-#  else
-#    if HAVE_STDINT_H
-#      include <stdint.h>
-#    endif
-#  endif  /* HAVE_INTTYPES_H */
-#else   /* !HAVE_CONFIG_H */
-#  include <inttypes.h>
-#endif  /*  HAVE_CONFIG_H */
+#include <stdint.h>
+#include <unistd.h>
+#include <string.h>
+#include <sys/types.h>
 
 #include "slurm/slurm.h"
-#include "msg.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/common/xassert.h"
 
-/*
- * allocate resources for a job.
- *
- * The job will consist of at least one app, e.g., "allocate
- * jobid=100 return=all timeout=10:app=0 np=5 N=2
- * node_list=vm2,vm3 flag=mandatory:app=1 N=2".
- *
- * IN:
- * 	new_fd: send allocation result to socket_fd
- * 	msg: resource requirement cmd
- */
-extern void allocate_job_op(slurm_fd_t new_fd, const char *msg);
+BEGIN_C_DECLS
+
+extern char *pack_process_mapping(uint32_t node_cnt,
+				  uint32_t task_cnt,
+				  uint16_t *tasks,
+				  uint32_t **tids);
+extern uint32_t *unpack_process_mapping_flat(char *map,
+					     uint32_t node_cnt,
+					     uint32_t task_cnt,
+					     uint16_t *tasks);
+extern int unpack_process_mapping(char *map,
+				  uint32_t node_cnt,
+				  uint32_t task_cnt,
+				  uint16_t *tasks,
+				  uint32_t **tids);
+
+END_C_DECLS
 
-#endif /* DYNALLOC_ALLOCATOR_H_ */
+#endif // MAPPING_H
diff --git a/src/common/msg_aggr.c b/src/common/msg_aggr.c
new file mode 100644
index 000000000..8f38bfc26
--- /dev/null
+++ b/src/common/msg_aggr.c
@@ -0,0 +1,493 @@
+/*****************************************************************************\
+ *  msg_aggr.c - Message Aggregator for sending messages to the
+ *               slurmctld, if a reply is expected this also will wait
+ *               and get that reply when received.
+ *****************************************************************************
+ *  Copyright (C) 2015 Bull S. A. S.
+ *		Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois.
+ *  Copyright (C) 2015 SchedMD LLC.
+ *  Written by Martin Perry <martin.perry@bull.com>
+ *             Danny Auble <da@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include "slurm/slurm.h"
+
+#include "src/common/msg_aggr.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/common/slurm_auth.h"
+#include "src/common/slurm_route.h"
+#include "src/common/read_config.h"
+#include "src/common/slurm_protocol_interface.h"
+
+#include "src/slurmd/slurmd/slurmd.h"
+
+#ifdef WITH_PTHREADS
+#  include <pthread.h>
+#endif /* WITH_PTHREADS */
+
+typedef struct {
+	pthread_mutex_t	aggr_mutex;
+	pthread_cond_t	cond;
+	uint32_t        debug_flags;
+	bool		max_msgs;
+	uint64_t        max_msg_cnt;
+	List            msg_aggr_list;
+	List            msg_list;
+	pthread_mutex_t	mutex;
+	slurm_addr_t    node_addr;
+	bool            running;
+	pthread_t       thread_id;
+	uint64_t        window;
+} msg_collection_type_t;
+
+typedef struct {
+	uint16_t msg_index;
+	void (*resp_callback) (slurm_msg_t *msg);
+	pthread_cond_t wait_cond;
+} msg_aggr_t;
+
+
+/*
+ * Message collection data & controls
+ */
+static msg_collection_type_t msg_collection;
+
+
+static void _msg_aggr_free(void *x)
+{
+	msg_aggr_t *object = (msg_aggr_t *)x;
+	if (object) {
+		pthread_cond_destroy(&object->wait_cond);
+		xfree(object);
+	}
+}
+
+static msg_aggr_t *_handle_msg_aggr_ret(uint32_t msg_index, bool locked)
+{
+	msg_aggr_t *msg_aggr;
+	ListIterator itr;
+
+	if (!locked)
+		slurm_mutex_lock(&msg_collection.aggr_mutex);
+
+	itr = list_iterator_create(msg_collection.msg_aggr_list);
+
+	while ((msg_aggr = list_next(itr))) {
+		/* just remove them all */
+		if (!msg_index) {
+			/* make sure we don't wait any longer */
+			pthread_cond_signal(&msg_aggr->wait_cond);
+			list_remove(itr);
+		} else if (msg_aggr->msg_index == msg_index) {
+			list_remove(itr);
+			break;
+		}
+
+	}
+	list_iterator_destroy(itr);
+
+	if (!locked)
+		slurm_mutex_unlock(&msg_collection.aggr_mutex);
+
+	return msg_aggr;
+}
+
+static int _send_to_backup_collector(slurm_msg_t *msg, int rc)
+{
+	slurm_addr_t *next_dest = NULL;
+
+	if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE) {
+		info("_send_to_backup_collector: primary %s, "
+		     "getting backup",
+		     rc ? "can't be reached" : "is null");
+	}
+
+	if ((next_dest = route_g_next_collector_backup())) {
+		if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE) {
+			char addrbuf[100];
+			slurm_print_slurm_addr(next_dest, addrbuf, 32);
+			info("_send_to_backup_collector: *next_dest is "
+			     "%s", addrbuf);
+		}
+		memcpy(&msg->address, next_dest, sizeof(slurm_addr_t));
+		rc = slurm_send_only_node_msg(msg);
+	}
+
+	if (!next_dest ||  (rc != SLURM_SUCCESS)) {
+		if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE)
+			info("_send_to_backup_collector: backup %s, "
+			     "sending msg to controller",
+			     rc ? "can't be reached" : "is null");
+		rc = slurm_send_only_controller_msg(msg);
+	}
+
+	return rc;
+}
+
+/*
+ *  Send a msg to the next msg aggregation collector node. If primary
+ *  collector is unavailable or returns error, try backup collector.
+ *  If backup collector is unavailable or returns error, send msg
+ *  directly to controller.
+ */
+static int _send_to_next_collector(slurm_msg_t *msg)
+{
+	slurm_addr_t *next_dest = NULL;
+	bool i_am_collector;
+	int rc = SLURM_SUCCESS;
+
+	if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE)
+		info("msg aggr: send_to_next_collector: getting primary next "
+		     "collector");
+	if ((next_dest = route_g_next_collector(&i_am_collector))) {
+		if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE) {
+			char addrbuf[100];
+			slurm_print_slurm_addr(next_dest, addrbuf, 32);
+			info("msg aggr: send_to_next_collector: *next_dest is "
+			     "%s", addrbuf);
+		}
+		memcpy(&msg->address, next_dest, sizeof(slurm_addr_t));
+		rc = slurm_send_only_node_msg(msg);
+	}
+
+	if (!next_dest || (rc != SLURM_SUCCESS))
+		rc = _send_to_backup_collector(msg, rc);
+
+	return rc;
+}
+
+/*
+ * _msg_aggregation_sender()
+ *
+ *  Start and terminate message collection windows.
+ *  Send collected msgs to next collector node or final destination
+ *  at window expiration.
+ */
+static void * _msg_aggregation_sender(void *arg)
+{
+	struct timeval now;
+	struct timespec timeout;
+	slurm_msg_t msg;
+	composite_msg_t cmp;
+
+	msg_collection.running = 1;
+
+	slurm_mutex_lock(&msg_collection.mutex);
+
+	while (msg_collection.running) {
+		/* Wait for a new msg to be collected */
+		pthread_cond_wait(&msg_collection.cond, &msg_collection.mutex);
+
+
+		if (!msg_collection.running &&
+		    !list_count(msg_collection.msg_list))
+			break;
+
+		/* A msg has been collected; start new window */
+		gettimeofday(&now, NULL);
+		timeout.tv_sec = now.tv_sec + (msg_collection.window / 1000);
+		timeout.tv_nsec = (now.tv_usec * 1000) +
+			(1000000 * (msg_collection.window % 1000));
+		timeout.tv_sec += timeout.tv_nsec / 1000000000;
+		timeout.tv_nsec %= 1000000000;
+
+		pthread_cond_timedwait(&msg_collection.cond,
+				       &msg_collection.mutex, &timeout);
+
+		if (!msg_collection.running &&
+		    !list_count(msg_collection.msg_list))
+			break;
+
+		msg_collection.max_msgs = true;
+
+		/* Msg collection window has expired and message collection
+		 * is suspended; now build and send composite msg */
+		memset(&msg, 0, sizeof(slurm_msg_t));
+		memset(&cmp, 0, sizeof(composite_msg_t));
+
+		memcpy(&cmp.sender, &msg_collection.node_addr,
+		       sizeof(slurm_addr_t));
+		cmp.msg_list = msg_collection.msg_list;
+
+		msg_collection.msg_list =
+			list_create(slurm_free_comp_msg_list);
+		msg_collection.max_msgs = false;
+
+		slurm_msg_t_init(&msg);
+		msg.msg_type = MESSAGE_COMPOSITE;
+		msg.protocol_version = SLURM_PROTOCOL_VERSION;
+		msg.data = &cmp;
+		if (_send_to_next_collector(&msg) != SLURM_SUCCESS) {
+			error("_msg_aggregation_engine: Unable to send "
+			      "composite msg: %m");
+		}
+		FREE_NULL_LIST(cmp.msg_list);
+
+		/* Resume message collection */
+		pthread_cond_broadcast(&msg_collection.cond);
+	}
+
+	slurm_mutex_unlock(&msg_collection.mutex);
+	return NULL;
+}
+
+extern void msg_aggr_sender_init(char *host, uint16_t port, uint64_t window,
+				 uint64_t max_msg_cnt)
+{
+	pthread_attr_t attr;
+	int            retries = 0;
+
+	if (msg_collection.running || (max_msg_cnt <= 1))
+		return;
+
+	memset(&msg_collection, 0, sizeof(msg_collection_type_t));
+
+	slurm_mutex_init(&msg_collection.aggr_mutex);
+	slurm_mutex_init(&msg_collection.mutex);
+
+	slurm_mutex_lock(&msg_collection.mutex);
+	slurm_mutex_lock(&msg_collection.aggr_mutex);
+	pthread_cond_init(&msg_collection.cond, NULL);
+	slurm_set_addr(&msg_collection.node_addr, port, host);
+	msg_collection.window = window;
+	msg_collection.max_msg_cnt = max_msg_cnt;
+	msg_collection.msg_aggr_list = list_create(_msg_aggr_free);
+	msg_collection.msg_list = list_create(slurm_free_comp_msg_list);
+	msg_collection.max_msgs = false;
+	msg_collection.debug_flags = slurm_get_debug_flags();
+	slurm_mutex_unlock(&msg_collection.aggr_mutex);
+	slurm_mutex_unlock(&msg_collection.mutex);
+
+	slurm_attr_init(&attr);
+
+	while (pthread_create(&msg_collection.thread_id, &attr,
+			      &_msg_aggregation_sender, NULL)) {
+		error("msg_aggr_sender_init: pthread_create: %m");
+		if (++retries > 3)
+			fatal("msg_aggr_sender_init: pthread_create: %m");
+		usleep(10);	/* sleep and again */
+	}
+
+	return;
+}
+
+extern void msg_aggr_sender_reconfig(uint64_t window, uint64_t max_msg_cnt)
+{
+	if (msg_collection.running) {
+		slurm_mutex_lock(&msg_collection.mutex);
+		msg_collection.window = window;
+		msg_collection.max_msg_cnt = max_msg_cnt;
+		msg_collection.debug_flags = slurm_get_debug_flags();
+		slurm_mutex_unlock(&msg_collection.mutex);
+	} else if (max_msg_cnt > 1) {
+		error("can't start the msg_aggr on a reconfig, "
+		      "a restart is needed");
+	}
+}
+
+extern void msg_aggr_sender_fini(void)
+{
+	if (!msg_collection.running)
+		return;
+	msg_collection.running = 0;
+	slurm_mutex_lock(&msg_collection.mutex);
+
+	pthread_cond_signal(&msg_collection.cond);
+	slurm_mutex_unlock(&msg_collection.mutex);
+
+	pthread_join(msg_collection.thread_id, NULL);
+	msg_collection.thread_id = (pthread_t) 0;
+
+	pthread_cond_destroy(&msg_collection.cond);
+	/* signal and clear the waiting list */
+	slurm_mutex_lock(&msg_collection.aggr_mutex);
+	_handle_msg_aggr_ret(0, 1);
+	FREE_NULL_LIST(msg_collection.msg_aggr_list);
+	slurm_mutex_unlock(&msg_collection.aggr_mutex);
+	FREE_NULL_LIST(msg_collection.msg_list);
+	slurm_mutex_destroy(&msg_collection.aggr_mutex);
+	slurm_mutex_destroy(&msg_collection.mutex);
+}
+
+extern void msg_aggr_add_msg(slurm_msg_t *msg, bool wait,
+			     void (*resp_callback) (slurm_msg_t *msg))
+{
+	int count;
+	static uint16_t msg_index = 1;
+
+	if (!msg_collection.running)
+		return;
+
+	slurm_mutex_lock(&msg_collection.mutex);
+	if (msg_collection.max_msgs == true) {
+		pthread_cond_wait(&msg_collection.cond, &msg_collection.mutex);
+	}
+
+	msg->msg_index = msg_index++;
+
+	/* Add msg to message collection */
+	list_append(msg_collection.msg_list, msg);
+
+	count = list_count(msg_collection.msg_list);
+
+
+	/* First msg in collection; initiate new window */
+	if (count == 1)
+		pthread_cond_signal(&msg_collection.cond);
+
+	/* Max msgs reached; terminate window */
+	if (count >= msg_collection.max_msg_cnt) {
+		msg_collection.max_msgs = true;
+		pthread_cond_signal(&msg_collection.cond);
+	}
+	slurm_mutex_unlock(&msg_collection.mutex);
+
+	if (wait) {
+		msg_aggr_t *msg_aggr = xmalloc(sizeof(msg_aggr_t));
+		uint16_t        msg_timeout;
+		struct timeval  now;
+		struct timespec timeout;
+
+		msg_aggr->msg_index = msg->msg_index;
+		msg_aggr->resp_callback = resp_callback;
+		pthread_cond_init(&msg_aggr->wait_cond, NULL);
+
+		slurm_mutex_lock(&msg_collection.aggr_mutex);
+		list_append(msg_collection.msg_aggr_list, msg_aggr);
+
+		msg_timeout = slurm_get_msg_timeout();
+		gettimeofday(&now, NULL);
+		timeout.tv_sec = now.tv_sec + msg_timeout;
+		timeout.tv_nsec = now.tv_usec * 1000;
+
+		if (pthread_cond_timedwait(&msg_aggr->wait_cond,
+					   &msg_collection.aggr_mutex,
+					   &timeout) == ETIMEDOUT)
+			_handle_msg_aggr_ret(msg_aggr->msg_index, 1);
+		slurm_mutex_unlock(&msg_collection.aggr_mutex);
+
+
+		_msg_aggr_free(msg_aggr);
+	}
+}
+
+extern void msg_aggr_add_comp(Buf buffer, void *auth_cred, header_t *header)
+{
+	slurm_msg_t *msg;
+
+	if (!msg_collection.running)
+		return;
+
+	msg = xmalloc_nz(sizeof(slurm_msg_t));
+	slurm_msg_t_init(msg);
+
+	msg->protocol_version = header->version;
+	msg->msg_type = header->msg_type;
+	msg->flags = header->flags;
+
+	msg->auth_cred = auth_cred;
+
+	msg->data = buffer;
+	msg->data_size = remaining_buf(buffer);
+
+	msg_aggr_add_msg(msg, 0, NULL);
+}
+
+extern void msg_aggr_resp(slurm_msg_t *msg)
+{
+	slurm_msg_t *next_msg;
+	composite_msg_t *comp_msg;
+	msg_aggr_t *msg_aggr;
+	ListIterator itr;
+
+	comp_msg = (composite_msg_t *)msg->data;
+	itr = list_iterator_create(comp_msg->msg_list);
+	if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE)
+		info("msg_aggr_resp: processing composite msg_list...");
+	while ((next_msg = list_next(itr))) {
+		switch (next_msg->msg_type) {
+		case REQUEST_BATCH_JOB_LAUNCH:
+		case RESPONSE_SLURM_RC:
+			/* signal sending thread that slurmctld received this
+			 * msg */
+			if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE)
+				info("msg_aggr_resp: response found for "
+				     "index %u signaling sending thread",
+				     next_msg->msg_index);
+			slurm_mutex_lock(&msg_collection.aggr_mutex);
+			if (!(msg_aggr = _handle_msg_aggr_ret(
+				      next_msg->msg_index, 1))) {
+				debug2("msg_aggr_resp: error: unable to "
+				       "locate aggr message struct for job %u",
+					next_msg->msg_index);
+				slurm_mutex_unlock(&msg_collection.aggr_mutex);
+				continue;
+			}
+			if (msg_aggr->resp_callback &&
+			    (next_msg->msg_type != RESPONSE_SLURM_RC))
+				(*(msg_aggr->resp_callback))(next_msg);
+			pthread_cond_signal(&msg_aggr->wait_cond);
+			slurm_mutex_unlock(&msg_collection.aggr_mutex);
+			break;
+		case RESPONSE_MESSAGE_COMPOSITE:
+			comp_msg = (composite_msg_t *)next_msg->data;
+			/* set up the address here for the next node */
+			memcpy(&next_msg->address, &comp_msg->sender,
+			       sizeof(slurm_addr_t));
+
+			if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE) {
+				char addrbuf[100];
+				slurm_print_slurm_addr(&next_msg->address,
+						       addrbuf, 32);
+				info("msg_aggr_resp: composite response msg "
+				     "found for %s", addrbuf);
+			}
+
+			slurm_send_only_node_msg(next_msg);
+
+			break;
+		default:
+			error("_rpc_composite_resp: invalid msg type in "
+			      "composite msg_list");
+			break;
+		}
+	}
+	list_iterator_destroy(itr);
+	if (msg_collection.debug_flags & DEBUG_FLAG_ROUTE)
+		info("msg aggr: _rpc_composite_resp: finished processing "
+		     "composite msg_list...");
+}
diff --git a/src/common/msg_aggr.h b/src/common/msg_aggr.h
new file mode 100644
index 000000000..f2ad2b89c
--- /dev/null
+++ b/src/common/msg_aggr.h
@@ -0,0 +1,62 @@
+/*****************************************************************************\
+ *  msg_aggr.h - Message Aggregator for sending messages to the
+ *               slurmctld, if a reply is expected this also will wait
+ *               and get that reply when received.
+ *****************************************************************************
+ *  Copyright (C) 2015 Bull S. A. S.
+ *		Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois.
+ *  Copyright (C) 2015 SchedMD LLC.
+ *  Written by Martin Perry <martin.perry@bull.com>
+ *             Danny Auble <da@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _MSG_AGGR_H
+#define _MSG_AGGR_H
+
+#include "src/common/slurm_protocol_defs.h"
+
+extern void msg_aggr_sender_init(char *host, uint16_t port, uint64_t window,
+				 uint64_t max_msg_cnt);
+extern void msg_aggr_sender_reconfig(uint64_t window, uint64_t max_msg_cnt);
+extern void msg_aggr_sender_fini(void);
+
+/* add a message that needs to be sent.
+ * IN: msg - message to be sent
+ * IN: wait - whether or not we need to wait for a response
+ * IN: resp_callback - function to process response
+ */
+extern void msg_aggr_add_msg(slurm_msg_t *msg, bool wait,
+			     void (*resp_callback) (slurm_msg_t *msg));
+extern void msg_aggr_add_comp(Buf buffer, void *auth_cred, header_t *header);
+extern void msg_aggr_resp(slurm_msg_t *msg);
+
+#endif
diff --git a/src/common/net.c b/src/common/net.c
index f4800834f..8d07a302c 100644
--- a/src/common/net.c
+++ b/src/common/net.c
@@ -81,10 +81,7 @@ static short _sock_bind_wild(int sockfd)
 	socklen_t len;
 	struct sockaddr_in sin;
 
-	memset(&sin, 0, sizeof(sin));
-	sin.sin_family = AF_INET;
-	sin.sin_addr.s_addr = htonl(INADDR_ANY);
-	sin.sin_port = htons(0);	/* bind ephemeral port */
+	slurm_setup_sockaddr(&sin, 0); /* bind ephemeral port */
 
 	if (bind(sockfd, (struct sockaddr *) &sin, sizeof(sin)) < 0)
 		return (-1);
diff --git a/src/common/node_conf.c b/src/common/node_conf.c
index 0c803a738..a0265e520 100644
--- a/src/common/node_conf.c
+++ b/src/common/node_conf.c
@@ -98,8 +98,10 @@ static int	_delete_config_record (void);
 #if _DEBUG
 static void	_dump_hash (void);
 #endif
-static struct node_record *_find_alias_node_record (char *name);
-static struct node_record *_find_node_record (char *name, bool test_alias);
+static struct node_record *
+		_find_alias_node_record(char *name, bool log_missing);
+static struct node_record *
+		_find_node_record (char *name,bool test_alias,bool log_missing);
 static void	_list_delete_config (void *config_entry);
 static void	_list_delete_feature (void *feature_entry);
 static int	_list_find_config (void *config_entry, void *key);
@@ -354,18 +356,17 @@ static void _dump_hash (void)
 /*
  * _find_alias_node_record - find a record for node with the alias of
  * the specified name supplied
- * input: name - name to be aliased of the desired node
- * output: return pointer to node record or NULL if not found
- * global: node_record_table_ptr - pointer to global node table
- *         node_hash_table - xhash struct indexing node records per name
+ * IN: name - name to be aliased of the desired node
+ * IN: log_missing - if set, then print an error message if the node is not found
+ * OUT: return pointer to node record or NULL if not found
  */
-static struct node_record *_find_alias_node_record (char *name)
+static struct node_record *_find_alias_node_record(char *name, bool log_missing)
 {
 	int i;
 	char *alias = NULL;
 
 	if ((name == NULL) || (name[0] == '\0')) {
-		info("_find_alias_node_record: passed NULL name");
+		info("%s: passed NULL name", __func__);
 		return NULL;
 	}
 	/* Get the alias we have just to make sure the user isn't
@@ -388,7 +389,9 @@ static struct node_record *_find_alias_node_record (char *name)
 			xfree(alias);
 			return node_ptr;
 		}
-		error ("_find_alias_node_record: lookup failure for %s", name);
+
+		if (log_missing)
+			error("%s: lookup failure for %s", __func__, name);
 	}
 
 	/* revert to sequential search */
@@ -457,7 +460,8 @@ static int _list_find_config (void *config_entry, void *key)
  * globals: node_record_table_ptr - pointer to node table
  * NOTE: the caller must xfree the memory at node_list when no longer required
  */
-hostlist_t bitmap2hostlist (bitstr_t *bitmap) {
+hostlist_t bitmap2hostlist (bitstr_t *bitmap)
+{
 	int i, first, last;
 	hostlist_t hl;
 
@@ -789,6 +793,7 @@ extern struct node_record *create_node_record (
 	/* these values will be overwritten when the node actually registers */
 	node_ptr->cpus = config_ptr->cpus;
 	node_ptr->cpu_load = NO_VAL;
+	node_ptr->free_mem = NO_VAL;
 	node_ptr->cpu_spec_list = xstrdup(config_ptr->cpu_spec_list);
 	node_ptr->boards = config_ptr->boards;
 	node_ptr->sockets = config_ptr->sockets;
@@ -800,8 +805,9 @@ extern struct node_record *create_node_record (
 	node_ptr->node_spec_bitmap = NULL;
 	node_ptr->tmp_disk = config_ptr->tmp_disk;
 	node_ptr->select_nodeinfo = select_g_select_nodeinfo_alloc();
-	node_ptr->energy = acct_gather_energy_alloc();
+	node_ptr->energy = acct_gather_energy_alloc(1);
 	node_ptr->ext_sensors = ext_sensors_alloc();
+	node_ptr->owner = NO_VAL;
 	xassert (node_ptr->magic = NODE_MAGIC)  /* set value */;
 	return node_ptr;
 }
@@ -810,19 +816,45 @@ extern struct node_record *create_node_record (
  * find_node_record - find a record for node with specified name
  * IN: name - name of the desired node
  * RET: pointer to node record or NULL if not found
+ * NOTE: Logs an error if the node name is NOT found
  */
 extern struct node_record *find_node_record (char *name)
 {
-	return _find_node_record(name, true);
+	return _find_node_record(name, true, true);
+}
+
+/*
+ * find_node_record2 - find a record for node with specified name
+ * IN: name - name of the desired node
+ * RET: pointer to node record or NULL if not found
+ * NOTE: Does not log an error if the node name is NOT found
+ */
+extern struct node_record *find_node_record2 (char *name)
+{
+	return _find_node_record(name, true, false);
+}
+
+/*
+ * find_node_record_no_alias - find a record for node with specified name
+ * without looking at the node's alias (NodeHostName).
+ * IN: name - name of the desired node
+ * RET: pointer to node record or NULL if not found
+ * NOTE: Does not log an error if the node name is NOT found
+ */
+extern struct node_record *find_node_record_no_alias (char *name)
+{
+	return _find_node_record(name, false, true);
 }
 
 /*
  * _find_node_record - find a record for node with specified name
  * IN: name - name of the desired node
  * IN: test_alias - if set, also test NodeHostName value
+ * IN: log_missing - if set, then print an error message if the node is not found
  * RET: pointer to node record or NULL if not found
  */
-static struct node_record *_find_node_record (char *name, bool test_alias)
+static struct node_record *_find_node_record (char *name, bool test_alias,
+					      bool log_missing)
 {
 	int i;
 	struct node_record *node_ptr;
@@ -845,7 +877,8 @@ static struct node_record *_find_node_record (char *name, bool test_alias)
 		    (strcmp(node_record_table_ptr[0].name, "localhost") == 0))
 			return (&node_record_table_ptr[0]);
 
-		error ("find_node_record: lookup failure for %s", name);
+		if (log_missing)
+			error ("find_node_record: lookup failure for %s", name);
 	}
 	/* revert to sequential search */
 	else {
@@ -859,7 +892,7 @@ static struct node_record *_find_node_record (char *name, bool test_alias)
 	if (test_alias) {
 		/* look for the alias node record if the user put this in
 	 	 * instead of what slurm sees the node name as */
-	 	return _find_alias_node_record (name);
+		return _find_alias_node_record(name, log_missing);
 	}
 	return NULL;
 }
@@ -886,7 +919,7 @@ extern int init_node_conf (void)
 	struct node_record *node_ptr;
 
 	node_ptr = node_record_table_ptr;
-	for (i=0; i< node_record_count; i++, node_ptr++)
+	for (i = 0; i < node_record_count; i++, node_ptr++)
 		purge_node_rec(node_ptr);
 
 	node_record_count = 0;
@@ -912,17 +945,14 @@ extern void node_fini2 (void)
 	struct node_record *node_ptr;
 
 	if (config_list) {
-		list_destroy(config_list);
-		config_list = NULL;
-		list_destroy(feature_list);
-		feature_list = NULL;
-		list_destroy(front_end_list);
-		front_end_list = NULL;
+		FREE_NULL_LIST(config_list);
+		FREE_NULL_LIST(feature_list);
+		FREE_NULL_LIST(front_end_list);
 	}
 
 	xhash_free(node_hash_table);
 	node_ptr = node_record_table_ptr;
-	for (i=0; i< node_record_count; i++, node_ptr++)
+	for (i = 0; i < node_record_count; i++, node_ptr++)
 		purge_node_rec(node_ptr);
 
 	xfree(node_record_table_ptr);
@@ -965,7 +995,7 @@ extern int node_name2bitmap (char *node_names, bool best_effort,
 
 	while ( (this_node_name = hostlist_shift (host_list)) ) {
 		struct node_record *node_ptr;
-		node_ptr = _find_node_record(this_node_name, best_effort);
+		node_ptr = _find_node_record(this_node_name, best_effort, true);
 		if (node_ptr) {
 			bit_set (my_bitmap, (bitoff_t) (node_ptr -
 							node_record_table_ptr));
@@ -1003,7 +1033,7 @@ extern int hostlist2bitmap (hostlist_t hl, bool best_effort, bitstr_t **bitmap)
 	hi = hostlist_iterator_create(hl);
 	while ((name = hostlist_next(hi)) != NULL) {
 		struct node_record *node_ptr;
-		node_ptr = _find_node_record(name, best_effort);
+		node_ptr = _find_node_record(name, best_effort, true);
 		if (node_ptr) {
 			bit_set (my_bitmap, (bitoff_t) (node_ptr -
 							node_record_table_ptr));
@@ -1029,18 +1059,21 @@ extern void purge_node_rec (struct node_record *node_ptr)
 	xfree(node_ptr->cpu_spec_list);
 	xfree(node_ptr->features);
 	xfree(node_ptr->gres);
-	if (node_ptr->gres_list)
-		list_destroy(node_ptr->gres_list);
+	FREE_NULL_LIST(node_ptr->gres_list);
 	xfree(node_ptr->name);
 	xfree(node_ptr->node_hostname);
 	FREE_NULL_BITMAP(node_ptr->node_spec_bitmap);
 	xfree(node_ptr->os);
 	xfree(node_ptr->part_pptr);
+	xfree(node_ptr->power);
 	xfree(node_ptr->reason);
 	xfree(node_ptr->version);
 	acct_gather_energy_destroy(node_ptr->energy);
 	ext_sensors_destroy(node_ptr->ext_sensors);
 	select_g_select_nodeinfo_free(node_ptr->select_nodeinfo);
+	xfree(node_ptr->tres_str);
+	xfree(node_ptr->tres_fmt_str);
+	xfree(node_ptr->tres_cnt);
 }
 
 /*
diff --git a/src/common/node_conf.h b/src/common/node_conf.h
index 405393dd1..c43cb79f3 100644
--- a/src/common/node_conf.h
+++ b/src/common/node_conf.h
@@ -160,16 +160,24 @@ struct node_record {
 					 * no need to save/restore */
 	time_t down_time;		/* When first set to DOWN state */
 #endif	/* HAVE_ALPS_CRAY */
-	acct_gather_energy_t *energy;
+	acct_gather_energy_t *energy;	/* power consumption data */
 	ext_sensors_data_t *ext_sensors; /* external sensor data */
+	power_mgmt_data_t *power;	/* power management data */
 	dynamic_plugin_data_t *select_nodeinfo; /* opaque data structure,
 						 * use select_g_get_nodeinfo()
 						 * to access contents */
 	uint32_t cpu_load;		/* CPU load * 100 */
 	time_t cpu_load_time;		/* Time when cpu_load last set */
+	uint32_t free_mem;		/* Free memory in MiB */
+	time_t free_mem_time;		/* Time when free_mem last set */
 	uint16_t protocol_version;	/* Slurm version number */
 	char *version;			/* Slurm version */
 	bitstr_t *node_spec_bitmap;	/* node cpu specialization bitmap */
+	uint32_t owner;			/* User allowed to use node or NO_VAL */
+	uint16_t owner_job_cnt;		/* Count of exclusive jobs by "owner" */
+	char *tres_str;                 /* tres this node has */
+	char *tres_fmt_str;		/* tres this node has */
+	uint64_t *tres_cnt;		/* tres this node has. NO_PACK*/
 };
 extern struct node_record *node_record_table_ptr;  /* ptr to node records */
 extern int node_record_count;		/* count in node_record_table_ptr */
@@ -253,12 +261,29 @@ extern struct node_record *create_node_record (
 
 /*
  * find_node_record - find a record for node with specified name
- * input: name - name of the desired node
- * output: return pointer to node record or NULL if not found
- *         node_hash_table - table of hash indexes
+ * IN: name - name of the desired node
+ * RET: pointer to node record or NULL if not found
+ * NOTE: Logs an error if the node name is NOT found
  */
 extern struct node_record *find_node_record (char *name);
 
+/*
+ * find_node_record2 - find a record for node with specified name
+ * IN: name - name of the desired node
+ * RET: pointer to node record or NULL if not found
+ * NOTE: Does not log an error if the node name is NOT found
+ */
+extern struct node_record *find_node_record2 (char *name);
+
+/*
+ * find_node_record_no_alias - find a record for node with specified name
+ * without looking at the node's alias (NodeHostName).
+ * IN: name - name of the desired node
+ * RET: pointer to node record or NULL if not found
+ * NOTE: Does not log an error if the node name is NOT found
+ */
+extern struct node_record *find_node_record_no_alias (char *name);
+
 /*
  * hostlist2bitmap - given a hostlist, build a bitmap representation
  * IN hl          - hostlist
diff --git a/src/common/node_select.c b/src/common/node_select.c
index c0f32ef45..bc7cb74cd 100644
--- a/src/common/node_select.c
+++ b/src/common/node_select.c
@@ -136,8 +136,7 @@ extern void destroy_select_ba_request(void *arg)
 
 	if (ba_request) {
 		xfree(ba_request->save_name);
-		if (ba_request->elongate_geos)
-			list_destroy(ba_request->elongate_geos);
+		FREE_NULL_LIST(ba_request->elongate_geos);
 
 		xfree(ba_request->blrtsimage);
 		xfree(ba_request->linuximage);
@@ -393,11 +392,6 @@ done:
 				      select_type_param_string(cr_type),
 				      cr_type);
 			}
-		} else {
-#ifdef HAVE_XCPU
-			error("%s is incompatible with XCPU use", type);
-			fatal("Use SelectType=select/linear");
-#endif
 		}
 	}
 
diff --git a/src/common/pack.c b/src/common/pack.c
index b925fe71d..ec7b6e1eb 100644
--- a/src/common/pack.c
+++ b/src/common/pack.c
@@ -70,6 +70,8 @@ strong_alias(pack_time,		slurm_pack_time);
 strong_alias(unpack_time,	slurm_unpack_time);
 strong_alias(packdouble,	slurm_packdouble);
 strong_alias(unpackdouble,	slurm_unpackdouble);
+strong_alias(packlongdouble,	slurm_packlongdouble);
+strong_alias(unpacklongdouble,	slurm_unpacklongdouble);
 strong_alias(pack64,		slurm_pack64);
 strong_alias(unpack64,		slurm_unpack64);
 strong_alias(pack32,		slurm_pack32);
@@ -151,7 +153,7 @@ Buf init_buf(int size)
 	my_buf->magic = BUF_MAGIC;
 	my_buf->size = size;
 	my_buf->processed = 0;
-	my_buf->head = xmalloc_nz(sizeof(char)*size);
+	my_buf->head = xmalloc(sizeof(char)*size);
 	return my_buf;
 }
 
@@ -208,6 +210,7 @@ int unpack_time(time_t * valp, Buf buffer)
  * Given a double, multiple by FLOAT_MULT and then
  * typecast to a uint64_t in host byte order, convert to network byte order
  * store in buffer, and adjust buffer counters.
+ * NOTE: There is an IEEE standard format for double.
  */
 void 	packdouble(double val, Buf buffer)
 {
@@ -240,6 +243,7 @@ void 	packdouble(double val, Buf buffer)
  * Given a buffer containing a network byte order 64-bit integer,
  * typecast as double, and  divide by FLOAT_MULT
  * store a host double at 'valp', and adjust buffer counters.
+ * NOTE: There is an IEEE standard format for double.
  */
 int	unpackdouble(double *valp, Buf buffer)
 {
@@ -261,6 +265,38 @@ int	unpackdouble(double *valp, Buf buffer)
 	return SLURM_SUCCESS;
 }
 
+/*
+ * long double has no standard format, so pass the data as a string
+ */
+void 	packlongdouble(long double val, Buf buffer)
+{
+	char val_str[256];
+
+	snprintf(val_str, sizeof(val_str), "%Lf", val);
+	packstr(val_str, buffer);
+}
+
+/*
+ * long double has no standard format, so pass the data as a string
+ */
+int	unpacklongdouble(long double *valp, Buf buffer)
+{
+	long double nl;
+	char *val_str = NULL;
+	uint32_t size_val_str = 0;
+	int rc;
+
+	rc = unpackmem_ptr(&val_str, &size_val_str, buffer);
+	if (rc != SLURM_SUCCESS)
+		return rc;
+
+	if (sscanf(val_str, "%Lf", &nl) != 1)
+		return SLURM_ERROR;
+
+	*valp = nl;
+	return SLURM_SUCCESS;
+}
+
 /*
  * Given a 64-bit integer in host byte order, convert to network byte order
  * store in buffer, and adjust buffer counters.
@@ -426,6 +462,60 @@ int unpack64_array(uint64_t ** valp, uint32_t * size_val, Buf buffer)
 	return SLURM_SUCCESS;
 }
 
+void packdouble_array(double *valp, uint32_t size_val, Buf buffer)
+{
+	uint32_t i = 0;
+
+	pack32(size_val, buffer);
+
+	for (i = 0; i < size_val; i++) {
+		packdouble(*(valp + i), buffer);
+	}
+}
+
+int unpackdouble_array(double **valp, uint32_t* size_val, Buf buffer)
+{
+	uint32_t i = 0;
+
+	if (unpack32(size_val, buffer))
+		return SLURM_ERROR;
+
+	*valp = xmalloc_nz((*size_val) * sizeof(double));
+	for (i = 0; i < *size_val; i++) {
+		if (unpackdouble((*valp) + i, buffer))
+			return SLURM_ERROR;
+	}
+	return SLURM_SUCCESS;
+}
+
+void packlongdouble_array(long double *valp, uint32_t size_val, Buf buffer)
+{
+	uint32_t i = 0;
+
+	pack32(size_val, buffer);
+
+	for (i = 0; i < size_val; i++) {
+		packlongdouble(*(valp + i), buffer);
+	}
+}
+
+int unpacklongdouble_array(long double **valp, uint32_t* size_val, Buf buffer)
+{
+	uint32_t i = 0;
+
+	if (unpack32(size_val, buffer))
+		return SLURM_ERROR;
+
+	*valp = xmalloc_nz((*size_val) * sizeof(long double));
+	for (i = 0; i < *size_val; i++) {
+		if (unpacklongdouble((*valp) + i, buffer))
+			return SLURM_ERROR;
+	}
+	return SLURM_SUCCESS;
+}
+
+
+
 /*
  * Given a 16-bit integer in host byte order, convert to network byte order,
  * store in buffer and adjust buffer counters.
diff --git a/src/common/pack.h b/src/common/pack.h
index ad79daf4b..80cb2fd43 100644
--- a/src/common/pack.h
+++ b/src/common/pack.h
@@ -67,7 +67,7 @@
 /* If we unpack a buffer that contains bad data, we want to avoid a memory
  * allocation error due to array or buffer sizes that are unreasonably large */
 #define MAX_PACK_ARRAY_LEN	(128 * 1024)
-#define MAX_PACK_MEM_LEN	(64 * 1024 * 1024)
+#define MAX_PACK_MEM_LEN	(1024 * 1024 * 1024)
 
 struct slurm_buf {
 	uint32_t magic;
@@ -96,6 +96,9 @@ int	unpack_time(time_t *valp, Buf buffer);
 void 	packdouble(double val, Buf buffer);
 int	unpackdouble(double *valp, Buf buffer);
 
+void 	packlongdouble(long double val, Buf buffer);
+int	unpacklongdouble(long double *valp, Buf buffer);
+
 void 	pack64(uint64_t val, Buf buffer);
 int	unpack64(uint64_t *valp, Buf buffer);
 
@@ -117,6 +120,13 @@ int	unpack32_array(uint32_t **valp, uint32_t* size_val, Buf buffer);
 void	pack64_array(uint64_t *valp, uint32_t size_val, Buf buffer);
 int	unpack64_array(uint64_t **valp, uint32_t* size_val, Buf buffer);
 
+void	packdouble_array(double *valp, uint32_t size_val, Buf buffer);
+int	unpackdouble_array(double **valp, uint32_t* size_val, Buf buffer);
+
+void	packlongdouble_array(long double *valp, uint32_t size_val, Buf buffer);
+int	unpacklongdouble_array(long double **valp, uint32_t* size_val,
+			       Buf buffer);
+
 void	packmem(char *valp, uint32_t size_val, Buf buffer);
 int	unpackmem(char *valp, uint32_t *size_valp, Buf buffer);
 int	unpackmem_ptr(char **valp, uint32_t *size_valp, Buf buffer);
@@ -155,6 +165,19 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 		goto unpack_error;			\
 } while (0)
 
+#define safe_packlongdouble(val,buf) do {		\
+	assert(sizeof(val) == sizeof(long double));   	\
+	assert(buf->magic == BUF_MAGIC);		\
+	packlongdouble(val,buf);			\
+} while (0)
+
+#define safe_unpacklongdouble(valp,buf) do {		\
+	assert(sizeof(*valp) == sizeof(long double));	\
+	assert(buf->magic == BUF_MAGIC);		\
+        if (unpacklongdouble(valp,buf))			\
+		goto unpack_error;			\
+} while (0)
+
 #define safe_pack64(val,buf) do {			\
 	assert(sizeof(val) == sizeof(uint64_t)); 	\
 	assert(buf->magic == BUF_MAGIC);		\
@@ -234,6 +257,20 @@ int	unpackmem_array(char *valp, uint32_t size_valp, Buf buffer);
 		goto unpack_error;			\
 } while (0)
 
+#define safe_unpackdouble_array(valp,size_valp,buf) do {	\
+	assert(sizeof(*size_valp) == sizeof(uint32_t)); \
+	assert(buf->magic == BUF_MAGIC);		\
+	if (unpackdouble_array(valp,size_valp,buf))		\
+		goto unpack_error;			\
+} while (0)
+
+#define safe_unpacklongdouble_array(valp,size_valp,buf) do {	\
+	assert(sizeof(*size_valp) == sizeof(uint32_t)); \
+	assert(buf->magic == BUF_MAGIC);		\
+	if (unpacklongdouble_array(valp,size_valp,buf))		\
+		goto unpack_error;			\
+} while (0)
+
 #define safe_packmem(valp,size_val,buf) do {		\
 	assert(sizeof(size_val) == sizeof(uint32_t)); 	\
 	assert(size_val == 0 || valp != NULL);		\
diff --git a/src/common/parse_config.c b/src/common/parse_config.c
index 68c4eb807..c9cc8f015 100644
--- a/src/common/parse_config.c
+++ b/src/common/parse_config.c
@@ -55,6 +55,7 @@
 #include "src/common/hostlist.h"
 #include "src/common/log.h"
 #include "src/common/macros.h"
+#include "src/common/pack.h"
 #include "src/common/parse_config.h"
 #include "src/common/parse_value.h"
 #include "src/common/read_config.h"
@@ -67,6 +68,7 @@
 
 strong_alias(s_p_hashtbl_create,	slurm_s_p_hashtbl_create);
 strong_alias(s_p_hashtbl_destroy,	slurm_s_p_hashtbl_destroy);
+strong_alias(s_p_parse_buffer,		slurm_s_p_parse_buffer);
 strong_alias(s_p_parse_file,		slurm_s_p_parse_file);
 strong_alias(s_p_parse_pair,		slurm_s_p_parse_pair);
 strong_alias(s_p_parse_line,		slurm_s_p_parse_line);
@@ -75,6 +77,9 @@ strong_alias(s_p_get_string,		slurm_s_p_get_string);
 strong_alias(s_p_get_long,		slurm_s_p_get_long);
 strong_alias(s_p_get_uint16,		slurm_s_p_get_uint16);
 strong_alias(s_p_get_uint32,		slurm_s_p_get_uint32);
+strong_alias(s_p_get_float,		slurm_s_p_get_float);
+strong_alias(s_p_get_double,		slurm_s_p_get_double);
+strong_alias(s_p_get_long_double,	slurm_s_p_get_long_double);
 strong_alias(s_p_get_pointer,		slurm_s_p_get_pointer);
 strong_alias(s_p_get_array,		slurm_s_p_get_array);
 strong_alias(s_p_get_boolean,		slurm_s_p_get_boolean);
@@ -88,8 +93,8 @@ strong_alias(transfer_s_p_options,	slurm_transfer_s_p_options);
 static regex_t keyvalue_re;
 static char *keyvalue_pattern =
 	"^[[:space:]]*"
-	"([[:alnum:]]+)" /* key */
-	"[[:space:]]*=[[:space:]]*"
+	"([[:alnum:]_.]+)" /* key */
+	"[[:space:]]*([-*+/]?)=[[:space:]]*"
 	"((\"([^\"]*)\")|([^[:space:]]+))" /* value: quoted with whitespace,
 					    * or unquoted and no whitespace */
 	"([[:space:]]|$)";
@@ -98,6 +103,7 @@ static bool keyvalue_initialized = false;
 struct s_p_values {
 	char *key;
 	int type;
+	slurm_parser_operator_t operator;
 	int data_count;
 	void *data;
 	int (*handler)(void **data, slurm_parser_enum_t type,
@@ -172,6 +178,7 @@ s_p_hashtbl_t *s_p_hashtbl_create(const s_p_options_t options[])
 	for (op = options; op->key != NULL; op++) {
 		value = xmalloc(sizeof(s_p_values_t));
 		value->key = xstrdup(op->key);
+		value->operator = S_P_OPERATOR_SET;
 		value->type = op->type;
 		value->data_count = 0;
 		value->data = NULL;
@@ -240,6 +247,7 @@ static void _conf_file_values_free(s_p_values_t *p)
 			for (i = 0; i < p->data_count; ++i) {
 				s_p_hashtbl_destroy(v->values[i]);
 			}
+			xfree(v->values);
 			xfree(p->data);
 			break;
 		default:
@@ -292,14 +300,17 @@ static void _keyvalue_regex_init(void)
  * Return 0 when a key-value pair is found, and -1 otherwise.
  */
 static int _keyvalue_regex(const char *line,
-			   char **key, char **value, char **remaining)
+			   char **key, char **value, char **remaining,
+			   slurm_parser_operator_t *operator)
 {
 	size_t nmatch = 8;
 	regmatch_t pmatch[8];
+	char op;
 
 	*key = NULL;
 	*value = NULL;
 	*remaining = (char *)line;
+	*operator = S_P_OPERATOR_SET;
 	memset(pmatch, 0, sizeof(regmatch_t)*nmatch);
 
 	if (regexec(&keyvalue_re, line, nmatch, pmatch, 0)
@@ -309,18 +320,30 @@ static int _keyvalue_regex(const char *line,
 
 	*key = (char *)(xstrndup(line + pmatch[1].rm_so,
 				 pmatch[1].rm_eo - pmatch[1].rm_so));
-
-	if (pmatch[4].rm_so != -1) {
-		*value = (char *)(xstrndup(line + pmatch[4].rm_so,
-					   pmatch[4].rm_eo - pmatch[4].rm_so));
-	} else if (pmatch[5].rm_so != -1) {
+	if (pmatch[2].rm_so != -1 &&
+	    (pmatch[2].rm_so != pmatch[2].rm_eo)) {
+		op = *(line + pmatch[2].rm_so);
+		if (op == '+') {
+			*operator = S_P_OPERATOR_ADD;
+		} else if (op == '-') {
+			*operator = S_P_OPERATOR_SUB;
+		} else if (op == '*') {
+			*operator = S_P_OPERATOR_MUL;
+		} else if (op == '/') {
+			*operator = S_P_OPERATOR_DIV;
+		}
+	}
+	if (pmatch[5].rm_so != -1) {
 		*value = (char *)(xstrndup(line + pmatch[5].rm_so,
 					   pmatch[5].rm_eo - pmatch[5].rm_so));
+	} else if (pmatch[6].rm_so != -1) {
+		*value = (char *)(xstrndup(line + pmatch[6].rm_so,
+					   pmatch[6].rm_eo - pmatch[6].rm_so));
 	} else {
 		*value = xstrdup("");
 	}
 
-	*remaining = (char *)(line + pmatch[2].rm_eo);
+	*remaining = (char *)(line + pmatch[3].rm_eo);
 
 	return 0;
 }
@@ -493,10 +516,11 @@ s_p_hashtbl_t* _hashtbl_copy_keys(const s_p_hashtbl_t* from_hashtbl,
 	to_hashtbl = (s_p_hashtbl_t *)xmalloc(len);
 
 	for (i = 0; i < CONF_HASH_LEN; ++i) {
-		for (val_ptr = from_hashtbl[i]; val_ptr; val_ptr =
-			     val_ptr->next) {
+		for (val_ptr = from_hashtbl[i]; val_ptr;
+		     val_ptr = val_ptr->next) {
 			val_copy = xmalloc(sizeof(s_p_values_t));
 			val_copy->key = xstrdup(val_ptr->key);
+			val_copy->operator = val_ptr->operator;
 			val_copy->type = val_ptr->type;
 			val_copy->handler = val_ptr->handler;
 			val_copy->destroy = val_ptr->destroy;
@@ -583,6 +607,29 @@ static void* _handle_boolean(const char* key, const char* value)
 	return data;
 }
 
+static void* _handle_float(const char* key, const char* value)
+{
+	float* data = (float*)xmalloc(sizeof(float));
+	if (s_p_handle_float(data, key, value) == SLURM_ERROR)
+		return NULL;
+	return data;
+}
+
+static void* _handle_double(const char* key, const char* value)
+{
+	double* data = (double*)xmalloc(sizeof(double));
+	if (s_p_handle_double(data, key, value) == SLURM_ERROR)
+		return NULL;
+	return data;
+}
+
+static void* _handle_ldouble(const char* key, const char* value)
+{
+	long double* data = (long double*)xmalloc(sizeof(long double));
+	if (s_p_handle_long_double(data, key, value) == SLURM_ERROR)
+		return NULL;
+	return data;
+}
 
 static int _handle_pointer(s_p_values_t *v, const char *value,
 			   const char *line, char **leftover)
@@ -678,6 +725,18 @@ static int _handle_expline_cmp_uint32(const void* v1, const void* v2)
 {
 	return *((uint32_t*)v1) != *((uint32_t*)v2);
 }
+static int _handle_expline_cmp_float(const void* v1, const void* v2)
+{
+	return *((float*)v1) != *((float*)v2);
+}
+static int _handle_expline_cmp_double(const void* v1, const void* v2)
+{
+	return *((double*)v1) != *((double*)v2);
+}
+static int _handle_expline_cmp_ldouble(const void* v1, const void* v2)
+{
+	return *((long double*)v1) != *((long double*)v2);
+}
 
 /* ac = array case
  * the master key type is not string. Iterate over the tables looking
@@ -751,6 +810,21 @@ static void _handle_expline_merge(_expline_values_t* v_data,
 				   _handle_expline_cmp_uint32, &v_data->values,
 				   tables_count);
 		break;
+	case S_P_FLOAT:
+		_handle_expline_ac(current_tbl, master_key, matchp->data,
+				   _handle_expline_cmp_float, &v_data->values,
+				   tables_count);
+		break;
+	case S_P_DOUBLE:
+		_handle_expline_ac(current_tbl, master_key, matchp->data,
+				   _handle_expline_cmp_double, &v_data->values,
+				   tables_count);
+		break;
+	case S_P_LONG_DOUBLE:
+		_handle_expline_ac(current_tbl, master_key, matchp->data,
+				   _handle_expline_cmp_ldouble, &v_data->values,
+				   tables_count);
+		break;
 	}
 }
 
@@ -840,7 +914,15 @@ static void _handle_keyvalue_match(s_p_values_t *v,
 	case S_P_EXPLINE:
 		_handle_expline(v, value, line, leftover);
 		break;
-
+	case S_P_FLOAT:
+		_handle_common(v, value, line, leftover, _handle_float);
+		break;
+	case S_P_DOUBLE:
+		_handle_common(v, value, line, leftover, _handle_double);
+		break;
+	case S_P_LONG_DOUBLE:
+		_handle_common(v, value, line, leftover, _handle_ldouble);
+		break;
 	}
 }
 
@@ -875,11 +957,13 @@ int s_p_parse_line(s_p_hashtbl_t *hashtbl, const char *line, char **leftover)
 	char *ptr = (char *)line;
 	s_p_values_t *p;
 	char *new_leftover;
+	slurm_parser_operator_t op;
 
 	_keyvalue_regex_init();
 
-	while (_keyvalue_regex(ptr, &key, &value, &new_leftover) == 0) {
+	while (_keyvalue_regex(ptr, &key, &value, &new_leftover, &op) == 0) {
 		if ((p = _conf_hashtbl_lookup(hashtbl, key))) {
+			p->operator = op;
 			_handle_keyvalue_match(p, value,
 					       new_leftover, &new_leftover);
 			*leftover = ptr = new_leftover;
@@ -906,11 +990,13 @@ static int _parse_next_key(s_p_hashtbl_t *hashtbl,
 	char *key, *value;
 	s_p_values_t *p;
 	char *new_leftover;
+	slurm_parser_operator_t op;
 
 	_keyvalue_regex_init();
 
-	if (_keyvalue_regex(line, &key, &value, &new_leftover) == 0) {
+	if (_keyvalue_regex(line, &key, &value, &new_leftover, &op) == 0) {
 		if ((p = _conf_hashtbl_lookup(hashtbl, key))) {
+			p->operator = op;
 			_handle_keyvalue_match(p, value,
 					       new_leftover, &new_leftover);
 			*leftover = new_leftover;
@@ -1175,6 +1261,59 @@ int s_p_parse_file(s_p_hashtbl_t *hashtbl, uint32_t *hash_val, char *filename,
 	return rc;
 }
 
+int s_p_parse_buffer(s_p_hashtbl_t *hashtbl, uint32_t *hash_val,
+		     Buf buffer, bool ignore_new)
+{
+	char *leftover = NULL;
+	int rc = SLURM_SUCCESS;
+	int line_number;
+	uint32_t utmp32;
+	char *tmp_str = NULL;
+
+	if (!buffer) {
+		error("s_p_parse_buffer: No buffer given.");
+		return SLURM_ERROR;
+	}
+
+	line_number = 0;
+	_keyvalue_regex_init();
+	while (remaining_buf(buffer) > 0) {
+		safe_unpackstr_xmalloc(&tmp_str, &utmp32, buffer);
+		if (tmp_str != NULL) {
+			line_number++;
+			if (*tmp_str == '\0') {
+				xfree(tmp_str);
+				continue;
+			}
+			_parse_next_key(hashtbl, tmp_str, &leftover, ignore_new);
+			/* Make sure that after parsing only whitespace
+			   is left over */
+			if (!_line_is_space(leftover)) {
+				char *ptr = xstrdup(leftover);
+				_strip_cr_nl(ptr);
+				if (ignore_new) {
+					debug("s_p_parse_buffer : error in line"
+					      " %d: \"%s\"", line_number, ptr);
+				} else {
+					error("s_p_parse_buffer : error in line"
+					      " %d: \"%s\"", line_number, ptr);
+					rc = SLURM_ERROR;
+				}
+				xfree(ptr);
+			}
+			xfree(tmp_str);
+			if (rc == SLURM_SUCCESS)
+				continue;
+		}
+	unpack_error:
+		debug3("s_p_parse_buffer: ending after line %u",
+		       line_number);
+		break;
+	}
+
+	return rc;
+}
+
 /*
  * s_p_hashtbl_merge
  *
@@ -1287,6 +1426,11 @@ void s_p_hashtbl_merge_keys(s_p_hashtbl_t *to_hashtbl,
 					s_p_hashtbl_merge_keys(
 							t_expline->template,
 							f_expline->template);
+					/* Keys merged, free container memory */
+					s_p_hashtbl_destroy(f_expline->template);
+					s_p_hashtbl_destroy(f_expline->index);
+					//FIXME: Destroy "values" ?
+					xfree(f_expline);
 				}
 				pp = &p->next;
 				p = p->next;
@@ -1367,6 +1511,7 @@ static s_p_hashtbl_t* _parse_expline_adapt_table(const s_p_hashtbl_t* hashtbl)
 		for (val_ptr = hashtbl[i]; val_ptr; val_ptr = val_ptr->next) {
 			val_copy = xmalloc(sizeof(s_p_values_t));
 			val_copy->key = xstrdup(val_ptr->key);
+			val_copy->operator = val_ptr->operator;
 			if (val_ptr->type == S_P_PLAIN_STRING) {
 				val_copy->type = S_P_STRING;
 			} else {
@@ -1466,17 +1611,26 @@ static int _parse_expline_doexpand(s_p_hashtbl_t** tables,
 	for (i = 0; i < tables_count; ++i) {
 		if (item_count > 0) {
 			--item_count;
-			free(item_str);
+			if (item_str)
+				free(item_str);
 			item_str = hostlist_shift(item_hl);
 		}
-		if (!s_p_parse_pair(tables[i], item->key, item_str)) {
+		/*
+		 * The destination tables are created without any info on the
+		 * operator associated with the key in s_p_parse_line_expanded.
+		 * So, parse the targeted pair injecting that information to
+		 * push it into the destination table.
+		 */
+		if (!s_p_parse_pair_with_op(tables[i], item->key, item_str,
+					    item->operator)) {
 			error("Error parsing %s = %s.", item->key, item_str);
 			free(item_str);
 			return 0;
 		}
 	}
 
-	free(item_str);
+	if (item_str)
+		free(item_str);
 	return 1;
 }
 
@@ -1525,7 +1679,7 @@ int s_p_parse_line_expanded(const s_p_hashtbl_t *hashtbl,
 	 */
 	tables = (s_p_hashtbl_t**)xmalloc(tables_count *
 					  sizeof(s_p_hashtbl_t*));
-	for (i = 0; i < tables_count; ++i) {
+	for (i = 0; i < tables_count; i++) {
 		free(value_str);
 		value_str = hostlist_shift(value_hl);
 		tables[i] = _hashtbl_copy_keys(hashtbl,
@@ -1563,9 +1717,10 @@ cleanup:
 	if (strhashtbl)
 		s_p_hashtbl_destroy(strhashtbl);
 
-	if (status == SLURM_ERROR) {
-		for (i = 0; i < tables_count; ++i)
-			s_p_hashtbl_destroy(tables[i]);
+	if (status == SLURM_ERROR && tables) {
+		for (i = 0; i < tables_count; i++)
+			if (tables[i])
+				s_p_hashtbl_destroy(tables[i]);
 		xfree(tables);
 	}
 	else {
@@ -1577,16 +1732,24 @@ cleanup:
 
 /*
  * Returns 1 if the line is parsed cleanly, and 0 otherwise.
+ * Set the operator of the targeted s_p_values_t to the provided value.
  */
-int s_p_parse_pair(s_p_hashtbl_t *hashtbl, const char *key, const char *value)
+int s_p_parse_pair_with_op(s_p_hashtbl_t *hashtbl, const char *key,
+			   const char *value, slurm_parser_operator_t opt)
 {
 	s_p_values_t *p;
 	char *leftover, *v;
 
 	if ((p = _conf_hashtbl_lookup(hashtbl, key)) == NULL) {
-		error("Parsing error at unrecognized key: %s", key);
+		error("%s: Parsing error at unrecognized key: %s",
+		      __func__, key);
+		return 0;
+	}
+	if (!value) {
+		error("%s: Value pointer is NULL for key %s", __func__, key);
 		return 0;
 	}
+	p-> operator = opt;
 	/* we have value separated from key here so parse it different way */
 	while (*value != '\0' && isspace(*value))
 		value++; /* skip spaces at start if any */
@@ -1613,6 +1776,14 @@ int s_p_parse_pair(s_p_hashtbl_t *hashtbl, const char *key, const char *value)
 	return 1;
 }
 
+/*
+ * Returns 1 if the line is parsed cleanly, and 0 otherwise.
+ */
+int s_p_parse_pair(s_p_hashtbl_t *hashtbl, const char *key, const char *value)
+{
+	return s_p_parse_pair_with_op(hashtbl, key, value, S_P_OPERATOR_SET);
+}
+
 /* common checks for s_p_get_* returns NULL if invalid.
  *
  * Information concerning theses function can be found in the header file.
@@ -1688,6 +1859,21 @@ int s_p_get_uint32(uint32_t *num, const char *key,
 	return 0;
 }
 
+int s_p_get_operator(slurm_parser_operator_t *opt, const char *key,
+		     const s_p_hashtbl_t *hashtbl)
+{
+	s_p_values_t *p;
+	if (!hashtbl)
+		return 0;
+	p = _conf_hashtbl_lookup(hashtbl, key);
+	if (p) {
+		*opt = p->operator;
+		return 1;
+	}
+	error("Invalid key \"%s\"", key);
+	return 0;
+}
+
 int s_p_get_pointer(void **ptr, const char *key, const s_p_hashtbl_t *hashtbl)
 {
 	s_p_values_t *p = _get_check(S_P_POINTER, key, hashtbl);
@@ -1755,6 +1941,44 @@ int s_p_get_boolean(bool *flag, const char *key, const s_p_hashtbl_t *hashtbl)
 	return 0;
 }
 
+int s_p_get_float(float *num, const char *key,
+		  const s_p_hashtbl_t *hashtbl)
+{
+	s_p_values_t *p = _get_check(S_P_FLOAT, key, hashtbl);
+
+	if (p) {
+		*num = *(float *)p->data;
+		return 1;
+	}
+
+	return 0;
+}
+
+int s_p_get_double(double *num, const char *key,
+		  const s_p_hashtbl_t *hashtbl)
+{
+	s_p_values_t *p = _get_check(S_P_DOUBLE, key, hashtbl);
+
+	if (p) {
+		*num = *(double *)p->data;
+		return 1;
+	}
+
+	return 0;
+}
+
+int s_p_get_long_double(long double *num, const char *key,
+			const s_p_hashtbl_t *hashtbl)
+{
+	s_p_values_t *p = _get_check(S_P_LONG_DOUBLE, key, hashtbl);
+
+	if (p) {
+		*num = *(long double *)p->data;
+		return 1;
+	}
+
+	return 0;
+}
 
 /*
  * Given an "options" array, print the current values of all
@@ -1769,6 +1993,9 @@ void s_p_dump_values(const s_p_hashtbl_t *hashtbl,
 	long num;
 	uint16_t num16;
 	uint32_t num32;
+	float numf;
+	double numd;
+	long double numld;
 	char *str;
 	void *ptr;
 	void **ptr_array;
@@ -1842,6 +2069,24 @@ void s_p_dump_values(const s_p_hashtbl_t *hashtbl,
 				verbose("%s", op->key);
 			}
 			break;
+		case S_P_FLOAT:
+			if (s_p_get_float(&numf, op->key, hashtbl))
+				verbose("%s = %f", op->key, numf);
+			else
+				verbose("%s", op->key);
+			break;
+		case S_P_DOUBLE:
+			if (s_p_get_double(&numd, op->key, hashtbl))
+				verbose("%s = %f", op->key, numd);
+			else
+				verbose("%s", op->key);
+			break;
+		case S_P_LONG_DOUBLE:
+			if (s_p_get_long_double(&numld, op->key, hashtbl))
+				verbose("%s = %Lf", op->key, numld);
+			else
+				verbose("%s", op->key);
+			break;
 		case S_P_IGNORE:
 			break;
 		}
diff --git a/src/common/parse_config.h b/src/common/parse_config.h
index 39e988816..171a51755 100644
--- a/src/common/parse_config.h
+++ b/src/common/parse_config.h
@@ -45,6 +45,7 @@
 
 #include <stdint.h>
 #include "slurm/slurm.h"
+#include "src/common/pack.h"
 
 /*
  * This slurm file parser provides a method for parsing a file
@@ -253,9 +254,38 @@ typedef enum slurm_parser_enum {
 	S_P_BOOLEAN,
 	S_P_LINE,
 	S_P_EXPLINE,
-	S_P_PLAIN_STRING /* useful only within S_P_EXPLINE */
+	S_P_PLAIN_STRING /* useful only within S_P_EXPLINE */,
+	S_P_FLOAT,
+	S_P_DOUBLE,
+	S_P_LONG_DOUBLE
+
 } slurm_parser_enum_t;
 
+/*
+ * Standard Slurm conf files use key=value elements.
+ * slurm_parser_operator_t extends that concept to cover additional
+ * use cases like :
+ *        key+=value
+ *        key-=value
+ *        key*=value
+ *        key/=value
+ *
+ * this feature is for now dedicated to the layouts framework. It enables
+ * to have advanced modifications of entities reusing the traditional
+ * Slurm parser with the new operator information to manage updates.
+ *
+ */
+typedef enum slurm_parser_operator {
+	S_P_OPERATOR_SET = 0,
+	S_P_OPERATOR_ADD,
+	S_P_OPERATOR_SUB,
+	S_P_OPERATOR_MUL,
+	S_P_OPERATOR_DIV,
+	S_P_OPERATOR_SET_IF_MIN,
+	S_P_OPERATOR_SET_IF_MAX,
+	S_P_OPERATOR_AVG
+} slurm_parser_operator_t;
+
 typedef struct conf_file_options {
 	char *key;
 	slurm_parser_enum_t type;
@@ -279,11 +309,29 @@ void s_p_hashtbl_destroy(s_p_hashtbl_t *hashtbl);
 int s_p_parse_file(s_p_hashtbl_t *hashtbl, uint32_t *hash_val, char *filename,
 		   bool ignore_new);
 
+/* Returns SLURM_SUCCESS if the buffer was parsed correctly.
+ * buffer must be a valid Buf containing only strings. The parsing
+ * stops at the first non-string content extracted.
+ * OUT hash_val - cyclic redundancy check (CRC) character-wise value
+ *                of file.
+ * IN ignore_new - do not treat unrecognized keywords as a fatal error,
+ *                 print debug() message and continue
+ */
+int s_p_parse_buffer(s_p_hashtbl_t *hashtbl, uint32_t *hash_val,
+		     Buf buffer, bool ignore_new);
+
 /*
  * Returns 1 if the line is parsed cleanly, and 0 otherwise.
  */
 int s_p_parse_pair(s_p_hashtbl_t *hashtbl, const char *key, const char *value);
 
+/*
+ * Returns 1 if the line is parsed cleanly, and 0 otherwise.
+ * Set the operator of the updated s_p_values_t to the provided one.
+ */
+int s_p_parse_pair_with_op(s_p_hashtbl_t *hashtbl, const char *key,
+			   const char *value, slurm_parser_operator_t opt);
+
 /*
  * Returns 1 if the line is parsed cleanly, and 0 otherwise.
  */
@@ -416,6 +464,75 @@ int s_p_get_uint16(uint16_t *num, const char *key,
 int s_p_get_uint32(uint32_t *num, const char *key,
 		   const s_p_hashtbl_t *hashtbl);
 
+/*
+ * s_p_get_float
+ *
+ * Search for a key in a s_p_hashtbl_t with value of type
+ * float.  If the key is found and has a set value, the
+ * value is returned in "num".
+ *
+ * OUT num - pointer to a float where the value is returned
+ * IN key - hash table key
+ * IN hashtbl - hash table created by s_p_hashtbl_create()
+ *
+ * Returns 1 when a value was set for "key" during parsing and "num"
+ *   was successfully set, otherwise returns 0;
+ */
+int s_p_get_float(float *num, const char *key,
+		  const s_p_hashtbl_t *hashtbl);
+
+/*
+ * s_p_get_double
+ *
+ * Search for a key in a s_p_hashtbl_t with value of type
+ * double.  If the key is found and has a set value, the
+ * value is returned in "num".
+ *
+ * OUT num - pointer to a double where the value is returned
+ * IN key - hash table key
+ * IN hashtbl - hash table created by s_p_hashtbl_create()
+ *
+ * Returns 1 when a value was set for "key" during parsing and "num"
+ *   was successfully set, otherwise returns 0;
+ */
+int s_p_get_double(double *num, const char *key,
+		   const s_p_hashtbl_t *hashtbl);
+
+/*
+ * s_p_get_long_double
+ *
+ * Search for a key in a s_p_hashtbl_t with value of type
+ * long double.  If the key is found and has a set value, the
+ * value is returned in "num".
+ *
+ * OUT num - pointer to a long double where the value is returned
+ * IN key - hash table key
+ * IN hashtbl - hash table created by s_p_hashtbl_create()
+ *
+ * Returns 1 when a value was set for "key" during parsing and "num"
+ *   was successfully set, otherwise returns 0;
+ */
+int s_p_get_long_double(long double *num, const char *key,
+			const s_p_hashtbl_t *hashtbl);
+
+/*
+ * s_p_get_operator
+ *
+ * Search for a key in a s_p_hashtbl_t and return the operator
+ * associated with that key in the configuration file. The operator
+ * is one of the slurm_parser_operator_t enum possible values.
+ *
+ * OUT operator - pointer to a slurm_parser_operator_t where the
+ *     operator is returned
+ * IN key - hash table key
+ * IN hashtbl - hash table created by s_p_hashtbl_create()
+ *
+ * Returns 1 when an operator was set for "key" during parsing and
+ *     "operator" was successfully set, otherwise returns 0;
+ */
+int s_p_get_operator(slurm_parser_operator_t *opt, const char *key,
+		     const s_p_hashtbl_t *hashtbl);
+
 /*
  * s_p_get_pointer
  *
diff --git a/src/common/parse_time.c b/src/common/parse_time.c
index 61550c530..8e8f12f14 100644
--- a/src/common/parse_time.c
+++ b/src/common/parse_time.c
@@ -54,6 +54,7 @@
 
 #include "slurm/slurm.h"
 #include "src/common/macros.h"
+#include "src/common/slurm_time.h"
 
 #define _RUN_STAND_ALONE 0
 
@@ -378,7 +379,7 @@ static int _get_date(char *time_str, int *pos, int *month, int *mday, int *year)
 		*pos = offset - 1;
 		*month = mon - 1;	/* zero origin */
 		*mday  = day;
-		*year  = yr - 1900;     /* need to make it mktime
+		*year  = yr - 1900;     /* need to make it slurm_mktime
 					   happy 1900 == "00" */
 		return 0;
 	}
@@ -462,7 +463,7 @@ extern time_t parse_time(char *time_str, int past)
 	}
 
 	time_now = time(NULL);
-	time_now_tm = localtime(&time_now);
+	time_now_tm = slurm_localtime(&time_now);
 
 	for (pos=0; ((time_str[pos] != '\0') && (time_str[pos] != '\n'));
 	     pos++) {
@@ -478,7 +479,7 @@ extern time_t parse_time(char *time_str, int past)
 		}
 		if (strncasecmp(time_str+pos, "tomorrow", 8) == 0) {
 			time_t later = time_now + (24 * 60 * 60);
-			struct tm *later_tm = localtime(&later);
+			struct tm *later_tm = slurm_localtime(&later);
 			month = later_tm->tm_mon;
 			mday  = later_tm->tm_mday;
 			year  = later_tm->tm_year;
@@ -536,7 +537,7 @@ extern time_t parse_time(char *time_str, int past)
 				goto prob;
 			}
 			later    = time_now + delta;
-			later_tm = localtime(&later);
+			later_tm = slurm_localtime(&later);
 			month    = later_tm->tm_mon;
 			mday     = later_tm->tm_mday;
 			year     = later_tm->tm_year;
@@ -580,7 +581,7 @@ extern time_t parse_time(char *time_str, int past)
 			year  = time_now_tm->tm_year;
 		} else {/* tomorrow */
 			time_t later = time_now + (24 * 60 * 60);
-			struct tm *later_tm = localtime(&later);
+			struct tm *later_tm = slurm_localtime(&later);
 			month = later_tm->tm_mon;
 			mday  = later_tm->tm_mday;
 			year  = later_tm->tm_year;
@@ -624,7 +625,7 @@ extern time_t parse_time(char *time_str, int past)
 	res_tm.tm_isdst = -1;
 
 /* 	printf("%d/%d/%d %d:%d\n",month+1,mday,year,hour,minute); */
-	if ((ret_time = mktime(&res_tm)) != -1)
+	if ((ret_time = slurm_mktime(&res_tm)) != -1)
 		return ret_time;
 
  prob:	fprintf(stderr, "Invalid time specification (pos=%d): %s\n", pos, time_str);
@@ -644,7 +645,7 @@ int main(int argc, char *argv[])
 		||  (in_line[0] == '\n'))
 			break;
 		when = parse_time(in_line);
-		printf("%s", asctime(localtime(&when)));
+		printf("%s", slurm_asctime(slurm_localtime(&when)));
 	}
 }
 #endif
@@ -670,7 +671,7 @@ static char *_relative_date_fmt(const struct tm *when)
 		time_t now = time(NULL);
 		struct tm tm;
 
-		localtime_r(&now, &tm);
+		slurm_localtime_r(&now, &tm);
 		todays_date = 1000 * (tm.tm_year + 1900) + tm.tm_yday;
 	}
 
@@ -705,7 +706,7 @@ slurm_make_time_str (time_t *time, char *string, int size)
 {
 	struct tm time_tm;
 
-	localtime_r(time, &time_tm);
+	slurm_localtime_r(time, &time_tm);
 	if ((*time == (time_t) 0) || (*time == (time_t) INFINITE)) {
 		snprintf(string, size, "Unknown");
 	} else {
diff --git a/src/common/parse_value.c b/src/common/parse_value.c
index 4243af2b3..5e1e1ed4e 100644
--- a/src/common/parse_value.c
+++ b/src/common/parse_value.c
@@ -157,11 +157,13 @@ int s_p_handle_boolean(bool* data, const char* key, const char* value)
 
 	if (!strcasecmp(value, "yes")
 		|| !strcasecmp(value, "up")
+		|| !strcasecmp(value, "true")
 		|| !strcasecmp(value, "1")) {
 		flag = true;
 	} else if (!strcasecmp(value, "no")
-			|| !strcasecmp(value, "down")
-			|| !strcasecmp(value, "0")) {
+		   || !strcasecmp(value, "down")
+		   || !strcasecmp(value, "false")
+		   || !strcasecmp(value, "0")) {
 		flag = false;
 	} else {
 		error("\"%s\" is not a valid option for \"%s\"",
@@ -223,7 +225,8 @@ int s_p_handle_double(double* data, const char* key, const char* value)
 	return SLURM_SUCCESS;
 }
 
-int s_p_handle_ldouble(long double* data, const char* key, const char* value)
+int s_p_handle_long_double(long double* data, const char* key,
+			   const char* value)
 {
 	char *endptr;
 	long double num;
diff --git a/src/common/parse_value.h b/src/common/parse_value.h
index b7dc223e0..5326a9c24 100644
--- a/src/common/parse_value.h
+++ b/src/common/parse_value.h
@@ -49,6 +49,7 @@ int s_p_handle_uint32(uint32_t* data, const char* key, const char* value);
 int s_p_handle_boolean(bool* data, const char* key, const char* value);
 int s_p_handle_float(float* data, const char* key, const char* value);
 int s_p_handle_double(double* data, const char* key, const char* value);
-int s_p_handle_ldouble(long double* data, const char* key, const char* value);
+int s_p_handle_long_double(long double* data, const char* key,
+			   const char* value);
 
 #endif /* !_PARSE_VALUE_H */
diff --git a/src/common/plugin.c b/src/common/plugin.c
index 40b78dea4..971a1a5da 100644
--- a/src/common/plugin.c
+++ b/src/common/plugin.c
@@ -105,6 +105,8 @@ const char * plugin_strerror(plugin_err_t e)
 		case EPLUGIN_MISSING_SYMBOL:
 			return ("Plugin missing a required symbol use "
 				"debug3 to see");
+		case EPLUGIN_BAD_VERSION:
+			return ("Incompatible plugin version");
 	}
 	return ("Unknown error");
 }
@@ -135,14 +137,18 @@ plugin_peek( const char *fq_path,
 		return SLURM_ERROR;
 	}
 
-	if ( ( version = (uint32_t *) dlsym( plug, PLUGIN_VERSION ) ) != NULL ) {
-		if ( plugin_version != NULL ) {
-			*plugin_version = *version;
-		}
-	} else {
-		dlclose( plug );
-		/* could be vestigial library, don't treat as an error */
-		verbose( "%s: not a SLURM plugin", fq_path );
+	version = (uint32_t *) dlsym(plug, PLUGIN_VERSION);
+	if (!version) {
+		verbose("%s: plugin_version symbol not defined", fq_path);
+	} else if ((*version != SLURM_VERSION_NUMBER) && strcmp(type, "spank")){
+		/* NOTE: We could alternately test just the MAJOR.MINOR values */
+		int plugin_major, plugin_minor, plugin_micro;
+		plugin_major = SLURM_VERSION_MAJOR(*version);
+		plugin_minor = SLURM_VERSION_MINOR(*version);
+		plugin_micro = SLURM_VERSION_MICRO(*version);
+		dlclose(plug);
+		info("%s: Incompatible Slurm plugin version (%d.%d.%d)",
+		     fq_path, plugin_major, plugin_minor, plugin_micro);
 		return SLURM_ERROR;
 	}
 
@@ -155,6 +161,8 @@ plugin_load_from_file(plugin_handle_t *p, const char *fq_path)
 {
 	plugin_handle_t plug;
 	int (*init)(void);
+	uint32_t *version;
+	char *type = NULL;
 
 	*p = PLUGIN_INVALID_HANDLE;
 
@@ -188,12 +196,26 @@ plugin_load_from_file(plugin_handle_t *p, const char *fq_path)
 
 	/* Now see if our required symbols are defined. */
 	if ((dlsym(plug, PLUGIN_NAME) == NULL) ||
-	    (dlsym(plug, PLUGIN_TYPE) == NULL) ||
-	    (dlsym(plug, PLUGIN_VERSION) == NULL)) {
-		dlclose (plug);
+	    ((type = dlsym(plug, PLUGIN_TYPE)) == NULL)) {
+		dlclose(plug);
 		return EPLUGIN_MISSING_NAME;
 	}
 
+	version = (uint32_t *) dlsym(plug, PLUGIN_VERSION);
+	if (!version) {
+		verbose("%s: plugin_version symbol not defined", fq_path);
+	} else if ((*version != SLURM_VERSION_NUMBER) && strcmp(type, "spank")){
+		/* NOTE: We could alternately test just the MAJOR.MINOR values */
+		int plugin_major, plugin_minor, plugin_micro;
+		plugin_major = SLURM_VERSION_MAJOR(*version);
+		plugin_minor = SLURM_VERSION_MINOR(*version);
+		plugin_micro = SLURM_VERSION_MICRO(*version);
+		dlclose(plug);
+		info("%s: Incompatible Slurm plugin version (%d.%d.%d)",
+		     fq_path, plugin_major, plugin_minor, plugin_micro);
+		return EPLUGIN_BAD_VERSION;
+	}
+
 	/*
 	 * Now call its init() function, if present.  If the function
 	 * returns nonzero, unload the plugin and signal an error.
@@ -298,7 +320,18 @@ plugin_unload( plugin_handle_t plug )
 		if ( ( fini = dlsym( plug, "fini" ) ) != NULL ) {
 			(*fini)();
 		}
+#ifndef MEMORY_LEAK_DEBUG
+/**************************************************************************\
+ * To test for memory leaks, set MEMORY_LEAK_DEBUG to 1 using
+ * "configure --enable-memory-leak-debug", then execute the program.
+ *
+ * Note that without --enable-memory-leak-debug the daemon will
+ * unload the shared objects at exit thus preventing valgrind
+ * to display the stack where the eventual leaks may be.
+ * It is always best to test with and without --enable-memory-leak-debug.
+\**************************************************************************/
 		(void) dlclose( plug );
+#endif
 	}
 }
 
@@ -335,8 +368,9 @@ plugin_get_version( plugin_handle_t plug )
 {
 	uint32_t *ptr;
 
-	if ( plug == PLUGIN_INVALID_HANDLE ) return 0;
-	ptr = (uint32_t *) dlsym( plug, PLUGIN_VERSION );
+	if (plug == PLUGIN_INVALID_HANDLE)
+		return 0;
+	ptr = (uint32_t *) dlsym(plug, PLUGIN_VERSION);
 	return ptr ? *ptr : 0;
 }
 
diff --git a/src/common/plugin.h b/src/common/plugin.h
index 8b9d3a363..a51f5d69d 100644
--- a/src/common/plugin.h
+++ b/src/common/plugin.h
@@ -96,6 +96,7 @@ typedef enum {
 	EPLUGIN_INIT_FAILED,     /* Plugin's init() callback failed     */
 	EPLUGIN_MISSING_NAME,    /* plugin_name/type/version missing    */
 	EPLUGIN_MISSING_SYMBOL,  /* some symbol needed isn't found      */
+	EPLUGIN_BAD_VERSION,     /* incompatible plugin version         */
 } plugin_err_t;
 
 const char *plugin_strerror(plugin_err_t err);
diff --git a/src/common/plugrack.c b/src/common/plugrack.c
index c11b5a753..5c5e9fb63 100644
--- a/src/common/plugrack.c
+++ b/src/common/plugrack.c
@@ -259,7 +259,7 @@ plugrack_destroy( plugrack_t rack )
 	}
 	list_iterator_destroy( it );
 
-	list_destroy( rack->entries );
+	FREE_NULL_LIST( rack->entries );
 	xfree( rack->major_type );
 	xfree( rack );
 	return SLURM_SUCCESS;
diff --git a/src/common/plugstack.c b/src/common/plugstack.c
index 8ec118869..7d0dca6ba 100644
--- a/src/common/plugstack.c
+++ b/src/common/plugstack.c
@@ -208,10 +208,8 @@ static int dyn_spank_set_job_env (const char *var, const char *val, int ovwt);
 
 static void spank_stack_destroy (struct spank_stack *stack)
 {
-	if (stack->plugin_list)
-		list_destroy (stack->plugin_list);
-	if (stack->option_cache)
-		list_destroy (stack->option_cache);
+	FREE_NULL_LIST (stack->plugin_list);
+	FREE_NULL_LIST (stack->option_cache);
 	xfree (stack->plugin_path);
 	xfree (stack);
 }
diff --git a/src/common/power.c b/src/common/power.c
new file mode 100644
index 000000000..cf9a8ea00
--- /dev/null
+++ b/src/common/power.c
@@ -0,0 +1,224 @@
+/*****************************************************************************\
+ *  src/common/power.c - Generic power management plugin wrapper functions.
+ *****************************************************************************
+ *  Copyright (C) 2014-2015 SchedMD LLC.
+ *  Written by Morris Jette <jette@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/common/macros.h"
+#include "src/common/plugin.h"
+#include "src/common/plugrack.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/power.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/slurmctld/slurmctld.h"
+
+/*
+ * WARNING:  Do not change the order of these fields or add additional
+ * fields at the beginning of the structure.  If you do, the plugin will stop
+ * working.  If you need to add fields, add them at the end of the structure.
+ */
+typedef struct slurm_power_ops {
+	void		(*job_resume)	(struct job_record *job_ptr);
+	void		(*job_start)	(struct job_record *job_ptr);
+	void		(*reconfig)	(void);
+} slurm_power_ops_t;
+
+/*
+ * These strings must be kept in the same order as the fields
+ * declared for slurm_power_ops_t.
+ */
+static const char *syms[] = {
+	"power_p_job_resume",
+	"power_p_job_start",
+	"power_p_reconfig"
+};
+
+static int g_context_cnt = -1;
+static slurm_power_ops_t *ops = NULL;
+static plugin_context_t **g_context = NULL;
+static char *power_plugin_list = NULL;
+static pthread_mutex_t g_context_lock = PTHREAD_MUTEX_INITIALIZER;
+static bool init_run = false;
+
+/* Initialize the power plugin */
+extern int power_g_init(void)
+{
+	int rc = SLURM_SUCCESS;
+	char *last = NULL, *names;
+	char *plugin_type = "power";
+	char *type;
+
+	if (init_run && (g_context_cnt >= 0))
+		return rc;
+
+	slurm_mutex_lock(&g_context_lock);
+	if (g_context_cnt >= 0)
+		goto fini;
+
+	power_plugin_list = slurm_get_power_plugin();
+	g_context_cnt = 0;
+	if ((power_plugin_list == NULL) || (power_plugin_list[0] == '\0'))
+		goto fini;
+
+	names = power_plugin_list;
+	while ((type = strtok_r(names, ",", &last))) {
+		xrealloc(ops, (sizeof(slurm_power_ops_t)*(g_context_cnt + 1)));
+		xrealloc(g_context,
+			 (sizeof(plugin_context_t *) * (g_context_cnt + 1)));
+		if (strncmp(type, "power/", 6) == 0)
+			type += 6; /* backward compatibility */
+		type = xstrdup_printf("power/%s", type);
+		g_context[g_context_cnt] = plugin_context_create(
+			plugin_type, type, (void **)&ops[g_context_cnt],
+			syms, sizeof(syms));
+		if (!g_context[g_context_cnt]) {
+			error("cannot create %s context for %s",
+			      plugin_type, type);
+			rc = SLURM_ERROR;
+			xfree(type);
+			break;
+		}
+
+		xfree(type);
+		g_context_cnt++;
+		names = NULL; /* for next iteration */
+	}
+	init_run = true;
+
+fini:
+	slurm_mutex_unlock(&g_context_lock);
+
+	if (rc != SLURM_SUCCESS)
+		power_g_fini();
+
+	return rc;
+}
+
+/* Terminate the power plugin and free all memory */
+extern void power_g_fini(void)
+{
+	int i;
+
+	slurm_mutex_lock(&g_context_lock);
+	if (g_context_cnt < 0)
+		goto fini;
+
+	init_run = false;
+	for (i = 0; i < g_context_cnt; i++) {
+		if (g_context[i])
+			plugin_context_destroy(g_context[i]);
+	}
+	xfree(ops);
+	xfree(g_context);
+	xfree(power_plugin_list);
+	g_context_cnt = -1;
+
+fini:	slurm_mutex_unlock(&g_context_lock);
+	return;
+}
+
+/* Read the configuration file */
+extern void power_g_reconfig(void)
+{
+	int i;
+
+	(void) power_g_init();
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++)
+		(*(ops[i].reconfig))();
+	slurm_mutex_unlock(&g_context_lock);
+}
+
+/* Note that a suspended job has been resumed */
+extern void power_g_job_resume(struct job_record *job_ptr)
+{
+	int i;
+
+	(void) power_g_init();
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++)
+		(*(ops[i].job_resume))(job_ptr);
+	slurm_mutex_unlock(&g_context_lock);
+}
+
+/* Note that a job has been allocated resources and is ready to start */
+extern void power_g_job_start(struct job_record *job_ptr)
+{
+	int i;
+
+	(void) power_g_init();
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++)
+		(*(ops[i].job_start))(job_ptr);
+	slurm_mutex_unlock(&g_context_lock);
+}
+
+/* Pack a power management data structure */
+extern void power_mgmt_data_pack(power_mgmt_data_t *power, Buf buffer,
+				 uint16_t protocol_version)
+{
+	if (!power) {
+		pack32(NO_VAL, buffer);
+	} else {
+		pack32(power->cap_watts, buffer);
+	}
+}
+
+/* Unpack a power management data structure
+ * Use power_mgmt_data_free() to free the returned structure */
+extern int power_mgmt_data_unpack(power_mgmt_data_t **power, Buf buffer,
+				  uint16_t protocol_version)
+{
+	power_mgmt_data_t *power_ptr = xmalloc(sizeof(power_mgmt_data_t));
+
+	safe_unpack32(&power_ptr->cap_watts, buffer);
+	*power = power_ptr;
+	return SLURM_SUCCESS;
+
+unpack_error:
+	xfree(power_ptr);
+	*power = NULL;
+	return SLURM_ERROR;
+}
+
+/* Free a power management data structure */
+extern void power_mgmt_data_free(power_mgmt_data_t *power)
+{
+	xfree(power);
+}
diff --git a/src/plugins/slurmctld/dynalloc/slurmctld_dynalloc.c b/src/common/power.h
similarity index 57%
rename from src/plugins/slurmctld/dynalloc/slurmctld_dynalloc.c
rename to src/common/power.h
index 36191e73a..a7d347445 100644
--- a/src/plugins/slurmctld/dynalloc/slurmctld_dynalloc.c
+++ b/src/common/power.h
@@ -1,9 +1,8 @@
 /*****************************************************************************\
- *  slurmctld_dynalloc.c - plugin for dynalloc (resource dynamic allocation)
+ *  src/common/power.h - Generic power management plugin wrapper functions.
  *****************************************************************************
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Written by Jimmy Cao <Jimmy.Cao@emc.com>, Ralph Castain <rhc@open-mpi.org>
- *  All rights reserved.
+ *  Copyright (C) 2014-2015 SchedMD LLC.
+ *  Written by Morris Jette <jette@schedmd.com>
  *
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://slurm.schedmd.com/>.
@@ -35,36 +34,48 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#include <stdio.h>
+#ifndef _SLURM_POWER_H
+#define _SLURM_POWER_H 	1
 
-#include "slurm/slurm_errno.h"
-#include "slurm/slurm.h"
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
 
-#include "src/common/plugin.h"
-#include "src/common/log.h"
-#include "src/common/node_select.h"
-#include "src/common/slurm_priority.h"
+#include "src/common/macros.h"
+#include "src/common/pack.h"
 #include "src/slurmctld/slurmctld.h"
 
-#include "msg.h"
+/*****************************************************************************\
+ * PLUGIN FUNCTIONS
+\*****************************************************************************/
+/* Initialize the power plugin */
+extern int power_g_init(void);
+
+/* Terminate the power plugin and free all memory */
+extern void power_g_fini(void);
+
+/* Read the configuration file */
+extern void power_g_reconfig(void);
+
+/* Note that a suspended job has been resumed */
+extern void power_g_job_resume(struct job_record *job_ptr);
+
+/* Note that a job has been allocated resources and is ready to start */
+extern void power_g_job_start(struct job_record *job_ptr);
+
+/*****************************************************************************\
+ * GENERIC DATA MOVEMENT FUNCTIONS
+\*****************************************************************************/
+/* Pack a power management data structure */
+extern void power_mgmt_data_pack(power_mgmt_data_t *power, Buf buffer,
+				 uint16_t protocol_version);
 
-const char		plugin_name[]	= "SLURM resource dynamic allocation plugin";
-const char		plugin_type[]	= "slurmctld/dynalloc";
-const uint32_t		plugin_version	= 100;
+/* Unpack a power management data structure
+ * Use power_mgmt_data_free() to free the returned structure */
+extern int power_mgmt_data_unpack(power_mgmt_data_t **power, Buf buffer,
+				  uint16_t protocol_version);
 
-/**************************************************************************/
-/*  TAG(                              init                              ) */
-/**************************************************************************/
-extern int init( void )
-{
-	verbose( "sched: resource dynamic allocation plugin loaded" );
-	return spawn_msg_thread();
-}
+/* Free a power management data structure */
+extern void power_mgmt_data_free(power_mgmt_data_t *power);
 
-/**************************************************************************/
-/*  TAG(                              fini                              ) */
-/**************************************************************************/
-extern void fini( void )
-{
-	term_msg_thread();
-}
+#endif /* _SLURM_POWER_H */
diff --git a/src/common/print_fields.c b/src/common/print_fields.c
index 6afeb8ce2..be444c45e 100644
--- a/src/common/print_fields.c
+++ b/src/common/print_fields.c
@@ -42,6 +42,7 @@
 
 int print_fields_parsable_print = 0;
 int print_fields_have_header = 1;
+char *fields_delimiter = NULL;
 
 extern void destroy_print_field(void *object)
 {
@@ -71,9 +72,14 @@ extern void print_fields_header(List print_fields_list)
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && (curr_inx == field_count))
 			printf("%s", field->name);
-		else if (print_fields_parsable_print)
+		else if (print_fields_parsable_print
+			 && fields_delimiter) {
+			printf("%s%s", field->name, fields_delimiter);
+		} else if (print_fields_parsable_print
+			 && !fields_delimiter) {
 			printf("%s|", field->name);
-		else {
+
+		} else {
 			int abs_len = abs(field->len);
 			printf("%*.*s ", abs_len, abs_len, field->name);
 		}
@@ -101,8 +107,10 @@ extern void print_fields_date(print_field_t *field, time_t value, int last)
 	if (print_fields_parsable_print == PRINT_FIELDS_PARSABLE_NO_ENDING
 	   && last)
 		printf("%s", temp_char);
-	else if (print_fields_parsable_print)
+	else if (print_fields_parsable_print && !fields_delimiter)
 		printf("%s|", temp_char);
+	else if (print_fields_parsable_print && fields_delimiter)
+		printf("%s%s", temp_char, fields_delimiter);
 	else if (field->len == abs_len)
 		printf("%*.*s ", abs_len, abs_len, temp_char);
 	else
@@ -125,8 +133,10 @@ extern void print_fields_str(print_field_t *field, char *value, int last)
 	if (print_fields_parsable_print == PRINT_FIELDS_PARSABLE_NO_ENDING
 	   && last)
 		printf("%s", print_this);
-	else if (print_fields_parsable_print)
+	else if (print_fields_parsable_print && !fields_delimiter)
 		printf("%s|", print_this);
+	else if (print_fields_parsable_print && fields_delimiter)
+		printf("%s%s", print_this, fields_delimiter);
 	else {
 		if (value) {
 			int len = strlen(value);
@@ -153,8 +163,10 @@ extern void print_fields_int(print_field_t *field, int value, int last)
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
 			;
-		else if (print_fields_parsable_print)
+		else if (print_fields_parsable_print && !fields_delimiter)
 			printf("|");
+		else if (print_fields_parsable_print && fields_delimiter)
+			printf("%s", fields_delimiter);
 		else
 			printf("%*s ", abs_len, " ");
 	} else {
@@ -162,8 +174,10 @@ extern void print_fields_int(print_field_t *field, int value, int last)
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
 			printf("%d", value);
-		else if (print_fields_parsable_print)
+		else if (print_fields_parsable_print && !fields_delimiter)
 			printf("%d|", value);
+		else if (print_fields_parsable_print && fields_delimiter)
+			printf("%d%s", value, fields_delimiter);
 		else if (field->len == abs_len)
 			printf("%*d ", abs_len, value);
 		else
@@ -184,8 +198,10 @@ extern void print_fields_uint16(print_field_t *field, uint32_t value, int last)
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
 			;
-		else if (print_fields_parsable_print)
+		else if (print_fields_parsable_print && !fields_delimiter)
 			printf("|");
+		else if (print_fields_parsable_print && fields_delimiter)
+			printf("%s", fields_delimiter);
 		else
 			printf("%*s ", field->len, " ");
 	} else {
@@ -193,8 +209,10 @@ extern void print_fields_uint16(print_field_t *field, uint32_t value, int last)
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
 			printf("%u", value);
-		else if (print_fields_parsable_print)
+		else if (print_fields_parsable_print && !fields_delimiter)
 			printf("%u|", value);
+		else if (print_fields_parsable_print && fields_delimiter)
+			printf("%u%s", value, fields_delimiter);
 		else if (field->len == abs_len)
 			printf("%*u ", abs_len, value);
 		else
@@ -211,8 +229,10 @@ extern void print_fields_uint32(print_field_t *field, uint32_t value, int last)
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
 			;
-		else if (print_fields_parsable_print)
+		else if (print_fields_parsable_print && !fields_delimiter)
 			printf("|");
+		else if (print_fields_parsable_print && fields_delimiter)
+			printf("%s", fields_delimiter);
 		else
 			printf("%*s ", field->len, " ");
 	} else {
@@ -220,8 +240,10 @@ extern void print_fields_uint32(print_field_t *field, uint32_t value, int last)
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
 			printf("%u", value);
-		else if (print_fields_parsable_print)
+		else if (print_fields_parsable_print && !fields_delimiter)
 			printf("%u|", value);
+		else if (print_fields_parsable_print && fields_delimiter)
+			printf("%u%s", value, fields_delimiter);
 		else if (field->len == abs_len)
 			printf("%*u ", abs_len, value);
 		else
@@ -234,13 +256,15 @@ extern void print_fields_uint64(print_field_t *field, uint64_t value, int last)
 	int abs_len = abs(field->len);
 
 	/* (value == unset)  || (value == cleared) */
-	if ((value == (uint64_t)NO_VAL) || (value == (uint64_t)INFINITE)) {
+	if ((value == NO_VAL64) || (value == INFINITE64)) {
 		if (print_fields_parsable_print
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
 			;
-		else if (print_fields_parsable_print)
+		else if (print_fields_parsable_print && !fields_delimiter)
 			printf("|");
+		else if (print_fields_parsable_print && fields_delimiter)
+			printf("%s", fields_delimiter);
 		else
 			printf("%*s ", field->len, " ");
 	} else {
@@ -248,8 +272,11 @@ extern void print_fields_uint64(print_field_t *field, uint64_t value, int last)
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
 			printf("%llu", (long long unsigned) value);
-		else if (print_fields_parsable_print)
+		else if (print_fields_parsable_print && !fields_delimiter)
 			printf("%llu|", (long long unsigned) value);
+		else if (print_fields_parsable_print && fields_delimiter)
+			printf("%llu%s", (long long unsigned) value,
+				fields_delimiter);
 		else if (field->len == abs_len)
 			printf("%*llu ", abs_len, (long long unsigned) value);
 		else
@@ -262,7 +289,7 @@ extern void print_fields_hex064(print_field_t *field, uint64_t value, int last)
 	int abs_len = abs(field->len);
 
 	/* (value == unset)  || (value == cleared) */
-	if ((value == (uint64_t)NO_VAL) || (value == (uint64_t)INFINITE)) {
+	if ((value == NO_VAL64) || (value == INFINITE64)) {
 		if (print_fields_parsable_print
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
@@ -276,8 +303,11 @@ extern void print_fields_hex064(print_field_t *field, uint64_t value, int last)
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
 			printf("%0llX", (long long unsigned) value);
-		else if (print_fields_parsable_print)
+		else if (print_fields_parsable_print && !fields_delimiter)
 			printf("%0llX|", (long long unsigned) value);
+		else if (print_fields_parsable_print && fields_delimiter)
+			printf("%0llX%s", (long long unsigned) value,
+			       fields_delimiter);
 		else if (field->len == abs_len)
 			printf("%0*llX ", abs_len, (long long unsigned) value);
 		else
@@ -289,7 +319,8 @@ extern void print_fields_double(print_field_t *field, double value, int last)
 {
 	int abs_len = abs(field->len);
 	/* (value == unset)  || (value == cleared) */
-	if ((value == (double)NO_VAL) || (value == (double)INFINITE)) {
+	if ((value == NO_VAL64) || (value == INFINITE64) ||
+	    (value == (uint64_t)NO_VAL) || (value == (uint64_t)INFINITE)) {
 		if (print_fields_parsable_print
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
@@ -303,8 +334,10 @@ extern void print_fields_double(print_field_t *field, double value, int last)
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
 			printf("%f", value);
-		else if (print_fields_parsable_print)
+		else if (print_fields_parsable_print && !fields_delimiter)
 			printf("%f|", value);
+		else if (print_fields_parsable_print && fields_delimiter)
+			printf("%f%s", value, fields_delimiter);
 		else if (field->len == abs_len)
 			printf("%*f ", abs_len, value);
 		else
@@ -332,8 +365,10 @@ extern void print_fields_long_double(
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
 			printf("%Lf", value);
-		else if (print_fields_parsable_print)
+		else if (print_fields_parsable_print && !fields_delimiter)
 			printf("%Lf|", value);
+		else if (print_fields_parsable_print && fields_delimiter)
+			printf("%Lf%s", value, fields_delimiter);
 		else if (field->len == abs_len)
 			printf("%*Lf ", abs_len, value);
 		else
@@ -342,11 +377,11 @@ extern void print_fields_long_double(
 
 }
 
-extern void print_fields_time(print_field_t *field, uint64_t value, int last)
+extern void print_fields_time(print_field_t *field, uint32_t value, int last)
 {
 	int abs_len = abs(field->len);
 	/* (value == unset)  || (value == cleared) */
-	if ((value == (uint64_t)NO_VAL) || (value == (uint64_t)INFINITE)) {
+	if ((value == NO_VAL) || (value == INFINITE)) {
 		if (print_fields_parsable_print
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
@@ -362,8 +397,10 @@ extern void print_fields_time(print_field_t *field, uint64_t value, int last)
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
 			printf("%s", time_buf);
-		else if (print_fields_parsable_print)
+		else if (print_fields_parsable_print && !fields_delimiter)
 			printf("%s|", time_buf);
+		else if (print_fields_parsable_print && fields_delimiter)
+			printf("%s%s", time_buf, fields_delimiter);
 		else if (field->len == abs_len)
 			printf("%*s ", abs_len, time_buf);
 		else
@@ -376,7 +413,7 @@ extern void print_fields_time_from_secs(print_field_t *field,
 {
 	int abs_len = abs(field->len);
 	/* (value == unset)  || (value == cleared) */
-	if ((value == (uint64_t)NO_VAL) || (value == (uint64_t)INFINITE)) {
+	if ((value == NO_VAL64) || (value == INFINITE64)) {
 		if (print_fields_parsable_print
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
@@ -392,8 +429,10 @@ extern void print_fields_time_from_secs(print_field_t *field,
 		   == PRINT_FIELDS_PARSABLE_NO_ENDING
 		   && last)
 			printf("%s", time_buf);
-		else if (print_fields_parsable_print)
+		else if (print_fields_parsable_print && !fields_delimiter)
 			printf("%s|", time_buf);
+		else if (print_fields_parsable_print && fields_delimiter)
+			printf("%s%s", time_buf, fields_delimiter);
 		else if (field->len == abs_len)
 			printf("%*s ", abs_len, time_buf);
 		else
@@ -428,8 +467,10 @@ extern void print_fields_char_list(print_field_t *field, List value, int last)
 	if (print_fields_parsable_print == PRINT_FIELDS_PARSABLE_NO_ENDING
 	   && last)
 		printf("%s", print_this);
-	else if (print_fields_parsable_print)
+	else if (print_fields_parsable_print && !fields_delimiter)
 		printf("%s|", print_this);
+	else if (print_fields_parsable_print && fields_delimiter)
+		printf("%s%s", print_this, fields_delimiter);
 	else if (print_this) {
 		if (strlen(print_this) > abs_len)
 			print_this[abs_len-1] = '+';
diff --git a/src/common/print_fields.h b/src/common/print_fields.h
index eaf8cc895..85fb2503d 100644
--- a/src/common/print_fields.h
+++ b/src/common/print_fields.h
@@ -83,6 +83,7 @@ enum {
 
 extern int print_fields_parsable_print;
 extern int print_fields_have_header;
+extern char *fields_delimiter;
 
 extern void destroy_print_field(void *object);
 extern void print_fields_header(List print_fields_list);
@@ -104,7 +105,7 @@ extern void print_fields_uint64(
 extern void print_fields_hex064(
 	print_field_t *field, uint64_t value, int last);
 extern void print_fields_time_from_mins(print_field_t *field,
-					uint64_t value, int last);
+					uint32_t value, int last);
 extern void print_fields_time_from_secs(print_field_t *field,
 					uint64_t value, int last);
 extern void print_fields_char_list(print_field_t *field, List value, int last);
diff --git a/src/common/proc_args.c b/src/common/proc_args.c
index 14b91b64c..856ddeec4 100644
--- a/src/common/proc_args.c
+++ b/src/common/proc_args.c
@@ -1,8 +1,8 @@
 /*****************************************************************************\
  *  proc_args.c - helper functions for command argument processing
- *  $Id: opt.h 11996 2007-08-10 20:36:26Z jette $
  *****************************************************************************
  *  Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
+ *  Portions Copyright (C) 2010-2015 SchedMD LLC <http://www.schedmd.com>.
  *  Written by Christopher Holmes <cholmes@hp.com>, who borrowed heavily
  *  from existing SLURM source code, particularly src/srun/opt.c
  *
@@ -104,8 +104,8 @@ void set_distribution(task_dist_states_t distribution,
 		      char **dist, char **lllp_dist)
 {
 	if (((int)distribution >= 0)
-	    && (distribution != SLURM_DIST_UNKNOWN)) {
-		switch (distribution) {
+	    && ((distribution & SLURM_DIST_STATE_BASE) != SLURM_DIST_UNKNOWN)) {
+		switch (distribution & SLURM_DIST_STATE_BASE) {
 		case SLURM_DIST_CYCLIC:
 			*dist      = "cyclic";
 			break;
@@ -143,8 +143,79 @@ void set_distribution(task_dist_states_t distribution,
 			*dist      = "block:fcyclic";
 			*lllp_dist = "cyclic";
 			break;
+		case SLURM_DIST_CYCLIC_CYCLIC_CYCLIC:
+			*dist      = "cyclic:cyclic:cyclic";
+			*lllp_dist = "cyclic:cyclic";
+			break;
+		case SLURM_DIST_CYCLIC_CYCLIC_BLOCK:
+			*dist      = "cyclic:cyclic:block";
+			*lllp_dist = "cyclic:block";
+			break;
+		case SLURM_DIST_CYCLIC_CYCLIC_CFULL:
+			*dist      = "cyclic:cyclic:fcyclic";
+			*lllp_dist = "cyclic:fcyclic";
+			break;
+		case SLURM_DIST_CYCLIC_BLOCK_CYCLIC:
+			*dist      = "cyclic:block:cyclic";
+			*lllp_dist = "block:cyclic";
+			break;
+		case SLURM_DIST_CYCLIC_BLOCK_BLOCK:
+			*dist      = "cyclic:block:block";
+			*lllp_dist = "block:block";
+			break;
+		case SLURM_DIST_CYCLIC_BLOCK_CFULL:
+			*dist      = "cyclic:cylic:cyclic";
+			*lllp_dist = "cyclic:cyclic";
+			break;
+		case SLURM_DIST_CYCLIC_CFULL_CYCLIC:
+			*dist      = "cyclic:cylic:cyclic";
+			*lllp_dist = "cyclic:cyclic";
+			break;
+		case SLURM_DIST_CYCLIC_CFULL_BLOCK:
+			*dist      = "cyclic:fcyclic:block";
+			*lllp_dist = "fcyclic:block";
+		case SLURM_DIST_CYCLIC_CFULL_CFULL:
+			*dist      = "cyclic:fcyclic:fcyclic";
+			*lllp_dist = "fcyclic:fcyclic";
+			break;
+		case SLURM_DIST_BLOCK_CYCLIC_CYCLIC:
+			*dist      = "block:cyclic:cyclic";
+			*lllp_dist = "cyclic:cyclic";
+			break;
+		case SLURM_DIST_BLOCK_CYCLIC_BLOCK:
+			*dist      = "block:cyclic:block";
+			*lllp_dist = "cyclic:block";
+			break;
+		case SLURM_DIST_BLOCK_CYCLIC_CFULL:
+			*dist      = "block:cyclic:fcyclic";
+			*lllp_dist = "cyclic:fcyclic";
+			break;
+		case SLURM_DIST_BLOCK_BLOCK_CYCLIC:
+			*dist      = "block:block:cyclic";
+			*lllp_dist = "block:cyclic";
+			break;
+		case SLURM_DIST_BLOCK_BLOCK_BLOCK:
+			*dist      = "block:block:block";
+			*lllp_dist = "block:block";
+			break;
+		case SLURM_DIST_BLOCK_BLOCK_CFULL:
+			*dist      = "block:block:fcyclic";
+			*lllp_dist = "block:fcyclic";
+			break;
+		case SLURM_DIST_BLOCK_CFULL_CYCLIC:
+			*dist      = "block:fcyclic:cyclic";
+			*lllp_dist = "fcyclic:cyclic";
+			break;
+		case SLURM_DIST_BLOCK_CFULL_BLOCK:
+			*dist      = "block:fcyclic:block";
+			*lllp_dist = "fcyclic:block";
+			break;
+		case SLURM_DIST_BLOCK_CFULL_CFULL:
+			*dist      = "block:fcyclic:fcyclic";
+			*lllp_dist = "fcyclic:fcyclic";
+			break;
 		default:
-			error("unknown dist, type %d", distribution);
+			error("unknown dist, type 0x%X", distribution);
 			break;
 		}
 	}
@@ -156,57 +227,238 @@ void set_distribution(task_dist_states_t distribution,
  */
 task_dist_states_t verify_dist_type(const char *arg, uint32_t *plane_size)
 {
-	int len = strlen(arg);
+	int len;
 	char *dist_str = NULL;
 	task_dist_states_t result = SLURM_DIST_UNKNOWN;
-	bool lllp_dist = false, plane_dist = false;
+	bool pack_nodes = false, no_pack_nodes = false;
+	char *tok, *tmp, *save_ptr = NULL;
+	int i, j;
+	char *cur_ptr;
+	char buf[3][25];
+	buf[0][0] = '\0';
+	buf[1][0] = '\0';
+	buf[2][0] = '\0';
+	char outstr[100];
+	outstr[0]='\0';
 
-	dist_str = strchr(arg,':');
-	if (dist_str != NULL) {
-		/* -m cyclic|block:cyclic|block */
-		lllp_dist = true;
-	} else {
-		/* -m plane=<plane_size> */
-		dist_str = strchr(arg,'=');
+	if (!arg)
+		return result;
+
+	tmp = xstrdup(arg);
+	tok = strtok_r(tmp, ",", &save_ptr);
+	while (tok) {
+		bool lllp_dist = false, plane_dist = false;
+		len = strlen(tok);
+		dist_str = strchr(tok, ':');
 		if (dist_str != NULL) {
-			*plane_size=atoi(dist_str+1);
-			len = dist_str-arg;
-			plane_dist = true;
+			/* -m cyclic|block:cyclic|block */
+			lllp_dist = true;
+		} else {
+			/* -m plane=<plane_size> */
+			dist_str = strchr(tok, '=');
+			if (dist_str != NULL) {
+				*plane_size = atoi(dist_str + 1);
+				len = dist_str - tok;
+				plane_dist = true;
+			}
 		}
-	}
 
-	if (lllp_dist) {
-		if (strcasecmp(arg, "cyclic:cyclic") == 0) {
-			result = SLURM_DIST_CYCLIC_CYCLIC;
-		} else if (strcasecmp(arg, "cyclic:block") == 0) {
-			result = SLURM_DIST_CYCLIC_BLOCK;
-		} else if (strcasecmp(arg, "block:block") == 0) {
-			result = SLURM_DIST_BLOCK_BLOCK;
-		} else if (strcasecmp(arg, "block:cyclic") == 0) {
-			result = SLURM_DIST_BLOCK_CYCLIC;
-		} else if (strcasecmp(arg, "block:fcyclic") == 0) {
-			result = SLURM_DIST_BLOCK_CFULL;
-		} else if (strcasecmp(arg, "cyclic:fcyclic") == 0) {
-			result = SLURM_DIST_CYCLIC_CFULL;
+		cur_ptr = tok;
+	 	for (j = 0; j < 3; j++) {
+			for (i = 0; i < 24; i++) {
+				if (*cur_ptr == '\0' || *cur_ptr ==':')
+					break;
+				buf[j][i] = *cur_ptr++;
+			}
+			buf[j][i] = '\0';
+			if (*cur_ptr == '\0')
+				break;
+			buf[j][i] = '\0';
+			cur_ptr++;
+		}
+		if (strcmp(buf[0], "*") == 0)
+			/* default node distribution is block */
+			strcpy(buf[0], "block");
+		strcat(outstr, buf[0]);
+		if (strcmp(buf[1], "\0") != 0) {
+			strcat(outstr, ":");
+			if (!strcmp(buf[1], "*") || !strcmp(buf[1], "\0")) {
+				/* default socket distribution is cyclic */
+				strcpy(buf[1], "cyclic");
+			}
+			strcat(outstr, buf[1]);
 		}
-	} else if (plane_dist) {
-		if (strncasecmp(arg, "plane", len) == 0) {
-			result = SLURM_DIST_PLANE;
+		if (strcmp(buf[2], "\0") != 0) {
+			strcat(outstr, ":");
+			if (!strcmp(buf[2], "*") || !strcmp(buf[2], "\0")) {
+				/* default core dist is inherited socket dist */
+				strcpy(buf[2], buf[1]);
+			}
+			strcat(outstr, buf[2]);
 		}
-	} else {
-		if (strncasecmp(arg, "cyclic", len) == 0) {
-			result = SLURM_DIST_CYCLIC;
-		} else if (strncasecmp(arg, "block", len) == 0) {
-			result = SLURM_DIST_BLOCK;
-		} else if ((strncasecmp(arg, "arbitrary", len) == 0) ||
-			   (strncasecmp(arg, "hostfile", len) == 0)) {
-			result = SLURM_DIST_ARBITRARY;
+
+		if (lllp_dist) {
+			if (strcasecmp(outstr, "cyclic:cyclic") == 0) {
+				result = SLURM_DIST_CYCLIC_CYCLIC;
+			} else if (strcasecmp(outstr, "cyclic:block") == 0) {
+				result = SLURM_DIST_CYCLIC_BLOCK;
+			} else if (strcasecmp(outstr, "block:block") == 0) {
+				result = SLURM_DIST_BLOCK_BLOCK;
+			} else if (strcasecmp(outstr, "block:cyclic") == 0) {
+				result = SLURM_DIST_BLOCK_CYCLIC;
+			} else if (strcasecmp(outstr, "block:fcyclic") == 0) {
+				result = SLURM_DIST_BLOCK_CFULL;
+			} else if (strcasecmp(outstr, "cyclic:fcyclic") == 0) {
+				result = SLURM_DIST_CYCLIC_CFULL;
+			} else if (strcasecmp(outstr, "cyclic:cyclic:cyclic")
+				   == 0) {
+				result = SLURM_DIST_CYCLIC_CYCLIC_CYCLIC;
+			} else if (strcasecmp(outstr, "cyclic:cyclic:block")
+				   == 0) {
+				result = SLURM_DIST_CYCLIC_CYCLIC_BLOCK;
+			} else if (strcasecmp(outstr, "cyclic:cyclic:fcyclic")
+				== 0) {
+				result = SLURM_DIST_CYCLIC_CYCLIC_CFULL;
+			} else if (strcasecmp(outstr, "cyclic:block:cyclic")
+				== 0) {
+				result = SLURM_DIST_CYCLIC_BLOCK_CYCLIC;
+			} else if (strcasecmp(outstr, "cyclic:block:block")
+				== 0) {
+				result = SLURM_DIST_CYCLIC_BLOCK_BLOCK;
+			} else if (strcasecmp(outstr, "cyclic:block:fcyclic")
+				== 0) {
+				result = SLURM_DIST_CYCLIC_BLOCK_CFULL;
+			} else if (strcasecmp(outstr, "cyclic:fcyclic:cyclic")
+				== 0) {
+				result = SLURM_DIST_CYCLIC_CFULL_CYCLIC;
+			} else if (strcasecmp(outstr, "cyclic:fcyclic:block")
+				== 0) {
+				result = SLURM_DIST_CYCLIC_CFULL_BLOCK;
+			} else if (strcasecmp(outstr, "cyclic:fcyclic:fcyclic")
+				== 0) {
+				result = SLURM_DIST_CYCLIC_CFULL_CFULL;
+			} else if (strcasecmp(outstr, "block:cyclic:cyclic")
+				== 0) {
+				result = SLURM_DIST_BLOCK_CYCLIC_CYCLIC;
+			} else if (strcasecmp(outstr, "block:cyclic:block")
+				== 0) {
+				result = SLURM_DIST_BLOCK_CYCLIC_BLOCK;
+			} else if (strcasecmp(outstr, "block:cyclic:fcyclic")
+				== 0) {
+				result = SLURM_DIST_BLOCK_CYCLIC_CFULL;
+			} else if (strcasecmp(outstr, "block:block:cyclic")
+				== 0) {
+				result = SLURM_DIST_BLOCK_BLOCK_CYCLIC;
+			} else if (strcasecmp(outstr, "block:block:block")
+				== 0) {
+				result = SLURM_DIST_BLOCK_BLOCK_BLOCK;
+			} else if (strcasecmp(outstr, "block:block:fcyclic")
+				== 0) {
+				result = SLURM_DIST_BLOCK_BLOCK_CFULL;
+			} else if (strcasecmp(outstr, "block:fcyclic:cyclic")
+				== 0) {
+				result = SLURM_DIST_BLOCK_CFULL_CYCLIC;
+			} else if (strcasecmp(outstr, "block:fcyclic:block")
+				== 0) {
+				result = SLURM_DIST_BLOCK_CFULL_BLOCK;
+			} else if (strcasecmp(outstr, "block:fcyclic:fcyclic")
+				== 0) {
+				result = SLURM_DIST_BLOCK_CFULL_CFULL;
+			}
+		} else if (plane_dist) {
+			if (strncasecmp(tok, "plane", len) == 0) {
+				result = SLURM_DIST_PLANE;
+			}
+		} else {
+			if (strncasecmp(tok, "cyclic", len) == 0) {
+				result = SLURM_DIST_CYCLIC;
+			} else if (strncasecmp(tok, "block", len) == 0) {
+				result = SLURM_DIST_BLOCK;
+			} else if ((strncasecmp(tok, "arbitrary", len) == 0) ||
+				   (strncasecmp(tok, "hostfile", len) == 0)) {
+				result = SLURM_DIST_ARBITRARY;
+			} else if (strncasecmp(tok, "nopack", len) == 0) {
+				no_pack_nodes = true;
+			} else if (strncasecmp(tok, "pack", len) == 0) {
+				pack_nodes = true;
+			}
 		}
+		tok = strtok_r(NULL, ",", &save_ptr);
 	}
+	xfree(tmp);
+
+	if (pack_nodes)
+		result |= SLURM_DIST_PACK_NODES;
+	else if (no_pack_nodes)
+		result |= SLURM_DIST_NO_PACK_NODES;
 
 	return result;
 }
 
+extern char *format_task_dist_states(task_dist_states_t t)
+{
+	switch (t & SLURM_DIST_STATE_BASE) {
+	case SLURM_DIST_BLOCK:
+		return "block";
+	case SLURM_DIST_CYCLIC:
+		return "cyclic";
+	case SLURM_DIST_PLANE:
+		return "plane";
+	case SLURM_DIST_ARBITRARY:
+		return "arbitrary";
+	case SLURM_DIST_CYCLIC_CYCLIC:
+		return "cyclic:cyclic";
+	case SLURM_DIST_CYCLIC_BLOCK:
+		return "cyclic:block";
+	case SLURM_DIST_CYCLIC_CFULL:
+		return "cyclic:fcyclic";
+	case SLURM_DIST_BLOCK_CYCLIC:
+		return "block:cyclic";
+	case SLURM_DIST_BLOCK_BLOCK:
+		return "block:block";
+	case SLURM_DIST_BLOCK_CFULL:
+		return "block:fcyclic";
+	case SLURM_DIST_CYCLIC_CYCLIC_CYCLIC:
+		return "cyclic:cyclic:cyclic";
+	case SLURM_DIST_CYCLIC_CYCLIC_BLOCK:
+		return "cyclic:cyclic:block";
+	case SLURM_DIST_CYCLIC_CYCLIC_CFULL:
+		return "cyclic:cyclic:fcyclic";
+	case SLURM_DIST_CYCLIC_BLOCK_CYCLIC:
+		return "cyclic:block:cyclic";
+	case SLURM_DIST_CYCLIC_BLOCK_BLOCK:
+		return "cyclic:block:block";
+	case SLURM_DIST_CYCLIC_BLOCK_CFULL:
+		return "cyclic:block:fcyclic";
+	case SLURM_DIST_CYCLIC_CFULL_CYCLIC:
+		return "cyclic:fcyclic:cyclic" ;
+	case SLURM_DIST_CYCLIC_CFULL_BLOCK:
+		return "cyclic:fcyclic:block";
+	case SLURM_DIST_CYCLIC_CFULL_CFULL:
+		return "cyclic:fcyclic:fcyclic";
+	case SLURM_DIST_BLOCK_CYCLIC_CYCLIC:
+		return "block:cyclic:cyclic";
+	case SLURM_DIST_BLOCK_CYCLIC_BLOCK:
+		return "block:cyclic:block";
+	case SLURM_DIST_BLOCK_CYCLIC_CFULL:
+		return "block:cyclic:fcyclic";
+	case SLURM_DIST_BLOCK_BLOCK_CYCLIC:
+		return "block:block:cyclic";
+	case SLURM_DIST_BLOCK_BLOCK_BLOCK:
+		return "block:block:block";
+	case SLURM_DIST_BLOCK_BLOCK_CFULL:
+		return "block:block:fcyclic";
+	case SLURM_DIST_BLOCK_CFULL_CYCLIC:
+		return "block:fcyclic:cyclic";
+	case SLURM_DIST_BLOCK_CFULL_BLOCK:
+		return "block:fcyclic:block";
+	case SLURM_DIST_BLOCK_CFULL_CFULL:
+		return "block:fcyclic:fcyclic";
+	default:
+		return "unknown";
+	}
+}
+
 static uint16_t _get_conn_type(char *arg, bool bgp)
 {
 	uint16_t len = strlen(arg);
@@ -480,7 +732,7 @@ bool verify_node_list(char **node_list_pptr, enum task_dist_states dist,
 	/* If we are using Arbitrary grab count out of the hostfile
 	   using them exactly the way we read it in since we are
 	   saying, lay it out this way! */
-	if (dist == SLURM_DIST_ARBITRARY)
+	if ((dist & SLURM_DIST_STATE_BASE) == SLURM_DIST_ARBITRARY)
 		nodelist = slurm_read_hostfile(*node_list_pptr, task_count);
 	else
 		nodelist = slurm_read_hostfile(*node_list_pptr, NO_VAL);
@@ -738,7 +990,9 @@ uint16_t parse_mail_type(const char *arg)
 			rc |= MAIL_JOB_REQUEUE;
 		else if (strcasecmp(tok, "ALL") == 0)
 			rc |= MAIL_JOB_BEGIN |  MAIL_JOB_END |  MAIL_JOB_FAIL |
-			      MAIL_JOB_REQUEUE;
+			      MAIL_JOB_REQUEUE | MAIL_JOB_STAGE_OUT;
+		else if (!strcasecmp(tok, "STAGE_OUT"))
+			rc |= MAIL_JOB_STAGE_OUT;
 		else if (strcasecmp(tok, "TIME_LIMIT") == 0)
 			rc |= MAIL_JOB_TIME100;
 		else if (strcasecmp(tok, "TIME_LIMIT_90") == 0)
@@ -782,6 +1036,11 @@ char *print_mail_type(const uint16_t type)
 			strcat(buf, ",");
 		strcat(buf, "REQUEUE");
 	}
+	if (type & MAIL_JOB_STAGE_OUT) {
+		if (buf[0])
+			strcat(buf, ",");
+		strcat(buf, "STAGE_OUT");
+	}
 	if (type & MAIL_JOB_TIME50) {
 		if (buf[0])
 			strcat(buf, ",");
@@ -846,8 +1105,17 @@ _create_path_list(void)
 	return l;
 }
 
-char *
-search_path(char *cwd, char *cmd, bool check_current_dir, int access_mode)
+/*
+ * search PATH to confirm the location and access mode of the given command
+ * IN cwd - current working directory
+ * IN cmd - command to execute
+ * IN check_current_dir - if true, search cwd for the command
+ * IN access_mode - required access rights of cmd
+ * IN test_exec - if false, do not confirm access mode of cmd if full path
+ * RET full path of cmd or NULL if not found
+ */
+char *search_path(char *cwd, char *cmd, bool check_current_dir, int access_mode,
+		  bool test_exec)
 {
 	List         l        = NULL;
 	ListIterator i        = NULL;
@@ -855,16 +1123,22 @@ search_path(char *cwd, char *cmd, bool check_current_dir, int access_mode)
 
 #if defined HAVE_BG && !defined HAVE_BG_L_P
 	/* BGQ's runjob command required a fully qualified path */
-	if ( (cmd[0] == '.' || cmd[0] == '/') &&
-	     (access(cmd, access_mode) == 0 ) ) {
+	if (((cmd[0] == '.') || (cmd[0] == '/')) &&
+	    (access(cmd, access_mode) == 0)) {
 		if (cmd[0] == '.')
 			xstrfmtcat(fullpath, "%s/", cwd);
 		xstrcat(fullpath, cmd);
 		goto done;
 	}
 #else
-	if ((cmd[0] == '.') || (cmd[0] == '/'))
-		return NULL;
+	if ((cmd[0] == '.') || (cmd[0] == '/')) {
+		if (test_exec && (access(cmd, access_mode) == 0)) {
+			if (cmd[0] == '.')
+				xstrfmtcat(fullpath, "%s/", cwd);
+			xstrcat(fullpath, cmd);
+		}
+		goto done;
+	}
 #endif
 
 	l = _create_path_list();
@@ -882,11 +1156,9 @@ search_path(char *cwd, char *cmd, bool check_current_dir, int access_mode)
 			goto done;
 
 		xfree(fullpath);
-		fullpath = NULL;
 	}
-  done:
-	if (l)
-		list_destroy(l);
+done:
+	FREE_NULL_LIST(l);
 	return fullpath;
 }
 
@@ -1076,6 +1348,35 @@ extern int parse_uint16(char *aval, uint16_t *ival)
 	return 0;
 }
 
+/*
+ *  Get a decimal integer from arg.
+ *
+ *  Returns the integer on success, exits program on failure.
+ *
+ */
+extern int parse_int(const char *name, const char *val, bool positive)
+{
+	char *p = NULL;
+	long result = 0;
+
+	if (val)
+		result = strtol(val, &p, 10);
+
+	if ((p == NULL) || (p[0] != '\0') || (result < 0L)
+	||  (positive && (result <= 0L))) {
+		error ("Invalid numeric value \"%s\" for %s.", val, name);
+		exit(1);
+	} else if (result > INT_MAX) {
+		error ("Numeric argument (%ld) too big for %s.", result, name);
+		exit(1);
+	} else if (result < INT_MIN) {
+		error ("Numeric argument %ld too small for %s.", result, name);
+		exit(1);
+	}
+
+	return (int) result;
+}
+
 /* print_db_notok() - Print an error message about slurmdbd
  *                    is unreachable or wrong cluster name.
  * IN  cname - char * cluster name
@@ -1355,13 +1656,13 @@ parse_resv_flags(const char *flagstr, const char *msg)
 				outflags |= RESERVE_FLAG_NO_WEEKLY;
 			else
 				outflags |= RESERVE_FLAG_WEEKLY;
-		} else if (strncasecmp(curr, "License_Only", MAX(taglen,1))
-			   == 0) {
+		} else if (!strncasecmp(curr, "Any_Nodes", MAX(taglen,1)) ||
+			   !strncasecmp(curr, "License_Only", MAX(taglen,1))) {
 			curr += taglen;
 			if (flip)
-				outflags |= RESERVE_FLAG_NO_LIC_ONLY;
+				outflags |= RESERVE_FLAG_NO_ANY_NODES;
 			else
-				outflags |= RESERVE_FLAG_LIC_ONLY;
+				outflags |= RESERVE_FLAG_ANY_NODES;
 		} else if (strncasecmp(curr, "Static_Alloc", MAX(taglen,1))
 			   == 0) {
 			curr += taglen;
@@ -1384,6 +1685,10 @@ parse_resv_flags(const char *flagstr, const char *msg)
 			   !flip) {
 			curr += taglen;
 			outflags |= RESERVE_FLAG_TIME_FLOAT;
+		} else if (!strncasecmp(curr, "Replace", MAX(taglen,1)) &&
+			   !flip) {
+			curr += taglen;
+			outflags |= RESERVE_FLAG_REPLACE;
 		} else {
 			error("Error parsing flags %s.  %s", flagstr, msg);
 			return 0xffffffff;
diff --git a/src/common/proc_args.h b/src/common/proc_args.h
index 2885f1941..f1b41d7f4 100644
--- a/src/common/proc_args.h
+++ b/src/common/proc_args.h
@@ -50,20 +50,8 @@
 #include "src/common/macros.h" /* true and false */
 #include "src/common/env.h"
 
-
-#define format_task_dist_states(t)			\
-(t == SLURM_DIST_BLOCK) ? "block" :			\
-(t == SLURM_DIST_CYCLIC) ? "cyclic" :			\
-(t == SLURM_DIST_PLANE) ? "plane" :			\
-(t == SLURM_DIST_CYCLIC_CYCLIC) ? "cyclic:cyclic" :	\
-(t == SLURM_DIST_CYCLIC_BLOCK) ? "cyclic:block" :	\
-(t == SLURM_DIST_BLOCK_CYCLIC) ? "block:cyclic" :	\
-(t == SLURM_DIST_BLOCK_BLOCK) ? "block:block" :		\
-(t == SLURM_DIST_BLOCK_CFULL) ? "block:fcyclic" :	\
-(t == SLURM_DIST_CYCLIC_CFULL) ? "cyclic:fcyclic" :	\
-(t == SLURM_DIST_ARBITRARY) ? "arbitrary" :		\
-"unknown"
-
+/* convert task state ID to equivalent string */
+extern char *format_task_dist_states(task_dist_states_t t);
 
 /* print this version of SLURM */
 void print_slurm_version(void);
@@ -133,9 +121,17 @@ uint16_t parse_mail_type(const char *arg);
 /* print the mail type */
 char *print_mail_type(const uint16_t type);
 
-/* search PATH to confirm the access of the given command */
-char *search_path(char *cwd, char *cmd, bool check_current_dir,
-		  int access_mode);
+/*
+ * search PATH to confirm the location and access mode of the given command
+ * IN cwd - current working directory
+ * IN cmd - command to execute
+ * IN check_current_dir - if true, search cwd for the command
+ * IN access_mode - required access rights of cmd
+ * IN test_exec - if false, do not confirm access mode of cmd if full path
+ * RET full path of cmd or NULL if not found
+ */
+char *search_path(char *cwd, char *cmd, bool check_current_dir, int access_mode,
+		  bool test_exec);
 
 /* helper function for printing options */
 char *print_commandline(const int script_argc, char **script_argv);
@@ -169,6 +165,15 @@ extern int	parse_uint16(char *aval, uint16_t *ival);
  */
 extern int	parse_uint32(char *aval, uint32_t *ival);
 
+/* Get a decimal integer from arg
+ * IN      name - command line name
+ * IN      val - command line argument value
+ * IN      positive - true if number needs to be greater than 0
+ * RET     Returns the integer on success, exits program on failure.
+ */
+extern int parse_int(const char *name, const char *val, bool positive);
+
+
 /* print_db_notok() - Print an error message about slurmdbd
  *                    is unreachable or wrong cluster name.
  * IN  cname - char * cluster name
diff --git a/src/common/read_config.c b/src/common/read_config.c
index 62d0f1e23..0f852d67d 100644
--- a/src/common/read_config.c
+++ b/src/common/read_config.c
@@ -162,6 +162,7 @@ static int _validate_and_set_defaults(slurm_ctl_conf_t *conf,
 static uint16_t *_parse_srun_ports(const char *);
 
 s_p_options_t slurm_conf_options[] = {
+	{"AccountingStorageTRES", S_P_STRING},
 	{"AccountingStorageEnforce", S_P_STRING},
 	{"AccountingStorageHost", S_P_STRING},
 	{"AccountingStorageBackupHost", S_P_STRING},
@@ -182,6 +183,8 @@ s_p_options_t slurm_conf_options[] = {
 	{"BackupAddr", S_P_STRING},
 	{"BackupController", S_P_STRING},
 	{"BatchStartTimeout", S_P_UINT16},
+	{"BurstBufferParameters", S_P_STRING},
+	{"BurstBufferType", S_P_STRING},
 	{"CacheGroups", S_P_UINT16},
 	{"CheckpointType", S_P_STRING},
 	{"ChosLoc", S_P_STRING},
@@ -191,6 +194,7 @@ s_p_options_t slurm_conf_options[] = {
 	{"ControlAddr", S_P_STRING},
 	{"ControlMachine", S_P_STRING},
 	{"CpuFreqDef", S_P_STRING},
+	{"CpuFreqGovernors", S_P_STRING},
 	{"CryptoType", S_P_STRING},
 	{"DebugFlags", S_P_STRING},
 	{"DefaultStorageHost", S_P_STRING},
@@ -202,7 +206,7 @@ s_p_options_t slurm_conf_options[] = {
 	{"DefMemPerCPU", S_P_UINT32},
 	{"DefMemPerNode", S_P_UINT32},
 	{"DisableRootJobs", S_P_BOOLEAN},
-	{"DynAllocPort", S_P_UINT16},
+	{"EioTimeout", S_P_UINT16},
 	{"EnforcePartLimits", S_P_BOOLEAN},
 	{"Epilog", S_P_STRING},
 	{"EpilogMsgTime", S_P_UINT32},
@@ -239,6 +243,7 @@ s_p_options_t slurm_conf_options[] = {
 	{"KeepAliveTime", S_P_UINT16},
 	{"KillOnBadExit", S_P_UINT16},
 	{"KillWait", S_P_UINT16},
+	{"LaunchParameters", S_P_STRING},
 	{"LaunchType", S_P_STRING},
 	{"Layouts", S_P_STRING},
 	{"Licenses", S_P_STRING},
@@ -253,12 +258,15 @@ s_p_options_t slurm_conf_options[] = {
 	{"MaxTasksPerNode", S_P_UINT16},
 	{"MemLimitEnforce", S_P_STRING},
 	{"MessageTimeout", S_P_UINT16},
-	{"MinJobAge", S_P_UINT16},
+	{"MinJobAge", S_P_UINT32},
 	{"MpiDefault", S_P_STRING},
 	{"MpiParams", S_P_STRING},
+	{"MsgAggregationParams", S_P_STRING},
 	{"OverTimeLimit", S_P_UINT16},
 	{"PluginDir", S_P_STRING},
 	{"PlugStackConfig", S_P_STRING},
+	{"PowerParameters", S_P_STRING},
+	{"PowerPlugin", S_P_STRING},
 	{"PreemptMode", S_P_STRING},
 	{"PreemptType", S_P_STRING},
 	{"PriorityDecayHalfLife", S_P_STRING},
@@ -274,10 +282,12 @@ s_p_options_t slurm_conf_options[] = {
 	{"PriorityWeightJobSize", S_P_UINT32},
 	{"PriorityWeightPartition", S_P_UINT32},
 	{"PriorityWeightQOS", S_P_UINT32},
+	{"PriorityWeightTRES", S_P_STRING},
 	{"PrivateData", S_P_STRING},
 	{"ProctrackType", S_P_STRING},
 	{"Prolog", S_P_STRING},
 	{"PrologSlurmctld", S_P_STRING},
+	{"PrologEpilogTimeout", S_P_UINT16},
 	{"PrologFlags", S_P_STRING},
 	{"PropagatePrioProcess", S_P_UINT16},
 	{"PropagateResourceLimitsExcept", S_P_STRING},
@@ -328,7 +338,7 @@ s_p_options_t slurm_conf_options[] = {
 	{"SuspendExcParts", S_P_STRING},
 	{"SuspendProgram", S_P_STRING},
 	{"SuspendRate", S_P_UINT16},
-	{"SuspendTime", S_P_LONG},
+	{"SuspendTime", S_P_STRING},
 	{"SuspendTimeout", S_P_UINT16},
 	{"SwitchType", S_P_STRING},
 	{"TaskEpilog", S_P_STRING},
@@ -336,6 +346,7 @@ s_p_options_t slurm_conf_options[] = {
 	{"TaskPlugin", S_P_STRING},
 	{"TaskPluginParam", S_P_STRING},
 	{"TmpFS", S_P_STRING},
+	{"TopologyParam", S_P_STRING},
 	{"TopologyPlugin", S_P_STRING},
 	{"TrackWCKey", S_P_BOOLEAN},
 	{"TreeWidth", S_P_UINT16},
@@ -1034,6 +1045,7 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 		{"AllowGroups", S_P_STRING},
 		{"AllowQos", S_P_STRING},
 		{"Alternate", S_P_STRING},
+		{"TRESBillingWeights", S_P_STRING},
 		{"DefMemPerCPU", S_P_UINT32},
 		{"DefMemPerNode", S_P_UINT32},
 		{"Default", S_P_BOOLEAN}, /* YES or NO */
@@ -1041,6 +1053,7 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 		{"DenyAccounts", S_P_STRING},
 		{"DenyQos", S_P_STRING},
 		{"DisableRootJobs", S_P_BOOLEAN}, /* YES or NO */
+		{"ExclusiveUser", S_P_BOOLEAN}, /* YES or NO */
 		{"GraceTime", S_P_UINT32},
 		{"Hidden", S_P_BOOLEAN}, /* YES or NO */
 		{"LLN", S_P_BOOLEAN}, /* YES or NO */
@@ -1053,6 +1066,7 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 		{"Nodes", S_P_STRING},
 		{"PreemptMode", S_P_STRING},
 		{"Priority", S_P_UINT16},
+		{"QOS", S_P_STRING},
 		{"RootOnly", S_P_BOOLEAN}, /* YES or NO */
 		{"ReqResv", S_P_BOOLEAN}, /* YES or NO */
 		{"SelectTypeParameters", S_P_STRING}, /* CR_Socket, CR_Core */
@@ -1081,7 +1095,8 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 		p->name = xstrdup(value);
 
 		if (!s_p_get_string(&p->allow_accounts, "AllowAccounts",tbl))
-			s_p_get_string(&p->allow_accounts, "AllowAccounts", dflt);
+			s_p_get_string(&p->allow_accounts,
+				       "AllowAccounts", dflt);
 		if (p->allow_accounts &&
 		    (strcasecmp(p->allow_accounts, "ALL") == 0))
 			xfree(p->allow_accounts);
@@ -1098,7 +1113,8 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 			xfree(p->allow_qos);
 
 		if (!s_p_get_string(&p->deny_accounts, "DenyAccounts", tbl))
-			s_p_get_string(&p->deny_accounts, "DenyAccounts", dflt);
+			s_p_get_string(&p->deny_accounts,
+				       "DenyAccounts", dflt);
 		if (p->allow_accounts && p->deny_accounts) {
 			error("Both AllowAccounts and DenyAccounts are "
 			      "defined, DenyAccounts will be ignored");
@@ -1111,7 +1127,8 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 			      "DenyQos will be ignored");
 		}
 
-		if (!s_p_get_string(&p->allow_alloc_nodes, "AllocNodes", tbl)) {
+		if (!s_p_get_string(&p->allow_alloc_nodes,
+				    "AllocNodes", tbl)) {
 			s_p_get_string(&p->allow_alloc_nodes, "AllocNodes",
 				       dflt);
 			if (p->allow_alloc_nodes &&
@@ -1122,6 +1139,12 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 		if (!s_p_get_string(&p->alternate, "Alternate", tbl))
 			s_p_get_string(&p->alternate, "Alternate", dflt);
 
+		if (!s_p_get_string(&p->billing_weights_str,
+				    "TRESBillingWeights", tbl) &&
+		    !s_p_get_string(&p->billing_weights_str,
+				    "TRESBillingWeights", dflt))
+			xfree(p->billing_weights_str);
+
 		if (!s_p_get_boolean(&p->default_flag, "Default", tbl)
 		    && !s_p_get_boolean(&p->default_flag, "Default", dflt))
 			p->default_flag = false;
@@ -1164,6 +1187,10 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 				     "DisableRootJobs", tbl))
 			p->disable_root_jobs = (uint16_t)NO_VAL;
 
+		if (!s_p_get_boolean((bool *)&p->exclusive_user,
+				     "ExclusiveUser", tbl))
+			p->exclusive_user = 0;
+
 		if (!s_p_get_boolean(&p->hidden_flag, "Hidden", tbl) &&
 		    !s_p_get_boolean(&p->hidden_flag, "Hidden", dflt))
 			p->hidden_flag = false;
@@ -1256,6 +1283,10 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 		    !s_p_get_uint16(&p->priority, "Priority", dflt))
 			p->priority = 1;
 
+		if (!s_p_get_string(&p->qos_char, "QOS", tbl)
+		    && !s_p_get_string(&p->qos_char, "QOS", dflt))
+			p->qos_char = NULL;
+
 		if (s_p_get_string(&tmp, "SelectTypeParameters", tbl)) {
 			if (strncasecmp(tmp, "CR_Socket", 9) == 0)
 				p->cr_type = CR_SOCKET;
@@ -1277,8 +1308,6 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 		    s_p_get_string(&tmp, "Shared", dflt)) {
 			if (strcasecmp(tmp, "NO") == 0)
 				p->max_share = 1;
-#ifndef HAVE_XCPU
-			/* Only "Shared=NO" is valid on XCPU systems */
 			else if (strcasecmp(tmp, "EXCLUSIVE") == 0)
 				p->max_share = 0;
 			else if (strncasecmp(tmp, "YES:", 4) == 0) {
@@ -1301,7 +1330,6 @@ static int _parse_partitionname(void **dest, slurm_parser_enum_t type,
 					p->max_share = i | SHARED_FORCE;
 			} else if (strcasecmp(tmp, "FORCE") == 0)
 				p->max_share = 4 | SHARED_FORCE;
-#endif
 			else {
 				error("Bad value \"%s\" for Shared", tmp);
 				_destroy_partitionname(p);
@@ -1352,9 +1380,11 @@ static void _destroy_partitionname(void *ptr)
 	xfree(p->allow_accounts);
 	xfree(p->allow_groups);
 	xfree(p->allow_qos);
+	xfree(p->qos_char);
 	xfree(p->deny_accounts);
 	xfree(p->deny_qos);
 	xfree(p->alternate);
+	xfree(p->billing_weights_str);
 	xfree(p->name);
 	xfree(p->nodes);
 	xfree(ptr);
@@ -2248,10 +2278,10 @@ free_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr, bool purge_node_hash)
 	xfree (ctl_conf_ptr->accounting_storage_host);
 	xfree (ctl_conf_ptr->accounting_storage_loc);
 	xfree (ctl_conf_ptr->accounting_storage_pass);
+	xfree (ctl_conf_ptr->accounting_storage_tres);
 	xfree (ctl_conf_ptr->accounting_storage_type);
 	xfree (ctl_conf_ptr->accounting_storage_user);
-	if (ctl_conf_ptr->acct_gather_conf)
-		list_destroy((List)ctl_conf_ptr->acct_gather_conf);
+	FREE_NULL_LIST(ctl_conf_ptr->acct_gather_conf);
 	xfree (ctl_conf_ptr->acct_gather_energy_type);
 	xfree (ctl_conf_ptr->acct_gather_profile_type);
 	xfree (ctl_conf_ptr->acct_gather_infiniband_type);
@@ -2260,6 +2290,7 @@ free_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr, bool purge_node_hash)
 	xfree (ctl_conf_ptr->authtype);
 	xfree (ctl_conf_ptr->backup_addr);
 	xfree (ctl_conf_ptr->backup_controller);
+	xfree (ctl_conf_ptr->bb_type);
 	xfree (ctl_conf_ptr->checkpoint_type);
 	xfree (ctl_conf_ptr->chos_loc);
 	xfree (ctl_conf_ptr->cluster_name);
@@ -2269,8 +2300,7 @@ free_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr, bool purge_node_hash)
 	xfree (ctl_conf_ptr->crypto_type);
 	xfree (ctl_conf_ptr->epilog);
 	xfree (ctl_conf_ptr->epilog_slurmctld);
-	if (ctl_conf_ptr->ext_sensors_conf)
-		list_destroy((List)ctl_conf_ptr->ext_sensors_conf);
+	FREE_NULL_LIST(ctl_conf_ptr->ext_sensors_conf);
 	xfree (ctl_conf_ptr->ext_sensors_type);
 	xfree (ctl_conf_ptr->gres_plugins);
 	xfree (ctl_conf_ptr->health_check_program);
@@ -2287,6 +2317,7 @@ free_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr, bool purge_node_hash)
 	xfree (ctl_conf_ptr->job_credential_private_key);
 	xfree (ctl_conf_ptr->job_credential_public_certificate);
 	xfree (ctl_conf_ptr->job_submit_plugins);
+	xfree (ctl_conf_ptr->launch_params);
 	xfree (ctl_conf_ptr->launch_type);
 	xfree (ctl_conf_ptr->layouts);
 	xfree (ctl_conf_ptr->licenses);
@@ -2294,12 +2325,16 @@ free_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr, bool purge_node_hash)
 	xfree (ctl_conf_ptr->mail_prog);
 	xfree (ctl_conf_ptr->mpi_default);
 	xfree (ctl_conf_ptr->mpi_params);
+	xfree (ctl_conf_ptr->msg_aggr_params);
 	xfree (ctl_conf_ptr->node_prefix);
 	xfree (ctl_conf_ptr->plugindir);
 	xfree (ctl_conf_ptr->plugstack);
+	xfree (ctl_conf_ptr->power_parameters);
+	xfree (ctl_conf_ptr->power_plugin);
 	xfree (ctl_conf_ptr->preempt_type);
 	xfree (ctl_conf_ptr->priority_params);
 	xfree (ctl_conf_ptr->priority_type);
+	xfree (ctl_conf_ptr->priority_weight_tres);
 	xfree (ctl_conf_ptr->proctrack_type);
 	xfree (ctl_conf_ptr->prolog);
 	xfree (ctl_conf_ptr->prolog_slurmctld);
@@ -2317,8 +2352,7 @@ free_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr, bool purge_node_hash)
 	xfree (ctl_conf_ptr->sched_params);
 	xfree (ctl_conf_ptr->schedtype);
 	xfree (ctl_conf_ptr->select_type);
-	if (ctl_conf_ptr->select_conf_key_pairs)
-		list_destroy((List)ctl_conf_ptr->select_conf_key_pairs);
+	FREE_NULL_LIST(ctl_conf_ptr->select_conf_key_pairs);
 	xfree (ctl_conf_ptr->slurm_conf);
 	xfree (ctl_conf_ptr->slurm_user_name);
 	xfree (ctl_conf_ptr->slurmctld_logfile);
@@ -2341,6 +2375,7 @@ free_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr, bool purge_node_hash)
 	xfree (ctl_conf_ptr->task_plugin);
 	xfree (ctl_conf_ptr->task_prolog);
 	xfree (ctl_conf_ptr->tmp_fs);
+	xfree (ctl_conf_ptr->topology_param);
 	xfree (ctl_conf_ptr->topology_plugin);
 	xfree (ctl_conf_ptr->unkillable_program);
 	xfree (ctl_conf_ptr->version);
@@ -2366,6 +2401,7 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	xfree (ctl_conf_ptr->accounting_storage_loc);
 	xfree (ctl_conf_ptr->accounting_storage_pass);
 	ctl_conf_ptr->accounting_storage_port             = 0;
+	xfree (ctl_conf_ptr->accounting_storage_tres);
 	xfree (ctl_conf_ptr->accounting_storage_type);
 	xfree (ctl_conf_ptr->accounting_storage_user);
 	xfree (ctl_conf_ptr->authinfo);
@@ -2373,6 +2409,7 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	xfree (ctl_conf_ptr->backup_addr);
 	xfree (ctl_conf_ptr->backup_controller);
 	ctl_conf_ptr->batch_start_timeout	= 0;
+	xfree (ctl_conf_ptr->bb_type);
 	xfree (ctl_conf_ptr->checkpoint_type);
 	xfree (ctl_conf_ptr->chos_loc);
 	xfree (ctl_conf_ptr->cluster_name);
@@ -2391,7 +2428,6 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	xfree (ctl_conf_ptr->acct_gather_filesystem_type);
 	ctl_conf_ptr->ext_sensors_freq		= 0;
 	xfree (ctl_conf_ptr->ext_sensors_type);
-	ctl_conf_ptr->dynalloc_port		= (uint16_t) NO_VAL;
 	ctl_conf_ptr->enforce_part_limits       = 0;
 	xfree (ctl_conf_ptr->epilog);
 	ctl_conf_ptr->epilog_msg_time		= (uint32_t) NO_VAL;
@@ -2422,6 +2458,7 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	ctl_conf_ptr->keep_alive_time		= (uint16_t) NO_VAL;
 	ctl_conf_ptr->kill_on_bad_exit		= 0;
 	ctl_conf_ptr->kill_wait			= (uint16_t) NO_VAL;
+	xfree (ctl_conf_ptr->launch_params);
 	xfree (ctl_conf_ptr->launch_type);
 	xfree (ctl_conf_ptr->layouts);
 	xfree (ctl_conf_ptr->licenses);
@@ -2432,19 +2469,23 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	ctl_conf_ptr->max_mem_per_cpu           = 0;
 	ctl_conf_ptr->max_step_cnt		= (uint32_t) NO_VAL;
 	ctl_conf_ptr->mem_limit_enforce         = true;
-	ctl_conf_ptr->min_job_age		= (uint16_t) NO_VAL;
+	ctl_conf_ptr->min_job_age = (uint32_t) NO_VAL;
 	xfree (ctl_conf_ptr->mpi_default);
 	xfree (ctl_conf_ptr->mpi_params);
+	xfree (ctl_conf_ptr->msg_aggr_params);
 	ctl_conf_ptr->msg_timeout		= (uint16_t) NO_VAL;
 	ctl_conf_ptr->next_job_id		= (uint32_t) NO_VAL;
 	xfree (ctl_conf_ptr->node_prefix);
 	ctl_conf_ptr->over_time_limit           = 0;
 	xfree (ctl_conf_ptr->plugindir);
 	xfree (ctl_conf_ptr->plugstack);
+	xfree (ctl_conf_ptr->power_parameters);
+	xfree (ctl_conf_ptr->power_plugin);
 	ctl_conf_ptr->preempt_mode              = 0;
 	xfree (ctl_conf_ptr->preempt_type);
 	xfree (ctl_conf_ptr->priority_params);
 	xfree (ctl_conf_ptr->priority_type);
+	xfree (ctl_conf_ptr->priority_weight_tres);
 	ctl_conf_ptr->private_data              = 0;
 	xfree (ctl_conf_ptr->proctrack_type);
 	xfree (ctl_conf_ptr->prolog);
@@ -2507,6 +2548,7 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	ctl_conf_ptr->task_plugin_param		= 0;
 	xfree (ctl_conf_ptr->task_prolog);
 	xfree (ctl_conf_ptr->tmp_fs);
+	xfree (ctl_conf_ptr->topology_param);
 	xfree (ctl_conf_ptr->topology_plugin);
 	ctl_conf_ptr->tree_width       		= (uint16_t) NO_VAL;
 	xfree (ctl_conf_ptr->unkillable_program);
@@ -2515,6 +2557,7 @@ init_slurm_conf (slurm_ctl_conf_t *ctl_conf_ptr)
 	ctl_conf_ptr->use_spec_resources	= 0;
 	ctl_conf_ptr->vsize_factor              = 0;
 	ctl_conf_ptr->wait_time			= (uint16_t) NO_VAL;
+	ctl_conf_ptr->prolog_epilog_timeout = (uint16_t)NO_VAL;
 
 	_free_name_hashtbl();
 	_init_name_hashtbl();
@@ -2574,7 +2617,7 @@ static int _config_is_storage(s_p_hashtbl_t *hashtbl, char *name)
 	while ((pair = list_next(iter)) != NULL)
 		s_p_parse_pair(hashtbl, pair->name, pair->value);
 	list_iterator_destroy(iter);
-	list_destroy(config);
+	FREE_NULL_LIST(config);
 	rc = 0; /* done */
 
 end:
@@ -2967,6 +3010,8 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_string(&conf->authtype, "AuthType", hashtbl))
 		conf->authtype = xstrdup(DEFAULT_AUTH_TYPE);
 
+	s_p_get_string(&conf->bb_type, "BurstBufferType", hashtbl);
+
 	if (s_p_get_uint16(&uint16_tmp, "GroupUpdateTime", hashtbl)) {
 		if (uint16_tmp > GROUP_TIME_MASK) {
 			error("GroupUpdateTime exceeds limit of %u",
@@ -2994,7 +3039,7 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	s_p_get_string(&conf->chos_loc, "ChosLoc", hashtbl);
 
 	if (s_p_get_string(&temp_str, "CpuFreqDef", hashtbl)) {
-		if (cpu_freq_verify_param(temp_str, &conf->cpu_freq_def)) {
+		if (cpu_freq_verify_def(temp_str, &conf->cpu_freq_def)) {
 			error("Ignoring invalid CpuFreqDef: %s", temp_str);
 			conf->cpu_freq_def = CPU_FREQ_ONDEMAND;
 		}
@@ -3003,6 +3048,17 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		conf->cpu_freq_def = CPU_FREQ_ONDEMAND;
 	}
 
+	if (s_p_get_string(&temp_str, "CpuFreqGovernors", hashtbl)) {
+		if (cpu_freq_verify_govlist(temp_str, &conf->cpu_freq_govs)) {
+			error("Ignoring invalid CpuFreqGovernors: %s",
+				temp_str);
+			conf->cpu_freq_govs = CPU_FREQ_ONDEMAND;
+		}
+		xfree(temp_str);
+	} else {
+		conf->cpu_freq_govs = CPU_FREQ_ONDEMAND;
+	}
+
 	if (!s_p_get_string(&conf->crypto_type, "CryptoType", hashtbl))
 		 conf->crypto_type = xstrdup(DEFAULT_CRYPTO_TYPE);
 	if ((strcmp(conf->crypto_type, "crypto/openssl") == 0) &&
@@ -3034,14 +3090,6 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 			     "DisableRootJobs", hashtbl))
 		conf->disable_root_jobs = DEFAULT_DISABLE_ROOT_JOBS;
 
-	if (s_p_get_uint16(&conf->dynalloc_port, "DynAllocPort", hashtbl)) {
-		if (conf->dynalloc_port == 0) {
-			error("DynAllocPort=0 is invalid");
-		}
-	} else {
-		conf->dynalloc_port = 0;
-	}
-
 	if (!s_p_get_boolean((bool *) &conf->enforce_part_limits,
 			     "EnforcePartLimits", hashtbl))
 		conf->enforce_part_limits = DEFAULT_ENFORCE_PART_LIMITS;
@@ -3203,6 +3251,8 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_uint16(&conf->kill_wait, "KillWait", hashtbl))
 		conf->kill_wait = DEFAULT_KILL_WAIT;
 
+	s_p_get_string(&conf->launch_params, "LaunchParameters", hashtbl);
+
 	if (!s_p_get_string(&conf->launch_type, "LaunchType", hashtbl))
 		conf->launch_type = xstrdup(DEFAULT_LAUNCH_TYPE);
 
@@ -3247,6 +3297,10 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 
 	if (!s_p_get_uint32(&conf->max_job_id, "MaxJobId", hashtbl))
 		conf->max_job_id = DEFAULT_MAX_JOB_ID;
+	if (conf->max_job_id > 0x7fffffff) {
+		error("MaxJobId can not exceed 0x7fffffff, resetting value");
+		conf->max_job_id = 0x7fffffff;
+	}
 
 	if (conf->first_job_id > conf->max_job_id) {
 		error("FirstJobId > MaxJobId");
@@ -3295,7 +3349,7 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		}
 	}
 
-	if (!s_p_get_uint16(&conf->min_job_age, "MinJobAge", hashtbl))
+	if (!s_p_get_uint32(&conf->min_job_age, "MinJobAge", hashtbl))
 		conf->min_job_age = DEFAULT_MIN_JOB_AGE;
 	else if (conf->min_job_age < 2) {
 		if (getuid() == 0)
@@ -3317,6 +3371,11 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	}
 #endif
 
+	if (!s_p_get_string(&conf->msg_aggr_params,
+			   "MsgAggregationParams", hashtbl))
+		conf->msg_aggr_params =
+			xstrdup(DEFAULT_MSG_AGGREGATION_PARAMS);
+
 	if (!s_p_get_boolean((bool *)&conf->track_wckey,
 			    "TrackWCKey", hashtbl))
 		conf->track_wckey = false;
@@ -3332,6 +3391,14 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 				xstrdup(DEFAULT_ACCOUNTING_STORAGE_TYPE);
 	}
 
+	if (!s_p_get_string(&conf->accounting_storage_tres,
+			    "AccountingStorageTRES", hashtbl))
+		conf->accounting_storage_tres =
+			xstrdup(DEFAULT_ACCOUNTING_TRES);
+	else
+		xstrfmtcat(conf->accounting_storage_tres,
+			   ",%s", DEFAULT_ACCOUNTING_TRES);
+
 	if (s_p_get_string(&temp_str, "AccountingStorageEnforce", hashtbl)) {
 		if (slurm_strcasestr(temp_str, "1")
 		    || slurm_strcasestr(temp_str, "associations"))
@@ -3482,6 +3549,10 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_string(&conf->plugstack, "PlugStackConfig", hashtbl))
 		conf->plugstack = xstrdup(default_plugstack);
 
+	s_p_get_string(&conf->power_parameters, "PowerParameters", hashtbl);
+	if (!s_p_get_string(&conf->power_plugin, "PowerPlugin", hashtbl))
+		conf->power_plugin = xstrdup(DEFAULT_POWER_PLUGIN);
+
 	if (s_p_get_string(&temp_str, "PreemptMode", hashtbl)) {
 		conf->preempt_mode = preempt_mode_num(temp_str);
 		if (conf->preempt_mode == (uint16_t) NO_VAL) {
@@ -3500,8 +3571,7 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		conf->preempt_type = xstrdup(DEFAULT_PREEMPT_TYPE);
 	if (strcmp(conf->preempt_type, "preempt/qos") == 0) {
 		int preempt_mode = conf->preempt_mode & (~PREEMPT_MODE_GANG);
-		if ((preempt_mode == PREEMPT_MODE_OFF) ||
-		    (preempt_mode == PREEMPT_MODE_SUSPEND)) {
+		if (preempt_mode == PREEMPT_MODE_OFF) {
 			error("PreemptType and PreemptMode values "
 			      "incompatible");
 			return SLURM_ERROR;
@@ -3567,13 +3637,14 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		if (slurm_strcasestr(temp_str, "CALCULATE_RUNNING"))
 			conf->priority_flags |= PRIORITY_FLAGS_CALCULATE_RUNNING;
 
-		if (slurm_strcasestr(temp_str, "TICKET_BASED"))
-			conf->priority_flags |= PRIORITY_FLAGS_TICKET_BASED;
-		else if (slurm_strcasestr(temp_str, "DEPTH_OBLIVIOUS"))
+		if (slurm_strcasestr(temp_str, "DEPTH_OBLIVIOUS"))
 			conf->priority_flags |= PRIORITY_FLAGS_DEPTH_OBLIVIOUS;
 		else if (slurm_strcasestr(temp_str, "FAIR_TREE"))
 			conf->priority_flags |= PRIORITY_FLAGS_FAIR_TREE;
 
+		if (slurm_strcasestr(temp_str, "MAX_TRES"))
+			conf->priority_flags |= PRIORITY_FLAGS_MAX_TRES;
+
 		xfree(temp_str);
 	}
 
@@ -3627,14 +3698,6 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 
 	if (!s_p_get_string(&conf->priority_type, "PriorityType", hashtbl))
 		conf->priority_type = xstrdup(DEFAULT_PRIORITY_TYPE);
-	if (!strcasecmp(conf->priority_type, "priority/multifactor2")) {
-		error("PriorityType=priority/multifactor2 is deprecated.  "
-		      "In the future use\nPriorityType=priority/multifactor\n"
-		      "PriortyFlags=Ticket_Based\nThis is what is loaded now.");
-		xfree(conf->priority_type);
-		conf->priority_type = xstrdup("priority/multifactor");
-		conf->priority_flags |= PRIORITY_FLAGS_TICKET_BASED;
-	}
 
 	if (!s_p_get_uint32(&conf->priority_weight_age,
 			    "PriorityWeightAge", hashtbl))
@@ -3651,6 +3714,9 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_uint32(&conf->priority_weight_qos,
 			    "PriorityWeightQOS", hashtbl))
 		conf->priority_weight_qos = 0;
+	if (!s_p_get_string(&conf->priority_weight_tres, "PriorityWeightTRES",
+			    hashtbl))
+		conf->priority_weight_tres = NULL;
 
 	/* Check for possible overflow of priority.
 	 * We also check when doing the computation for each job. */
@@ -3659,6 +3725,7 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 		(uint64_t) conf->priority_weight_js   +
 		(uint64_t) conf->priority_weight_part +
 		(uint64_t) conf->priority_weight_qos;
+	/* TODO include TRES weights */
 	if (tot_prio_weight > 0xffffffff) {
 		error("PriorityWeight values too high, job priority value may "
 		      "overflow");
@@ -4025,7 +4092,12 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	s_p_get_string(&conf->suspend_program, "SuspendProgram", hashtbl);
 	if (!s_p_get_uint16(&conf->suspend_rate, "SuspendRate", hashtbl))
 		conf->suspend_rate = DEFAULT_SUSPEND_RATE;
-	if (s_p_get_long(&long_suspend_time, "SuspendTime", hashtbl)) {
+	if (s_p_get_string(&temp_str, "SuspendTime", hashtbl)) {
+		if (!strcasecmp(temp_str, "NONE"))
+			long_suspend_time = -1;
+		else
+			long_suspend_time = atoi(temp_str);
+		xfree(temp_str);
 		if (long_suspend_time < -1) {
 			error("SuspendTime value (%ld) is less than -1",
 			      long_suspend_time);
@@ -4036,8 +4108,9 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 			return SLURM_ERROR;
 		} else
 			conf->suspend_time = long_suspend_time + 1;
-	} else
+	} else {
 		conf->suspend_time = 0;
+	}
 	if (!s_p_get_uint16(&conf->suspend_timeout, "SuspendTimeout", hashtbl))
 		conf->suspend_timeout = DEFAULT_SUSPEND_TIMEOUT;
 
@@ -4125,6 +4198,14 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 					set_auto = true;
 					conf->task_plugin_param |=
 						CPU_AUTO_BIND_TO_THREADS;
+				} else if (strcasecmp(val_ptr, "cores") == 0) {
+					set_auto = true;
+					conf->task_plugin_param |=
+						CPU_AUTO_BIND_TO_CORES;
+				} else if (strcasecmp(val_ptr, "sockets") == 0) {
+					set_auto = true;
+					conf->task_plugin_param |=
+						CPU_AUTO_BIND_TO_SOCKETS;
 				} else {
 					error("Bad TaskPluginParam autobind "
 							"value: %s",val_ptr);
@@ -4148,6 +4229,8 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_uint16(&conf->wait_time, "WaitTime", hashtbl))
 		conf->wait_time = DEFAULT_WAIT_TIME;
 
+	s_p_get_string(&conf->topology_param, "TopologyParam", hashtbl);
+
 	if (!s_p_get_string(&conf->topology_plugin, "TopologyPlugin", hashtbl))
 		conf->topology_plugin = xstrdup(DEFAULT_TOPOLOGY_PLUGIN);
 #ifdef HAVE_BG
@@ -4205,11 +4288,25 @@ _validate_and_set_defaults(slurm_ctl_conf_t *conf, s_p_hashtbl_t *hashtbl)
 	if (!s_p_get_string(&conf->layouts, "Layouts", hashtbl))
 		conf->layouts = xstrdup("");
 
+	/* srun eio network timeout with the slurmstepd
+	 */
+	if (!s_p_get_uint16(&conf->eio_timeout, "EioTimeout", hashtbl))
+		conf->eio_timeout = DEFAULT_EIO_SHUTDOWN_WAIT;
+
+	if (!s_p_get_uint16(&conf->prolog_epilog_timeout,
+			    "PrologEpilogTimeout",
+			    hashtbl)) {
+		/* The default value is wait forever
+		 */
+		conf->prolog_epilog_timeout = (uint16_t)NO_VAL;
+	}
+
 	xfree(default_storage_type);
 	xfree(default_storage_loc);
 	xfree(default_storage_host);
 	xfree(default_storage_user);
 	xfree(default_storage_pass);
+
 	return SLURM_SUCCESS;
 }
 
@@ -4250,6 +4347,12 @@ extern char * prolog_flags2str(uint16_t prolog_flags)
 		xstrcat(rc, "Alloc");
 	}
 
+	if (prolog_flags & PROLOG_FLAG_CONTAIN) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "Contain");
+	}
+
 	if (prolog_flags & PROLOG_FLAG_NOHOLD) {
 		if (rc)
 			xstrcat(rc, ",");
@@ -4277,6 +4380,8 @@ extern uint16_t prolog_str2flags(char *prolog_flags)
 	while (tok) {
 		if (strcasecmp(tok, "Alloc") == 0)
 			rc |= PROLOG_FLAG_ALLOC;
+		else if (strcasecmp(tok, "Contain") == 0)
+			rc |= PROLOG_FLAG_CONTAIN;
 		else if (strcasecmp(tok, "NoHold") == 0)
 			rc |= PROLOG_FLAG_NOHOLD;
 		else {
@@ -4333,16 +4438,31 @@ extern char * debug_flags2str(uint64_t debug_flags)
 			xstrcat(rc, ",");
 		xstrcat(rc, "BGBlockWires");
 	}
+	if (debug_flags & DEBUG_FLAG_BURST_BUF) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "BurstBuffer");
+	}
 	if (debug_flags & DEBUG_FLAG_CPU_BIND) {
 		if (rc)
 			xstrcat(rc, ",");
 		xstrcat(rc, "CPU_Bind");
 	}
+	if (debug_flags & DEBUG_FLAG_DB_ARCHIVE) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "DB_Archive");
+	}
 	if (debug_flags & DEBUG_FLAG_DB_ASSOC) {
 		if (rc)
 			xstrcat(rc, ",");
 		xstrcat(rc, "DB_Assoc");
 	}
+	if (debug_flags & DEBUG_FLAG_DB_TRES) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "DB_TRES");
+	}
 	if (debug_flags & DEBUG_FLAG_DB_EVENT) {
 		if (rc)
 			xstrcat(rc, ",");
@@ -4473,6 +4593,11 @@ extern char * debug_flags2str(uint64_t debug_flags)
 			xstrcat(rc, ",");
 		xstrcat(rc, "SelectType");
 	}
+	if (debug_flags & DEBUG_FLAG_SICP) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "SICP");
+	}
 	if (debug_flags & DEBUG_FLAG_STEPS) {
 		if (rc)
 			xstrcat(rc, ",");
@@ -4504,6 +4629,16 @@ extern char * debug_flags2str(uint64_t debug_flags)
 			xstrcat(rc, ",");
 		xstrcat(rc, "Wiki");
 	}
+	if (debug_flags & DEBUG_FLAG_CPU_FREQ) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "CpuFrequency");
+	}
+	if (debug_flags & DEBUG_FLAG_POWER) {
+		if (rc)
+			xstrcat(rc, ",");
+		xstrcat(rc, "Power");
+	}
 	return rc;
 }
 
@@ -4539,10 +4674,16 @@ extern int debug_str2flags(char *debug_flags, uint64_t *flags_out)
 			(*flags_out) |= DEBUG_FLAG_BG_PICK;
 		else if (strcasecmp(tok, "BGBlockWires") == 0)
 			(*flags_out) |= DEBUG_FLAG_BG_WIRES;
+		else if (strcasecmp(tok, "BurstBuffer") == 0)
+			(*flags_out) |= DEBUG_FLAG_BURST_BUF;
 		else if (strcasecmp(tok, "CPU_Bind") == 0)
 			(*flags_out) |= DEBUG_FLAG_CPU_BIND;
+		else if (strcasecmp(tok, "DB_Archive") == 0)
+			(*flags_out) |= DEBUG_FLAG_DB_ARCHIVE;
 		else if (strcasecmp(tok, "DB_Assoc") == 0)
 			(*flags_out) |= DEBUG_FLAG_DB_ASSOC;
+		else if (strcasecmp(tok, "DB_TRES") == 0)
+			(*flags_out) |= DEBUG_FLAG_DB_TRES;
 		else if (strcasecmp(tok, "DB_Event") == 0)
 			(*flags_out) |= DEBUG_FLAG_DB_EVENT;
 		else if (strcasecmp(tok, "DB_Job") == 0)
@@ -4595,6 +4736,8 @@ extern int debug_str2flags(char *debug_flags, uint64_t *flags_out)
 			(*flags_out) |= DEBUG_FLAG_ROUTE;
 		else if (strcasecmp(tok, "SelectType") == 0)
 			(*flags_out) |= DEBUG_FLAG_SELECT_TYPE;
+		else if (strcasecmp(tok, "SICP") == 0)
+			(*flags_out) |= DEBUG_FLAG_SICP;
 		else if (strcasecmp(tok, "Steps") == 0)
 			(*flags_out) |= DEBUG_FLAG_STEPS;
 		else if (strcasecmp(tok, "Switch") == 0)
@@ -4609,6 +4752,10 @@ extern int debug_str2flags(char *debug_flags, uint64_t *flags_out)
 			(*flags_out) |= DEBUG_FLAG_TRIGGERS;
 		else if (strcasecmp(tok, "Wiki") == 0)
 			(*flags_out) |= DEBUG_FLAG_WIKI;
+		else if (strcasecmp(tok, "CpuFrequency") == 0)
+			(*flags_out) |= DEBUG_FLAG_CPU_FREQ;
+		else if (strcasecmp(tok, "Power") == 0)
+			(*flags_out) |= DEBUG_FLAG_POWER;
 		else {
 			error("Invalid DebugFlag: %s", tok);
 			(*flags_out) = 0;
@@ -4726,6 +4873,7 @@ extern int sort_key_pairs(void *v1, void *v2)
 
 	return 0;
 }
+
 /*
  * Return the pathname of the extra .conf file
  */
diff --git a/src/common/read_config.h b/src/common/read_config.h
index 2b16e2757..1b8afc183 100644
--- a/src/common/read_config.h
+++ b/src/common/read_config.h
@@ -60,7 +60,9 @@ extern char *default_plugstack;
 #define ACCOUNTING_ENFORCE_SAFE   0x0010
 #define ACCOUNTING_ENFORCE_NO_JOBS 0x0020
 #define ACCOUNTING_ENFORCE_NO_STEPS 0x0040
+#define ACCOUNTING_ENFORCE_TRES   0x0080
 
+#define DEFAULT_ACCOUNTING_TRES  "cpu,mem,energy,node"
 #define DEFAULT_ACCOUNTING_DB      "slurm_acct_db"
 #define DEFAULT_ACCOUNTING_ENFORCE  0
 #define DEFAULT_ACCOUNTING_STORAGE_TYPE "accounting_storage/none"
@@ -116,13 +118,17 @@ extern char *default_plugstack;
 #define DEFAULT_MAIL_PROG           "/bin/mail"
 #define DEFAULT_MAX_ARRAY_SIZE      1001
 #define DEFAULT_MAX_JOB_COUNT       10000
-#define DEFAULT_MAX_JOB_ID          0xffff0000
+#define DEFAULT_MAX_JOB_ID          0x7fff0000
 #define DEFAULT_MAX_STEP_COUNT      40000
 #define DEFAULT_MEM_PER_CPU         0
 #define DEFAULT_MAX_MEM_PER_CPU     0
 #define DEFAULT_MIN_JOB_AGE         300
 #define DEFAULT_MPI_DEFAULT         "none"
+#define DEFAULT_MSG_AGGREGATION_PARAMS "WindowMsgs=1,WindowTime=100"
+#define DEFAULT_MSG_AGGR_WINDOW_MSGS 1
+#define DEFAULT_MSG_AGGR_WINDOW_TIME 100
 #define DEFAULT_MSG_TIMEOUT         10
+#define DEFAULT_POWER_PLUGIN        ""
 #ifdef HAVE_AIX		/* AIX specific default configuration parameters */
 #  define DEFAULT_CHECKPOINT_TYPE   "checkpoint/aix"
 #  define DEFAULT_PROCTRACK_TYPE    "proctrack/aix"
@@ -245,12 +251,14 @@ typedef struct slurm_conf_partition {
 	char *alternate;	/* name of alternate partition */
 	uint16_t cr_type;	/* Custom CR values for partition (supported
 				 * by select/cons_res plugin only) */
+	char *billing_weights_str;/* per TRES billing weights */
 	uint32_t def_mem_per_cpu; /* default MB memory per allocated CPU */
 	bool default_flag;	/* Set if default partition */
 	uint32_t default_time;	/* minutes or INFINITE */
 	uint16_t disable_root_jobs; /* if set then user root can't run
 				     * jobs if NO_VAL use global
 				     * default */
+	uint16_t exclusive_user; /* 1 if node allocations by user */
 	uint32_t grace_time;	/* default grace time for partition */
 	bool     hidden_flag;	/* 1 if hidden by default */
 	bool     lln_flag;	/* 1 if nodes are selected in LLN order */
@@ -264,6 +272,7 @@ typedef struct slurm_conf_partition {
 	char 	*nodes;		/* comma delimited list names of nodes */
 	uint16_t preempt_mode;	/* See PREEMPT_MODE_* in slurm/slurm.h */
 	uint16_t priority;	/* scheduling priority for jobs */
+	char    *qos_char;      /* Name of QOS associated with partition */
 	bool     req_resv_flag; /* 1 if partition can only be used in a
 				 * reservation */
 	bool     root_only_flag;/* 1 if allocate/submit RPC can only be
diff --git a/src/common/slurm_accounting_storage.c b/src/common/slurm_accounting_storage.c
index 6b51cc367..c82dd7fc6 100644
--- a/src/common/slurm_accounting_storage.c
+++ b/src/common/slurm_accounting_storage.c
@@ -63,6 +63,8 @@
 #include "src/sacctmgr/sacctmgr.h"
 #include "src/slurmctld/slurmctld.h"
 
+int with_slurmdbd = 0;
+
 /*
  * Local data
  */
@@ -82,8 +84,10 @@ typedef struct slurm_acct_storage_ops {
 				    List acct_list);
 	int  (*add_clusters)       (void *db_conn, uint32_t uid,
 				    List cluster_list);
-	int  (*add_associations)   (void *db_conn, uint32_t uid,
-				    List association_list);
+	int  (*add_tres)           (void *db_conn, uint32_t uid,
+				    List tres_list_in);
+	int  (*add_assocs)         (void *db_conn, uint32_t uid,
+				    List assoc_list);
 	int  (*add_qos)            (void *db_conn, uint32_t uid,
 				    List qos_list);
 	int  (*add_res)            (void *db_conn, uint32_t uid,
@@ -101,9 +105,9 @@ typedef struct slurm_acct_storage_ops {
 	List (*modify_clusters)    (void *db_conn, uint32_t uid,
 				    slurmdb_cluster_cond_t *cluster_cond,
 				    slurmdb_cluster_rec_t *cluster);
-	List (*modify_associations)(void *db_conn, uint32_t uid,
-				    slurmdb_association_cond_t *assoc_cond,
-				    slurmdb_association_rec_t *assoc);
+	List (*modify_assocs)      (void *db_conn, uint32_t uid,
+				    slurmdb_assoc_cond_t *assoc_cond,
+				    slurmdb_assoc_rec_t *assoc);
 	List (*modify_job)         (void *db_conn, uint32_t uid,
 				    slurmdb_job_modify_cond_t *job_cond,
 				    slurmdb_job_rec_t *job);
@@ -127,8 +131,8 @@ typedef struct slurm_acct_storage_ops {
 				    slurmdb_account_cond_t *acct_cond);
 	List (*remove_clusters)    (void *db_conn, uint32_t uid,
 				    slurmdb_cluster_cond_t *cluster_cond);
-	List (*remove_associations)(void *db_conn, uint32_t uid,
-				    slurmdb_association_cond_t *assoc_cond);
+	List (*remove_assocs)      (void *db_conn, uint32_t uid,
+				    slurmdb_assoc_cond_t *assoc_cond);
 	List (*remove_qos)         (void *db_conn, uint32_t uid,
 				    slurmdb_qos_cond_t *qos_cond);
 	List (*remove_res)         (void *db_conn, uint32_t uid,
@@ -144,12 +148,14 @@ typedef struct slurm_acct_storage_ops {
 	List (*get_clusters)       (void *db_conn, uint32_t uid,
 				    slurmdb_cluster_cond_t *cluster_cond);
 	List (*get_config)         (void *db_conn, char *config_name);
-	List (*get_associations)   (void *db_conn, uint32_t uid,
-				    slurmdb_association_cond_t *assoc_cond);
+	List (*get_tres)           (void *db_conn, uint32_t uid,
+				    slurmdb_tres_cond_t *tres_cond);
+	List (*get_assocs)         (void *db_conn, uint32_t uid,
+				    slurmdb_assoc_cond_t *assoc_cond);
 	List (*get_events)         (void *db_conn, uint32_t uid,
 				    slurmdb_event_cond_t *event_cond);
 	List (*get_problems)       (void *db_conn, uint32_t uid,
-				    slurmdb_association_cond_t *assoc_cond);
+				    slurmdb_assoc_cond_t *assoc_cond);
 	List (*get_qos)            (void *db_conn, uint32_t uid,
 				    slurmdb_qos_cond_t *qos_cond);
 	List (*get_res)            (void *db_conn, uint32_t uid,
@@ -174,8 +180,8 @@ typedef struct slurm_acct_storage_ops {
 	int  (*node_up)            (void *db_conn,
 				    struct node_record *node_ptr,
 				    time_t event_time);
-	int  (*cluster_cpus)       (void *db_conn, char *cluster_nodes,
-				    uint32_t cpus, time_t event_time);
+	int  (*cluster_tres)       (void *db_conn, char *cluster_nodes,
+				    char *tres_str_in, time_t event_time);
 	int  (*register_ctld)      (void *db_conn, uint16_t port);
 	int  (*register_disconn_ctld)(void *db_conn, char *control_host);
 	int  (*fini_ctld)          (void *db_conn,
@@ -212,7 +218,8 @@ static const char *syms[] = {
 	"acct_storage_p_add_coord",
 	"acct_storage_p_add_accts",
 	"acct_storage_p_add_clusters",
-	"acct_storage_p_add_associations",
+	"acct_storage_p_add_tres",
+	"acct_storage_p_add_assocs",
 	"acct_storage_p_add_qos",
 	"acct_storage_p_add_res",
 	"acct_storage_p_add_wckeys",
@@ -220,7 +227,7 @@ static const char *syms[] = {
 	"acct_storage_p_modify_users",
 	"acct_storage_p_modify_accts",
 	"acct_storage_p_modify_clusters",
-	"acct_storage_p_modify_associations",
+	"acct_storage_p_modify_assocs",
 	"acct_storage_p_modify_job",
 	"acct_storage_p_modify_qos",
 	"acct_storage_p_modify_res",
@@ -230,7 +237,7 @@ static const char *syms[] = {
 	"acct_storage_p_remove_coord",
 	"acct_storage_p_remove_accts",
 	"acct_storage_p_remove_clusters",
-	"acct_storage_p_remove_associations",
+	"acct_storage_p_remove_assocs",
 	"acct_storage_p_remove_qos",
 	"acct_storage_p_remove_res",
 	"acct_storage_p_remove_wckeys",
@@ -239,7 +246,8 @@ static const char *syms[] = {
 	"acct_storage_p_get_accts",
 	"acct_storage_p_get_clusters",
 	"acct_storage_p_get_config",
-	"acct_storage_p_get_associations",
+	"acct_storage_p_get_tres",
+	"acct_storage_p_get_assocs",
 	"acct_storage_p_get_events",
 	"acct_storage_p_get_problems",
 	"acct_storage_p_get_qos",
@@ -251,7 +259,7 @@ static const char *syms[] = {
 	"acct_storage_p_roll_usage",
 	"clusteracct_storage_p_node_down",
 	"clusteracct_storage_p_node_up",
-	"clusteracct_storage_p_cluster_cpus",
+	"clusteracct_storage_p_cluster_tres",
 	"clusteracct_storage_p_register_ctld",
 	"clusteracct_storage_p_register_disconn_ctld",
 	"clusteracct_storage_p_fini_ctld",
@@ -274,6 +282,19 @@ static pthread_mutex_t plugin_context_lock = PTHREAD_MUTEX_INITIALIZER;
 static bool init_run = false;
 static uint16_t enforce = 0;
 
+/*
+ * If running with slurmdbd don't run if we don't have an index, else
+ * go ahead.
+ */
+extern int jobacct_storage_job_start_direct(void *db_conn,
+					    struct job_record *job_ptr)
+{
+	if (with_slurmdbd && !job_ptr->db_index)
+		return SLURM_SUCCESS;
+
+	return jobacct_storage_g_job_start(db_conn, job_ptr);
+}
+
 /*
  * Initialize context for acct_storage plugin
  */
@@ -384,12 +405,20 @@ extern int acct_storage_g_add_clusters(void *db_conn, uint32_t uid,
 	return (*(ops.add_clusters))(db_conn, uid, cluster_list);
 }
 
-extern int acct_storage_g_add_associations(void *db_conn, uint32_t uid,
-					   List association_list)
+extern int acct_storage_g_add_tres(void *db_conn, uint32_t uid,
+				   List tres_list_in)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return SLURM_ERROR;
-	return (*(ops.add_associations))(db_conn, uid, association_list);
+	return (*(ops.add_tres))(db_conn, uid, tres_list_in);
+}
+
+extern int acct_storage_g_add_assocs(void *db_conn, uint32_t uid,
+				     List assoc_list)
+{
+	if (slurm_acct_storage_init(NULL) < 0)
+		return SLURM_ERROR;
+	return (*(ops.add_assocs))(db_conn, uid, assoc_list);
 }
 
 extern int acct_storage_g_add_qos(void *db_conn, uint32_t uid,
@@ -450,14 +479,14 @@ extern List acct_storage_g_modify_clusters(void *db_conn, uint32_t uid,
 	return (*(ops.modify_clusters))(db_conn, uid, cluster_cond, cluster);
 }
 
-extern List acct_storage_g_modify_associations(
+extern List acct_storage_g_modify_assocs(
 	void *db_conn, uint32_t uid,
-	slurmdb_association_cond_t *assoc_cond,
-	slurmdb_association_rec_t *assoc)
+	slurmdb_assoc_cond_t *assoc_cond,
+	slurmdb_assoc_rec_t *assoc)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return NULL;
-	return (*(ops.modify_associations))(db_conn, uid, assoc_cond, assoc);
+	return (*(ops.modify_assocs))(db_conn, uid, assoc_cond, assoc);
 }
 
 extern List acct_storage_g_modify_job(void *db_conn, uint32_t uid,
@@ -538,13 +567,13 @@ extern List acct_storage_g_remove_clusters(void *db_conn, uint32_t uid,
 	return (*(ops.remove_clusters))(db_conn, uid, cluster_cond);
 }
 
-extern List acct_storage_g_remove_associations(
+extern List acct_storage_g_remove_assocs(
 	void *db_conn, uint32_t uid,
-	slurmdb_association_cond_t *assoc_cond)
+	slurmdb_assoc_cond_t *assoc_cond)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return NULL;
-	return (*(ops.remove_associations))(db_conn, uid, assoc_cond);
+	return (*(ops.remove_assocs))(db_conn, uid, assoc_cond);
 }
 
 extern List acct_storage_g_remove_qos(void *db_conn, uint32_t uid,
@@ -610,13 +639,22 @@ extern List acct_storage_g_get_config(void *db_conn, char *config_name)
 	return (*(ops.get_config))(db_conn, config_name);
 }
 
-extern List acct_storage_g_get_associations(
+extern List acct_storage_g_get_tres(
+	void *db_conn, uint32_t uid,
+	slurmdb_tres_cond_t *tres_cond)
+{
+	if (slurm_acct_storage_init(NULL) < 0)
+		return NULL;
+	return (*(ops.get_tres))(db_conn, uid, tres_cond);
+}
+
+extern List acct_storage_g_get_assocs(
 	void *db_conn, uint32_t uid,
-	slurmdb_association_cond_t *assoc_cond)
+	slurmdb_assoc_cond_t *assoc_cond)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return NULL;
-	return (*(ops.get_associations))(db_conn, uid, assoc_cond);
+	return (*(ops.get_assocs))(db_conn, uid, assoc_cond);
 }
 
 extern List acct_storage_g_get_events(void *db_conn, uint32_t uid,
@@ -628,7 +666,7 @@ extern List acct_storage_g_get_events(void *db_conn, uint32_t uid,
 }
 
 extern List acct_storage_g_get_problems(void *db_conn, uint32_t uid,
-					slurmdb_association_cond_t *assoc_cond)
+					slurmdb_assoc_cond_t *assoc_cond)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return NULL;
@@ -774,14 +812,15 @@ extern int clusteracct_storage_g_node_up(void *db_conn,
 }
 
 
-extern int clusteracct_storage_g_cluster_cpus(void *db_conn,
+extern int clusteracct_storage_g_cluster_tres(void *db_conn,
 					      char *cluster_nodes,
-					      uint32_t cpus,
+					      char *tres_str_in,
 					      time_t event_time)
 {
 	if (slurm_acct_storage_init(NULL) < 0)
 		return SLURM_ERROR;
-	return (*(ops.cluster_cpus))(db_conn, cluster_nodes, cpus, event_time);
+	return (*(ops.cluster_tres))(db_conn, cluster_nodes,
+				     tres_str_in, event_time);
 }
 
 
diff --git a/src/common/slurm_accounting_storage.h b/src/common/slurm_accounting_storage.h
index e3d798c38..7bc0a3695 100644
--- a/src/common/slurm_accounting_storage.h
+++ b/src/common/slurm_accounting_storage.h
@@ -50,6 +50,8 @@
 #include <sys/types.h>
 #include <pwd.h>
 
+extern int with_slurmdbd;
+
 extern int slurm_acct_storage_init(char *loc); /* load the plugin */
 extern int slurm_acct_storage_fini(void); /* unload the plugin */
 
@@ -117,13 +119,21 @@ extern int acct_storage_g_add_accounts(void *db_conn, uint32_t uid,
 extern int acct_storage_g_add_clusters(void *db_conn, uint32_t uid,
 				       List cluster_list);
 
+/*
+ * add tres to accounting system
+ * IN:  tres_list List of slurmdb_tres_rec_t *
+ * RET: SLURM_SUCCESS on success SLURM_ERROR else
+ */
+extern int acct_storage_g_add_tres(void *db_conn, uint32_t uid,
+				   List tres_list_in);
+
 /*
  * add associations to accounting system
- * IN:  association_list List of slurmdb_association_rec_t *
+ * IN:  assoc_list List of slurmdb_assoc_rec_t *
  * RET: SLURM_SUCCESS on success SLURM_ERROR else
  */
-extern int acct_storage_g_add_associations(void *db_conn, uint32_t uid,
-					   List association_list);
+extern int acct_storage_g_add_assocs(void *db_conn, uint32_t uid,
+					   List assoc_list);
 
 /*
  * add qos's to accounting system
@@ -189,14 +199,14 @@ extern List acct_storage_g_modify_clusters(void *db_conn, uint32_t uid,
 
 /*
  * modify existing associations in the accounting system
- * IN:  slurmdb_association_cond_t *assoc_cond
- * IN:  slurmdb_association_rec_t *assoc
+ * IN:  slurmdb_assoc_cond_t *assoc_cond
+ * IN:  slurmdb_assoc_rec_t *assoc
  * RET: List containing (char *'s) else NULL on error
  */
-extern List acct_storage_g_modify_associations(
+extern List acct_storage_g_modify_assocs(
 	void *db_conn, uint32_t uid,
-	slurmdb_association_cond_t *assoc_cond,
-	slurmdb_association_rec_t *assoc);
+	slurmdb_assoc_cond_t *assoc_cond,
+	slurmdb_assoc_rec_t *assoc);
 
 /*
  * modify existing job in the accounting system
@@ -281,11 +291,11 @@ extern List acct_storage_g_remove_clusters(void *db_conn, uint32_t uid,
 
 /*
  * remove associations from accounting system
- * IN:  slurmdb_association_cond_t *assoc_cond
+ * IN:  slurmdb_assoc_cond_t *assoc_cond
  * RET: List containing (char *'s) else NULL on error
  */
-extern List acct_storage_g_remove_associations(
-	void *db_conn, uint32_t uid, slurmdb_association_cond_t *assoc_cond);
+extern List acct_storage_g_remove_assocs(
+	void *db_conn, uint32_t uid, slurmdb_assoc_cond_t *assoc_cond);
 
 /*
  * remove qos from accounting system
@@ -357,12 +367,21 @@ extern List acct_storage_g_get_config(void *db_conn, char *config_name);
 
 /*
  * get info from the storage
- * IN:  slurmdb_association_cond_t *
- * RET: List of slurmdb_association_rec_t *
+ * IN:  slurmdb_tres_cond_t *
+ * RET: List of slurmdb_tres_rec_t *
+ * note List needs to be freed when called
+ */
+extern List acct_storage_g_get_tres(
+	void *db_conn, uint32_t uid, slurmdb_tres_cond_t *tres_cond);
+
+/*
+ * get info from the storage
+ * IN:  slurmdb_assoc_cond_t *
+ * RET: List of slurmdb_assoc_rec_t *
  * note List needs to be freed when called
  */
-extern List acct_storage_g_get_associations(
-	void *db_conn, uint32_t uid, slurmdb_association_cond_t *assoc_cond);
+extern List acct_storage_g_get_assocs(
+	void *db_conn, uint32_t uid, slurmdb_assoc_cond_t *assoc_cond);
 
 /*
  * get info from the storage
@@ -375,12 +394,12 @@ extern List acct_storage_g_get_events(
 
 /*
  * get info from the storage
- * IN:  slurmdb_association_cond_t *
- * RET: List of slurmdb_association_rec_t *
+ * IN:  slurmdb_assoc_cond_t *
+ * RET: List of slurmdb_assoc_rec_t *
  * note List needs to be freed when called
  */
 extern List acct_storage_g_get_problems(
-	void *db_conn, uint32_t uid, slurmdb_association_cond_t *assoc_cond);
+	void *db_conn, uint32_t uid, slurmdb_assoc_cond_t *assoc_cond);
 
 /*
  * get info from the storage
@@ -430,7 +449,7 @@ extern List acct_storage_g_get_txn(void *db_conn,  uint32_t uid,
 
 /*
  * get info from the storage
- * IN/OUT:  in void * (acct_association_rec_t *) or
+ * IN/OUT:  in void * (acct_assoc_rec_t *) or
  *          (acct_wckey_rec_t *) with the id set
  * IN:  type what type is 'in'
  * IN:  start time stamp for records >=
@@ -484,9 +503,9 @@ extern int clusteracct_storage_g_node_up(void *db_conn,
 					 struct node_record *node_ptr,
 					 time_t event_time);
 
-extern int clusteracct_storage_g_cluster_cpus(void *db_conn,
+extern int clusteracct_storage_g_cluster_tres(void *db_conn,
 					      char *cluster_nodes,
-					      uint32_t cpus,
+					      char *tres_str_in,
 					      time_t event_time);
 
 extern int clusteracct_storage_g_register_ctld(void *db_conn, uint16_t port);
@@ -495,6 +514,12 @@ extern int clusteracct_storage_g_register_disconn_ctld(
 extern int clusteracct_storage_g_fini_ctld(void *db_conn,
 					   slurmdb_cluster_rec_t *cluster_rec);
 
+/*
+ * load into the storage the start of a job
+ */
+extern int jobacct_storage_job_start_direct(void *db_conn,
+					    struct job_record *job_ptr);
+
 /*
  * load into the storage the start of a job
  */
diff --git a/src/common/slurm_acct_gather_energy.c b/src/common/slurm_acct_gather_energy.c
index 33a1013ae..242b2535a 100644
--- a/src/common/slurm_acct_gather_energy.c
+++ b/src/common/slurm_acct_gather_energy.c
@@ -131,9 +131,11 @@ extern int slurm_acct_gather_energy_init(void)
 
 done:
 	slurm_mutex_unlock(&g_context_lock);
-	xfree(type);
 	if (retval == SLURM_SUCCESS)
 		retval = acct_gather_conf_init();
+	if (retval != SLURM_SUCCESS)
+	fatal("can not open the %s plugin", type);
+	xfree(type);
 
 	return retval;
 }
@@ -152,10 +154,10 @@ extern int acct_gather_energy_fini(void)
 	return rc;
 }
 
-extern acct_gather_energy_t *acct_gather_energy_alloc(void)
+extern acct_gather_energy_t *acct_gather_energy_alloc(uint16_t cnt)
 {
 	acct_gather_energy_t *energy =
-		xmalloc(sizeof(struct acct_gather_energy));
+		xmalloc(sizeof(struct acct_gather_energy) * cnt);
 
 	return energy;
 }
@@ -168,7 +170,24 @@ extern void acct_gather_energy_destroy(acct_gather_energy_t *energy)
 extern void acct_gather_energy_pack(acct_gather_energy_t *energy, Buf buffer,
 				    uint16_t protocol_version)
 {
-	if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		if (!energy) {
+			pack64(0, buffer);
+			pack32(0, buffer);
+			pack64(0, buffer);
+			pack32(0, buffer);
+			pack64(0, buffer);
+			pack_time(0, buffer);
+			return;
+		}
+
+		pack64(energy->base_consumed_energy, buffer);
+		pack32(energy->base_watts, buffer);
+		pack64(energy->consumed_energy, buffer);
+		pack32(energy->current_watts, buffer);
+		pack64(energy->previous_consumed_energy, buffer);
+		pack_time(energy->poll_time, buffer);
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!energy) {
 			int i;
 			for (i=0; i<5; i++)
@@ -177,35 +196,56 @@ extern void acct_gather_energy_pack(acct_gather_energy_t *energy, Buf buffer,
 			return;
 		}
 
-		pack32(energy->base_consumed_energy, buffer);
+		pack32((uint32_t) energy->base_consumed_energy, buffer);
 		pack32(energy->base_watts, buffer);
-		pack32(energy->consumed_energy, buffer);
+		pack32((uint32_t) energy->consumed_energy, buffer);
 		pack32(energy->current_watts, buffer);
-		pack32(energy->previous_consumed_energy, buffer);
+		pack32((uint32_t) energy->previous_consumed_energy, buffer);
 		pack_time(energy->poll_time, buffer);
 	}
 }
 
 extern int acct_gather_energy_unpack(acct_gather_energy_t **energy, Buf buffer,
-				     uint16_t protocol_version)
+				     uint16_t protocol_version, bool need_alloc)
 {
-	acct_gather_energy_t *energy_ptr = acct_gather_energy_alloc();
-	*energy = energy_ptr;
+	uint32_t uint32_tmp;
+	acct_gather_energy_t *energy_ptr;
+
+	if (need_alloc) {
+		energy_ptr = acct_gather_energy_alloc(1);
+		*energy = energy_ptr;
+	} else {
+		energy_ptr = *energy;
+	}
 
-	if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
-		safe_unpack32(&energy_ptr->base_consumed_energy, buffer);
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpack64(&energy_ptr->base_consumed_energy, buffer);
+		safe_unpack32(&energy_ptr->base_watts, buffer);
+		safe_unpack64(&energy_ptr->consumed_energy, buffer);
+		safe_unpack32(&energy_ptr->current_watts, buffer);
+		safe_unpack64(&energy_ptr->previous_consumed_energy, buffer);
+		safe_unpack_time(&energy_ptr->poll_time, buffer);
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		safe_unpack32(&uint32_tmp, buffer);
+		energy_ptr->base_consumed_energy = (uint64_t) uint32_tmp;
 		safe_unpack32(&energy_ptr->base_watts, buffer);
-		safe_unpack32(&energy_ptr->consumed_energy, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		energy_ptr->consumed_energy = (uint64_t) uint32_tmp;
 		safe_unpack32(&energy_ptr->current_watts, buffer);
-		safe_unpack32(&energy_ptr->previous_consumed_energy, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		energy_ptr->previous_consumed_energy = (uint64_t) uint32_tmp;
 		safe_unpack_time(&energy_ptr->poll_time, buffer);
 	}
 
 	return SLURM_SUCCESS;
 
 unpack_error:
-	acct_gather_energy_destroy(energy_ptr);
-	*energy = NULL;
+	if (need_alloc) {
+		acct_gather_energy_destroy(energy_ptr);
+		*energy = NULL;
+	} else
+		memset(energy_ptr, 0, sizeof(acct_gather_energy_t));
+
 	return SLURM_ERROR;
 }
 
diff --git a/src/common/slurm_acct_gather_energy.h b/src/common/slurm_acct_gather_energy.h
index 9f9b0c2fa..3e6e6cfc7 100644
--- a/src/common/slurm_acct_gather_energy.h
+++ b/src/common/slurm_acct_gather_energy.h
@@ -75,12 +75,13 @@ typedef struct acct_energy_data {
 
 extern int acct_gather_energy_init(void); /* load the plugin */
 extern int acct_gather_energy_fini(void); /* unload the plugin */
-extern acct_gather_energy_t *acct_gather_energy_alloc(void);
+extern acct_gather_energy_t *acct_gather_energy_alloc(uint16_t cnt);
 extern void acct_gather_energy_destroy(acct_gather_energy_t *energy);
 extern void acct_gather_energy_pack(acct_gather_energy_t *energy, Buf buffer,
 				    uint16_t protocol_version);
 extern int acct_gather_energy_unpack(acct_gather_energy_t **energy, Buf buffer,
-				     uint16_t protocol_version);
+				     uint16_t protocol_version,
+				     bool need_alloc);
 
 extern int acct_gather_energy_g_update_node_energy(void);
 extern int acct_gather_energy_g_get_data(enum acct_energy_type data_type,
diff --git a/src/common/slurm_acct_gather_profile.c b/src/common/slurm_acct_gather_profile.c
index 8d5d766db..dc51887c5 100644
--- a/src/common/slurm_acct_gather_profile.c
+++ b/src/common/slurm_acct_gather_profile.c
@@ -70,8 +70,12 @@ typedef struct slurm_acct_gather_profile_ops {
 	int (*node_step_end)    (void);
 	int (*task_start)       (uint32_t);
 	int (*task_end)         (pid_t);
-	int (*add_sample_data)  (uint32_t, void*);
+	int (*create_group)     (const char*);
+	int (*create_dataset)   (const char*, int,
+				 acct_gather_profile_dataset_t *);
+	int (*add_sample_data)  (uint32_t, void*, time_t);
 	void (*conf_values)     (List *data);
+	bool (*is_active)     (uint32_t);
 
 } slurm_acct_gather_profile_ops_t;
 
@@ -88,8 +92,11 @@ static const char *syms[] = {
 	"acct_gather_profile_p_node_step_end",
 	"acct_gather_profile_p_task_start",
 	"acct_gather_profile_p_task_end",
+	"acct_gather_profile_p_create_group",
+	"acct_gather_profile_p_create_dataset",
 	"acct_gather_profile_p_add_sample_data",
 	"acct_gather_profile_p_conf_values",
+	"acct_gather_profile_p_is_active",
 };
 
 acct_gather_profile_timer_t acct_gather_profile_timer[PROFILE_CNT];
@@ -348,6 +355,40 @@ extern char *acct_gather_profile_type_t_name(acct_gather_profile_type_t type)
 	return "Unknown";
 }
 
+extern char *acct_gather_profile_dataset_str(
+	acct_gather_profile_dataset_t *dataset, void *data,
+	char *str, int str_len)
+{
+	int cur_loc = 0;
+
+        while (dataset && (dataset->type != PROFILE_FIELD_NOT_SET)) {
+		switch (dataset->type) {
+		case PROFILE_FIELD_UINT64:
+			cur_loc += snprintf(str+cur_loc, str_len-cur_loc,
+					    "%s%s=%"PRIu64,
+					    cur_loc ? " " : "",
+					    dataset->name, *(uint64_t *)data);
+			data += sizeof(uint64_t);
+			break;
+		case PROFILE_FIELD_DOUBLE:
+			cur_loc += snprintf(str+cur_loc, str_len-cur_loc,
+					    "%s%s=%lf",
+					    cur_loc ? " " : "",
+					    dataset->name, *(double *)data);
+			data += sizeof(double);
+			break;
+		case PROFILE_FIELD_NOT_SET:
+			break;
+		}
+
+		if (cur_loc >= str_len)
+			break;
+		dataset++;
+	}
+
+	return str;
+}
+
 extern int acct_gather_profile_startpoll(char *freq, char *freq_def)
 {
 	int retval = SLURM_SUCCESS;
@@ -548,7 +589,22 @@ extern int acct_gather_profile_g_task_end(pid_t taskpid)
 	return retval;
 }
 
-extern int acct_gather_profile_g_add_sample_data(uint32_t type, void* data)
+extern int acct_gather_profile_g_create_group(const char *name)
+{
+	int retval = SLURM_ERROR;
+
+	if (acct_gather_profile_init() < 0)
+		return retval;
+
+	slurm_mutex_lock(&profile_mutex);
+	retval = (*(ops.create_group))(name);
+	slurm_mutex_unlock(&profile_mutex);
+	return retval;
+}
+
+extern int acct_gather_profile_g_create_dataset(
+	const char *name, int parent,
+	acct_gather_profile_dataset_t *dataset)
 {
 	int retval = SLURM_ERROR;
 
@@ -556,7 +612,21 @@ extern int acct_gather_profile_g_add_sample_data(uint32_t type, void* data)
 		return retval;
 
 	slurm_mutex_lock(&profile_mutex);
-	retval = (*(ops.add_sample_data))(type, data);
+	retval = (*(ops.create_dataset))(name, parent, dataset);
+	slurm_mutex_unlock(&profile_mutex);
+	return retval;
+}
+
+extern int acct_gather_profile_g_add_sample_data(int dataset_id, void* data,
+						 time_t sample_time)
+{
+	int retval = SLURM_ERROR;
+
+	if (acct_gather_profile_init() < 0)
+		return retval;
+
+	slurm_mutex_lock(&profile_mutex);
+	retval = (*(ops.add_sample_data))(dataset_id, data, sample_time);
 	slurm_mutex_unlock(&profile_mutex);
 	return retval;
 }
@@ -568,3 +638,11 @@ extern void acct_gather_profile_g_conf_values(void *data)
 
 	(*(ops.conf_values))(data);
 }
+
+extern bool acct_gather_profile_g_is_active(uint32_t type)
+{
+	if (acct_gather_profile_init() < 0)
+		return false;
+
+	return (*(ops.is_active))(type);
+}
diff --git a/src/common/slurm_acct_gather_profile.h b/src/common/slurm_acct_gather_profile.h
index 5a6504e95..b5defd222 100644
--- a/src/common/slurm_acct_gather_profile.h
+++ b/src/common/slurm_acct_gather_profile.h
@@ -66,6 +66,8 @@
 #include "src/common/slurm_acct_gather.h"
 #include "src/slurmd/slurmstepd/slurmstepd_job.h"
 
+#define NO_PARENT -1
+
 typedef enum {
 	PROFILE_ENERGY,
 	PROFILE_TASK,
@@ -74,6 +76,17 @@ typedef enum {
 	PROFILE_CNT
 } acct_gather_profile_type_t;
 
+typedef enum {
+	PROFILE_FIELD_NOT_SET,
+	PROFILE_FIELD_UINT64,
+	PROFILE_FIELD_DOUBLE
+} acct_gather_profile_field_type_t;
+
+typedef struct {
+	char *name;
+	acct_gather_profile_field_type_t type;
+} acct_gather_profile_dataset_t;
+
 typedef struct {
 	int freq;
 	time_t last_notify;
@@ -104,7 +117,9 @@ extern char *acct_gather_profile_type_to_string(uint32_t series);
 extern uint32_t acct_gather_profile_type_from_string(char *series_str);
 
 extern char *acct_gather_profile_type_t_name(acct_gather_profile_type_t type);
-
+extern char *acct_gather_profile_dataset_str(
+	acct_gather_profile_dataset_t *dataset, void *data,
+	char *str, int str_len);
 extern int acct_gather_profile_startpoll(char *freq, char *freq_def);
 extern void acct_gather_profile_endpoll(void);
 
@@ -184,19 +199,46 @@ extern int acct_gather_profile_g_task_start(uint32_t taskid);
  */
 extern int acct_gather_profile_g_task_end(pid_t taskpid);
 
+/*
+ * Create a new group which can contain datasets.
+ *
+ * Returns -- the identifier of the group on success,
+ *            a negative value on failure
+ */
+extern int acct_gather_profile_g_create_group(const char* name);
+
+/*
+ * Create a new dataset to record profiling data in the group "parent".
+ * Must be called by each accounting plugin in order to record data.
+ * A "Time" field is automatically added.
+ *
+ * Parameters
+ *  name        -- name of the dataset
+ *  parent      -- id of the parent group created with
+ *                 acct_gather_profile_g_create_group, or NO_PARENT for
+ *                 default group
+ *  profile_series -- profile_series_def_t array filled in with the
+ *                    series definition
+ * Returns -- an identifier to the dataset on success
+ *            a negative value on failure
+ */
+extern int acct_gather_profile_g_create_dataset(
+	const char *name, int parent, acct_gather_profile_dataset_t *dataset);
+
 /*
  * Put data at the Node Samples level. Typically called from something called
  * at either job_acct_gather interval or acct_gather_energy interval.
- * All samples in the same group will eventually be consolidated in one
- * dataset
+ * Time is automatically added.
  *
  * Parameters
- *	type  -- identifies the type of data.
- *	data  -- data structure to be put to the file.
+ *	dataset_id -- identifies the dataset to add data to.
+ *	data       -- data structure to be recorded
+ *      sample_time-- when the sample happened
  *
  * Returns -- SLURM_SUCCESS or SLURM_ERROR
  */
-extern int acct_gather_profile_g_add_sample_data(uint32_t type, void *data);
+extern int acct_gather_profile_g_add_sample_data(int dataset_id, void *data,
+						 time_t sample_time);
 
 /* Get the values from the plugin that are setup in the .conf
  * file. This function should most likely only be called from
@@ -204,4 +246,7 @@ extern int acct_gather_profile_g_add_sample_data(uint32_t type, void *data);
  */
 extern void acct_gather_profile_g_conf_values(void *data);
 
+/* Return true if the given type of plugin must be profiled */
+extern bool acct_gather_profile_g_is_active(uint32_t type);
+
 #endif /*__SLURM_ACCT_GATHER_PROFILE_H__*/
diff --git a/src/common/slurm_cred.c b/src/common/slurm_cred.c
index 91d05a4a8..ab03f35e2 100644
--- a/src/common/slurm_cred.c
+++ b/src/common/slurm_cred.c
@@ -3,6 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2015 SchedMD <http://www.schedmd.com>.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -62,6 +63,7 @@
 #include "src/common/plugrack.h"
 #include "src/common/slurm_cred.h"
 #include "src/common/slurm_protocol_api.h"
+#include "src/common/slurm_time.h"
 #include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
@@ -73,10 +75,15 @@ typedef struct sbcast_cred sbcast_cred_t;		/* opaque data type */
 
 /*
  * Default credential information expiration window.
- * Long enough for loading user environment, running prolog,
- * and dealing with the slurmd getting paged out of memory.
+ * Long enough for loading user environment, running prolog, paging slurmd
+ * into memory, plus sending a launch request to all compute nodes of a job
+ * (i.e. MessageTimeout * message_depth, where
+ * (TreeWidth ^^ message_depth) >= count_of_compute_nodes).
+ *
+ * The default value may be altered with the configuration option of this sort:
+ * "AuthInfo=cred_expire=600"
  */
-#define DEFAULT_EXPIRATION_WINDOW 1200
+#define DEFAULT_EXPIRATION_WINDOW 120
 
 #define EXTREME_DEBUG   0
 #define MAX_TIME 0x7fffffff
@@ -177,6 +184,7 @@ struct slurm_job_credential {
 	List job_gres_list;		/* Generic resources allocated to JOB */
 	List step_gres_list;		/* Generic resources allocated to STEP */
 
+	char     *job_constraints;	/* constraints in job allocation */
 	bitstr_t *job_core_bitmap;
 	uint16_t  job_core_spec;	/* Count of specialized cores */
 	uint32_t  job_nhosts;	/* count of nodes allocated to JOB */
@@ -233,6 +241,7 @@ static pthread_mutex_t g_context_lock = PTHREAD_MUTEX_INITIALIZER;
 static bool init_run = false;
 static time_t crypto_restart_time = (time_t) 0;
 static List sbcast_cache_list = NULL;
+static int cred_expire = DEFAULT_EXPIRATION_WINDOW;
 
 /*
  * Static prototypes:
@@ -291,6 +300,7 @@ static char * timestr (const time_t *tp, char *buf, size_t n);
 
 static int _slurm_crypto_init(void)
 {
+	char	*auth_info, *tok;
 	char    *plugin_type = "crypto";
 	char	*type = NULL;
 	int	retval = SLURM_SUCCESS;
@@ -298,6 +308,18 @@ static int _slurm_crypto_init(void)
 	if ( init_run && g_context )  /* mostly avoid locks for better speed */
 		return retval;
 
+	if ((auth_info = slurm_get_auth_info())) {
+		if ((tok = strstr(auth_info, "cred_expire="))) {
+			cred_expire = atoi(tok + 12);
+			if (cred_expire < 5) {
+				error("AuthInfo=cred_expire=%d invalid",
+				      cred_expire);
+				cred_expire = DEFAULT_EXPIRATION_WINDOW;
+			}
+		xfree(auth_info);
+		}
+	}
+
 	slurm_mutex_lock( &g_context_lock );
 	if (crypto_restart_time == (time_t) 0)
 		crypto_restart_time = time(NULL);
@@ -331,7 +353,7 @@ static int _slurm_crypto_fini(void)
 		return SLURM_SUCCESS;
 
 	init_run = false;
-	list_destroy(sbcast_cache_list);
+	FREE_NULL_LIST(sbcast_cache_list);
 	sbcast_cache_list = NULL;
 	rc = plugin_context_destroy(g_context);
 	g_context = NULL;
@@ -420,10 +442,8 @@ slurm_cred_ctx_destroy(slurm_cred_ctx_t ctx)
 		(*(ops.crypto_destroy_key))(ctx->exkey);
 	if (ctx->key)
 		(*(ops.crypto_destroy_key))(ctx->key);
-	if (ctx->job_list)
-		list_destroy(ctx->job_list);
-	if (ctx->state_list)
-		list_destroy(ctx->state_list);
+	FREE_NULL_LIST(ctx->job_list);
+	FREE_NULL_LIST(ctx->state_list);
 
 	xassert(ctx->magic = ~CRED_CTX_MAGIC);
 
@@ -536,32 +556,39 @@ slurm_cred_create(slurm_cred_ctx_t ctx, slurm_cred_arg_t *arg,
 	cred->step_hostlist   = xstrdup(arg->step_hostlist);
 #ifndef HAVE_BG
 	{
-		int i, sock_recs = 0;
-#ifndef HAVE_ALPS_CRAY
-		/* Zero compute node allocations allowed on a Cray for use
-		 * of front-end nodes */
-		xassert(arg->job_nhosts);
-#endif
-		for (i = 0; i < arg->job_nhosts; i++) {
-			sock_recs += arg->sock_core_rep_count[i];
-			if (sock_recs >= arg->job_nhosts)
-				break;
+		int i = 0, sock_recs = 0;
+		if (arg->sock_core_rep_count) {
+			for (i = 0; i < arg->job_nhosts; i++) {
+				sock_recs += arg->sock_core_rep_count[i];
+				if (sock_recs >= arg->job_nhosts)
+					break;
+			}
 		}
 		i++;
-		cred->job_core_bitmap = bit_copy(arg->job_core_bitmap);
-		cred->step_core_bitmap = bit_copy(arg->step_core_bitmap);
-		cred->core_array_size = i;
-		cred->cores_per_socket = xmalloc(sizeof(uint16_t) * i);
-		memcpy(cred->cores_per_socket, arg->cores_per_socket,
-		       (sizeof(uint16_t) * i));
-		cred->sockets_per_node = xmalloc(sizeof(uint16_t) * i);
-		memcpy(cred->sockets_per_node, arg->sockets_per_node,
-		       (sizeof(uint16_t) * i));
+		if (arg->job_core_bitmap)
+			cred->job_core_bitmap = bit_copy(arg->job_core_bitmap);
+		if (arg->step_core_bitmap)
+			cred->step_core_bitmap =bit_copy(arg->step_core_bitmap);
+		cred->core_array_size     = i;
+		cred->cores_per_socket    = xmalloc(sizeof(uint16_t) * i);
+		cred->sockets_per_node    = xmalloc(sizeof(uint16_t) * i);
 		cred->sock_core_rep_count = xmalloc(sizeof(uint32_t) * i);
-		memcpy(cred->sock_core_rep_count, arg->sock_core_rep_count,
-		       (sizeof(uint32_t) * i));
-		cred->job_nhosts = arg->job_nhosts;
-		cred->job_hostlist = xstrdup(arg->job_hostlist);
+		if (arg->cores_per_socket) {
+			memcpy(cred->cores_per_socket, arg->cores_per_socket,
+			       (sizeof(uint16_t) * i));
+		}
+		if (arg->sockets_per_node) {
+			memcpy(cred->sockets_per_node, arg->sockets_per_node,
+			       (sizeof(uint16_t) * i));
+		}
+		if (arg->sock_core_rep_count) {
+			memcpy(cred->sock_core_rep_count,
+			       arg->sock_core_rep_count,
+			       (sizeof(uint32_t) * i));
+		}
+		cred->job_constraints = xstrdup(arg->job_constraints);
+		cred->job_nhosts      = arg->job_nhosts;
+		cred->job_hostlist    = xstrdup(arg->job_hostlist);
 	}
 #endif
 	cred->ctime  = time(NULL);
@@ -622,8 +649,9 @@ slurm_cred_copy(slurm_cred_t *cred)
 					    rcred->core_array_size);
 	memcpy(rcred->sock_core_rep_count, cred->sock_core_rep_count,
 	       (sizeof(uint32_t) * rcred->core_array_size));
-	rcred->job_nhosts = cred->job_nhosts;
-	rcred->job_hostlist = xstrdup(cred->job_hostlist);
+	rcred->job_constraints = xstrdup(cred->job_constraints);
+	rcred->job_nhosts      = cred->job_nhosts;
+	rcred->job_hostlist    = xstrdup(cred->job_hostlist);
 #endif
 	rcred->ctime  = cred->ctime;
 	rcred->siglen = cred->siglen;
@@ -658,7 +686,7 @@ slurm_cred_faker(slurm_cred_arg_t *arg)
 #ifndef HAVE_BG
 	{
 		int sock_recs = 0;
-		for (i=0; i<arg->job_nhosts; i++) {
+		for (i = 0; i < arg->job_nhosts; i++) {
 			sock_recs += arg->sock_core_rep_count[i];
 			if (sock_recs >= arg->job_nhosts)
 				break;
@@ -676,8 +704,9 @@ slurm_cred_faker(slurm_cred_arg_t *arg)
 		cred->sock_core_rep_count = xmalloc(sizeof(uint32_t) * i);
 		memcpy(cred->sock_core_rep_count, arg->sock_core_rep_count,
 		       (sizeof(uint32_t) * i));
-		cred->job_nhosts = arg->job_nhosts;
-		cred->job_hostlist = xstrdup(arg->job_hostlist);
+		cred->job_constraints = xstrdup(arg->job_constraints);
+		cred->job_nhosts      = arg->job_nhosts;
+		cred->job_hostlist    = xstrdup(arg->job_hostlist);
 	}
 #endif
 	cred->ctime  = time(NULL);
@@ -714,6 +743,7 @@ void slurm_cred_free_args(slurm_cred_arg_t *arg)
 	FREE_NULL_LIST(arg->job_gres_list);
 	FREE_NULL_LIST(arg->step_gres_list);
 	xfree(arg->step_hostlist);
+	xfree(arg->job_constraints);
 	xfree(arg->job_hostlist);
 	xfree(arg->sock_core_rep_count);
 	xfree(arg->sockets_per_node);
@@ -743,6 +773,7 @@ int slurm_cred_get_args(slurm_cred_t *cred, slurm_cred_arg_t *arg)
 	arg->cores_per_socket = NULL;
 	arg->sockets_per_node = NULL;
 	arg->sock_core_rep_count = NULL;
+	arg->job_constraints = NULL;
 	arg->job_nhosts = 0;
 	arg->job_hostlist = NULL;
 #else
@@ -760,8 +791,9 @@ int slurm_cred_get_args(slurm_cred_t *cred, slurm_cred_arg_t *arg)
 					   cred->core_array_size);
 	memcpy(arg->sock_core_rep_count, cred->sock_core_rep_count,
 	       (sizeof(uint32_t) * cred->core_array_size));
-	arg->job_nhosts = cred->job_nhosts;
-	arg->job_hostlist = xstrdup(cred->job_hostlist);
+	arg->job_constraints = xstrdup(cred->job_constraints);
+	arg->job_nhosts      = cred->job_nhosts;
+	arg->job_hostlist    = xstrdup(cred->job_hostlist);
 #endif
 	slurm_mutex_unlock(&cred->mutex);
 
@@ -833,6 +865,7 @@ slurm_cred_verify(slurm_cred_ctx_t ctx, slurm_cred_t *cred,
 	arg->cores_per_socket = NULL;
 	arg->sockets_per_node = NULL;
 	arg->sock_core_rep_count = NULL;
+	arg->job_constraints = NULL;
 	arg->job_nhosts = 0;
 	arg->job_hostlist = NULL;
 #else
@@ -850,8 +883,9 @@ slurm_cred_verify(slurm_cred_ctx_t ctx, slurm_cred_t *cred,
 					   cred->core_array_size);
 	memcpy(arg->sock_core_rep_count, cred->sock_core_rep_count,
 	       (sizeof(uint32_t) * cred->core_array_size));
-	arg->job_nhosts = cred->job_nhosts;
-	arg->job_hostlist = xstrdup(cred->job_hostlist);
+	arg->job_constraints = xstrdup(cred->job_constraints);
+	arg->job_nhosts      = cred->job_nhosts;
+	arg->job_hostlist    = xstrdup(cred->job_hostlist);
 #endif
 	slurm_mutex_unlock(&cred->mutex);
 
@@ -879,6 +913,7 @@ slurm_cred_destroy(slurm_cred_t *cred)
 	FREE_NULL_BITMAP(cred->job_core_bitmap);
 	FREE_NULL_BITMAP(cred->step_core_bitmap);
 	xfree(cred->cores_per_socket);
+	xfree(cred->job_constraints);
 	xfree(cred->job_hostlist);
 	xfree(cred->sock_core_rep_count);
 	xfree(cred->sockets_per_node);
@@ -1269,7 +1304,7 @@ slurm_cred_unpack(Buf buffer, uint16_t protocol_version)
 
 	cred = _slurm_cred_alloc();
 	slurm_mutex_lock(&cred->mutex);
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack32(&cred->jobid, buffer);
 		safe_unpack32(&cred->stepid, buffer);
 		safe_unpack32(&cred_uid, buffer);
@@ -1288,6 +1323,7 @@ slurm_cred_unpack(Buf buffer, uint16_t protocol_version)
 		safe_unpack16(&cred->job_core_spec, buffer);
 		safe_unpack32(&cred->job_mem_limit, buffer);
 		safe_unpack32(&cred->step_mem_limit, buffer);
+		safe_unpackstr_xmalloc(&cred->job_constraints, &len, buffer);
 		safe_unpackstr_xmalloc(&cred->step_hostlist, &len, buffer);
 		safe_unpack_time(&cred->ctime, buffer);
 
@@ -1333,7 +1369,7 @@ slurm_cred_unpack(Buf buffer, uint16_t protocol_version)
 		safe_unpackmem_xmalloc(sigp, &len, buffer);
 		cred->siglen = len;
 		xassert(len > 0);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		safe_unpack32(&cred->jobid, buffer);
 		safe_unpack32(&cred->stepid, buffer);
 		safe_unpack32(&cred_uid, buffer);
@@ -1349,6 +1385,7 @@ slurm_cred_unpack(Buf buffer, uint16_t protocol_version)
 		    != SLURM_SUCCESS) {
 			goto unpack_error;
 		}
+		safe_unpack16(&cred->job_core_spec, buffer);
 		safe_unpack32(&cred->job_mem_limit, buffer);
 		safe_unpack32(&cred->step_mem_limit, buffer);
 		safe_unpackstr_xmalloc(&cred->step_hostlist, &len, buffer);
@@ -1446,6 +1483,9 @@ slurm_cred_ctx_unpack(slurm_cred_ctx_t ctx, Buf buffer)
 void
 slurm_cred_print(slurm_cred_t *cred)
 {
+	char *spec_type;
+	int spec_count;
+
 	if (cred == NULL)
 		return;
 
@@ -1453,14 +1493,25 @@ slurm_cred_print(slurm_cred_t *cred)
 
 	xassert(cred->magic == CRED_MAGIC);
 
+	if (cred->job_core_spec == (uint16_t) NO_VAL) {
+		spec_type  = "Cores";
+		spec_count = 0;
+	} else if (cred->job_core_spec & CORE_SPEC_THREAD) {
+		spec_type  = "Threads";
+		spec_count = cred->job_core_spec & (~CORE_SPEC_THREAD);
+	} else {
+		spec_type  = "Cores";
+		spec_count = cred->job_core_spec;
+	}
 	info("Cred: Jobid             %u",  cred->jobid         );
 	info("Cred: Stepid            %u",  cred->stepid        );
 	info("Cred: UID               %u",  (uint32_t) cred->uid);
-	info("Cred: Job_core_spec     %u",  cred->job_core_spec );
+	info("Cred: Job_constraints   %s",  cred->job_constraints );
+	info("Cred: Job_core_spec     %d %s", spec_count, spec_type );
 	info("Cred: Job_mem_limit     %u",  cred->job_mem_limit );
 	info("Cred: Step_mem_limit    %u",  cred->step_mem_limit );
 	info("Cred: Step hostlist     %s",  cred->step_hostlist );
-	info("Cred: ctime             %s",  slurm_ctime(&cred->ctime) );
+	info("Cred: ctime             %s",  slurm_ctime2(&cred->ctime) );
 	info("Cred: siglen            %u",  cred->siglen        );
 #ifndef HAVE_BG
 	{
@@ -1585,7 +1636,7 @@ _slurm_cred_ctx_alloc(void)
 	slurm_mutex_init(&ctx->mutex);
 	slurm_mutex_lock(&ctx->mutex);
 
-	ctx->expiry_window = DEFAULT_EXPIRATION_WINDOW;
+	ctx->expiry_window = cred_expire;
 	ctx->exkey_exp     = (time_t) -1;
 
 	xassert(ctx->magic = CRED_CTX_MAGIC);
@@ -1685,7 +1736,7 @@ _pack_cred(slurm_cred_t *cred, Buf buffer, uint16_t protocol_version)
 {
 	uint32_t cred_uid = (uint32_t) cred->uid;
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		pack32(cred->jobid,          buffer);
 		pack32(cred->stepid,         buffer);
 		pack32(cred_uid,             buffer);
@@ -1699,12 +1750,14 @@ _pack_cred(slurm_cred_t *cred, Buf buffer, uint16_t protocol_version)
 		pack16(cred->job_core_spec,  buffer);
 		pack32(cred->job_mem_limit,  buffer);
 		pack32(cred->step_mem_limit, buffer);
+		packstr(cred->job_constraints, buffer);
 		packstr(cred->step_hostlist, buffer);
 		pack_time(cred->ctime,       buffer);
 #ifndef HAVE_BG
 		{
-			uint32_t tot_core_cnt;
-			tot_core_cnt = bit_size(cred->job_core_bitmap);
+			uint32_t tot_core_cnt = 0;
+			if (cred->job_core_bitmap)
+				tot_core_cnt = bit_size(cred->job_core_bitmap);
 			pack32(tot_core_cnt, buffer);
 			pack_bit_fmt(cred->job_core_bitmap, buffer);
 			pack_bit_fmt(cred->step_core_bitmap, buffer);
@@ -1735,6 +1788,7 @@ _pack_cred(slurm_cred_t *cred, Buf buffer, uint16_t protocol_version)
 		gres_plugin_step_state_pack(cred->step_gres_list, buffer,
 					    cred->jobid, cred->stepid,
 					    SLURM_PROTOCOL_VERSION);
+		pack16(cred->job_core_spec,  buffer);
 		pack32(cred->job_mem_limit,  buffer);
 		pack32(cred->step_mem_limit, buffer);
 		packstr(cred->step_hostlist, buffer);
@@ -1813,7 +1867,7 @@ extern char * timestr (const time_t *tp, char *buf, size_t n)
 	if (disabled)
 		return NULL;
 #endif
-	if (!localtime_r (tp, &tmval))
+	if (!slurm_localtime_r (tp, &tmval))
 		error ("localtime_r: %m");
 	slurm_strftime (buf, n, fmt, &tmval);
 	return (buf);
@@ -2427,6 +2481,6 @@ void  print_sbcast_cred(sbcast_cred_t *sbcast_cred)
 {
 	info("Sbcast_cred: Jobid   %u", sbcast_cred->jobid         );
 	info("Sbcast_cred: Nodes   %s", sbcast_cred->nodes         );
-	info("Sbcast_cred: ctime   %s", slurm_ctime(&sbcast_cred->ctime) );
-	info("Sbcast_cred: Expire  %s", slurm_ctime(&sbcast_cred->expiration) );
+	info("Sbcast_cred: ctime   %s", slurm_ctime2(&sbcast_cred->ctime) );
+	info("Sbcast_cred: Expire  %s", slurm_ctime2(&sbcast_cred->expiration));
 }
diff --git a/src/common/slurm_cred.h b/src/common/slurm_cred.h
index d1faeb3e4..1c17cb91f 100644
--- a/src/common/slurm_cred.h
+++ b/src/common/slurm_cred.h
@@ -156,6 +156,7 @@ typedef struct {
 	uint32_t *sock_core_rep_count;	/* Used for job/step_core_bitmaps */
 
 	/* JOB specific info */
+	char     *job_constraints;	/* constraints in job allocation */
 	bitstr_t *job_core_bitmap;	/* cores allocated to JOB */
 	uint16_t  job_core_spec;	/* count of specialized cores */
 	char     *job_hostlist;		/* list of nodes allocated to JOB */
diff --git a/src/common/slurm_errno.c b/src/common/slurm_errno.c
index a3038f17d..efbc86812 100644
--- a/src/common/slurm_errno.c
+++ b/src/common/slurm_errno.c
@@ -108,7 +108,7 @@ static slurm_errtab_t slurm_errtab[] = {
 	{ SLURMCTLD_COMMUNICATIONS_SHUTDOWN_ERROR,
 	  "Unable to contact slurm controller (shutdown failure)"},
 
-	/* _info.c/communcation layer RESPONSE_SLURM_RC message codes */
+	/* _info.c/communication layer RESPONSE_SLURM_RC message codes */
 
 	{ SLURM_NO_CHANGE_IN_DATA,	/* Not really an error */
 	  "Data has not changed since time specified"		},
@@ -147,6 +147,12 @@ static slurm_errtab_t slurm_errtab[] = {
 	  "Requested node configuration is not available"	},
 	{ ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE,
 	  "Requested partition configuration not available now" },
+	{ ESLURM_POWER_NOT_AVAIL,
+	  "Required power not available now"},
+	{ ESLURM_POWER_RESERVED,
+	  "Required power at least partially reserved"},
+	{ ESLURM_INVALID_POWERCAP,
+	  "Required powercap is not valid, check min/max values"},
 	{ ESLURM_NODES_BUSY,
 	  "Requested nodes are busy"				},
 	{ ESLURM_INVALID_JOB_ID,
@@ -219,9 +225,9 @@ static slurm_errtab_t slurm_errtab[] = {
 	{ ESLURM_SAME_PARENT_ACCOUNT,
 	  "Account already child of parent account specified"   },
 	{ ESLURM_INVALID_QOS,
-	  "Job has invalid qos"					},
+	  "Invalid qos specification"				},
 	{ ESLURM_INVALID_WCKEY,
-	  "Job has invalid wckey"				},
+	  "Invalid wckey specification"				},
 	{ ESLURM_INVALID_LICENSES,
 	  "Invalid license specification"			},
 	{ ESLURM_NEED_RESTART,
@@ -306,6 +312,17 @@ static slurm_errtab_t slurm_errtab[] = {
 	  "Duplicate event trigger"				},
 	{ ESLURM_INTERNAL,
 	  "Slurm internal error, contact system administrator"	},
+	{ ESLURM_INVALID_BURST_BUFFER_CHANGE,
+	  "BurstBufferType change requires restart of slurmctld daemon "
+	  "to take effect"},
+	{ ESLURM_BURST_BUFFER_PERMISSION,
+	  "Burst Buffer permission denied"			},
+	{ ESLURM_BURST_BUFFER_LIMIT,
+	  "Burst Buffer resource limit exceeded"		},
+	{ ESLURM_INVALID_BURST_BUFFER_REQUEST,
+	  "Burst Buffer request invalid"			},
+	{ ESLURM_PRIO_RESET_FAIL,
+	  "Changes to job priority are not persistent, change nice instead" },
 
 	/* slurmd error codes */
 	{ ESLRUMD_PIPE_ERROR_ON_TASK_SPAWN,
@@ -371,7 +388,7 @@ static slurm_errtab_t slurm_errtab[] = {
 	{ ESCRIPT_CHDIR_FAILED,
 	  "unable to change directory to work directory"	},
 	{ ESCRIPT_OPEN_OUTPUT_FAILED,
-	  "cound not open output file"			        },
+	  "could not open output file"			        },
 	{ ESCRIPT_NON_ZERO_RETURN,
 	  "Script terminated with non-zero exit code"		},
 
diff --git a/src/common/slurm_ext_sensors.c b/src/common/slurm_ext_sensors.c
index aef340c78..274cb1dc5 100644
--- a/src/common/slurm_ext_sensors.c
+++ b/src/common/slurm_ext_sensors.c
@@ -144,32 +144,56 @@ extern void ext_sensors_destroy(ext_sensors_data_t *ext_sensors)
 extern void ext_sensors_data_pack(ext_sensors_data_t *ext_sensors, Buf buffer,
 				    uint16_t protocol_version)
 {
-	if (!ext_sensors) {
-		pack32(0, buffer);
-		pack32(0, buffer);
-		pack_time((time_t)0, buffer);
-		pack32(0, buffer);
-		return;
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		if (!ext_sensors) {
+			pack64(0, buffer);
+			pack32(0, buffer);
+			pack_time((time_t)0, buffer);
+			pack32(0, buffer);
+			return;
+		}
+
+		pack64(ext_sensors->consumed_energy, buffer);
+		pack32(ext_sensors->temperature, buffer);
+		pack_time(ext_sensors->energy_update_time, buffer);
+		pack32(ext_sensors->current_watts, buffer);
+	} else {
+		if (!ext_sensors) {
+			pack32(0, buffer);
+			pack32(0, buffer);
+			pack_time((time_t)0, buffer);
+			pack32(0, buffer);
+			return;
+		}
+
+		pack32((uint32_t)ext_sensors->consumed_energy, buffer);
+		pack32(ext_sensors->temperature, buffer);
+		pack_time(ext_sensors->energy_update_time, buffer);
+		pack32(ext_sensors->current_watts, buffer);
 	}
-
-	pack32(ext_sensors->consumed_energy, buffer);
-	pack32(ext_sensors->temperature, buffer);
-	pack_time(ext_sensors->energy_update_time, buffer);
-	pack32(ext_sensors->current_watts, buffer);
 }
 
 extern int ext_sensors_data_unpack(ext_sensors_data_t **ext_sensors, Buf buffer,
 				     uint16_t protocol_version)
 {
+	uint32_t uint32_tmp;
 	ext_sensors_data_t *ext_sensors_ptr = ext_sensors_alloc();
 	*ext_sensors = ext_sensors_ptr;
 	if (ext_sensors_ptr == NULL)
 		return SLURM_ERROR;
 
-	safe_unpack32(&ext_sensors_ptr->consumed_energy, buffer);
-	safe_unpack32(&ext_sensors_ptr->temperature, buffer);
-	safe_unpack_time(&ext_sensors_ptr->energy_update_time, buffer);
-	safe_unpack32(&ext_sensors_ptr->current_watts, buffer);
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpack64(&ext_sensors_ptr->consumed_energy, buffer);
+		safe_unpack32(&ext_sensors_ptr->temperature, buffer);
+		safe_unpack_time(&ext_sensors_ptr->energy_update_time, buffer);
+		safe_unpack32(&ext_sensors_ptr->current_watts, buffer);
+	} else {
+		safe_unpack32(&uint32_tmp, buffer);
+		ext_sensors_ptr->consumed_energy = (uint64_t) uint32_tmp;
+		safe_unpack32(&ext_sensors_ptr->temperature, buffer);
+		safe_unpack_time(&ext_sensors_ptr->energy_update_time, buffer);
+		safe_unpack32(&ext_sensors_ptr->current_watts, buffer);
+	}
 
 	return SLURM_SUCCESS;
 
diff --git a/src/common/slurm_jobacct_gather.c b/src/common/slurm_jobacct_gather.c
index ea0cd978d..04f237791 100644
--- a/src/common/slurm_jobacct_gather.c
+++ b/src/common/slurm_jobacct_gather.c
@@ -82,7 +82,8 @@ strong_alias(jobacctinfo_destroy, slurm_jobacctinfo_destroy);
  * at the end of the structure.
  */
 typedef struct slurm_jobacct_gather_ops {
-	void (*poll_data) (List task_list, bool pgid_plugin, uint64_t cont_id);
+	void (*poll_data) (List task_list, bool pgid_plugin, uint64_t cont_id,
+			   bool profile);
 	int (*endpoll)    ();
 	int (*add_task)   (pid_t pid, jobacct_id_t *jobacct_id);
 } slurm_jobacct_gather_ops_t;
@@ -105,7 +106,7 @@ static bool init_run = false;
 static int freq = 0;
 static bool pgid_plugin = false;
 static List task_list = NULL;
-static uint64_t cont_id = (uint64_t)NO_VAL;
+static uint64_t cont_id = NO_VAL64;
 static pthread_mutex_t task_list_lock = PTHREAD_MUTEX_INITIALIZER;
 
 static bool jobacct_shutdown = true;
@@ -168,39 +169,12 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-static void _write_jobacct_id(int fd, jobacct_id_t *jobacct_id,
-			      uint16_t rpc_version)
-{
-	if (jobacct_id) {
-		safe_write(fd, &jobacct_id->nodeid, sizeof(uint32_t));
-		safe_write(fd, &jobacct_id->taskid, sizeof(uint16_t));
-	} else {
-		uint32_t no32 = NO_VAL;
-		uint16_t no16 = (uint16_t)NO_VAL;
-		safe_write(fd, &no32, sizeof(uint32_t));
-		safe_write(fd, &no16, sizeof(uint16_t));
-	}
-rwfail:
-	return;
-}
-
-static int _read_jobacct_id(int fd, jobacct_id_t *jobacct_id,
-			    uint16_t rpc_version)
-{
-	safe_read(fd, &jobacct_id->nodeid, sizeof(uint32_t));
-	safe_read(fd, &jobacct_id->taskid, sizeof(uint16_t));
-
-	return SLURM_SUCCESS;
-rwfail:
-	return SLURM_ERROR;
-}
-
-static void _poll_data(void)
+static void _poll_data(bool profile)
 {
 	/* Update the data */
 	slurm_mutex_lock(&task_list_lock);
 	if (task_list)
-		(*(ops.poll_data))(task_list, pgid_plugin, cont_id);
+		(*(ops.poll_data))(task_list, pgid_plugin, cont_id, profile);
 	slurm_mutex_unlock(&task_list_lock);
 }
 
@@ -227,13 +201,14 @@ static void *_watch_tasks(void *arg)
 	_task_sleep(1);
 	while (!jobacct_shutdown && acct_gather_profile_running) {
 		/* Do this until shutdown is requested */
-		_poll_data();
 		slurm_mutex_lock(&acct_gather_profile_timer[type].notify_mutex);
 		pthread_cond_wait(
 			&acct_gather_profile_timer[type].notify,
 			&acct_gather_profile_timer[type].notify_mutex);
 		slurm_mutex_unlock(&acct_gather_profile_timer[type].
 				   notify_mutex);
+		/* The initial poll is done after the last task is added */
+		_poll_data(1);
 	}
 	return NULL;
 }
@@ -366,9 +341,7 @@ extern int jobacct_gather_endpoll(void)
 
 	jobacct_shutdown = true;
 	slurm_mutex_lock(&task_list_lock);
-	if (task_list)
-		list_destroy(task_list);
-	task_list = NULL;
+	FREE_NULL_LIST(task_list);
 
 	retval = (*(ops.endpoll))();
 
@@ -403,6 +376,7 @@ extern int jobacct_gather_add_task(pid_t pid, jobacct_id_t *jobacct_id,
 	}
 
 	jobacct->pid = pid;
+	memcpy(&jobacct->id, jobacct_id, sizeof(jobacct_id_t));
 	jobacct->min_cpu = 0;
 	debug2("adding task %u pid %d on node %u to jobacct",
 	       jobacct_id->taskid, pid, jobacct_id->nodeid);
@@ -412,7 +386,7 @@ extern int jobacct_gather_add_task(pid_t pid, jobacct_id_t *jobacct_id,
 	(*(ops.add_task))(pid, jobacct_id);
 
 	if (poll == 1)
-		_poll_data();
+		_poll_data(1);
 
 	return SLURM_SUCCESS;
 error:
@@ -430,7 +404,7 @@ extern jobacctinfo_t *jobacct_gather_stat_task(pid_t pid)
 		struct jobacctinfo *ret_jobacct = NULL;
 		ListIterator itr = NULL;
 
-		_poll_data();
+		_poll_data(0);
 
 		slurm_mutex_lock(&task_list_lock);
 		if (!task_list) {
@@ -460,7 +434,7 @@ extern jobacctinfo_t *jobacct_gather_stat_task(pid_t pid)
 		 * spawned, which would prevent a valid checkpoint/restart
 		 * with some systems */
 		_task_sleep(1);
-		_poll_data();
+		_poll_data(0);
 		return NULL;
 	}
 }
@@ -475,7 +449,7 @@ extern jobacctinfo_t *jobacct_gather_remove_task(pid_t pid)
 
 	/* poll data one last time before removing task
 	 * mainly for updating energy consumption */
-	_poll_data();
+	_poll_data(1);
 
 	if (jobacct_shutdown)
 		return NULL;
@@ -510,7 +484,7 @@ extern int jobacct_gather_set_proctrack_container_id(uint64_t id)
 	if (!plugin_polling || pgid_plugin)
 		return SLURM_SUCCESS;
 
-	if (cont_id != (uint64_t)NO_VAL)
+	if (cont_id != NO_VAL64)
 		info("Warning: jobacct: set_proctrack_container_id: cont_id "
 		     "is already set to %"PRIu64" you are setting it to "
 		     "%"PRIu64"", cont_id, id);
@@ -612,6 +586,7 @@ extern jobacctinfo_t *jobacctinfo_create(jobacct_id_t *jobacct_id)
 		jobacct_id = &temp_id;
 	}
 	memset(jobacct, 0, sizeof(struct jobacctinfo));
+	jobacct->dataset_id = -1;
 	jobacct->sys_cpu_sec = 0;
 	jobacct->sys_cpu_usec = 0;
 	jobacct->user_cpu_sec = 0;
@@ -677,49 +652,6 @@ extern int jobacctinfo_setinfo(jobacctinfo_t *jobacct,
 			safe_write(*fd, &len, sizeof(int));
 			safe_write(*fd, get_buf_data(buffer), len);
 			free_buf(buffer);
-		} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-			safe_write(*fd, &jobacct->user_cpu_sec,
-				   sizeof(uint32_t));
-			safe_write(*fd, &jobacct->user_cpu_usec,
-				   sizeof(uint32_t));
-			safe_write(*fd, &jobacct->sys_cpu_sec,
-				   sizeof(uint32_t));
-			safe_write(*fd, &jobacct->sys_cpu_usec,
-				   sizeof(uint32_t));
-			safe_write(*fd, &jobacct->max_vsize, sizeof(uint32_t));
-			safe_write(*fd, &jobacct->tot_vsize, sizeof(uint32_t));
-			safe_write(*fd, &jobacct->max_rss, sizeof(uint32_t));
-			safe_write(*fd, &jobacct->tot_rss, sizeof(uint32_t));
-			safe_write(*fd, &jobacct->max_pages, sizeof(uint32_t));
-			safe_write(*fd, &jobacct->tot_pages, sizeof(uint32_t));
-			safe_write(*fd, &jobacct->min_cpu, sizeof(uint32_t));
-			safe_write(*fd, &jobacct->tot_cpu, sizeof(uint32_t));
-			safe_write(*fd, &jobacct->act_cpufreq,
-				   sizeof(uint32_t));
-			safe_write(*fd, &jobacct->energy.consumed_energy,
-				   sizeof(uint32_t));
-
-			safe_write(*fd, &jobacct->max_disk_read,
-				   sizeof(double));
-			safe_write(*fd, &jobacct->tot_disk_read,
-				   sizeof(double));
-			safe_write(*fd, &jobacct->max_disk_write,
-				   sizeof(double));
-			safe_write(*fd, &jobacct->tot_disk_write,
-				   sizeof(double));
-
-			_write_jobacct_id(*fd, &jobacct->max_vsize_id,
-					  protocol_version);
-			_write_jobacct_id(*fd, &jobacct->max_rss_id,
-					  protocol_version);
-			_write_jobacct_id(*fd, &jobacct->max_pages_id,
-					  protocol_version);
-			_write_jobacct_id(*fd, &jobacct->min_cpu_id,
-					  protocol_version);
-			_write_jobacct_id(*fd, &jobacct->max_disk_read_id,
-					  protocol_version);
-			_write_jobacct_id(*fd, &jobacct->max_disk_write_id,
-					  protocol_version);
 		}
 
 		break;
@@ -765,13 +697,13 @@ extern int jobacctinfo_setinfo(jobacctinfo_t *jobacct,
 		jobacct->min_cpu_id = *jobacct_id;
 		break;
 	case JOBACCT_DATA_TOT_CPU:
-		jobacct->tot_cpu = *uint32;
+		jobacct->tot_cpu = *dub;
 		break;
 	case JOBACCT_DATA_ACT_CPUFREQ:
 		jobacct->act_cpufreq = *uint32;
 		break;
 	case JOBACCT_DATA_CONSUMED_ENERGY:
-		jobacct->energy.consumed_energy = *uint32;
+		jobacct->energy.consumed_energy = *uint64;
 		break;
 	case JOBACCT_DATA_MAX_DISK_READ:
 		jobacct->max_disk_read = *dub;
@@ -836,45 +768,6 @@ extern int jobacctinfo_getinfo(
 			jobacctinfo_unpack(&jobacct, protocol_version,
 					   PROTOCOL_TYPE_SLURM, buffer, 0);
 			free_buf(buffer);
-		} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-			safe_read(*fd, &jobacct->user_cpu_sec,
-				  sizeof(uint32_t));
-			safe_read(*fd, &jobacct->user_cpu_usec,
-				  sizeof(uint32_t));
-			safe_read(*fd, &jobacct->sys_cpu_sec, sizeof(uint32_t));
-			safe_read(*fd, &jobacct->sys_cpu_usec,
-				  sizeof(uint32_t));
-			safe_read(*fd, &jobacct->max_vsize, sizeof(uint32_t));
-			safe_read(*fd, &jobacct->tot_vsize, sizeof(uint32_t));
-			safe_read(*fd, &jobacct->max_rss, sizeof(uint32_t));
-			safe_read(*fd, &jobacct->tot_rss, sizeof(uint32_t));
-			safe_read(*fd, &jobacct->max_pages, sizeof(uint32_t));
-			safe_read(*fd, &jobacct->tot_pages, sizeof(uint32_t));
-			safe_read(*fd, &jobacct->min_cpu, sizeof(uint32_t));
-			safe_read(*fd, &jobacct->tot_cpu, sizeof(uint32_t));
-			safe_read(*fd, &jobacct->act_cpufreq, sizeof(uint32_t));
-			safe_read(*fd, &jobacct->energy.consumed_energy,
-				  sizeof(uint32_t));
-
-			safe_read(*fd, &jobacct->max_disk_read, sizeof(double));
-			safe_read(*fd, &jobacct->tot_disk_read, sizeof(double));
-			safe_read(*fd, &jobacct->max_disk_write,
-				  sizeof(double));
-			safe_read(*fd, &jobacct->tot_disk_write,
-				  sizeof(double));
-
-			_read_jobacct_id(*fd, &jobacct->max_vsize_id,
-					 protocol_version);
-			_read_jobacct_id(*fd, &jobacct->max_rss_id,
-					 protocol_version);
-			_read_jobacct_id(*fd, &jobacct->max_pages_id,
-					 protocol_version);
-			_read_jobacct_id(*fd, &jobacct->min_cpu_id,
-					 protocol_version);
-			_read_jobacct_id(*fd, &jobacct->max_disk_read_id,
-					 protocol_version);
-			_read_jobacct_id(*fd, &jobacct->max_disk_write_id,
-					 protocol_version);
 		}
 
 		break;
@@ -919,13 +812,13 @@ extern int jobacctinfo_getinfo(
 		*jobacct_id = jobacct->min_cpu_id;
 		break;
 	case JOBACCT_DATA_TOT_CPU:
-		*uint32 = jobacct->tot_cpu;
+		*dub = jobacct->tot_cpu;
 		break;
 	case JOBACCT_DATA_ACT_CPUFREQ:
 		*uint32 = jobacct->act_cpufreq;
 		break;
 	case JOBACCT_DATA_CONSUMED_ENERGY:
-		*uint32 = jobacct->energy.consumed_energy;
+		*uint64 = jobacct->energy.consumed_energy;
 		break;
 	case JOBACCT_DATA_MAX_DISK_READ:
 		*dub = jobacct->max_disk_read;
@@ -957,25 +850,11 @@ extern void jobacctinfo_pack(jobacctinfo_t *jobacct,
 			     uint16_t rpc_version, uint16_t protocol_type,
 			     Buf buffer)
 {
-	int i = 0;
 	bool no_pack;
 
 	no_pack = (!plugin_polling && (protocol_type != PROTOCOL_TYPE_DBD));
 
-	/* The function can take calls from both DBD and from regular
-	 * SLURM functions.  We choose to standardize on using the
-	 * SLURM_PROTOCOL_VERSION here so if PROTOCOL_TYPE_DBD comes
-	 * in we need to translate the DBD rpc_version to use the
-	 * SLURM protocol_version.
-	 *
-	 * If this function ever changes make sure the
-	 * slurmdbd_translate_rpc function has been updated with the
-	 * new protocol version.
-	 */
-	if (protocol_type == PROTOCOL_TYPE_DBD)
-		rpc_version = slurmdbd_translate_rpc(rpc_version);
-
-	if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		if (!jobacct || no_pack) {
 			pack8((uint8_t) 0, buffer);
 			return;
@@ -993,9 +872,9 @@ extern void jobacctinfo_pack(jobacctinfo_t *jobacct,
 		pack64(jobacct->max_pages, buffer);
 		pack64(jobacct->tot_pages, buffer);
 		pack32((uint32_t)jobacct->min_cpu, buffer);
-		pack32((uint32_t)jobacct->tot_cpu, buffer);
+		packdouble(jobacct->tot_cpu, buffer);
 		pack32((uint32_t)jobacct->act_cpufreq, buffer);
-		pack32((uint32_t)jobacct->energy.consumed_energy, buffer);
+		pack64((uint64_t)jobacct->energy.consumed_energy, buffer);
 
 		packdouble((double)jobacct->max_disk_read, buffer);
 		packdouble((double)jobacct->tot_disk_read, buffer);
@@ -1010,29 +889,23 @@ extern void jobacctinfo_pack(jobacctinfo_t *jobacct,
 			buffer);
 		_pack_jobacct_id(&jobacct->max_disk_write_id, rpc_version,
 			buffer);
-	} else if (rpc_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		if (no_pack)
-			return;
-		if (!jobacct) {
-			for (i = 0; i < 14; i++)
-				pack32((uint32_t) 0, buffer);
-			for (i = 0; i < 4; i++)
-				packdouble((double) 0, buffer);
-			for (i = 0; i < 6; i++)
-				_pack_jobacct_id(NULL, rpc_version, buffer);
+	} else if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		if (!jobacct || no_pack) {
+			pack8((uint8_t) 0, buffer);
 			return;
 		}
+		pack8((uint8_t) 1, buffer);
 
 		pack32((uint32_t)jobacct->user_cpu_sec, buffer);
 		pack32((uint32_t)jobacct->user_cpu_usec, buffer);
 		pack32((uint32_t)jobacct->sys_cpu_sec, buffer);
 		pack32((uint32_t)jobacct->sys_cpu_usec, buffer);
-		pack32((uint32_t)jobacct->max_vsize, buffer);
-		pack32((uint32_t)jobacct->tot_vsize, buffer);
-		pack32((uint32_t)jobacct->max_rss, buffer);
-		pack32((uint32_t)jobacct->tot_rss, buffer);
-		pack32((uint32_t)jobacct->max_pages, buffer);
-		pack32((uint32_t)jobacct->tot_pages, buffer);
+		pack64(jobacct->max_vsize, buffer);
+		pack64(jobacct->tot_vsize, buffer);
+		pack64(jobacct->max_rss, buffer);
+		pack64(jobacct->tot_rss, buffer);
+		pack64(jobacct->max_pages, buffer);
+		pack64(jobacct->tot_pages, buffer);
 		pack32((uint32_t)jobacct->min_cpu, buffer);
 		pack32((uint32_t)jobacct->tot_cpu, buffer);
 		pack32((uint32_t)jobacct->act_cpufreq, buffer);
@@ -1063,25 +936,10 @@ extern int jobacctinfo_unpack(jobacctinfo_t **jobacct,
 {
 	uint32_t uint32_tmp;
 	uint8_t  uint8_tmp;
-	bool no_pack;
 
 	jobacct_gather_init();
-	no_pack = (!plugin_polling && (protocol_type != PROTOCOL_TYPE_DBD));
-
-	/* The function can take calls from both DBD and from regular
-	 * SLURM functions.  We choose to standardize on using the
-	 * SLURM_PROTOCOL_VERSION here so if PROTOCOL_TYPE_DBD comes
-	 * in we need to translate the DBD rpc_version to use the
-	 * SLURM protocol_version.
-	 *
-	 * If this function ever changes make sure the
-	 * slurmdbd_translate_rpc function has been updated with the
-	 * new protocol version.
-	 */
-	if (protocol_type == PROTOCOL_TYPE_DBD)
-		rpc_version = slurmdbd_translate_rpc(rpc_version);
 
-	if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack8(&uint8_tmp, buffer);
 		if (uint8_tmp == (uint8_t) 0)
 			return SLURM_SUCCESS;
@@ -1102,9 +960,9 @@ extern int jobacctinfo_unpack(jobacctinfo_t **jobacct,
 		safe_unpack64(&(*jobacct)->max_pages, buffer);
 		safe_unpack64(&(*jobacct)->tot_pages, buffer);
 		safe_unpack32(&(*jobacct)->min_cpu, buffer);
-		safe_unpack32(&(*jobacct)->tot_cpu, buffer);
+		safe_unpackdouble(&(*jobacct)->tot_cpu, buffer);
 		safe_unpack32(&(*jobacct)->act_cpufreq, buffer);
-		safe_unpack32(&(*jobacct)->energy.consumed_energy, buffer);
+		safe_unpack64(&(*jobacct)->energy.consumed_energy, buffer);
 
 		safe_unpackdouble(&(*jobacct)->max_disk_read, buffer);
 		safe_unpackdouble(&(*jobacct)->tot_disk_read, buffer);
@@ -1129,8 +987,9 @@ extern int jobacctinfo_unpack(jobacctinfo_t **jobacct,
 		if (_unpack_jobacct_id(&(*jobacct)->max_disk_write_id,
 			rpc_version, buffer) != SLURM_SUCCESS)
 			goto unpack_error;
-	} else if (rpc_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		if (no_pack)
+	} else if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		safe_unpack8(&uint8_tmp, buffer);
+		if (uint8_tmp == (uint8_t) 0)
 			return SLURM_SUCCESS;
 		if (alloc)
 			*jobacct = xmalloc(sizeof(struct jobacctinfo));
@@ -1142,16 +1001,18 @@ extern int jobacctinfo_unpack(jobacctinfo_t **jobacct,
 		(*jobacct)->sys_cpu_sec = uint32_tmp;
 		safe_unpack32(&uint32_tmp, buffer);
 		(*jobacct)->sys_cpu_usec = uint32_tmp;
-		safe_unpack32((uint32_t *)&(*jobacct)->max_vsize, buffer);
-		safe_unpack32((uint32_t *)&(*jobacct)->tot_vsize, buffer);
-		safe_unpack32((uint32_t *)&(*jobacct)->max_rss, buffer);
-		safe_unpack32((uint32_t *)&(*jobacct)->tot_rss, buffer);
-		safe_unpack32((uint32_t *)&(*jobacct)->max_pages, buffer);
-		safe_unpack32((uint32_t *)&(*jobacct)->tot_pages, buffer);
+		safe_unpack64(&(*jobacct)->max_vsize, buffer);
+		safe_unpack64(&(*jobacct)->tot_vsize, buffer);
+		safe_unpack64(&(*jobacct)->max_rss, buffer);
+		safe_unpack64(&(*jobacct)->tot_rss, buffer);
+		safe_unpack64(&(*jobacct)->max_pages, buffer);
+		safe_unpack64(&(*jobacct)->tot_pages, buffer);
 		safe_unpack32(&(*jobacct)->min_cpu, buffer);
-		safe_unpack32(&(*jobacct)->tot_cpu, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		(*jobacct)->tot_cpu = (double)uint32_tmp;
 		safe_unpack32(&(*jobacct)->act_cpufreq, buffer);
-		safe_unpack32(&(*jobacct)->energy.consumed_energy, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		(*jobacct)->energy.consumed_energy = (uint64_t) uint32_tmp;
 
 		safe_unpackdouble(&(*jobacct)->max_disk_read, buffer);
 		safe_unpackdouble(&(*jobacct)->tot_disk_read, buffer);
@@ -1295,10 +1156,10 @@ extern void jobacctinfo_2_stats(slurmdb_stats_t *stats, jobacctinfo_t *jobacct)
 	stats->cpu_min = jobacct->min_cpu;
 	stats->cpu_min_nodeid = jobacct->min_cpu_id.nodeid;
 	stats->cpu_min_taskid = jobacct->min_cpu_id.taskid;
-	stats->cpu_ave = (double)jobacct->tot_cpu;
+	stats->cpu_ave = jobacct->tot_cpu;
 	stats->act_cpufreq = (double)jobacct->act_cpufreq;
 	if (jobacct->energy.consumed_energy == NO_VAL)
-		stats->consumed_energy = (double)NO_VAL;
+		stats->consumed_energy = NO_VAL64;
 	else
 		stats->consumed_energy =
 			(double)jobacct->energy.consumed_energy;
diff --git a/src/common/slurm_jobacct_gather.h b/src/common/slurm_jobacct_gather.h
index e5270f529..93696d154 100644
--- a/src/common/slurm_jobacct_gather.h
+++ b/src/common/slurm_jobacct_gather.h
@@ -115,11 +115,11 @@ struct jobacctinfo {
 			     (used to figure out ave later) */
 	uint32_t min_cpu; /* min cpu time */
 	jobacct_id_t min_cpu_id; /* contains which task it was on */
-	uint32_t tot_cpu; /* total cpu time(used to figure out ave later) */
+	double tot_cpu; /* total cpu time(used to figure out ave later) */
 	uint32_t act_cpufreq; /* actual cpu frequency */
 	acct_gather_energy_t energy;
-	uint32_t last_total_cputime;
-	uint32_t this_sampled_cputime;
+	double last_total_cputime;
+	double this_sampled_cputime;
 	uint32_t current_weighted_freq;
 	uint32_t current_weighted_power;
 	double max_disk_read; /* max disk read data */
@@ -128,6 +128,14 @@ struct jobacctinfo {
 	double max_disk_write; /* max disk write data */
 	jobacct_id_t max_disk_write_id; /* max disk write data task id */
 	double tot_disk_write; /* total local disk writes in megabytes */
+
+	jobacct_id_t id;
+	int dataset_id; /* dataset associated to this task when profiling */
+
+	double last_tot_disk_read;
+	double last_tot_disk_write;
+	time_t cur_time;
+	time_t last_time;
 };
 
 /* Define jobacctinfo_t below to avoid including extraneous slurm headers */
diff --git a/src/common/slurm_priority.c b/src/common/slurm_priority.c
index 1ce4e8bec..a286778d4 100644
--- a/src/common/slurm_priority.c
+++ b/src/common/slurm_priority.c
@@ -45,7 +45,7 @@ typedef struct slurm_priority_ops {
 	uint32_t (*set)            (uint32_t last_prio,
 				    struct job_record *job_ptr);
 	void     (*reconfig)       (bool assoc_clear);
-	void     (*set_assoc_usage)(slurmdb_association_rec_t *assoc);
+	void     (*set_assoc_usage)(slurmdb_assoc_rec_t *assoc);
 	double   (*calc_fs_factor) (long double usage_efctv,
 				    long double shares_norm);
 	List	 (*get_priority_factors)
@@ -136,7 +136,7 @@ extern void priority_g_reconfig(bool assoc_clear)
 	return;
 }
 
-extern void priority_g_set_assoc_usage(slurmdb_association_rec_t *assoc)
+extern void priority_g_set_assoc_usage(slurmdb_assoc_rec_t *assoc)
 {
 	if (slurm_priority_init() < 0)
 		return;
diff --git a/src/common/slurm_priority.h b/src/common/slurm_priority.h
index 0c8978cd8..173e5166f 100644
--- a/src/common/slurm_priority.h
+++ b/src/common/slurm_priority.h
@@ -61,7 +61,7 @@ extern void priority_g_reconfig(bool assoc_clear);
  * association.
  * IN/OUT: assoc - association to have usage set.
  */
-extern void priority_g_set_assoc_usage(slurmdb_association_rec_t *assoc);
+extern void priority_g_set_assoc_usage(slurmdb_assoc_rec_t *assoc);
 extern double priority_g_calc_fs_factor(long double usage_efctv,
 					long double shares_norm);
 extern List priority_g_get_priority_factors_list(
diff --git a/src/common/slurm_protocol_api.c b/src/common/slurm_protocol_api.c
index f951404ab..e84c73e5d 100644
--- a/src/common/slurm_protocol_api.c
+++ b/src/common/slurm_protocol_api.c
@@ -3,6 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Copyright (C) 2010-2015 SchedMD LLC.
  *  Copyright (C) 2013      Intel, Inc.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov>, et. al.
@@ -59,6 +60,7 @@
 #include <ctype.h>
 
 /* PROJECT INCLUDES */
+#include "src/common/assoc_mgr.h"
 #include "src/common/fd.h"
 #include "src/common/macros.h"
 #include "src/common/pack.h"
@@ -69,12 +71,15 @@
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_protocol_common.h"
 #include "src/common/slurm_protocol_pack.h"
+#include "src/common/slurm_route.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
 #include "src/common/log.h"
 #include "src/common/forward.h"
+#include "src/common/msg_aggr.h"
 #include "src/slurmdbd/read_config.h"
 #include "src/common/slurm_accounting_storage.h"
+#include "src/common/slurm_strcasestr.h"
 
 /* EXTERNAL VARIABLES */
 
@@ -94,6 +99,7 @@ static char *_global_auth_key(void);
 static void  _remap_slurmctld_errno(void);
 static int   _unpack_msg_uid(Buf buffer);
 static bool  _is_port_ok(int, uint16_t);
+static void _slurm_set_addr_any(slurm_addr_t * slurm_address, uint16_t port);
 
 #if _DEBUG
 static void _print_data(char *data, int len);
@@ -213,6 +219,23 @@ uint32_t slurm_get_cpu_freq_def(void)
 	return cpu_freq_def;
 }
 
+/* slurm_get_cpu_freq_govs
+ * RET CpuFreqGovernors value from slurm.conf
+ */
+uint32_t slurm_get_cpu_freq_govs(void)
+{
+	uint32_t cpu_freq_govs = 0;
+	slurm_ctl_conf_t *conf;
+
+	if (slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		cpu_freq_govs = conf->cpu_freq_govs;
+		slurm_conf_unlock();
+	}
+	return cpu_freq_govs;
+}
+
 /* update internal configuration data structure as needed.
  *	exit with lock set */
 /* static inline void _lock_update_config() */
@@ -477,6 +500,25 @@ char *slurm_get_mpi_params(void)
 	return mpi_params;
 }
 
+/* slurm_get_msg_aggr_params
+ * get message aggregation parameters value from slurmctld_conf object
+ * RET char *   - message aggregation value from slurm.conf,
+ * MUST be xfreed by caller
+ */
+char *slurm_get_msg_aggr_params(void)
+{
+	char *msg_aggr_params = NULL;
+	slurm_ctl_conf_t *conf;
+
+	if (slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		msg_aggr_params = xstrdup(conf->msg_aggr_params);
+		slurm_conf_unlock();
+	}
+	return msg_aggr_params;
+}
+
 /* slurm_get_msg_timeout
  * get default message timeout value from slurmctld_conf object
  */
@@ -763,6 +805,102 @@ uint32_t slurm_get_priority_weight_qos(void)
 	return factor;
 }
 
+/* slurm_get_priority_weight_tres
+ * returns the priority weights for TRES' from slurmctld_conf object
+ * RET char * string of configured tres weights. MUST be xfreed by caller
+ */
+char *slurm_get_priority_weight_tres(void)
+{
+	char *weights = NULL;
+	slurm_ctl_conf_t *conf;
+
+	if (slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		weights = xstrdup(conf->priority_weight_tres);
+		slurm_conf_unlock();
+	}
+
+	return weights;
+}
+
+static int _get_tres_id(char *type, char *name)
+{
+	slurmdb_tres_rec_t tres_rec;
+	memset(&tres_rec, 0, sizeof(slurmdb_tres_rec_t));
+	tres_rec.type = type;
+	tres_rec.name = name;
+
+	return assoc_mgr_find_tres_pos(&tres_rec, false);
+}
+
+static int _tres_weight_item(double *weights, char *item_str)
+{
+	char *type = NULL, *value = NULL, *name = NULL;
+	int tres_id;
+
+	if (!item_str) {
+		error("TRES weight item is null");
+		return SLURM_ERROR;
+	}
+
+	type = strtok_r(item_str, "=", &value);
+	if (strchr(type, '/'))
+		type = strtok_r(type, "/", &name);
+
+	if (!value || !*value) {
+		error("\"%s\" is an invalid TRES weight entry", item_str);
+		return SLURM_ERROR;
+	}
+
+	if ((tres_id = _get_tres_id(type, name)) == -1) {
+		error("TRES weight '%s%s%s' is not a configured TRES type.",
+		      type, (name) ? ":" : "", (name) ? name : "");
+		return SLURM_ERROR;
+	}
+
+	errno = 0;
+	weights[tres_id] = strtod(value, NULL);
+	if(errno) {
+		error("Unable to convert %s value to double in %s",
+		      __func__, value);
+		return SLURM_ERROR;
+	}
+
+	return SLURM_SUCCESS;
+}
+
+/* slurm_get_priority_weight_tres_array
+ * IN weights_str - string of tres and weights to be parsed.
+ * IN tres_cnt - count of how many tres' are on the system (e.g.
+ * 		slurmctld_tres_cnt).
+ * RET double* of tres weights.
+ */
+double *slurm_get_tres_weight_array(char *weights_str, int tres_cnt)
+{
+	double *weights;
+	char *tmp_str = xstrdup(weights_str);
+	char *token, *last = NULL;
+
+	if (!weights_str || !*weights_str || !tres_cnt)
+		return NULL;
+
+	weights = xmalloc(sizeof(double) * tres_cnt);
+
+	token = strtok_r(tmp_str, ",", &last);
+	while (token) {
+		if (_tres_weight_item(weights, token)) {
+			xfree(weights);
+			xfree(tmp_str);
+			fatal("failed to parse tres weights str '%s'",
+			      weights_str);
+			return NULL;
+		}
+		token = strtok_r(NULL, ",", &last);
+	}
+	xfree(tmp_str);
+	return weights;
+}
 
 /* slurm_get_private_data
  * get private data from slurmctld_conf object
@@ -819,6 +957,24 @@ char *slurm_get_auth_type(void)
 	return auth_type;
 }
 
+/* slurm_get_bb_type
+ * returns the BurstBufferType (bb_type) from slurmctld_conf object
+ * RET char *    - BurstBufferType, MUST be xfreed by caller
+ */
+extern char *slurm_get_bb_type(void)
+{
+	char *bb_type = NULL;
+	slurm_ctl_conf_t *conf = NULL;
+
+	if (slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		bb_type = xstrdup(conf->bb_type);
+		slurm_conf_unlock();
+	}
+	return bb_type;
+}
+
 /* slurm_get_checkpoint_type
  * returns the checkpoint_type from slurmctld_conf object
  * RET char *    - checkpoint type, MUST be xfreed by caller
@@ -873,6 +1029,76 @@ extern char *slurm_get_crypto_type(void)
 	return crypto_type;
 }
 
+/* slurm_get_power_parameters
+ * returns the PowerParameters from slurmctld_conf object
+ * RET char *    - PowerParameters, MUST be xfreed by caller
+ */
+extern char *slurm_get_power_parameters(void)
+{
+	char *power_parameters = NULL;
+	slurm_ctl_conf_t *conf;
+
+	if (slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		power_parameters = xstrdup(conf->power_parameters);
+		slurm_conf_unlock();
+	}
+	return power_parameters;
+}
+
+/* slurm_set_power_parameters
+ * reset the PowerParameters object
+ */
+extern void slurm_set_power_parameters(char *power_parameters)
+{
+	slurm_ctl_conf_t *conf;
+
+	if (slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		xfree(conf->power_parameters);
+		conf->power_parameters = xstrdup(power_parameters);
+		slurm_conf_unlock();
+	}
+}
+
+/* slurm_get_power_plugin
+ * returns the PowerPlugin from slurmctld_conf object
+ * RET char *    - PowerPlugin, MUST be xfreed by caller
+ */
+extern char *slurm_get_power_plugin(void)
+{
+	char *power_plugin = NULL;
+	slurm_ctl_conf_t *conf;
+
+	if (slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		power_plugin = xstrdup(conf->power_plugin);
+		slurm_conf_unlock();
+	}
+	return power_plugin;
+}
+
+/* slurm_get_topology_param
+ * returns the value of topology_param in slurmctld_conf object
+ * RET char *    - topology parameters, MUST be xfreed by caller
+ */
+extern char * slurm_get_topology_param(void)
+{
+	char *topology_param = NULL;
+	slurm_ctl_conf_t *conf;
+
+	if (slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		topology_param = xstrdup(conf->topology_param);
+		slurm_conf_unlock();
+	}
+	return topology_param;
+}
+
 /* slurm_get_topology_plugin
  * returns the value of topology_plugin in slurmctld_conf object
  * RET char *    - topology type, MUST be xfreed by caller
@@ -1190,6 +1416,26 @@ char *slurm_get_accounting_storage_type(void)
 
 }
 
+/* slurm_get_accounting_storage_tres
+ * returns the accounting storage tres from slurmctld_conf object
+ * RET char *    - accounting storage tres,  MUST be xfreed by caller
+ */
+char *slurm_get_accounting_storage_tres(void)
+{
+	char *accounting_tres;
+	slurm_ctl_conf_t *conf;
+
+	if (slurmdbd_conf) {
+		accounting_tres = NULL;
+	} else {
+		conf = slurm_conf_lock();
+		accounting_tres = xstrdup(conf->accounting_storage_tres);
+		slurm_conf_unlock();
+	}
+	return accounting_tres;
+
+}
+
 /* slurm_get_accounting_storage_user
  * returns the storage user from slurmctld_conf object
  * RET char *    - storage user,  MUST be xfreed by caller
@@ -1389,22 +1635,18 @@ char *slurm_get_accounting_storage_pass(void)
 /* slurm_get_auth_info
  * returns the auth_info from slurmctld_conf object (AuthInfo parameter)
  * cache value in local buffer for best performance
- * RET char *    - auth info,  MUST be xfreed by caller
+ * WARNING: The return of this function can be used in many different
+ * places and SHOULD NOT BE FREED!
  */
 extern char *slurm_get_auth_info(void)
 {
-	static bool loaded_auth_info = false;
-	static char *auth_info = NULL;
+	char *auth_info;
 	slurm_ctl_conf_t *conf;
 
-	if (loaded_auth_info)
-		return auth_info;
-
 	conf = slurm_conf_lock();
 	auth_info = xstrdup(conf->authinfo);
 	slurm_conf_unlock();
 
-	loaded_auth_info = true;
 	return auth_info;
 }
 
@@ -1433,6 +1675,7 @@ extern int slurm_get_auth_ttl(void)
 	} else {
 		ttl = 0;
 	}
+	xfree(auth_info);
 
 	return ttl;
 }
@@ -1884,6 +2127,24 @@ uint16_t slurm_get_kill_wait(void)
 	return kill_wait;
 }
 
+/* slurm_get_launch_params
+ * get launch_params from slurmctld_conf object
+ * RET char *   - launch_params, MUST be xfreed by caller
+ */
+char *slurm_get_launch_params(void)
+{
+	char *launch_params = NULL;
+	slurm_ctl_conf_t *conf;
+
+	if (slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		launch_params = xstrdup(conf->launch_params);
+		slurm_conf_unlock();
+	}
+	return launch_params;
+}
+
 /* slurm_get_launch_type
  * get launch_type from slurmctld_conf object
  * RET char *   - launch_type, MUST be xfreed by caller
@@ -2259,9 +2520,9 @@ char *slurm_get_task_plugin(void)
 }
 
 /* slurm_get_task_plugin_param */
-uint16_t slurm_get_task_plugin_param(void)
+uint32_t slurm_get_task_plugin_param(void)
 {
-	uint16_t task_plugin_param = 0;
+	uint32_t task_plugin_param = 0;
 	slurm_ctl_conf_t *conf;
 
 	if (slurmdbd_conf) {
@@ -2347,6 +2608,23 @@ char *slurm_get_layouts(void)
 	return layouts;
 }
 
+/*  slurm_get_srun_eio_timeout()
+ */
+int16_t
+slurm_get_srun_eio_timeout(void)
+{
+	int16_t eio_timeout = 0;
+	slurm_ctl_conf_t *conf;
+
+	if (slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		eio_timeout = conf->eio_timeout;
+		slurm_conf_unlock();
+	}
+	return eio_timeout;
+}
+
 /* Change general slurm communication errors to slurmctld specific errors */
 static void _remap_slurmctld_errno(void)
 {
@@ -2381,7 +2659,7 @@ slurm_fd_t slurm_init_msg_engine_port(uint16_t port)
 
 	cnt = 0;
 eagain:
-	slurm_set_addr_any(&addr, port);
+	slurm_setup_sockaddr(&addr, port);
 	cc = slurm_init_msg_engine(&addr);
 	if (cc < 0 && port == 0) {
 		++cnt;
@@ -2439,15 +2717,26 @@ slurm_init_msg_engine_ports(uint16_t *ports)
 slurm_fd_t slurm_init_msg_engine_addrname_port(char *addr_name, uint16_t port)
 {
 	slurm_addr_t addr;
+	static uint32_t bind_addr = NO_VAL;
 
+	if (bind_addr == NO_VAL) {
 #ifdef BIND_SPECIFIC_ADDR
-	if (addr_name != NULL)
-		slurm_set_addr(&addr, port, addr_name);
-	else
-		slurm_set_addr_any(&addr, port);
+		bind_addr = 1;
 #else
-	slurm_set_addr_any(&addr, port);
+		char *topology_params = slurm_get_topology_param();
+		if (topology_params &&
+		    slurm_strcasestr(topology_params, "NoInAddrAny"))
+			bind_addr = 1;
+		else
+			bind_addr = 0;
+		xfree(topology_params);
 #endif
+	}
+
+	if (addr_name)
+		slurm_set_addr(&addr, port, addr_name);
+	else
+		_slurm_set_addr_any(&addr, port);
 
 	return slurm_init_msg_engine(&addr);
 }
@@ -2461,7 +2750,7 @@ slurm_fd_t slurm_init_msg_engine_addrname_port(char *addr_name, uint16_t port)
  */
 int slurm_shutdown_msg_engine(slurm_fd_t fd)
 {
-	int rc = _slurm_close(fd);
+	int rc = slurm_close(fd);
 	if (rc)
 		slurm_seterrno(SLURM_COMMUNICATIONS_SHUTDOWN_ERROR);
 	return rc;
@@ -2476,7 +2765,7 @@ int slurm_shutdown_msg_engine(slurm_fd_t fd)
  */
 int slurm_shutdown_msg_conn(slurm_fd_t fd)
 {
-	return _slurm_close(fd);
+	return slurm_close(fd);
 }
 
 /**********************************************************************\
@@ -2615,16 +2904,6 @@ slurm_fd_t slurm_open_controller_conn_spec(enum controller_id dest)
 	return rc;
 }
 
-/* In the bsd implmentation maps directly to a close call, to close
- *	the socket that was accepted
- * IN open_fd		- an open file descriptor to close
- * RET int		- the return code
- */
-int slurm_close_accepted_conn(slurm_fd_t open_fd)
-{
-	return _slurm_close_accepted_conn(open_fd);
-}
-
 /**********************************************************************\
  * receive message functions
 \**********************************************************************/
@@ -2647,7 +2926,6 @@ int slurm_receive_msg(slurm_fd_t fd, slurm_msg_t *msg, int timeout)
 	Buf buffer;
 
 	xassert(fd >= 0);
-
 	slurm_msg_t_init(msg);
 	msg->conn_fd = fd;
 
@@ -2668,7 +2946,7 @@ int slurm_receive_msg(slurm_fd_t fd, slurm_msg_t *msg, int timeout)
 	 *  length and allocate space on the heap for a buffer containing
 	 *  the message.
 	 */
-	if (_slurm_msg_recvfrom_timeout(fd, &buf, &buflen, 0, timeout) < 0) {
+	if (slurm_msg_recvfrom_timeout(fd, &buf, &buflen, 0, timeout) < 0) {
 		forward_init(&header.forward, NULL);
 		rc = errno;
 		goto total_return;
@@ -2710,7 +2988,7 @@ int slurm_receive_msg(slurm_fd_t fd, slurm_msg_t *msg, int timeout)
 		error("%s: we received more than one message back use "
 		      "slurm_receive_msgs instead", __func__);
 		header.ret_cnt = 0;
-		list_destroy(header.ret_list);
+		FREE_NULL_LIST(header.ret_list);
 		header.ret_list = NULL;
 	}
 
@@ -2731,8 +3009,9 @@ int slurm_receive_msg(slurm_fd_t fd, slurm_msg_t *msg, int timeout)
 		rc = g_slurm_auth_verify( auth_cred, NULL, 2,
 					  _global_auth_key() );
 	} else {
-		rc = g_slurm_auth_verify( auth_cred, NULL, 2,
-					  slurm_get_auth_info() );
+		char *auth_info = slurm_get_auth_info();
+		rc = g_slurm_auth_verify( auth_cred, NULL, 2, auth_info );
+		xfree(auth_info);
 	}
 
 	if (rc != SLURM_SUCCESS) {
@@ -2846,7 +3125,7 @@ List slurm_receive_msgs(slurm_fd_t fd, int steps, int timeout)
 	 *  length and allocate space on the heap for a buffer containing
 	 *  the message.
 	 */
-	if (_slurm_msg_recvfrom_timeout(fd, &buf, &buflen, 0, timeout) < 0) {
+	if (slurm_msg_recvfrom_timeout(fd, &buf, &buflen, 0, timeout) < 0) {
 		forward_init(&header.forward, NULL);
 		rc = errno;
 		goto total_return;
@@ -2909,8 +3188,9 @@ List slurm_receive_msgs(slurm_fd_t fd, int steps, int timeout)
 		rc = g_slurm_auth_verify( auth_cred, NULL, 2,
 					  _global_auth_key() );
 	} else {
-		rc = g_slurm_auth_verify( auth_cred, NULL, 2,
-					  slurm_get_auth_info() );
+		char *auth_info = slurm_get_auth_info();
+		rc = g_slurm_auth_verify( auth_cred, NULL, 2, auth_info );
+		xfree(auth_info);
 	}
 
 	if (rc != SLURM_SUCCESS) {
@@ -2980,7 +3260,7 @@ static int _unpack_msg_uid(Buf buffer)
 
 	if ((auth_cred = g_slurm_auth_unpack(buffer)) == NULL)
 		return uid;
-	uid = (int) g_slurm_auth_get_uid(auth_cred, NULL);
+	uid = (int) g_slurm_auth_get_uid(auth_cred, slurm_get_auth_info());
 	g_slurm_auth_destroy(auth_cred);
 
 	return uid;
@@ -3044,7 +3324,7 @@ int slurm_receive_msg_and_forward(slurm_fd_t fd, slurm_addr_t *orig_addr,
 	 *  length and allocate space on the heap for a buffer containing
 	 *  the message.
 	 */
-	if (_slurm_msg_recvfrom_timeout(fd, &buf, &buflen, 0, timeout) < 0) {
+	if (slurm_msg_recvfrom_timeout(fd, &buf, &buflen, 0, timeout) < 0) {
 		forward_init(&header.forward, NULL);
 		rc = errno;
 		goto total_return;
@@ -3085,7 +3365,7 @@ int slurm_receive_msg_and_forward(slurm_fd_t fd, slurm_addr_t *orig_addr,
 		error("we received more than one message back use "
 		      "slurm_receive_msgs instead");
 		header.ret_cnt = 0;
-		list_destroy(header.ret_list);
+		FREE_NULL_LIST(header.ret_list);
 		header.ret_list = NULL;
 	}
 	//info("ret_cnt = %d",header.ret_cnt);
@@ -3093,7 +3373,7 @@ int slurm_receive_msg_and_forward(slurm_fd_t fd, slurm_addr_t *orig_addr,
 /* 		while ((ret_data_info = list_pop(header.ret_list))) */
 /* 			list_push(msg->ret_list, ret_data_info); */
 /* 		header.ret_cnt = 0; */
-/* 		list_destroy(header.ret_list); */
+/* 		FREE_NULL_LIST(header.ret_list); */
 /* 		header.ret_list = NULL; */
 /* 	} */
 	/*
@@ -3114,9 +3394,6 @@ int slurm_receive_msg_and_forward(slurm_fd_t fd, slurm_addr_t *orig_addr,
 		slurm_mutex_init(&msg->forward_struct->forward_mutex);
 		pthread_cond_init(&msg->forward_struct->notify, NULL);
 
-		msg->forward_struct->forward_msg =
-			xmalloc(sizeof(forward_msg_t) * header.forward.cnt);
-
 		msg->forward_struct->buf_len = remaining_buf(buffer);
 		msg->forward_struct->buf =
 			xmalloc(sizeof(char) * msg->forward_struct->buf_len);
@@ -3151,8 +3428,9 @@ int slurm_receive_msg_and_forward(slurm_fd_t fd, slurm_addr_t *orig_addr,
 		rc = g_slurm_auth_verify( auth_cred, NULL, 2,
 					  _global_auth_key() );
 	} else {
-		rc = g_slurm_auth_verify( auth_cred, NULL, 2,
-					  slurm_get_auth_info() );
+		char *auth_info = slurm_get_auth_info();
+		rc = g_slurm_auth_verify( auth_cred, NULL, 2, auth_info );
+		xfree(auth_info);
 	}
 
 	if (rc != SLURM_SUCCESS) {
@@ -3171,6 +3449,11 @@ int slurm_receive_msg_and_forward(slurm_fd_t fd, slurm_addr_t *orig_addr,
 	msg->msg_type = header.msg_type;
 	msg->flags = header.flags;
 
+	if (header.msg_type == MESSAGE_COMPOSITE) {
+		msg_aggr_add_comp(buffer, auth_cred, &header);
+		goto total_return;
+	}
+
 	if ( (header.body_length > remaining_buf(buffer)) ||
 	     (unpack_msg(msg, buffer) != SLURM_SUCCESS) ) {
 		(void) g_slurm_auth_destroy(auth_cred);
@@ -3178,7 +3461,6 @@ int slurm_receive_msg_and_forward(slurm_fd_t fd, slurm_addr_t *orig_addr,
 		rc = ESLURM_PROTOCOL_INCOMPLETE_PACKET;
 		goto total_return;
 	}
-
 	msg->auth_cred = (void *) auth_cred;
 
 	free_buf(buffer);
@@ -3248,10 +3530,13 @@ int slurm_send_node_msg(slurm_fd_t fd, slurm_msg_t * msg)
 	 * but we may need to generate the credential again later if we
 	 * wait too long for the incoming message.
 	 */
-	if (msg->flags & SLURM_GLOBAL_AUTH_KEY)
+	if (msg->flags & SLURM_GLOBAL_AUTH_KEY) {
 		auth_cred = g_slurm_auth_create(NULL, 2, _global_auth_key());
-	else
-		auth_cred = g_slurm_auth_create(NULL, 2, slurm_get_auth_info());
+	} else {
+		char *auth_info = slurm_get_auth_info();
+		auth_cred = g_slurm_auth_create(NULL, 2, auth_info);
+		xfree(auth_info);
+	}
 
 	if (msg->forward.init != FORWARD_INIT) {
 		forward_init(&msg->forward, NULL);
@@ -3265,8 +3550,9 @@ int slurm_send_node_msg(slurm_fd_t fd, slurm_msg_t * msg)
 			auth_cred = g_slurm_auth_create(NULL, 2,
 							_global_auth_key());
 		} else {
-			auth_cred = g_slurm_auth_create(NULL, 2,
-							slurm_get_auth_info());
+			char *auth_info = slurm_get_auth_info();
+			auth_cred = g_slurm_auth_create(NULL, 2, auth_info);
+			xfree(auth_info);
 		}
 	}
 	if (auth_cred == NULL) {
@@ -3306,9 +3592,9 @@ int slurm_send_node_msg(slurm_fd_t fd, slurm_msg_t * msg)
 	/*
 	 * Send message
 	 */
-	rc = _slurm_msg_sendto( fd, get_buf_data(buffer),
-				get_buf_offset(buffer),
-				SLURM_PROTOCOL_NO_SEND_RECV_FLAGS );
+	rc = slurm_msg_sendto( fd, get_buf_data(buffer),
+			       get_buf_offset(buffer),
+			       SLURM_PROTOCOL_NO_SEND_RECV_FLAGS );
 
 	if ((rc < 0) && (errno == ENOTCONN)) {
 		debug3("slurm_msg_sendto: peer has disappeared for msg_type=%u",
@@ -3349,16 +3635,16 @@ int slurm_send_node_msg(slurm_fd_t fd, slurm_msg_t * msg)
  */
 size_t slurm_write_stream(slurm_fd_t open_fd, char *buffer, size_t size)
 {
-	return _slurm_send_timeout(open_fd, buffer, size,
-				   SLURM_PROTOCOL_NO_SEND_RECV_FLAGS,
-				   (slurm_get_msg_timeout() * 1000));
+	return slurm_send_timeout(open_fd, buffer, size,
+				  SLURM_PROTOCOL_NO_SEND_RECV_FLAGS,
+				  (slurm_get_msg_timeout() * 1000));
 }
 size_t slurm_write_stream_timeout(slurm_fd_t open_fd, char *buffer,
 				  size_t size, int timeout)
 {
-	return _slurm_send_timeout(open_fd, buffer, size,
-				   SLURM_PROTOCOL_NO_SEND_RECV_FLAGS,
-				   timeout);
+	return slurm_send_timeout(open_fd, buffer, size,
+				  SLURM_PROTOCOL_NO_SEND_RECV_FLAGS,
+				  timeout);
 }
 
 /* slurm_read_stream
@@ -3371,76 +3657,30 @@ size_t slurm_write_stream_timeout(slurm_fd_t open_fd, char *buffer,
  */
 size_t slurm_read_stream(slurm_fd_t open_fd, char *buffer, size_t size)
 {
-	return _slurm_recv_timeout(open_fd, buffer, size,
+	return slurm_recv_timeout(open_fd, buffer, size,
 				   SLURM_PROTOCOL_NO_SEND_RECV_FLAGS,
 				   (slurm_get_msg_timeout() * 1000));
 }
 size_t slurm_read_stream_timeout(slurm_fd_t open_fd, char *buffer,
 				 size_t size, int timeout)
 {
-	return _slurm_recv_timeout(open_fd, buffer, size,
+	return slurm_recv_timeout(open_fd, buffer, size,
 				   SLURM_PROTOCOL_NO_SEND_RECV_FLAGS,
 				   timeout);
 }
 
-/* slurm_get_stream_addr
- * esentially a encapsilated get_sockname
- * IN open_fd		- file descriptor to retreive slurm_addr_t for
- * OUT address		- address that open_fd to bound to
- */
-int slurm_get_stream_addr(slurm_fd_t open_fd, slurm_addr_t * address)
-{
-	return _slurm_get_stream_addr(open_fd, address);
-}
-
-/* slurm_close_stream
- * closes either a server or client stream file_descriptor
- * IN open_fd	- an open file descriptor to close
- * RET int	- the return code
- */
-int slurm_close_stream(slurm_fd_t open_fd)
-{
-	return _slurm_close_stream(open_fd);
-}
-
-/* make an open slurm connection blocking or non-blocking
- *	(i.e. wait or do not wait for i/o completion )
- * IN open_fd	- an open file descriptor to change the effect
- * RET int	- the return code
- */
-int slurm_set_stream_non_blocking(slurm_fd_t open_fd)
-{
-	return _slurm_set_stream_non_blocking(open_fd);
-}
-int slurm_set_stream_blocking(slurm_fd_t open_fd)
-{
-	return _slurm_set_stream_blocking(open_fd);
-}
-
 /**********************************************************************\
  * address conversion and management functions
 \**********************************************************************/
 
-/* slurm_set_addr_uint
- * initializes the slurm_address with the supplied port and ip_address
- * OUT slurm_address	- slurm_addr_t to be filled in
- * IN port		- port in host order
- * IN ip_address	- ipv4 address in uint32 host order form
- */
-void slurm_set_addr_uint(slurm_addr_t * slurm_address, uint16_t port,
-			 uint32_t ip_address)
-{
-	_slurm_set_addr_uint(slurm_address, port, ip_address);
-}
-
 /* slurm_set_addr_any
  * initialized the slurm_address with the supplied port on INADDR_ANY
  * OUT slurm_address	- slurm_addr_t to be filled in
  * IN port		- port in host order
  */
-void slurm_set_addr_any(slurm_addr_t * slurm_address, uint16_t port)
+static void _slurm_set_addr_any(slurm_addr_t * slurm_address, uint16_t port)
 {
-	_slurm_set_addr_uint(slurm_address, port, SLURM_INADDR_ANY);
+	slurm_set_addr_uint(slurm_address, port, SLURM_INADDR_ANY);
 }
 
 /* slurm_set_addr
@@ -3451,42 +3691,7 @@ void slurm_set_addr_any(slurm_addr_t * slurm_address, uint16_t port)
  */
 void slurm_set_addr(slurm_addr_t * slurm_address, uint16_t port, char *host)
 {
-	_slurm_set_addr_char(slurm_address, port, host);
-}
-
-/* reset_slurm_addr
- * resets the address field of a slurm_addr, port and family unchanged
- * OUT slurm_address	- slurm_addr_t to be reset in
- * IN new_address	- source of address to write into slurm_address
- */
-void reset_slurm_addr(slurm_addr_t * slurm_address, slurm_addr_t new_address)
-{
-	_reset_slurm_addr(slurm_address, new_address);
-}
-
-/* slurm_set_addr_char
- * initializes the slurm_address with the supplied port and host
- * OUT slurm_address	- slurm_addr_t to be filled in
- * IN port		- port in host order
- * IN host		- hostname or dns name
- */
-void slurm_set_addr_char(slurm_addr_t * slurm_address, uint16_t port,
-			 char *host)
-{
-	_slurm_set_addr_char(slurm_address, port, host);
-}
-
-/* slurm_get_addr
- * given a slurm_address it returns its port and hostname
- * IN slurm_address	- slurm_addr_t to be queried
- * OUT port		- port number
- * OUT host		- hostname
- * IN buf_len		- length of hostname buffer
- */
-void slurm_get_addr(slurm_addr_t * slurm_address, uint16_t * port,
-		    char *host, unsigned int buf_len)
-{
-	_slurm_get_addr(slurm_address, port, host, buf_len);
+	slurm_set_addr_char(slurm_address, port, host);
 }
 
 /* slurm_get_ip_str
@@ -3515,49 +3720,16 @@ int slurm_get_peer_addr(slurm_fd_t fd, slurm_addr_t * slurm_address)
 	socklen_t namelen = (socklen_t) sizeof(struct sockaddr);
 	int rc;
 
-	if ((rc = _slurm_getpeername((int) fd, &name, &namelen)))
+	if ((rc = slurm_getpeername((int) fd, &name, &namelen)))
 		return rc;
 	memcpy(slurm_address, &name, sizeof(slurm_addr_t));
 	return 0;
 }
 
-/* slurm_print_slurm_addr
- * prints a slurm_addr_t into a buf
- * IN address		- slurm_addr_t to print
- * IN buf		- space for string representation of slurm_addr
- * IN n			- max number of bytes to write (including NUL)
- */
-void slurm_print_slurm_addr(slurm_addr_t * address, char *buf, size_t n)
-{
-	_slurm_print_slurm_addr(address, buf, n);
-}
-
 /**********************************************************************\
  * slurm_addr_t pack routines
 \**********************************************************************/
 
-/* slurm_pack_slurm_addr
- * packs a slurm_addr_t into a buffer to serialization transport
- * IN slurm_address	- slurm_addr_t to pack
- * IN/OUT buffer	- buffer to pack the slurm_addr_t into
- */
-void slurm_pack_slurm_addr(slurm_addr_t * slurm_address, Buf buffer)
-{
-	_slurm_pack_slurm_addr(slurm_address, buffer);
-}
-
-/* slurm_unpack_slurm_addr
- * unpacks a buffer into a slurm_addr_t after serialization transport
- * OUT slurm_address	- slurm_addr_t to unpack to
- * IN/OUT buffer	- buffer to unpack the slurm_addr_t from
- * returns		- SLURM error code
- */
-int slurm_unpack_slurm_addr_no_alloc(slurm_addr_t * slurm_address,
-				     Buf buffer)
-{
-	return _slurm_unpack_slurm_addr_no_alloc(slurm_address, buffer);
-}
-
 /* slurm_pack_slurm_addr_array
  * packs an array of slurm_addrs into a buffer
  * OUT slurm_address	- slurm_addr_t to pack
@@ -3610,6 +3782,24 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+static void _rc_msg_setup(slurm_msg_t *msg, slurm_msg_t *resp_msg,
+			  return_code_msg_t *rc_msg, int rc)
+{
+	memset(rc_msg, 0, sizeof(return_code_msg_t));
+	rc_msg->return_code = rc;
+
+	slurm_msg_t_init(resp_msg);
+	resp_msg->protocol_version = msg->protocol_version;
+	resp_msg->address  = msg->address;
+	resp_msg->msg_type = RESPONSE_SLURM_RC;
+	resp_msg->data     = rc_msg;
+	resp_msg->flags = msg->flags;
+	resp_msg->forward = msg->forward;
+	resp_msg->forward_struct = msg->forward_struct;
+	resp_msg->ret_list = msg->ret_list;
+	resp_msg->orig_addr = msg->orig_addr;
+}
+
 
 /**********************************************************************\
  * simplified communication routines
@@ -3625,28 +3815,34 @@ unpack_error:
  */
 int slurm_send_rc_msg(slurm_msg_t *msg, int rc)
 {
-	slurm_msg_t resp_msg;
-	return_code_msg_t rc_msg;
+	if (msg->msg_index && msg->ret_list) {
+		slurm_msg_t *resp_msg = xmalloc_nz(sizeof(slurm_msg_t));
+		return_code_msg_t *rc_msg =
+			xmalloc_nz(sizeof(return_code_msg_t));
 
-	if (msg->conn_fd < 0) {
-		slurm_seterrno(ENOTCONN);
-		return SLURM_ERROR;
-	}
-	rc_msg.return_code = rc;
+		_rc_msg_setup(msg, resp_msg, rc_msg, rc);
 
-	slurm_msg_t_init(&resp_msg);
-	resp_msg.protocol_version = msg->protocol_version;
-	resp_msg.address  = msg->address;
-	resp_msg.msg_type = RESPONSE_SLURM_RC;
-	resp_msg.data     = &rc_msg;
-	resp_msg.flags = msg->flags;
-	resp_msg.forward = msg->forward;
-	resp_msg.forward_struct = msg->forward_struct;
-	resp_msg.ret_list = msg->ret_list;
-	resp_msg.orig_addr = msg->orig_addr;
+		resp_msg->msg_index = msg->msg_index;
+		resp_msg->ret_list = NULL;
+		/* The return list here is the list we are sending to
+		   the node, so after we attach this message to it, set
+		   it to NULL to remove it.
+		*/
+		list_append(msg->ret_list, resp_msg);
+		return SLURM_SUCCESS;
+	} else {
+		slurm_msg_t resp_msg;
+		return_code_msg_t rc_msg;
 
-	/* send message */
-	return slurm_send_node_msg(msg->conn_fd, &resp_msg);
+		if (msg->conn_fd < 0) {
+			slurm_seterrno(ENOTCONN);
+			return SLURM_ERROR;
+		}
+		_rc_msg_setup(msg, &resp_msg, &rc_msg, rc);
+
+		/* send message */
+		return slurm_send_node_msg(msg->conn_fd, &resp_msg);
+	}
 }
 
 /* slurm_send_rc_err_msg
@@ -4051,8 +4247,6 @@ List slurm_send_addr_recv_msgs(slurm_msg_t *msg, char *name, int timeout)
 	return ret_list;
 }
 
-
-
 /*
  *  Open a connection to the "address" specified in the slurm msg "req".
  *    Then read back an "rc" message returning the "return_code" specified
@@ -4120,7 +4314,6 @@ int slurm_send_recv_controller_rc_msg(slurm_msg_t *req, int *rc)
  *		    containing the number of nodes to send to each hop
  *		    on the span.
  */
-
 extern int *set_span(int total,  uint16_t tree_width)
 {
 	int *span = NULL;
@@ -4172,15 +4365,13 @@ extern int *set_span(int total,  uint16_t tree_width)
  */
 extern void slurm_free_msg(slurm_msg_t * msg)
 {
-	if (msg->auth_cred)
-		(void) g_slurm_auth_destroy(msg->auth_cred);
-
-	if (msg->ret_list) {
-		list_destroy(msg->ret_list);
-		msg->ret_list = NULL;
+	if (msg) {
+		if (msg->auth_cred) {
+			(void) g_slurm_auth_destroy(msg->auth_cred);
+		}
+		FREE_NULL_LIST(msg->ret_list);
+		xfree(msg);
 	}
-
-	xfree(msg);
 }
 
 extern char *nodelist_nth_host(const char *nodelist, int inx)
@@ -4200,7 +4391,7 @@ extern int nodelist_find(const char *nodelist, const char *name)
 }
 
 extern void convert_num_unit2(double num, char *buf, int buf_size,
-			      int orig_type, int divisor, bool exact)
+			      int orig_type, int divisor, uint32_t flags)
 {
 	char *unit = "\0KMGTP?";
 	uint64_t i;
@@ -4208,7 +4399,7 @@ extern void convert_num_unit2(double num, char *buf, int buf_size,
 	if ((int64_t)num == 0) {
 		snprintf(buf, buf_size, "0");
 		return;
-	} else if (exact) {
+	} else if (flags & CONVERT_NUM_UNIT_EXACT) {
 		i = (uint64_t)num % (divisor / 2);
 
 		if (i > 0) {
@@ -4218,9 +4409,11 @@ extern void convert_num_unit2(double num, char *buf, int buf_size,
 		}
 	}
 
-	while (num > divisor) {
-		num /= divisor;
-		orig_type++;
+	if (!(flags & CONVERT_NUM_UNIT_NO)) {
+		while (num > divisor) {
+			num /= divisor;
+			orig_type++;
+		}
 	}
 
 	if (orig_type < UNIT_NONE || orig_type > UNIT_PETA)
@@ -4236,9 +4429,10 @@ extern void convert_num_unit2(double num, char *buf, int buf_size,
 		snprintf(buf, buf_size, "%.2f%c", num, unit[orig_type]);
 }
 
-extern void convert_num_unit(double num, char *buf, int buf_size, int orig_type)
+extern void convert_num_unit(double num, char *buf, int buf_size,
+			     int orig_type, uint32_t flags)
 {
-	convert_num_unit2(num, buf, buf_size, orig_type, 1024, true);
+	convert_num_unit2(num, buf, buf_size, orig_type, 1024, flags);
 }
 
 extern int revert_num_unit(const char *buf)
@@ -4323,6 +4517,39 @@ slurm_forward_data(char *nodelist, char *address, uint32_t len, char *data)
 	return rc;
 }
 
+extern void slurm_setup_sockaddr(struct sockaddr_in *sin, uint16_t port)
+{
+	static uint32_t s_addr = NO_VAL;
+
+	memset(sin, 0, sizeof(struct sockaddr_in));
+	sin->sin_family = AF_SLURM;
+	sin->sin_port = htons(port);
+
+	if (s_addr == NO_VAL) {
+		/* On systems with multiple interfaces we might not
+		 * want to get just any address.  This is the case on
+		 * a Cray system with RSIP.
+		 */
+		char *topology_params = slurm_get_topology_param();
+		if (topology_params &&
+		    slurm_strcasestr(topology_params, "NoInAddrAny")) {
+			char host[MAXHOSTNAMELEN];
+
+			if (!gethostname(host, MAXHOSTNAMELEN)) {
+				slurm_set_addr_char(sin, port, host);
+				s_addr = sin->sin_addr.s_addr;
+			} else
+				fatal("slurm_setup_sockaddr: "
+				      "Can't get hostname or addr: %m");
+		} else
+			s_addr = htonl(INADDR_ANY);
+
+		xfree(topology_params);
+	}
+
+	sin->sin_addr.s_addr = s_addr;
+}
+
 /* sock_bind_range()
  */
 int
@@ -4368,10 +4595,7 @@ _is_port_ok(int s, uint16_t port)
 {
 	struct sockaddr_in sin;
 
-	memset(&sin, 0, sizeof(sin));
-	sin.sin_family = AF_INET;
-	sin.sin_addr.s_addr = htonl(INADDR_ANY);
-	sin.sin_port = htons(port);
+	slurm_setup_sockaddr(&sin, port);
 
 	if (bind(s, (struct sockaddr *)&sin, sizeof(sin)) < 0) {
 		debug("%s: bind() failed port %d sock %d %m",
@@ -4381,6 +4605,22 @@ _is_port_ok(int s, uint16_t port)
 
 	return true;
 }
-/*
- * vi: shiftwidth=8 tabstop=8 expandtab
+
+/* slurm_get_prolog_timeout
+ * Get prolog/epilog timeout
  */
+uint16_t slurm_get_prolog_timeout(void)
+{
+	uint16_t timeout = 0;
+	slurm_ctl_conf_t *conf;
+
+	if (slurmdbd_conf) {
+	} else {
+		conf = slurm_conf_lock();
+		timeout = conf->prolog_epilog_timeout;
+		slurm_conf_unlock();
+	}
+
+	return timeout;
+}
+
diff --git a/src/common/slurm_protocol_api.h b/src/common/slurm_protocol_api.h
index 8a31f3491..ef36ad0e8 100644
--- a/src/common/slurm_protocol_api.h
+++ b/src/common/slurm_protocol_api.h
@@ -69,6 +69,9 @@
 #define MIN_NOALLOC_JOBID ((uint32_t) 0xffff0000)
 #define MAX_NOALLOC_JOBID ((uint32_t) 0xfffffffd)
 
+#define CONVERT_NUM_UNIT_EXACT 0x00000001
+#define CONVERT_NUM_UNIT_NO    0x00000002
+
 enum controller_id {
 	PRIMARY_CONTROLLER = 1,
 	SECONDARY_CONTROLLER = 2
@@ -92,7 +95,6 @@ enum {
 /* slurm_get_auth_info
  * returns the auth_info from slurmctld_conf object (AuthInfo parameter)
  * cache value in local buffer for best performance
- * RET char *    - auth info,  MUST be xfreed by caller
  */
 char *slurm_get_auth_info(void);
 
@@ -145,6 +147,11 @@ uint16_t slurm_get_complete_wait(void);
  */
 uint32_t slurm_get_cpu_freq_def(void);
 
+/* slurm_get_cpu_freq_govs
+ * RET CpuFreqGovernors value from slurm.conf
+ */
+uint32_t slurm_get_cpu_freq_govs(void);
+
 /* slurm_get_prolog_flags
  * RET PrologFlags value from slurm.conf
  */
@@ -201,6 +208,13 @@ char *slurm_get_mpi_default(void);
  */
 char *slurm_get_mpi_params(void);
 
+/* slurm_get_msg_aggr_params
+ * get message aggregation parameters value from slurmctld_conf object
+ * RET char *   - msg aggregation parameters default value from slurm.conf,
+ *                MUST be xfreed by caller
+ */
+char *slurm_get_msg_aggr_params(void);
+
 /* slurm_get_msg_timeout
  * get default message timeout value from slurmctld_conf object
  */
@@ -341,6 +355,20 @@ uint32_t slurm_get_priority_weight_partition(void);
  */
 uint32_t slurm_get_priority_weight_qos(void);
 
+/* slurm_get_priority_weight_tres
+ * returns the priority weights for TRES' from slurmctld_conf object
+ * RET char * of the tres weights.
+ */
+char *slurm_get_priority_weight_tres(void);
+
+/* slurm_get_priority_weight_tres_array
+ * IN weights_str - string of tres and weights to be parsed.
+ * IN tres_cnt - count of how many tres' are on the system (e.g.
+ * 		slurmctld_tres_cnt).
+ * RET double* of tres weights.
+ */
+double *slurm_get_tres_weight_array(char *weights_str, int tres_cnt);
+
 /* slurm_get_private_data
  * get private data from slurmctld_conf object
  * RET uint16_t   - private_data
@@ -366,6 +394,24 @@ extern char *slurm_get_auth_type(void);
  */
 extern int slurm_set_auth_type(char *auth_type);
 
+/* slurm_get_auth_type
+ * returns the authentication type from slurmctld_conf object
+ * RET char *    - auth type, MUST be xfreed by caller
+ */
+extern char *slurm_get_auth_type(void);
+
+/* slurm_get_bb_params
+ * returns the BurstBufferParameters (bb_params) from slurmctld_conf object
+ * RET char *    - BurstBufferParameters, MUST be xfreed by caller
+ */
+extern char *slurm_get_bb_params(void);
+
+/* slurm_get_bb_type
+ * returns the BurstBufferType (bb_type) from slurmctld_conf object
+ * RET char *    - BurstBufferType, MUST be xfreed by caller
+ */
+extern char *slurm_get_bb_type(void);
+
 /* slurm_get_checkpoint_type
  * returns the checkpoint_type from slurmctld_conf object
  * RET char *    - checkpoint type, MUST be xfreed by caller
@@ -394,11 +440,34 @@ extern uint16_t slurm_get_fast_schedule(void);
  */
 extern uint16_t slurm_get_use_spec_resources(void);
 
+/* slurm_get_power_parameters
+ * returns the PowerParameters from slurmctld_conf object
+ * RET char *    - PowerParameters, MUST be xfreed by caller
+ */
+extern char *slurm_get_power_parameters(void);
+
+/* slurm_set_power_parameters
+ * reset the PowerParameters object
+ */
+extern void slurm_set_power_parameters(char *power_parameters);
+
+/* slurm_get_power_plugin
+ * returns the PowerPlugin from slurmctld_conf object
+ * RET char *    - PowerPlugin, MUST be xfreed by caller
+ */
+extern char *slurm_get_power_plugin(void);
+
 /* slurm_get_track_wckey
  * returns the value of track_wckey in slurmctld_conf object
  */
 extern uint16_t slurm_get_track_wckey(void);
 
+/* slurm_get_topology_param
+ * returns the value of topology_param in slurmctld_conf object
+ * RET char *    - topology parameters, MUST be xfreed by caller
+ */
+extern char * slurm_get_topology_param(void);
+
 /* slurm_get_topology_plugin
  * returns the value of topology_plugin in slurmctld_conf object
  * RET char *    - topology type, MUST be xfreed by caller
@@ -427,6 +496,12 @@ extern uint16_t slurm_get_vsize_factor(void);
  */
 char *slurm_get_accounting_storage_type(void);
 
+/* slurm_get_accounting_storage_tres
+ * returns the accounting storage tres from slurmctld_conf object
+ * RET char *    - accounting storage tres,  MUST be xfreed by caller
+ */
+char *slurm_get_accounting_storage_tres(void);
+
 /* slurm_get_accounting_storage_user
  * returns the storage user from slurmctld_conf object
  * RET char *    - storage user,  MUST be xfreed by caller
@@ -497,6 +572,12 @@ uint32_t slurm_get_accounting_storage_port(void);
  */
 int slurm_set_accounting_storage_port(uint32_t storage_port);
 
+/* slurm_get_launch_params
+ * get launch_params from slurmctld_conf object
+ * RET char *   - launch_params, MUST be xfreed by caller
+ */
+char *slurm_get_launch_params(void);
+
 /* slurm_get_launch_type
  * get launch_type from slurmctld_conf object
  * RET char *   - launch_type, MUST be xfreed by caller
@@ -750,7 +831,7 @@ char *slurm_get_task_prolog(void);
 char *slurm_get_task_plugin(void);
 
 /* slurm_get_task_plugin_param */
-uint16_t slurm_get_task_plugin_param(void);
+uint32_t slurm_get_task_plugin_param(void);
 
 /* Get SchedulerTimeSlice (secs) */
 uint16_t slurm_get_time_slice(void);
@@ -772,6 +853,18 @@ char *slurm_get_slurmd_spooldir(void);
  */
 char *slurm_get_layouts(void);
 
+/* slurm_get_srun_eio_timeout()
+ *
+ * Return the eio timeout for srun.
+ */
+int16_t slurm_get_srun_eio_timeout(void);
+
+/* slurm_get_prolog_timeout()
+ *
+ * Return the timeout used for prolog/epilog
+ */
+extern uint16_t slurm_get_prolog_timeout(void);
+
 /**********************************************************************\
  * general message management functions used by slurmctld, slurmd
 \**********************************************************************/
@@ -824,13 +917,6 @@ extern slurm_fd_t slurm_init_msg_engine(slurm_addr_t * slurm_address);
 extern slurm_fd_t slurm_accept_msg_conn(slurm_fd_t open_fd,
 				      slurm_addr_t * slurm_address);
 
-/* In the bsd implmentation maps directly to a close call, to close
- *	the socket that was accepted
- * IN open_fd		- an open file descriptor to close
- * RET int		- the return code
- */
-extern int slurm_close_accepted_conn(slurm_fd_t open_fd);
-
 /* just calls close on an established msg connection
  * IN open_fd	- an open file descriptor to close
  * RET int	- the return code
@@ -939,13 +1025,6 @@ extern int slurm_shutdown_msg_conn(slurm_fd_t open_fd);
  * stream functions
 \**********************************************************************/
 
-/* slurm_close_stream
- * closes either a server or client stream file_descriptor
- * IN open_fd	- an open file descriptor to close
- * RET int	- the return code
- */
-extern int slurm_close_stream(slurm_fd_t open_fd);
-
 /* slurm_write_stream
  * writes a buffer out a stream file descriptor
  * IN open_fd		- file descriptor to write on
@@ -974,41 +1053,10 @@ extern size_t slurm_read_stream_timeout(slurm_fd_t open_fd,
 					char *buffer, size_t size,
 					int timeout);
 
-/* slurm_get_stream_addr
- * esentially a encapsilated get_sockname
- * IN open_fd 		- file descriptor to retreive slurm_addr_t for
- * OUT address		- address that open_fd to bound to
- */
-extern int slurm_get_stream_addr(slurm_fd_t open_fd, slurm_addr_t * address);
-
-/* make an open slurm connection blocking or non-blocking
- *	(i.e. wait or do not wait for i/o completion )
- * IN open_fd	- an open file descriptor to change the effect
- * RET int	- the return code
- */
-extern int slurm_set_stream_non_blocking(slurm_fd_t open_fd);
-extern int slurm_set_stream_blocking(slurm_fd_t open_fd);
-
 /**********************************************************************\
  * address conversion and management functions
 \**********************************************************************/
 
-/* slurm_set_addr_uint
- * initializes the slurm_address with the supplied port and ip_address
- * OUT slurm_address	- slurm_addr_t to be filled in
- * IN port		- port in host order
- * IN ip_address	- ipv4 address in uint32 host order form
- */
-extern void slurm_set_addr_uint(slurm_addr_t * slurm_address,
-				uint16_t port, uint32_t ip_address);
-
-/* reset_slurm_addr
- * resets the address field of a slurm_addr, port and family unchanged
- * OUT slurm_address	- slurm_addr_t to be reset in
- * IN new_address	- source of address to write into slurm_address
- */
-void reset_slurm_addr(slurm_addr_t * slurm_address, slurm_addr_t new_address);
-
 /* slurm_set_addr
  * initializes the slurm_address with the supplied port and ip_address
  * OUT slurm_address	- slurm_addr_t to be filled in
@@ -1018,32 +1066,6 @@ void reset_slurm_addr(slurm_addr_t * slurm_address, slurm_addr_t new_address);
 extern void slurm_set_addr(slurm_addr_t * slurm_address,
 			   uint16_t port, char *host);
 
-/* slurm_set_addr_any
- * initialized the slurm_address with the supplied port on INADDR_ANY
- * OUT slurm_address	- slurm_addr_t to be filled in
- * IN port		- port in host order
- */
-extern void slurm_set_addr_any(slurm_addr_t * slurm_address, uint16_t port);
-
-/* slurm_set_addr_char
- * initializes the slurm_address with the supplied port and host
- * OUT slurm_address	- slurm_addr_t to be filled in
- * IN port		- port in host order
- * IN host		- hostname or dns name
- */
-extern void slurm_set_addr_char(slurm_addr_t * slurm_address,
-				uint16_t port, char *host);
-
-/* slurm_get_addr
- * given a slurm_address it returns to port and hostname
- * IN slurm_address	- slurm_addr_t to be queried
- * OUT port		- port number
- * OUT host		- hostname
- * IN buf_len		- length of hostname buffer
- */
-extern void slurm_get_addr(slurm_addr_t * slurm_address,
-			   uint16_t * port, char *host, uint32_t buf_len);
-
 /* slurm_get_ip_str
  * given a slurm_address it returns its port and ip address string
  * IN slurm_address	- slurm_addr_t to be queried
@@ -1061,35 +1083,10 @@ extern void slurm_get_ip_str(slurm_addr_t * slurm_address, uint16_t * port,
  */
 extern int slurm_get_peer_addr(slurm_fd_t fd, slurm_addr_t * slurm_address);
 
-/* slurm_print_slurm_addr
- * prints a slurm_addr_t into a buf
- * IN address		- slurm_addr_t to print
- * IN buf		- space for string representation of slurm_addr
- * IN n			- max number of bytes to write (including NUL)
- */
-extern void slurm_print_slurm_addr(slurm_addr_t * address,
-				   char *buf, size_t n);
-
 /**********************************************************************\
  * slurm_addr_t pack routines
 \**********************************************************************/
 
-/* slurm_pack_slurm_addr
- * packs a slurm_addr_t into a buffer to serialization transport
- * IN slurm_address	- slurm_addr_t to pack
- * IN/OUT buffer	- buffer to pack the slurm_addr_t into
- */
-extern void slurm_pack_slurm_addr(slurm_addr_t * slurm_address, Buf buffer);
-
-/* slurm_pack_slurm_addr
- * unpacks a buffer into a slurm_addr_t after serialization transport
- * OUT slurm_address	- slurm_addr_t to unpack to
- * IN/OUT buffer	- buffer to upack the slurm_addr_t from
- * returns 		- SLURM error code
- */
-extern int slurm_unpack_slurm_addr_no_alloc(slurm_addr_t * slurm_address,
-					    Buf buffer);
-
 /* slurm_pack_slurm_addr_array
  * packs an array of slurm_addrs into a buffer
  * OUT slurm_address	- slurm_addr_t to pack
@@ -1224,9 +1221,9 @@ extern void slurm_free_msg(slurm_msg_t * msg);
 extern char *nodelist_nth_host(const char *nodelist, int inx);
 extern int nodelist_find(const char *nodelist, const char *name);
 extern void convert_num_unit2(double num, char *buf, int buf_size,
-			      int orig_type, int divisor, bool exact);
+			      int orig_type, int divisor, uint32_t flags);
 extern void convert_num_unit(double num, char *buf, int buf_size,
-			     int orig_type);
+			     int orig_type, uint32_t flags);
 extern int revert_num_unit(const char *buf);
 extern void parse_int_to_array(int in, int *out);
 
@@ -1253,6 +1250,17 @@ extern int slurm_job_step_create (
  * RET: error code
  */
 extern int slurm_forward_data(char *nodelist, char *address, uint32_t len,
-	char *data);
+			      char *data);
+
+/*
+ * slurm_setup_sockaddr - setup a sockaddr_in struct to be used for
+ *                        communication. If TopologyParameters has
+ *                        NoInAddrAny set it will work of the
+ *                        interface given from gethostname from the
+ *                        hostname of the node.
+ * OUT sin - uninitialized sockaddr_in
+ * IN  port - port to used, we will call htons on it
+ */
+extern void slurm_setup_sockaddr(struct sockaddr_in *sin, uint16_t port);
 
 #endif
diff --git a/src/common/slurm_protocol_common.h b/src/common/slurm_protocol_common.h
index c42cbc150..7f39dbb1e 100644
--- a/src/common/slurm_protocol_common.h
+++ b/src/common/slurm_protocol_common.h
@@ -73,16 +73,21 @@
  *       version in the code is referenced as a uint16_t which if 1403 was the
  *       api it would go over the limit.  So keep is a relatively
  *       small number.
-*/
+ * NOTE: These values must be Moved to
+ * src/plugins/accounting_storage/mysql/as_mysql_archive.c when we are
+ * done here with them since we have to support old version of archive
+ * files since they don't update once they are created.
+ */
+#define SLURM_15_08_PROTOCOL_VERSION ((29 << 8) | 0)
 #define SLURM_14_11_PROTOCOL_VERSION ((28 << 8) | 0)
 #define SLURM_14_03_PROTOCOL_VERSION ((27 << 8) | 0)
-#define SLURM_2_6_PROTOCOL_VERSION ((26 << 8) | 0)
 
-#define SLURM_PROTOCOL_VERSION SLURM_14_11_PROTOCOL_VERSION
-#define SLURM_MIN_PROTOCOL_VERSION SLURM_2_6_PROTOCOL_VERSION
+#define SLURM_PROTOCOL_VERSION SLURM_15_08_PROTOCOL_VERSION
+#define SLURM_MIN_PROTOCOL_VERSION SLURM_14_03_PROTOCOL_VERSION
 
 #if 0
 /* SLURM version 14.11 code removed support for protocol versions before 2.5 */
+#define SLURM_2_6_PROTOCOL_VERSION ((26 << 8) | 0)
 #define SLURM_2_5_PROTOCOL_VERSION ((25 << 8) | 0)
 #define SLURM_2_4_PROTOCOL_VERSION ((24 << 8) | 0)
 #define SLURM_2_3_PROTOCOL_VERSION ((23 << 8) | 0)
diff --git a/src/common/slurm_protocol_defs.c b/src/common/slurm_protocol_defs.c
index fd23ea2d6..94e14a9d6 100644
--- a/src/common/slurm_protocol_defs.c
+++ b/src/common/slurm_protocol_defs.c
@@ -5,7 +5,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
- *  Portions Copyright (C) 2010-2014 SchedMD <http://www.schedmd.com>.
+ *  Portions Copyright (C) 2010-2015 SchedMD <http://www.schedmd.com>.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov> et. al.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -55,12 +55,14 @@
 #include "src/common/job_options.h"
 #include "src/common/log.h"
 #include "src/common/node_select.h"
+#include "src/common/power.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/slurm_acct_gather_energy.h"
 #include "src/common/slurm_cred.h"
 #include "src/common/slurm_ext_sensors.h"
 #include "src/common/slurm_jobacct_gather.h"
 #include "src/common/slurm_protocol_defs.h"
+#include "src/common/slurm_time.h"
 #include "src/common/switch.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
@@ -166,6 +168,27 @@ extern char *slurm_add_slash_to_quotes(char *str)
 	return copy;
 }
 
+extern List slurm_copy_char_list(List char_list)
+{
+	List ret_list = NULL;
+	char *tmp_char = NULL;
+	ListIterator itr = NULL;
+
+	if (!char_list || !list_count(char_list))
+		return NULL;
+
+	itr = list_iterator_create(char_list);
+	ret_list = list_create(slurm_destroy_char);
+
+	while ((tmp_char = list_next(itr)))
+		list_append(ret_list, xstrdup(tmp_char));
+
+	list_iterator_destroy(itr);
+
+	return ret_list;
+}
+
+
 /* returns number of objects added to list */
 extern int slurm_addto_char_list(List char_list, char *names)
 {
@@ -201,8 +224,7 @@ extern int slurm_addto_char_list(List char_list, char *names)
 				if (!names[i+1])
 					break;
 
-				name = xmalloc((i-start+1));
-				memcpy(name, names+start, (i-start));
+				name = xstrndup(names+start, (i-start));
 				//info("got %s %d", name, i-start);
 
 				while ((tmp_char = list_next(itr))) {
@@ -237,8 +259,7 @@ extern int slurm_addto_char_list(List char_list, char *names)
 			i++;
 		}
 
-		name = xmalloc((i-start)+1);
-		memcpy(name, names+start, (i-start));
+		name = xstrndup(names+start, (i-start));
 		while ((tmp_char = list_next(itr))) {
 			if (!strcasecmp(tmp_char, name))
 			break;
@@ -263,6 +284,163 @@ endit:
 	return count;
 }
 
+/* returns number of objects added to list */
+extern int slurm_addto_step_list(List step_list, char *names)
+{
+	int i=0, start=0;
+	char *name = NULL, *dot = NULL;
+	slurmdb_selected_step_t *selected_step = NULL;
+	slurmdb_selected_step_t *curr_step = NULL;
+
+	ListIterator itr = NULL;
+	char quote_c = '\0';
+	int quote = 0;
+	int count = 0;
+
+	if (!step_list) {
+		error("No list was given to fill in");
+		return 0;
+	}
+
+	itr = list_iterator_create(step_list);
+	if (names) {
+		if (names[i] == '\"' || names[i] == '\'') {
+			quote_c = names[i];
+			quote = 1;
+			i++;
+		}
+		start = i;
+		while(names[i]) {
+			//info("got %d - %d = %d", i, start, i-start);
+			if (quote && names[i] == quote_c)
+				break;
+			else if (names[i] == '\"' || names[i] == '\'')
+				names[i] = '`';
+			else if (names[i] == ',') {
+				if ((i-start) > 0) {
+					char *dot = NULL;
+					name = xmalloc((i-start+1));
+					memcpy(name, names+start, (i-start));
+
+					selected_step = xmalloc(
+						sizeof(slurmdb_selected_step_t));
+					dot = strstr(name, ".");
+					if (dot == NULL) {
+						debug2("No jobstep requested");
+						selected_step->stepid = NO_VAL;
+					} else {
+						*dot++ = 0;
+						/* can't use NO_VAL
+						 * since that means all */
+						if (!strcmp(dot, "batch"))
+							selected_step->stepid =
+								INFINITE;
+						else
+							selected_step->stepid =
+								atoi(dot);
+					}
+
+					dot = strstr(name, "_");
+					if (dot == NULL) {
+						debug2("No jobarray requested");
+						selected_step->array_task_id =
+							NO_VAL;
+					} else {
+						*dot++ = 0;
+						/* INFINITE means give
+						 * me all the tasks of
+						 * the array */
+						if (!dot)
+							selected_step->
+								array_task_id =
+								INFINITE;
+						else
+							selected_step->
+								array_task_id =
+								atoi(dot);
+					}
+
+					selected_step->jobid = atoi(name);
+					xfree(name);
+
+					while((curr_step = list_next(itr))) {
+						if ((curr_step->jobid
+						    == selected_step->jobid)
+						   && (curr_step->stepid
+						       == selected_step->
+						       stepid))
+							break;
+					}
+
+					if (!curr_step) {
+						list_append(step_list,
+							    selected_step);
+						count++;
+					} else
+						slurmdb_destroy_selected_step(
+							selected_step);
+					list_iterator_reset(itr);
+				}
+				i++;
+				start = i;
+			}
+			i++;
+		}
+		if ((i-start) > 0) {
+			name = xmalloc((i-start)+1);
+			memcpy(name, names+start, (i-start));
+
+			selected_step =
+				xmalloc(sizeof(slurmdb_selected_step_t));
+
+			dot = strstr(name, ".");
+			if (dot == NULL) {
+				debug2("No jobstep requested");
+				selected_step->stepid = NO_VAL;
+			} else {
+				*dot++ = 0;
+				/* can't use NO_VAL since that means all */
+				if (!strcmp(dot, "batch"))
+					selected_step->stepid = INFINITE;
+				else
+					selected_step->stepid = atoi(dot);
+			}
+			dot = strstr(name, "_");
+			if (dot == NULL) {
+				debug2("No jobarray requested");
+				selected_step->array_task_id =
+					NO_VAL;
+			} else {
+				*dot++ = 0;
+				/* INFINITE means give me all the tasks of
+				 * the array */
+				if (dot[0])
+					selected_step->array_task_id =
+						atoi(dot);
+			}
+
+			selected_step->jobid = atoi(name);
+			xfree(name);
+
+			while((curr_step = list_next(itr))) {
+				if ((curr_step->jobid == selected_step->jobid)
+				   && (curr_step->stepid
+				       == selected_step->stepid))
+					break;
+			}
+
+			if (!curr_step) {
+				list_append(step_list, selected_step);
+				count++;
+			} else
+				slurmdb_destroy_selected_step(
+					selected_step);
+		}
+	}
+	list_iterator_destroy(itr);
+	return count;
+}
+
 extern int slurm_sort_char_list_asc(void *v1, void *v2)
 {
 	char *name_a = *(char **)v1;
@@ -314,6 +492,11 @@ extern void slurm_free_job_alloc_info_msg(job_alloc_info_msg_t * msg)
 	xfree(msg);
 }
 
+extern void slurm_free_step_alloc_info_msg(step_alloc_info_msg_t * msg)
+{
+	xfree(msg);
+}
+
 extern void slurm_free_return_code_msg(return_code_msg_t * msg)
 {
 	xfree(msg);
@@ -409,7 +592,9 @@ extern void slurm_free_job_desc_msg(job_desc_msg_t * msg)
 		FREE_NULL_BITMAP(msg->array_bitmap);
 		xfree(msg->array_inx);
 		xfree(msg->blrtsimage);
+		xfree(msg->burst_buffer);
 		xfree(msg->ckpt_dir);
+		xfree(msg->clusters);
 		xfree(msg->comment);
 		xfree(msg->cpu_bind);
 		xfree(msg->dependency);
@@ -443,6 +628,7 @@ extern void slurm_free_job_desc_msg(job_desc_msg_t * msg)
 		for (i = 0; i < msg->spank_job_env_size; i++)
 			xfree(msg->spank_job_env[i]);
 		xfree(msg->spank_job_env);
+		xfree(msg->tres_req_cnt);
 		xfree(msg->wckey);
 		xfree(msg->work_dir);
 		xfree(msg);
@@ -464,6 +650,7 @@ extern void slurm_free_prolog_launch_msg(prolog_launch_msg_t * msg)
 		for (i = 0; i < msg->spank_job_env_size; i++)
 			xfree(msg->spank_job_env[i]);
 		xfree(msg->spank_job_env);
+		slurm_cred_destroy(msg->cred);
 
 		xfree(msg);
 	}
@@ -480,6 +667,7 @@ extern void slurm_free_job_launch_msg(batch_job_launch_msg_t * msg)
 	int i;
 
 	if (msg) {
+		xfree(msg->account);
 		xfree(msg->acctg_freq);
 		xfree(msg->user_name);
 		xfree(msg->alias_list);
@@ -492,16 +680,22 @@ extern void slurm_free_job_launch_msg(batch_job_launch_msg_t * msg)
 		xfree(msg->std_err);
 		xfree(msg->std_in);
 		xfree(msg->std_out);
+		xfree(msg->qos);
 		xfree(msg->work_dir);
 		xfree(msg->ckpt_dir);
 		xfree(msg->restart_dir);
 
-		for (i = 0; i < msg->argc; i++)
-			xfree(msg->argv[i]);
-		xfree(msg->argv);
-		for (i = 0; i < msg->spank_job_env_size; i++)
-			xfree(msg->spank_job_env[i]);
-		xfree(msg->spank_job_env);
+		if (msg->argv) {
+			for (i = 0; i < msg->argc; i++)
+				xfree(msg->argv[i]);
+			xfree(msg->argv);
+		}
+
+		if (msg->spank_job_env) {
+			for (i = 0; i < msg->spank_job_env_size; i++)
+				xfree(msg->spank_job_env[i]);
+			xfree(msg->spank_job_env);
+		}
 
 		if (msg->environment) {
 			for (i = 0; i < msg->envc; i++)
@@ -513,7 +707,7 @@ extern void slurm_free_job_launch_msg(batch_job_launch_msg_t * msg)
 		msg->select_jobinfo = NULL;
 
 		slurm_cred_destroy(msg->cred);
-
+		xfree(msg->resv_name);
 		xfree(msg);
 	}
 }
@@ -561,6 +755,7 @@ extern void slurm_free_job_info_members(job_info_t * job)
 		xfree(job->std_err);
 		xfree(job->std_in);
 		xfree(job->std_out);
+		xfree(job->tres_alloc_str);
 		xfree(job->wckey);
 		xfree(job->work_dir);
 	}
@@ -628,6 +823,15 @@ extern void slurm_free_update_node_msg(update_node_msg_t * msg)
 	}
 }
 
+extern void slurm_free_update_layout_msg(update_layout_msg_t * msg)
+{
+	if (msg) {
+		xfree(msg->layout);
+		xfree(msg->arg);
+		xfree(msg);
+	}
+}
+
 extern void slurm_free_update_part_msg(update_part_msg_t * msg)
 {
 	if (msg) {
@@ -648,6 +852,7 @@ extern void slurm_free_resv_desc_msg(resv_desc_msg_t * msg)
 {
 	if (msg) {
 		xfree(msg->accounts);
+		xfree(msg->burst_buffer);
 		xfree(msg->core_cnt);
 		xfree(msg->features);
 		xfree(msg->licenses);
@@ -673,6 +878,27 @@ extern void slurm_free_resv_info_request_msg(resv_info_request_msg_t * msg)
 	xfree(msg);
 }
 
+extern void slurm_free_layout_info_request_msg(layout_info_request_msg_t * msg)
+{
+	if (msg) {
+		xfree(msg->layout_type);
+		xfree(msg->entities);
+		xfree(msg);
+	}
+}
+
+extern void slurm_free_layout_info_msg(layout_info_msg_t * msg)
+{
+	int i;
+
+	if (msg) {
+		for (i = 0; i < msg->record_count; i++)
+			xfree(msg->records[i]);
+		xfree(msg->records);
+		xfree(msg);
+	}
+}
+
 extern void slurm_free_job_step_create_request_msg(
 		job_step_create_request_msg_t *msg)
 {
@@ -1104,61 +1330,61 @@ extern char *job_reason_string(enum job_state_reason inx)
 	case WAIT_QOS_GRP_CPU:
 		return "QOSGrpCpuLimit";
 	case WAIT_QOS_GRP_CPU_MIN:
-		return "QOSGrpCPUMinsLimit";
+		return "QOSGrpCPUMinutesLimit";
 	case WAIT_QOS_GRP_CPU_RUN_MIN:
-		return "QOSGrpCPURunMinsLimit";
+		return "QOSGrpCPURunMinutesLimit";
 	case WAIT_QOS_GRP_JOB:
 		return"QOSGrpJobsLimit";
-	case WAIT_QOS_GRP_MEMORY:
-		return "QOSGrpMemoryLimit";
-	case WAIT_QOS_GRP_NODES:
-		return "QOSGrpNodesLimit";
+	case WAIT_QOS_GRP_MEM:
+		return "QOSGrpMemLimit";
+	case WAIT_QOS_GRP_NODE:
+		return "QOSGrpNodeLimit";
 	case WAIT_QOS_GRP_SUB_JOB:
 		return "QOSGrpSubmitJobsLimit";
 	case WAIT_QOS_GRP_WALL:
 		return "QOSGrpWallLimit";
-	case WAIT_QOS_MAX_CPUS_PER_JOB:
-		return "QOSMaxCpusPerJobLimit";
+	case WAIT_QOS_MAX_CPU_PER_JOB:
+		return "QOSMaxCpuPerJobLimit";
 	case WAIT_QOS_MAX_CPU_MINS_PER_JOB:
-		return "QOSMaxCpusMinsPerJobLimit";
+		return "QOSMaxCpuMinutesPerJobLimit";
 	case WAIT_QOS_MAX_NODE_PER_JOB:
-		return "QOSMaxNodesPerJobLimit";
+		return "QOSMaxNodePerJobLimit";
 	case WAIT_QOS_MAX_WALL_PER_JOB:
 		return "QOSMaxWallDurationPerJobLimit";
 	case WAIT_QOS_MAX_CPU_PER_USER:
-		return "QOSMaxCpusPerUserLimit";
+		return "QOSMaxCpuPerUserLimit";
 	case WAIT_QOS_MAX_JOB_PER_USER:
 		return "QOSMaxJobsPerUserLimit";
 	case WAIT_QOS_MAX_NODE_PER_USER:
-		return "QOSMaxNodesPerUserLimit";
+		return "QOSMaxNodePerUserLimit";
 	case WAIT_QOS_MAX_SUB_JOB:
 		return "QOSMaxSubmitJobPerUserLimit";
-	case WAIT_QOS_MIN_CPUS:
-		return "QOSMinCPUsNotSatisfied";
+	case WAIT_QOS_MIN_CPU:
+		return "QOSMinCpuNotSatisfied";
 	case WAIT_ASSOC_GRP_CPU:
 		return "AssocGrpCpuLimit";
 	case WAIT_ASSOC_GRP_CPU_MIN:
-		return "AssocGrpCPUMinsLimit";
+		return "AssocGrpCPUMinutesLimit";
 	case WAIT_ASSOC_GRP_CPU_RUN_MIN:
-		return "AssocGrpCPURunMinsLimit";
+		return "AssocGrpCPURunMinutesLimit";
 	case WAIT_ASSOC_GRP_JOB:
 		return"AssocGrpJobsLimit";
-	case WAIT_ASSOC_GRP_MEMORY:
-		return "AssocGrpMemoryLimit";
-	case WAIT_ASSOC_GRP_NODES:
-		return "AssocGrpNodesLimit";
+	case WAIT_ASSOC_GRP_MEM:
+		return "AssocGrpMemLimit";
+	case WAIT_ASSOC_GRP_NODE:
+		return "AssocGrpNodeLimit";
 	case WAIT_ASSOC_GRP_SUB_JOB:
 		return "AssocGrpSubmitJobsLimit";
 	case WAIT_ASSOC_GRP_WALL:
 		return "AssocGrpWallLimit";
 	case WAIT_ASSOC_MAX_JOBS:
 		return "AssocMaxJobsLimit";
-	case WAIT_ASSOC_MAX_CPUS_PER_JOB:
-		return "AssocMaxCpusPerJobLimit";
+	case WAIT_ASSOC_MAX_CPU_PER_JOB:
+		return "AssocMaxCpuPerJobLimit";
 	case WAIT_ASSOC_MAX_CPU_MINS_PER_JOB:
-		return "AssocMaxCpusMinsPerJobLimit";
+		return "AssocMaxCpuMinutesPerJobLimit";
 	case WAIT_ASSOC_MAX_NODE_PER_JOB:
-		return "AssocMaxNodesPerJobLimit";
+		return "AssocMaxNodePerJobLimit";
 	case WAIT_ASSOC_MAX_WALL_PER_JOB:
 		return "AssocMaxWallDurationPerJobLimit";
 	case WAIT_ASSOC_MAX_SUB_JOB:
@@ -1167,6 +1393,195 @@ extern char *job_reason_string(enum job_state_reason inx)
 		return "JobHoldMaxRequeue";
 	case WAIT_ARRAY_TASK_LIMIT:
 		return "JobArrayTaskLimit";
+	case WAIT_BURST_BUFFER_RESOURCE:
+		return "BurstBufferResources";
+	case WAIT_BURST_BUFFER_STAGING:
+		return "BurstBufferStageIn";
+	case FAIL_BURST_BUFFER_OP:
+		return "BurstBufferOperation";
+	case WAIT_POWER_NOT_AVAIL:
+		return "PowerNotAvail";
+	case WAIT_POWER_RESERVED:
+		return "PowerReserved";
+	case WAIT_ASSOC_GRP_UNK:
+		return "AssocGrpUnknown";
+	case WAIT_ASSOC_GRP_UNK_MIN:
+		return "AssocGrpUnknownMinutes";
+	case WAIT_ASSOC_GRP_UNK_RUN_MIN:
+		return "AssocGrpUnknownRunMinutes";
+	case WAIT_ASSOC_MAX_UNK_PER_JOB:
+		return "AssocMaxUnknownPerJob";
+	case WAIT_ASSOC_MAX_UNK_PER_NODE:
+		return "AssocMaxUnknownPerNode";
+	case WAIT_ASSOC_MAX_UNK_MINS_PER_JOB:
+		return "AssocMaxUnknownMinutesPerJob";
+	case WAIT_ASSOC_MAX_CPU_PER_NODE:
+		return "AssocMaxCpuPerNode";
+	case WAIT_ASSOC_GRP_MEM_MIN:
+		return "AssocGrpMemMinutes";
+	case WAIT_ASSOC_GRP_MEM_RUN_MIN:
+		return "AssocGrpMemRunMinutes";
+	case WAIT_ASSOC_MAX_MEM_PER_JOB:
+		return "AssocMaxMemPerJob";
+	case WAIT_ASSOC_MAX_MEM_PER_NODE:
+		return "AssocMaxMemPerNode";
+	case WAIT_ASSOC_MAX_MEM_MINS_PER_JOB:
+		return "AssocMaxMemMinutesPerJob";
+	case WAIT_ASSOC_GRP_NODE_MIN:
+		return "AssocGrpNodeMinutes";
+	case WAIT_ASSOC_GRP_NODE_RUN_MIN:
+		return "AssocGrpNodeRunMinutes";
+	case WAIT_ASSOC_MAX_NODE_MINS_PER_JOB:
+		return "AssocMaxNodeMinutesPerJob";
+	case WAIT_ASSOC_GRP_ENERGY:
+		return "AssocGrpEnergy";
+	case WAIT_ASSOC_GRP_ENERGY_MIN:
+		return "AssocGrpEnergyMinutes";
+	case WAIT_ASSOC_GRP_ENERGY_RUN_MIN:
+		return "AssocGrpEnergyRunMinutes";
+	case WAIT_ASSOC_MAX_ENERGY_PER_JOB:
+		return "AssocMaxEnergyPerJob";
+	case WAIT_ASSOC_MAX_ENERGY_PER_NODE:
+		return "AssocMaxEnergyPerNode";
+	case WAIT_ASSOC_MAX_ENERGY_MINS_PER_JOB:
+		return "AssocMaxEnergyMinutesPerJob";
+	case WAIT_ASSOC_GRP_GRES:
+		return "AssocGrpGRES";
+	case WAIT_ASSOC_GRP_GRES_MIN:
+		return "AssocGrpGRESMinutes";
+	case WAIT_ASSOC_GRP_GRES_RUN_MIN:
+		return "AssocGrpGRESRunMinutes";
+	case WAIT_ASSOC_MAX_GRES_PER_JOB:
+		return "AssocMaxGRESPerJob";
+	case WAIT_ASSOC_MAX_GRES_PER_NODE:
+		return "AssocMaxGRESPerNode";
+	case WAIT_ASSOC_MAX_GRES_MINS_PER_JOB:
+		return "AssocMaxGRESMinutesPerJob";
+	case WAIT_ASSOC_GRP_LIC:
+		return "AssocGrpLicense";
+	case WAIT_ASSOC_GRP_LIC_MIN:
+		return "AssocGrpLicenseMinutes";
+	case WAIT_ASSOC_GRP_LIC_RUN_MIN:
+		return "AssocGrpLicenseRunMinutes";
+	case WAIT_ASSOC_MAX_LIC_PER_JOB:
+		return "AssocMaxLicensePerJob";
+	case WAIT_ASSOC_MAX_LIC_MINS_PER_JOB:
+		return "AssocMaxLicenseMinutesPerJob";
+	case WAIT_ASSOC_GRP_BB:
+		return "AssocGrpBB";
+	case WAIT_ASSOC_GRP_BB_MIN:
+		return "AssocGrpBBMinutes";
+	case WAIT_ASSOC_GRP_BB_RUN_MIN:
+		return "AssocGrpBBRunMinutes";
+	case WAIT_ASSOC_MAX_BB_PER_JOB:
+		return "AssocMaxBBPerJob";
+	case WAIT_ASSOC_MAX_BB_PER_NODE:
+		return "AssocMaxBBPerNode";
+	case WAIT_ASSOC_MAX_BB_MINS_PER_JOB:
+		return "AssocMaxBBMinutesPerJob";
+
+	case WAIT_QOS_GRP_UNK:
+		return "QOSGrpUnknown";
+	case WAIT_QOS_GRP_UNK_MIN:
+		return "QOSGrpUnknownMinutes";
+	case WAIT_QOS_GRP_UNK_RUN_MIN:
+		return "QOSGrpUnknownRunMinutes";
+	case WAIT_QOS_MAX_UNK_PER_JOB:
+		return "QOSMaxUnknownPerJob";
+	case WAIT_QOS_MAX_UNK_PER_NODE:
+		return "QOSMaxUnknownPerNode";
+	case WAIT_QOS_MAX_UNK_PER_USER:
+		return "QOSMaxUnknownPerUser";
+	case WAIT_QOS_MAX_UNK_MINS_PER_JOB:
+		return "QOSMaxUnknownMinutesPerJob";
+	case WAIT_QOS_MIN_UNK:
+		return "QOSMinUnknown";
+	case WAIT_QOS_MAX_CPU_PER_NODE:
+		return "QOSMaxCpuPerNode";
+	case WAIT_QOS_GRP_MEM_MIN:
+		return "QOSGrpMemoryMinutes";
+	case WAIT_QOS_GRP_MEM_RUN_MIN:
+		return "QOSGrpMemoryRunMinutes";
+	case WAIT_QOS_MAX_MEM_PER_JOB:
+		return "QOSMaxMemoryPerJob";
+	case WAIT_QOS_MAX_MEM_PER_NODE:
+		return "QOSMaxMemoryPerNode";
+	case WAIT_QOS_MAX_MEM_PER_USER:
+		return "QOSMaxMemoryPerUser";
+	case WAIT_QOS_MAX_MEM_MINS_PER_JOB:
+		return "QOSMaxMemoryMinutesPerJob";
+	case WAIT_QOS_MIN_MEM:
+		return "QOSMinMemory";
+	case WAIT_QOS_GRP_NODE_MIN:
+		return "QOSGrpNodeMinutes";
+	case WAIT_QOS_GRP_NODE_RUN_MIN:
+		return "QOSGrpNodeRunMinutes";
+	case WAIT_QOS_MAX_NODE_MINS_PER_JOB:
+		return "QOSMaxNodeMinutesPerJob";
+	case WAIT_QOS_MIN_NODE:
+		return "QOSMinNode";
+	case WAIT_QOS_GRP_ENERGY:
+		return "QOSGrpEnergy";
+	case WAIT_QOS_GRP_ENERGY_MIN:
+		return "QOSGrpEnergyMinutes";
+	case WAIT_QOS_GRP_ENERGY_RUN_MIN:
+		return "QOSGrpEnergyRunMinutes";
+	case WAIT_QOS_MAX_ENERGY_PER_JOB:
+		return "QOSMaxEnergyPerJob";
+	case WAIT_QOS_MAX_ENERGY_PER_NODE:
+		return "QOSMaxEnergyPerNode";
+	case WAIT_QOS_MAX_ENERGY_PER_USER:
+		return "QOSMaxEnergyPerUser";
+	case WAIT_QOS_MAX_ENERGY_MINS_PER_JOB:
+		return "QOSMaxEnergyMinutesPerJob";
+	case WAIT_QOS_MIN_ENERGY:
+		return "QOSMinEnergy";
+	case WAIT_QOS_GRP_GRES:
+		return "QOSGrpGRES";
+	case WAIT_QOS_GRP_GRES_MIN:
+		return "QOSGrpGRESMinutes";
+	case WAIT_QOS_GRP_GRES_RUN_MIN:
+		return "QOSGrpGRESRunMinutes";
+	case WAIT_QOS_MAX_GRES_PER_JOB:
+		return "QOSMaxGRESPerJob";
+	case WAIT_QOS_MAX_GRES_PER_NODE:
+		return "QOSMaxGRESPerNode";
+	case WAIT_QOS_MAX_GRES_PER_USER:
+		return "QOSMaxGRESPerUser";
+	case WAIT_QOS_MAX_GRES_MINS_PER_JOB:
+		return "QOSMaxGRESMinutesPerJob";
+	case WAIT_QOS_MIN_GRES:
+		return "QOSMinGRES";
+	case WAIT_QOS_GRP_LIC:
+		return "QOSGrpLicense";
+	case WAIT_QOS_GRP_LIC_MIN:
+		return "QOSGrpLicenseMinutes";
+	case WAIT_QOS_GRP_LIC_RUN_MIN:
+		return "QOSGrpLicenseRunMinutes";
+	case WAIT_QOS_MAX_LIC_PER_JOB:
+		return "QOSMaxLicensePerJob";
+	case WAIT_QOS_MAX_LIC_PER_USER:
+		return "QOSMaxLicensePerUser";
+	case WAIT_QOS_MAX_LIC_MINS_PER_JOB:
+		return "QOSMaxLicenseMinutesPerJob";
+	case WAIT_QOS_MIN_LIC:
+		return "QOSMinLicense";
+	case WAIT_QOS_GRP_BB:
+		return "QOSGrpBB";
+	case WAIT_QOS_GRP_BB_MIN:
+		return "QOSGrpBBMinutes";
+	case WAIT_QOS_GRP_BB_RUN_MIN:
+		return "QOSGrpBBRunMinutes";
+	case WAIT_QOS_MAX_BB_PER_JOB:
+		return "QOSMaxBBPerJob";
+	case WAIT_QOS_MAX_BB_PER_NODE:
+		return "QOSMaxBBPerNode";
+	case WAIT_QOS_MAX_BB_PER_USER:
+		return "QOSMaxBBPerUser";
+	case WAIT_QOS_MAX_BB_MINS_PER_JOB:
+		return "AssocMaxBBMinutesPerJob";
+	case WAIT_QOS_MIN_BB:
+		return "QOSMinBB";
 	default:
 		snprintf(val, sizeof(val), "%d", inx);
 		return val;
@@ -1185,8 +1600,7 @@ extern void slurm_free_will_run_response_msg(will_run_response_msg_t *msg)
 {
         if (msg) {
                 xfree(msg->node_list);
-		if (msg->preemptee_job_id)
-			list_destroy(msg->preemptee_job_id);
+		FREE_NULL_LIST(msg->preemptee_job_id);
                 xfree(msg);
         }
 }
@@ -1343,7 +1757,7 @@ extern uint16_t log_string2num(char *name)
 	return (uint16_t) NO_VAL;
 }
 
-extern char *job_state_string(uint16_t inx)
+extern char *job_state_string(uint32_t inx)
 {
 	/* Process JOB_STATE_FLAGS */
 	if (inx & JOB_COMPLETING)
@@ -1352,10 +1766,15 @@ extern char *job_state_string(uint16_t inx)
 		return "CONFIGURING";
 	if (inx & JOB_RESIZING)
 		return "RESIZING";
-	if (inx & JOB_SPECIAL_EXIT)
-		return "SPECIAL_EXIT";
 	if (inx & JOB_REQUEUE)
 		return "REQUEUED";
+	if (inx & JOB_REQUEUE_HOLD)
+		return "REQUEUE_HOLD";
+	if (inx & JOB_SPECIAL_EXIT)
+		return "SPECIAL_EXIT";
+	if (inx & JOB_STOPPED)
+		return "STOPPED";
+
 
 	/* Process JOB_STATE_BASE */
 	switch (inx & JOB_STATE_BASE) {
@@ -1384,7 +1803,7 @@ extern char *job_state_string(uint16_t inx)
 	}
 }
 
-extern char *job_state_string_compact(uint16_t inx)
+extern char *job_state_string_compact(uint32_t inx)
 {
 	/* Process JOB_STATE_FLAGS */
 	if (inx & JOB_COMPLETING)
@@ -1393,10 +1812,14 @@ extern char *job_state_string_compact(uint16_t inx)
 		return "CF";
 	if (inx & JOB_RESIZING)
 		return "RS";
-	if (inx & JOB_SPECIAL_EXIT)
-		return "SE";
 	if (inx & JOB_REQUEUE)
 		return "RQ";
+	if (inx & JOB_REQUEUE_HOLD)
+		return "RH";
+	if (inx & JOB_SPECIAL_EXIT)
+		return "SE";
+	if (inx & JOB_STOPPED)
+		return "ST";
 
 	/* Process JOB_STATE_BASE */
 	switch (inx & JOB_STATE_BASE) {
@@ -1425,7 +1848,7 @@ extern char *job_state_string_compact(uint16_t inx)
 	}
 }
 
-static bool _job_name_test(int state_num, const char *state_name)
+static bool _job_name_test(uint32_t state_num, const char *state_name)
 {
 	if (!strcasecmp(state_name, job_state_string(state_num)) ||
 	    !strcasecmp(state_name, job_state_string_compact(state_num))) {
@@ -1434,9 +1857,9 @@ static bool _job_name_test(int state_num, const char *state_name)
 	return false;
 }
 
-extern int job_state_num(const char *state_name)
+extern uint32_t job_state_num(const char *state_name)
 {
-	int i;
+	uint32_t i;
 
 	for (i=0; i<JOB_END; i++) {
 		if (_job_name_test(i, state_name))
@@ -1452,7 +1875,7 @@ extern int job_state_num(const char *state_name)
 	if (_job_name_test(JOB_SPECIAL_EXIT, state_name))
 		return JOB_SPECIAL_EXIT;
 
-	return -1;
+	return NO_VAL;
 }
 
 extern char *trigger_res_type(uint16_t res_type)
@@ -1602,15 +2025,15 @@ extern char *reservation_flags_string(uint32_t flags)
 			xstrcat(flag_str, ",");
 		xstrcat(flag_str, "SPEC_NODES");
 	}
-	if (flags & RESERVE_FLAG_LIC_ONLY) {
+	if (flags & RESERVE_FLAG_ANY_NODES) {
 		if (flag_str[0])
 			xstrcat(flag_str, ",");
-		xstrcat(flag_str, "LICENSE_ONLY");
+		xstrcat(flag_str, "ANY_NODES");
 	}
-	if (flags & RESERVE_FLAG_NO_LIC_ONLY) {
+	if (flags & RESERVE_FLAG_NO_ANY_NODES) {
 		if (flag_str[0])
 			xstrcat(flag_str, ",");
-		xstrcat(flag_str, "NO_LICENSE_ONLY");
+		xstrcat(flag_str, "NO_ANY_NODES");
 	}
 	if (flags & RESERVE_FLAG_STATIC) {
 		if (flag_str[0])
@@ -1642,6 +2065,11 @@ extern char *reservation_flags_string(uint32_t flags)
 			xstrcat(flag_str, ",");
 		xstrcat(flag_str, "TIME_FLOAT");
 	}
+	if (flags & RESERVE_FLAG_REPLACE) {
+		if (flag_str[0])
+			xstrcat(flag_str, ",");
+		xstrcat(flag_str, "REPLACE");
+	}
 	return flag_str;
 }
 
@@ -1662,11 +2090,6 @@ extern char *priority_flags_string(uint16_t priority_flags)
 			xstrcat(flag_str, ",");
 		xstrcat(flag_str, "CALCULATE_RUNNING");
 	}
-	if (priority_flags & PRIORITY_FLAGS_TICKET_BASED) {
-		if (flag_str[0])
-			xstrcat(flag_str, ",");
-		xstrcat(flag_str, "TICKET_BASED");
-	}
 	if (priority_flags & PRIORITY_FLAGS_DEPTH_OBLIVIOUS) {
 		if (flag_str[0])
 			xstrcat(flag_str, ",");
@@ -1677,10 +2100,74 @@ extern char *priority_flags_string(uint16_t priority_flags)
 			xstrcat(flag_str, ",");
 		xstrcat(flag_str, "FAIR_TREE");
 	}
+	if (priority_flags & PRIORITY_FLAGS_MAX_TRES) {
+		if (flag_str[0])
+			xstrcat(flag_str, ",");
+		xstrcat(flag_str, "MAX_TRES");
+	}
 
 	return flag_str;
 }
 
+/* Translate a burst buffer numeric value to its equivalant state string */
+extern char *bb_state_string(uint16_t state)
+{
+	static char buf[16];
+
+	if (state == BB_STATE_PENDING)
+		return "pending";
+	if (state == BB_STATE_ALLOCATING)
+		return "allocating";
+	if (state == BB_STATE_ALLOCATED)
+		return "allocated";
+	if (state == BB_STATE_STAGING_IN)
+		return "staging-in";
+	if (state == BB_STATE_STAGED_IN)
+		return "staged-in";
+	if (state == BB_STATE_RUNNING)
+		return "running";
+	if (state == BB_STATE_SUSPEND)
+		return "suspended";
+	if (state == BB_STATE_STAGING_OUT)
+		return "staging-out";
+	if (state == BB_STATE_STAGED_OUT)
+		return "staged-out";
+	if (state == BB_STATE_TEARDOWN)
+		return "teardown";
+	if (state == BB_STATE_COMPLETE)
+		return "complete";
+	snprintf(buf, sizeof(buf), "%u", state);
+	return buf;
+}
+
+/* Translate a burst buffer state string to its equivalant numeric value */
+extern uint16_t bb_state_num(char *tok)
+{
+	if (!strcasecmp(tok, "pending"))
+		return BB_STATE_PENDING;
+	if (!strcasecmp(tok, "allocating"))
+		return BB_STATE_ALLOCATING;
+	if (!strcasecmp(tok, "allocated"))
+		return BB_STATE_ALLOCATED;
+	if (!strcasecmp(tok, "staging-in"))
+		return BB_STATE_STAGING_IN;
+	if (!strcasecmp(tok, "staged-in"))
+		return BB_STATE_STAGED_IN;
+	if (!strcasecmp(tok, "running"))
+		return BB_STATE_RUNNING;
+	if (!strcasecmp(tok, "suspend"))
+		return BB_STATE_SUSPEND;
+	if (!strcasecmp(tok, "staging-out"))
+		return BB_STATE_STAGING_OUT;
+	if (!strcasecmp(tok, "staged-out"))
+		return BB_STATE_STAGED_OUT;
+	if (!strcasecmp(tok, "teardown"))
+		return BB_STATE_TEARDOWN;
+	if (!strcasecmp(tok, "complete"))
+		return BB_STATE_COMPLETE;
+	return 0;
+}
+
 extern char *node_state_string(uint32_t inx)
 {
 	int  base            = (inx & NODE_STATE_BASE);
@@ -1943,6 +2430,34 @@ extern char *node_state_string_compact(uint32_t inx)
 	return "?";
 }
 
+extern uint16_t power_flags_id(char *power_flags)
+{
+	char *tmp, *tok, *save_ptr = NULL;
+	uint16_t rc = 0;
+
+	if (!power_flags)
+		return rc;
+
+	tmp = xstrdup(power_flags);
+	tok = strtok_r(tmp, ",", &save_ptr);
+	while (tok) {
+		if (!strcasecmp(tok, "level"))
+			rc |= SLURM_POWER_FLAGS_LEVEL;
+		else
+			error("Ignoring unrecognized power option (%s)", tok);
+		tok = strtok_r(NULL, ",", &save_ptr);
+	}
+	xfree(tmp);
+
+	return rc;
+}
+
+extern char *power_flags_str(uint16_t power_flags)
+{
+	if (power_flags & SLURM_POWER_FLAGS_LEVEL)
+		return "LEVEL";
+	return "";
+}
 
 extern void private_data_string(uint16_t private_data, char *str, int str_len)
 {
@@ -2221,28 +2736,41 @@ extern char *cray_nodelist2nids(hostlist_t hl_in, char *nodelist)
 	return nids;
 }
 
-
-/*
- * slurm_free_resource_allocation_response_msg - free slurm resource
- *	allocation response message
- * IN msg - pointer to allocation response message
- * NOTE: buffer is loaded by slurm_allocate_resources
- */
-extern void slurm_free_resource_allocation_response_msg (
+extern void slurm_free_resource_allocation_response_msg_members (
 	resource_allocation_response_msg_t * msg)
 {
+	int i;
+
 	if (msg) {
 		select_g_select_jobinfo_free(msg->select_jobinfo);
 		msg->select_jobinfo = NULL;
+		xfree(msg->account);
 		xfree(msg->alias_list);
 		xfree(msg->cpus_per_node);
 		xfree(msg->cpu_count_reps);
+		for (i = 0; i < msg->env_size; i++)
+			xfree(msg->environment[i]);
+		xfree(msg->environment);
 		xfree(msg->node_list);
 		xfree(msg->partition);
-		xfree(msg);
+		xfree(msg->qos);
+		xfree(msg->resv_name);
 	}
 }
 
+/*
+ * slurm_free_resource_allocation_response_msg - free slurm resource
+ *	allocation response message
+ * IN msg - pointer to allocation response message
+ * NOTE: buffer is loaded by slurm_allocate_resources
+ */
+extern void slurm_free_resource_allocation_response_msg (
+	resource_allocation_response_msg_t * msg)
+{
+	slurm_free_resource_allocation_response_msg_members(msg);
+	xfree(msg);
+}
+
 /*
  * slurm_free_sbcast_cred_msg - free slurm resource allocation response
  *	message including an sbcast credential
@@ -2415,6 +2943,7 @@ extern void slurm_free_job_step_info_members (job_step_info_t * msg)
 		xfree(msg->resv_ports);
 		select_g_select_jobinfo_free(msg->select_jobinfo);
 		msg->select_jobinfo = NULL;
+		xfree(msg->tres_alloc_str);
 	}
 }
 
@@ -2492,6 +3021,7 @@ extern void slurm_free_node_info_members(node_info_t * node)
 		xfree(node->cpu_spec_list);
 		acct_gather_energy_destroy(node->energy);
 		ext_sensors_destroy(node->ext_sensors);
+		power_mgmt_data_free(node->power);
 		xfree(node->features);
 		xfree(node->gres);
 		xfree(node->gres_drain);
@@ -2553,6 +3083,7 @@ extern void slurm_free_partition_info_members(partition_info_t * part)
 		xfree(part->name);
 		xfree(part->nodes);
 		xfree(part->node_inx);
+		xfree(part->qos_char);
 	}
 }
 
@@ -2581,9 +3112,10 @@ static void  _free_all_reservations(reserve_info_msg_t *msg)
 	    (msg->reservation_array == NULL))
 		return;
 
-	for (i = 0; i < msg->record_count; i++)
+	for (i = 0; i < msg->record_count; i++) {
 		slurm_free_reserve_info_members(
 			&msg->reservation_array[i]);
+	}
 
 }
 
@@ -2591,12 +3123,14 @@ extern void slurm_free_reserve_info_members(reserve_info_t * resv)
 {
 	if (resv) {
 		xfree(resv->accounts);
+		xfree(resv->burst_buffer);
 		xfree(resv->features);
 		xfree(resv->licenses);
 		xfree(resv->name);
 		xfree(resv->node_inx);
 		xfree(resv->node_list);
 		xfree(resv->partition);
+		xfree(resv->tres_str);
 		xfree(resv->users);
 	}
 }
@@ -2622,6 +3156,71 @@ extern void slurm_free_topo_info_msg(topo_info_response_msg_t *msg)
 	}
 }
 
+/*
+ * slurm_free_burst_buffer_info_msg - free buffer returned by
+ *	slurm_load_burst_buffer
+ * IN burst_buffer_info_msg_ptr - pointer to burst_buffer_info_msg_t
+ * NOTE: buffer is loaded by slurm_load_burst_buffer
+ */
+extern void slurm_free_burst_buffer_info_msg(burst_buffer_info_msg_t *msg)
+{
+	int i, j, k;
+	burst_buffer_gres_t *bb_gres_ptr;
+	burst_buffer_info_t *bb_info_ptr;
+	burst_buffer_resv_t *bb_resv_ptr;
+
+	if (msg) {
+		for (i = 0, bb_info_ptr = msg->burst_buffer_array;
+		     i < msg->record_count; i++, bb_info_ptr++) {
+			xfree(bb_info_ptr->allow_users);
+			xfree(bb_info_ptr->create_buffer);
+			xfree(bb_info_ptr->deny_users);
+			xfree(bb_info_ptr->destroy_buffer);
+			xfree(bb_info_ptr->get_sys_state);
+			for (k = 0, bb_gres_ptr = bb_info_ptr->gres_ptr;
+			     k < bb_info_ptr->gres_cnt; k++, bb_gres_ptr++) {
+				xfree(bb_gres_ptr->name);
+			}
+			xfree(bb_info_ptr->gres_ptr);
+			xfree(bb_info_ptr->name);
+			xfree(bb_info_ptr->start_stage_in);
+			xfree(bb_info_ptr->start_stage_out);
+			xfree(bb_info_ptr->stop_stage_in);
+			xfree(bb_info_ptr->stop_stage_out);
+			for (j = 0,
+			     bb_resv_ptr = bb_info_ptr->burst_buffer_resv_ptr;
+			     j < bb_info_ptr->buffer_count;
+			     j++, bb_resv_ptr++) {
+				for (k = 0, bb_gres_ptr = bb_resv_ptr->gres_ptr;
+				     k < bb_resv_ptr->gres_cnt;
+				     k++, bb_gres_ptr++) {
+					xfree(bb_gres_ptr->name);
+				}
+				xfree(bb_resv_ptr->account);
+				xfree(bb_resv_ptr->gres_ptr);
+				xfree(bb_resv_ptr->name);
+				xfree(bb_resv_ptr->partition);
+				xfree(bb_resv_ptr->qos);
+			}
+			xfree(bb_info_ptr->burst_buffer_resv_ptr);
+			xfree(bb_info_ptr->burst_buffer_use_ptr);
+		}
+		xfree(msg->burst_buffer_array);
+		xfree(msg);
+	}
+}
+
+/*
+ * slurm_free_powercap_info_msg - free the powercapping information
+ *	response message
+ * IN msg - pointer to powercapping information response message
+ * NOTE: buffer is loaded by slurm_load_powercap.
+ */
+extern void slurm_free_powercap_info_msg(powercap_info_msg_t *msg)
+{
+	xfree(msg);
+}
+
 
 extern void slurm_free_file_bcast_msg(file_bcast_msg_t *msg)
 {
@@ -2662,6 +3261,18 @@ extern void slurm_free_job_step_pids(void *object)
 	}
 }
 
+extern void slurm_free_network_callerid_msg(network_callerid_msg_t *mesg)
+{
+	xfree(mesg);
+}
+
+extern void slurm_free_network_callerid_resp(network_callerid_resp_t *resp)
+{
+	if (resp) {
+		xfree(resp->node_name);
+		xfree(resp);
+	}
+}
 
 extern void slurm_free_block_job_info(void *object)
 {
@@ -2740,15 +3351,19 @@ extern void slurm_free_set_debug_level_msg(set_debug_level_msg_t *msg)
 	xfree(msg);
 }
 
-extern void slurm_destroy_association_shares_object(void *object)
+extern void slurm_destroy_assoc_shares_object(void *object)
 {
-	association_shares_object_t *obj_ptr =
-		(association_shares_object_t *)object;
+	assoc_shares_object_t *obj_ptr =
+		(assoc_shares_object_t *)object;
 
 	if (obj_ptr) {
 		xfree(obj_ptr->cluster);
 		xfree(obj_ptr->name);
 		xfree(obj_ptr->parent);
+		xfree(obj_ptr->partition);
+		xfree(obj_ptr->tres_run_secs);
+		xfree(obj_ptr->tres_grp_mins);
+		xfree(obj_ptr->usage_tres_raw);
 		xfree(obj_ptr);
 	}
 }
@@ -2756,10 +3371,8 @@ extern void slurm_destroy_association_shares_object(void *object)
 extern void slurm_free_shares_request_msg(shares_request_msg_t *msg)
 {
 	if (msg) {
-		if (msg->acct_list)
-			list_destroy(msg->acct_list);
-		if (msg->user_list)
-			list_destroy(msg->user_list);
+		FREE_NULL_LIST(msg->acct_list);
+		FREE_NULL_LIST(msg->user_list);
 		xfree(msg);
 	}
 }
@@ -2767,8 +3380,13 @@ extern void slurm_free_shares_request_msg(shares_request_msg_t *msg)
 extern void slurm_free_shares_response_msg(shares_response_msg_t *msg)
 {
 	if (msg) {
-		if (msg->assoc_shares_list)
-			list_destroy(msg->assoc_shares_list);
+		int i;
+		if (msg->tres_names) {
+			for (i=0; i<msg->tres_cnt; i++)
+				xfree(msg->tres_names[i]);
+			xfree(msg->tres_names);
+		}
+		FREE_NULL_LIST(msg->assoc_shares_list);
 		xfree(msg);
 	}
 }
@@ -2784,17 +3402,46 @@ extern void slurm_destroy_priority_factors_object(void *object)
 {
 	priority_factors_object_t *obj_ptr =
 		(priority_factors_object_t *)object;
+	xfree(obj_ptr->tres_weights);
+	xfree(obj_ptr->tres_names);
+	xfree(obj_ptr->priority_tres);
 	xfree(obj_ptr);
 }
 
+extern void slurm_copy_priority_factors_object(priority_factors_object_t *dest,
+					       priority_factors_object_t *src)
+{
+	int size;
+
+	if (!dest || !src)
+		return;
+
+	size = sizeof(double) * src->tres_cnt;
+
+	memcpy(dest, src, sizeof(priority_factors_object_t));
+	if (src->priority_tres) {
+		dest->priority_tres = xmalloc(size);
+		memcpy(dest->priority_tres, src->priority_tres, size);
+	}
+
+	if (src->tres_names) {
+		int char_size = sizeof(char *) * src->tres_cnt;
+		dest->tres_names = xmalloc(char_size);
+		memcpy(dest->tres_names, src->tres_names, char_size);
+	}
+
+	if (src->tres_weights) {
+		dest->tres_weights = xmalloc(size);
+		memcpy(dest->tres_weights, src->tres_weights, size);
+	}
+}
+
 extern void slurm_free_priority_factors_request_msg(
 	priority_factors_request_msg_t *msg)
 {
 	if (msg) {
-		if (msg->job_id_list)
-			list_destroy(msg->job_id_list);
-		if (msg->uid_list)
-			list_destroy(msg->uid_list);
+		FREE_NULL_LIST(msg->job_id_list);
+		FREE_NULL_LIST(msg->uid_list);
 		xfree(msg);
 	}
 }
@@ -2803,8 +3450,7 @@ extern void slurm_free_priority_factors_response_msg(
 	priority_factors_response_msg_t *msg)
 {
 	if (msg) {
-		if (msg->priority_factors_list)
-			list_destroy(msg->priority_factors_list);
+		FREE_NULL_LIST(msg->priority_factors_list);
 		xfree(msg);
 	}
 }
@@ -2813,8 +3459,29 @@ extern void slurm_free_priority_factors_response_msg(
 extern void slurm_free_accounting_update_msg(accounting_update_msg_t *msg)
 {
 	if (msg) {
-		if (msg->update_list)
-			list_destroy(msg->update_list);
+		FREE_NULL_LIST(msg->update_list);
+		xfree(msg);
+	}
+}
+
+extern void slurm_free_comp_msg_list(void *x)
+{
+	slurm_msg_t *msg = (slurm_msg_t*)x;
+	if (msg) {
+		if (msg->data_size) {
+			free_buf(msg->data);
+			msg->data = NULL;
+		} else
+			slurm_free_msg_data(msg->msg_type, msg->data);
+
+		slurm_free_msg(msg);
+	}
+}
+
+extern void slurm_free_composite_msg(composite_msg_t *msg)
+{
+	if (msg) {
+		FREE_NULL_LIST(msg->msg_list);
 		xfree(msg);
 	}
 }
@@ -2879,8 +3546,12 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 		break;
 	case REQUEST_JOB_END_TIME:
 	case REQUEST_JOB_ALLOCATION_INFO:
+	case REQUEST_JOB_ALLOCATION_INFO_LITE:
 		slurm_free_job_alloc_info_msg(data);
 		break;
+	case REQUEST_JOB_SBCAST_CRED:
+		slurm_free_step_alloc_info_msg(data);
+		break;
 	case REQUEST_SHUTDOWN:
 		slurm_free_shutdown_msg(data);
 		break;
@@ -2890,10 +3561,16 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 	case REQUEST_UPDATE_NODE:
 		slurm_free_update_node_msg(data);
 		break;
+	case REQUEST_UPDATE_LAYOUT:
+		slurm_free_update_layout_msg(data);
+		break;
 	case REQUEST_CREATE_PARTITION:
 	case REQUEST_UPDATE_PARTITION:
 		slurm_free_update_part_msg(data);
 		break;
+	case REQUEST_UPDATE_POWERCAP:
+		slurm_free_powercap_info_msg(data);
+		break;
 	case REQUEST_DELETE_PARTITION:
 		slurm_free_delete_part_msg(data);
 		break;
@@ -2908,6 +3585,9 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 	case REQUEST_RESERVATION_INFO:
 		slurm_free_resv_info_request_msg(data);
 		break;
+	case REQUEST_LAYOUT_INFO:
+		slurm_free_layout_info_request_msg(data);
+		break;
 	case REQUEST_NODE_REGISTRATION_STATUS:
 		slurm_free_node_registration_status_msg(data);
 		break;
@@ -2954,6 +3634,7 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 		slurm_free_block_info_request_msg(data);
 		break;
 	case REQUEST_STEP_COMPLETE:
+	case REQUEST_STEP_COMPLETE_AGGR:
 		slurm_free_step_complete_msg(data);
 		break;
 	case RESPONSE_JOB_STEP_STAT:
@@ -3023,6 +3704,9 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 	case ACCOUNTING_FIRST_REG:
 	case ACCOUNTING_REGISTER_CTLD:
 	case REQUEST_TOPO_INFO:
+	case REQUEST_BURST_BUFFER_INFO:
+	case REQUEST_SICP_INFO:
+	case REQUEST_POWERCAP_INFO:
 		/* No body to free */
 		break;
 	case REQUEST_REBOOT_NODES:
@@ -3049,6 +3733,9 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 	case RESPONSE_JOB_ARRAY_ERRORS:
 		slurm_free_job_array_resp(data);
 		break;
+	case RESPONSE_BURST_BUFFER_INFO:
+		slurm_free_burst_buffer_info_msg(data);
+		break;
 	case REQUEST_TRIGGER_GET:
 	case RESPONSE_TRIGGER_GET:
 	case REQUEST_TRIGGER_SET:
@@ -3056,6 +3743,10 @@ extern int slurm_free_msg_data(slurm_msg_type_t type, void *data)
 	case REQUEST_TRIGGER_PULL:
 		slurm_free_trigger_msg(data);
 		break;
+	case MESSAGE_COMPOSITE:
+	case RESPONSE_MESSAGE_COMPOSITE:
+		slurm_free_composite_msg(data);
+		break;
 	default:
 		error("invalid type trying to be freed %u", type);
 		break;
@@ -3131,29 +3822,6 @@ extern bool valid_spank_job_env(char **spank_job_env,
 	return true;
 }
 
-/* Return ctime like string without the newline.
- * Not thread safe */
-extern char *slurm_ctime(const time_t *timep)
-{
-	static char time_str[25];
-
-	strftime(time_str, sizeof(time_str), "%a %b %d %T %Y",
-		 localtime(timep));
-
-	return time_str;
-}
-
-/* Return ctime like string without the newline, thread safe. */
-extern char *slurm_ctime_r(const time_t *timep, char *time_str)
-{
-	struct tm newtime;
-	localtime_r(timep, &newtime);
-
-	strftime(time_str, 25, "%a %b %d %T %Y", &newtime);
-
-	return time_str;
-}
-
 /* slurm_free_license_info()
  *
  * Free the license info returned previously
@@ -3285,6 +3953,10 @@ rpc_num2string(uint16_t opcode)
 		return "REQUEST_RESERVATION_INFO";
 	case RESPONSE_RESERVATION_INFO:
 		return "RESPONSE_RESERVATION_INFO";
+	case REQUEST_LAYOUT_INFO:
+		return "REQUEST_LAYOUT_INFO";
+	case RESPONSE_LAYOUT_INFO:
+		return "RESPONSE_LAYOUT_INFO";
 	case REQUEST_PRIORITY_FACTORS:
 		return "REQUEST_PRIORITY_FACTORS";
 	case RESPONSE_PRIORITY_FACTORS:
@@ -3307,10 +3979,6 @@ rpc_num2string(uint16_t opcode)
 		return "REQUEST_STATS_INFO";
 	case RESPONSE_STATS_INFO:
 		return "RESPONSE_STATS_INFO";
-	case REQUEST_STATS_RESET:
-		return "REQUEST_STATS_RESET";
-	case RESPONSE_STATS_RESET:
-		return "RESPONSE_STATS_RESET";
 	case REQUEST_JOB_USER_INFO:
 		return "REQUEST_JOB_USER_INFO";
 	case REQUEST_NODE_INFO_SINGLE:
@@ -3319,6 +3987,8 @@ rpc_num2string(uint16_t opcode)
 		return "REQUEST_UPDATE_JOB";
 	case REQUEST_UPDATE_NODE:
 		return "REQUEST_UPDATE_NODE";
+	case REQUEST_UPDATE_LAYOUT:
+		return "REQUEST_UPDATE_LAYOUT";
 	case REQUEST_CREATE_PARTITION:
 		return "REQUEST_CREATE_PARTITION";
 	case REQUEST_DELETE_PARTITION:
@@ -3417,6 +4087,8 @@ rpc_num2string(uint16_t opcode)
 		return "RESPONSE_SUSPEND";
 	case REQUEST_STEP_COMPLETE:
 		return "REQUEST_STEP_COMPLETE";
+	case REQUEST_STEP_COMPLETE_AGGR:
+		return "REQUEST_STEP_COMPLETE_AGGR";
 	case REQUEST_COMPLETE_JOB_ALLOCATION:
 		return "REQUEST_COMPLETE_JOB_ALLOCATION";
 	case REQUEST_COMPLETE_BATCH_SCRIPT:
@@ -3525,8 +4197,134 @@ rpc_num2string(uint16_t opcode)
 		return "ACCOUNTING_FIRST_REG";
 	case ACCOUNTING_REGISTER_CTLD:
 		return "ACCOUNTING_REGISTER_CTLD";
+	case MESSAGE_COMPOSITE:
+		return "MESSAGE_COMPOSITE";
+	case RESPONSE_MESSAGE_COMPOSITE:
+		return "RESPONSE_MESSAGE_COMPOSITE";
+	case REQUEST_BURST_BUFFER_INFO:
+		return "REQUEST_BURST_BUFFER_INFO";
+	case RESPONSE_BURST_BUFFER_INFO:
+		return "RESPONSE_BURST_BUFFER_INFO";
 	default:
 		(void) snprintf(buf, sizeof(buf), "%u", opcode);
 		return buf;
 	}
 }
+
+extern char *
+slurm_bb_flags2str(uint32_t bb_flags)
+{
+	static char bb_str[1024];
+
+	bb_str[0] = '\0';
+	if (bb_flags & BB_FLAG_DISABLE_PERSISTENT) {
+		if (bb_str[0])
+			strcat(bb_str, ",");
+		strcat(bb_str, "DisablePersistent");
+	}
+	if (bb_flags & BB_FLAG_EMULATE_CRAY) {
+		if (bb_str[0])
+			strcat(bb_str, ",");
+		strcat(bb_str, "EmulateCray");
+	}
+	if (bb_flags & BB_FLAG_ENABLE_PERSISTENT) {
+		if (bb_str[0])
+			strcat(bb_str, ",");
+		strcat(bb_str, "EnablePersistent");
+	}
+	if (bb_flags & BB_FLAG_PRIVATE_DATA) {
+		if (bb_str[0])
+			strcat(bb_str, ",");
+		strcat(bb_str, "PrivateData");
+	}
+
+	return bb_str;
+}
+
+extern uint32_t
+slurm_bb_str2flags(char *bb_str)
+{
+	uint32_t bb_flags = 0;
+
+	if (bb_str && strstr(bb_str, "DisablePersistent"))
+		bb_flags |= BB_FLAG_DISABLE_PERSISTENT;
+	if (bb_str && strstr(bb_str, "EmulateCray"))
+		bb_flags |= BB_FLAG_EMULATE_CRAY;
+	if (bb_str && strstr(bb_str, "EnablePersistent"))
+		bb_flags |= BB_FLAG_ENABLE_PERSISTENT;
+	if (bb_str && strstr(bb_str, "PrivateData"))
+		bb_flags |= BB_FLAG_PRIVATE_DATA;
+
+	return bb_flags;
+}
+
+extern void
+slurm_free_assoc_mgr_info_msg(assoc_mgr_info_msg_t *msg)
+{
+	if (!msg)
+		return;
+
+	FREE_NULL_LIST(msg->assoc_list);
+	FREE_NULL_LIST(msg->qos_list);
+	if (msg->tres_names) {
+		while (msg->tres_cnt)
+			xfree(msg->tres_names[--msg->tres_cnt]);
+		xfree(msg->tres_names);
+	}
+	FREE_NULL_LIST(msg->user_list);
+	xfree(msg);
+}
+
+extern void slurm_free_assoc_mgr_info_request_msg(
+	assoc_mgr_info_request_msg_t *msg)
+{
+	if (!msg)
+		return;
+
+	FREE_NULL_LIST(msg->acct_list);
+	FREE_NULL_LIST(msg->qos_list);
+	FREE_NULL_LIST(msg->user_list);
+	xfree(msg);
+}
+
+extern int slurm_load_sicp(sicp_info_msg_t **sicp_buffer_pptr)
+{
+	int rc;
+	slurm_msg_t resp_msg;
+	slurm_msg_t req_msg;
+
+	slurm_msg_t_init(&req_msg);
+	slurm_msg_t_init(&resp_msg);
+
+	req_msg.msg_type = REQUEST_SICP_INFO;
+	req_msg.data     = NULL;
+
+//FIXME: This needs to be modified to communicate with an arbitrary host/port
+	if (slurm_send_recv_controller_msg(&req_msg, &resp_msg) < 0)
+		return SLURM_ERROR;
+
+	switch (resp_msg.msg_type) {
+	case RESPONSE_SICP_INFO:
+		*sicp_buffer_pptr = (sicp_info_msg_t *)resp_msg.data;
+		break;
+	case RESPONSE_SLURM_RC:
+		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
+		slurm_free_return_code_msg(resp_msg.data);
+		if (rc)
+			slurm_seterrno_ret(rc);
+		break;
+	default:
+		slurm_seterrno_ret(SLURM_UNEXPECTED_MSG_ERROR);
+		break;
+	}
+
+	return SLURM_PROTOCOL_SUCCESS;
+}
+
+extern void slurm_free_sicp_msg(sicp_info_msg_t *sicp_buffer_ptr)
+{
+	if (sicp_buffer_ptr) {
+		xfree(sicp_buffer_ptr->sicp_array);
+		xfree(sicp_buffer_ptr);
+	}
+}
diff --git a/src/common/slurm_protocol_defs.h b/src/common/slurm_protocol_defs.h
index 95a7770ed..6bc06134a 100644
--- a/src/common/slurm_protocol_defs.h
+++ b/src/common/slurm_protocol_defs.h
@@ -3,7 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
- *  Portions Copyright (C) 2010 SchedMD <http://www.schedmd.com>.
+ *  Portions Copyright (C) 2010-2014 SchedMD <http://www.schedmd.com>.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov>.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -105,6 +105,8 @@
 	(_X->job_state & JOB_RESIZING)
 #define IS_JOB_REQUEUED(_X)		\
 	(_X->job_state & JOB_REQUEUE)
+#define IS_JOB_UPDATE_DB(_X)		\
+	(_X->job_state & JOB_UPDATE_DB)
 
 /* Defined node states */
 #define IS_NODE_UNKNOWN(_X)		\
@@ -202,7 +204,7 @@ typedef enum {
 	REQUEST_LICENSE_INFO,
 	RESPONSE_LICENSE_INFO,
 
-	REQUEST_BUILD_INFO = 2001,
+	REQUEST_BUILD_INFO	= 2001,
 	RESPONSE_BUILD_INFO,
 	REQUEST_JOB_INFO,
 	RESPONSE_JOB_INFO,
@@ -211,7 +213,7 @@ typedef enum {
 	REQUEST_NODE_INFO,
 	RESPONSE_NODE_INFO,
 	REQUEST_PARTITION_INFO,
-	RESPONSE_PARTITION_INFO,
+	RESPONSE_PARTITION_INFO,	/* 2010 */
 	REQUEST_ACCTING_INFO,
 	RESPONSE_ACCOUNTING_INFO,
 	REQUEST_JOB_ID,
@@ -221,7 +223,7 @@ typedef enum {
 	REQUEST_TRIGGER_SET,
 	REQUEST_TRIGGER_GET,
 	REQUEST_TRIGGER_CLEAR,
-	RESPONSE_TRIGGER_GET,
+	RESPONSE_TRIGGER_GET,		/* 2020 */
 	REQUEST_JOB_INFO_SINGLE,
 	REQUEST_SHARE_INFO,
 	RESPONSE_SHARE_INFO,
@@ -231,17 +233,25 @@ typedef enum {
 	RESPONSE_PRIORITY_FACTORS,
 	REQUEST_TOPO_INFO,
 	RESPONSE_TOPO_INFO,
-	REQUEST_TRIGGER_PULL,
+	REQUEST_TRIGGER_PULL,		/* 2030 */
 	REQUEST_FRONT_END_INFO,
 	RESPONSE_FRONT_END_INFO,
 	REQUEST_SPANK_ENVIRONMENT,
 	RESPONCE_SPANK_ENVIRONMENT,
 	REQUEST_STATS_INFO,
 	RESPONSE_STATS_INFO,
-	REQUEST_STATS_RESET,		/* VESTIGIAL, UNUSED */
-	RESPONSE_STATS_RESET,		/* VESTIGIAL, UNUSED */
+	REQUEST_BURST_BUFFER_INFO,
+	RESPONSE_BURST_BUFFER_INFO,
 	REQUEST_JOB_USER_INFO,
-	REQUEST_NODE_INFO_SINGLE,
+	REQUEST_NODE_INFO_SINGLE,  /* 2040 */
+	REQUEST_POWERCAP_INFO,
+	RESPONSE_POWERCAP_INFO,
+	REQUEST_ASSOC_MGR_INFO,
+	RESPONSE_ASSOC_MGR_INFO,
+	REQUEST_SICP_INFO,
+	RESPONSE_SICP_INFO,
+	REQUEST_LAYOUT_INFO,
+	RESPONSE_LAYOUT_INFO,
 
 	REQUEST_UPDATE_JOB = 3001,
 	REQUEST_UPDATE_NODE,
@@ -254,6 +264,8 @@ typedef enum {
 	REQUEST_UPDATE_RESERVATION,
 	REQUEST_UPDATE_BLOCK,
 	REQUEST_UPDATE_FRONT_END,
+	REQUEST_UPDATE_LAYOUT,
+	REQUEST_UPDATE_POWERCAP,
 
 	REQUEST_RESOURCE_ALLOCATION = 4001,
 	RESPONSE_RESOURCE_ALLOCATION,
@@ -314,6 +326,9 @@ typedef enum {
 	REQUEST_KILL_JOB,       /* 5032 */
 	REQUEST_KILL_JOBSTEP,
 	RESPONSE_JOB_ARRAY_ERRORS,
+	REQUEST_NETWORK_CALLERID,
+	RESPONSE_NETWORK_CALLERID,
+	REQUEST_STEP_COMPLETE_AGGR,
 
 	REQUEST_LAUNCH_TASKS = 6001,
 	RESPONSE_LAUNCH_TASKS,
@@ -361,6 +376,9 @@ typedef enum {
 	ACCOUNTING_UPDATE_MSG = 10001,
 	ACCOUNTING_FIRST_REG,
 	ACCOUNTING_REGISTER_CTLD,
+
+	MESSAGE_COMPOSITE = 11001,
+	RESPONSE_MESSAGE_COMPOSITE,
 } slurm_msg_type_t;
 
 typedef enum {
@@ -382,6 +400,7 @@ typedef struct forward {
 typedef struct slurm_protocol_header {
 	uint16_t version;
 	uint16_t flags;
+	uint16_t msg_index;
 	uint16_t msg_type; /* really slurm_msg_type_t but needs to be
 			      uint16_t for packing purposes. */
 	uint32_t body_length;
@@ -391,27 +410,22 @@ typedef struct slurm_protocol_header {
 	List ret_list;
 } header_t;
 
-typedef struct forward_message {
-	header_t header;
+typedef struct forward_struct {
 	char *buf;
 	int buf_len;
-	int timeout;
-	List ret_list;
-	pthread_mutex_t *forward_mutex;
-	pthread_cond_t *notify;
-} forward_msg_t;
-
-typedef struct forward_struct {
-	int timeout;
 	uint16_t fwd_cnt;
 	pthread_mutex_t forward_mutex;
 	pthread_cond_t notify;
-	forward_msg_t *forward_msg;
-	char *buf;
-	int buf_len;
 	List ret_list;
+	int timeout;
 } forward_struct_t;
 
+typedef struct forward_message {
+	forward_struct_t *fwd_struct;
+	header_t header;
+	int timeout;
+} forward_msg_t;
+
 typedef struct slurm_protocol_config {
 	slurm_addr_t primary_controller;
 	slurm_addr_t secondary_controller;
@@ -424,6 +438,7 @@ typedef struct slurm_msg {
 	void *data;
 	uint32_t data_size;
 	uint16_t flags;
+	uint16_t msg_index;
 	uint16_t msg_type; /* really a slurm_msg_type_t but needs to be
 			    * this way for packing purposes.  message type */
 	uint16_t protocol_version; /* DON'T PACK!  Only used if
@@ -450,30 +465,32 @@ typedef struct ret_data_info {
  * Slurm Protocol Data Structures
 \*****************************************************************************/
 
-typedef struct association_shares_object {
+typedef struct assoc_shares_object {
 	uint32_t assoc_id;	/* association ID */
 
 	char *cluster;          /* cluster name */
-	uint64_t cpu_run_mins;	/* currently running cpu-minutes
-				 *  = grp_used_cpu_run_secs / 60 */
-	uint64_t grp_cpu_mins;	/* cpu-minute limit */
-
 	char *name;             /* name */
 	char *parent;           /* parent name */
+	char *partition;	/* partition */
 
 	double shares_norm;     /* normalized shares */
 	uint32_t shares_raw;	/* number of shares allocated */
 
+	uint64_t *tres_run_secs; /* currently running tres-secs
+				  * = grp_used_tres_run_secs */
+	uint64_t *tres_grp_mins; /* tres-minute limit */
+
 	double usage_efctv;	/* effective, normalized usage */
 	double usage_norm;	/* normalized usage */
-	uint64_t usage_raw;	/* measure of resource usage */
+	uint64_t usage_raw;	/* measure of TRESBillableUnits usage */
+	long double *usage_tres_raw; /* measure of each TRES usage */
 	double fs_factor;	/* fairshare factor */
 	double level_fs;	/* fairshare factor at this level. stored on an
 				 * assoc as a long double, but that is not
 				 * needed for display in sshare */
 	uint16_t user;          /* 1 if user association 0 if account
 				 * association */
-} association_shares_object_t;
+} assoc_shares_object_t;
 
 typedef struct shares_request_msg {
 	List acct_list;
@@ -481,8 +498,10 @@ typedef struct shares_request_msg {
 } shares_request_msg_t;
 
 typedef struct shares_response_msg {
-	List assoc_shares_list; /* list of association_shares_object_t *'s */
+	List assoc_shares_list; /* list of assoc_shares_object_t *'s */
 	uint64_t tot_shares;
+	uint32_t tres_cnt;
+	char **tres_names;
 } shares_response_msg_t;
 
 typedef struct priority_factors_object {
@@ -495,6 +514,11 @@ typedef struct priority_factors_object {
 	double	 priority_part;
 	double	 priority_qos;
 
+	double   *priority_tres;/* tres priorities with weights applied. */
+	uint32_t  tres_cnt;     /* number of configured tres' on system. */
+	char    **tres_names;	/* packed as assoc_mgr_tres_names[] */
+	double   *tres_weights; /* PriorityWeightTRES weights as an array */
+
 	uint16_t nice;
 } priority_factors_object_t;
 
@@ -576,6 +600,13 @@ typedef struct resv_info_request_msg {
         time_t last_update;
 } resv_info_request_msg_t;
 
+typedef struct layout_info_request_msg {
+	char* layout_type;
+	char* entities;
+	char* type;
+	uint32_t no_relation;
+} layout_info_request_msg_t;
+
 typedef struct complete_job_allocation {
 	uint32_t job_id;
 	uint32_t job_rc;
@@ -648,7 +679,9 @@ typedef struct job_step_specs {
 	uint16_t ckpt_interval;	/* checkpoint creation interval (minutes) */
 	char *ckpt_dir; 	/* path to store checkpoint image files */
 	uint32_t cpu_count;	/* count of required processors */
-	uint32_t cpu_freq;	/* requested cpu frequency */
+	uint32_t cpu_freq_gov;  /* cpu frequency governor */
+	uint32_t cpu_freq_max;  /* Maximum cpu frequency  */
+	uint32_t cpu_freq_min;  /* Minimum cpu frequency  */
 	uint16_t exclusive;	/* 1 if CPUs not shared with other steps */
 	char *features;		/* required node features, default NONE */
 	char *gres;		/* generic resources required */
@@ -675,7 +708,7 @@ typedef struct job_step_specs {
 	uint16_t port;		/* port to contact initiating srun */
 	uint16_t relative;	/* first node to use of job's allocation */
 	uint16_t resv_port_cnt;	/* reserve ports for MPI if set */
-	uint16_t task_dist;	/* see enum task_dist_state */
+	uint32_t task_dist;	/* see enum task_dist_state in slurm.h */
 	uint32_t time_limit;	/* maximum run time in minutes, default is
 				 * partition limit */
 	uint32_t user_id;	/* user the job runs as */
@@ -690,6 +723,9 @@ typedef struct job_step_create_response_msg {
 	dynamic_plugin_data_t *select_jobinfo;	/* select opaque data type */
 	switch_jobinfo_t *switch_job;	/* switch context, opaque
                                          * data structure */
+	uint16_t use_protocol_ver;   /* Lowest protocol version running on
+				      * the slurmd's in this step.
+				      */
 } job_step_create_response_msg_t;
 
 typedef struct launch_tasks_request_msg {
@@ -717,11 +753,12 @@ typedef struct launch_tasks_request_msg {
 	char     *cpu_bind;	/* binding map for map/mask_cpu           */
 	uint16_t mem_bind_type;	/* --mem_bind=                    */
 	char     *mem_bind;	/* binding map for tasks to memory        */
+	uint16_t accel_bind_type; /* --accel-bind= */
 	uint16_t  num_resp_port;
 	uint16_t  *resp_port;   /* array of available response ports      */
 
         /* Distribution at the lowest level of logical processor (lllp) */
-	uint16_t task_dist;  /* --distribution=, -m dist	*/
+	uint32_t task_dist;  /* --distribution=, -m dist	*/
 	uint16_t  task_flags;
 	uint32_t **global_task_ids;
 	slurm_addr_t orig_addr;	  /* where message really came from for io */
@@ -731,7 +768,9 @@ typedef struct launch_tasks_request_msg {
 	uint8_t open_mode;	/* stdout/err append or truncate */
 	uint8_t pty;		/* use pseudo tty */
 	char *acctg_freq;	/* accounting polling intervals */
-	uint32_t cpu_freq;	/* requested cpu frequency */
+	uint32_t cpu_freq_min;  /* Minimum cpu frequency  */
+	uint32_t cpu_freq_max;  /* Maximum cpu frequency  */
+	uint32_t cpu_freq_gov;  /* cpu frequency governor */
 	uint16_t job_core_spec;	/* Count of specialized cores */
 
 	/********** START "normal" IO only options **********/
@@ -778,6 +817,26 @@ typedef struct return_code2_msg {
 	char *err_msg;
 } return_code2_msg_t;
 
+/* defined in slurm.h
+typedef struct network_callerid_msg {
+	unsigned char ip_src[16];
+	unsigned char ip_dst[16];
+	uint32_t port_src;
+	uint32_t port_dst;
+	int32_t af;	// NOTE: un/packed as uint32_t
+} network_callerid_msg_t; */
+
+typedef struct network_callerid_resp {
+	uint32_t job_id;
+	uint32_t return_code;
+	char *node_name;
+} network_callerid_resp_t;
+
+typedef struct composite_msg {
+	slurm_addr_t sender;	/* address of sending node/port */
+	List	 msg_list;
+} composite_msg_t;
+
 /* Note: We include the node list here for reliable cleanup on XCPU systems.
  *
  * Note: We include select_jobinfo here in addition to the job launch
@@ -797,7 +856,7 @@ typedef struct return_code2_msg {
 typedef struct kill_job_msg {
 	uint32_t job_id;
 	uint32_t step_id;
-	uint16_t job_state;
+	uint32_t job_state;
 	uint32_t job_uid;
 	time_t   time;		/* slurmctld's time of request */
 	time_t   start_time;	/* time of job start, track job requeue */
@@ -839,21 +898,25 @@ typedef struct reattach_tasks_response_msg {
 } reattach_tasks_response_msg_t;
 
 typedef struct prolog_launch_msg {
-	uint32_t job_id;		/* slurm job_id */
-	uint32_t uid;
-	uint32_t gid;
 	char *alias_list;		/* node name/address/hostnamne aliases */
+	slurm_cred_t *cred;
+	uint32_t gid;
+	uint32_t job_id;		/* slurm job_id */
+	uint32_t job_mem_limit;		/* job's memory limit, passed via cred */
+	uint32_t nnodes;			/* count of nodes, passed via cred */
 	char *nodes;			/* list of nodes allocated to job_step */
 	char *partition;		/* partition the job is running in */
+	dynamic_plugin_data_t *select_jobinfo;	/* opaque data type */
+	char **spank_job_env;		/* SPANK job environment variables */
+	uint32_t spank_job_env_size;	/* size of spank_job_env */
 	char *std_err;			/* pathname of stderr */
 	char *std_out;			/* pathname of stdout */
+	uint32_t uid;
 	char *work_dir;			/* full pathname of working directory */
-	char **spank_job_env;	/* SPANK job environment variables */
-	uint32_t spank_job_env_size;			/* size of spank_job_env */
-	dynamic_plugin_data_t *select_jobinfo;	/* opaque data type */
 } prolog_launch_msg_t;
 
 typedef struct batch_job_launch_msg {
+	char *account;          /* account under which the job is running */
 	uint32_t array_job_id;	/* job array master job ID */
 	uint32_t array_task_id;	/* job array ID or NO_VAL */
 	uint32_t job_id;
@@ -882,6 +945,7 @@ typedef struct batch_job_launch_msg {
 	char *script;		/* the actual job script, default NONE */
 	char *std_err;		/* pathname of stderr */
 	char *std_in;		/* pathname of stdin */
+	char *qos;              /* qos the job is running under */
 	char *std_out;		/* pathname of stdout */
 	char *work_dir;		/* full pathname of working directory */
 	char *ckpt_dir;		/* location to store checkpoint image */
@@ -900,11 +964,14 @@ typedef struct batch_job_launch_msg {
 				  * real memory per CPU | MEM_PER_CPU,
 				  * default=0 (no limit) */
 	char *acctg_freq;	/* accounting polling intervals	*/
-	uint32_t cpu_freq;	/* requested cpu frequency */
+	uint32_t cpu_freq_min;  /* Minimum cpu frequency  */
+	uint32_t cpu_freq_max;  /* Maximum cpu frequency  */
+	uint32_t cpu_freq_gov;  /* cpu frequency governor */
 	uint32_t job_mem;	/* memory limit for job		*/
 	uint16_t restart_cnt;	/* batch job restart count	*/
 	char **spank_job_env;	/* SPANK job environment variables */
 	uint32_t spank_job_env_size;	/* size of spank_job_env */
+	char *resv_name;        /* job's reservation */
 } batch_job_launch_msg_t;
 
 typedef struct job_id_request_msg {
@@ -996,14 +1063,6 @@ typedef struct pty_winsz {
 	uint16_t rows;
 } pty_winsz_t;
 
-typedef struct will_run_response_msg {
-	uint32_t job_id;	/* ID of job to start */
-	char *node_list;	/* nodes where job will start */
-	List preemptee_job_id;	/* jobs preempted to start this job */
-	uint32_t proc_cnt;	/* CPUs allocated to job at start */
-	time_t start_time;	/* time when job will start */
-} will_run_response_msg_t;
-
 typedef struct forward_data_msg {
 	char *address;
 	uint32_t len;
@@ -1021,6 +1080,7 @@ typedef struct suspend_int_msg {
 
 typedef struct ping_slurmd_resp_msg {
 	uint32_t cpu_load;	/* CPU load * 100 */
+	uint32_t free_mem;	/* Free memory in MiB */
 } ping_slurmd_resp_msg_t;
 
 typedef struct license_info_request_msg {
@@ -1036,6 +1096,7 @@ typedef struct slurm_node_registration_status_msg {
 	uint16_t cores;
 	uint16_t cpus;
 	uint32_t cpu_load;	/* CPU load * 100 */
+	uint32_t free_mem;	/* Free memory in MiB */
 	char *cpu_spec_list;	/* list of specialized CPUs */
 	acct_gather_energy_t *energy;
 	Buf gres_info;		/* generic resource info */
@@ -1109,7 +1170,9 @@ extern void slurm_destroy_char(void *object);
 extern void slurm_destroy_uint32_ptr(void *object);
 /* here to add \\ to all \" in a string this needs to be xfreed later */
 extern char *slurm_add_slash_to_quotes(char *str);
+extern List slurm_copy_char_list(List char_list);
 extern int slurm_addto_char_list(List char_list, char *names);
+extern int slurm_addto_step_list(List step_list, char *names);
 extern int slurm_sort_char_list_asc(void *, void *);
 extern int slurm_sort_char_list_desc(void *, void *);
 
@@ -1128,18 +1191,23 @@ extern void slurm_free_node_info_single_msg(node_info_single_msg_t *msg);
 extern void slurm_free_part_info_request_msg(part_info_request_msg_t *msg);
 extern void slurm_free_stats_info_request_msg(stats_info_request_msg_t *msg);
 extern void slurm_free_stats_response_msg(stats_info_response_msg_t *msg);
+extern void slurm_free_step_alloc_info_msg(step_alloc_info_msg_t * msg);
 extern void slurm_free_resv_info_request_msg(resv_info_request_msg_t *msg);
 extern void slurm_free_set_debug_flags_msg(set_debug_flags_msg_t *msg);
 extern void slurm_free_set_debug_level_msg(set_debug_level_msg_t *msg);
-extern void slurm_destroy_association_shares_object(void *object);
+extern void slurm_destroy_assoc_shares_object(void *object);
 extern void slurm_free_shares_request_msg(shares_request_msg_t *msg);
 extern void slurm_free_shares_response_msg(shares_response_msg_t *msg);
 extern void slurm_destroy_priority_factors_object(void *object);
+extern void slurm_copy_priority_factors_object(priority_factors_object_t *dest,
+					       priority_factors_object_t *src);
 extern void slurm_free_priority_factors_request_msg(
 	priority_factors_request_msg_t *msg);
 extern void slurm_free_priority_factors_response_msg(
 	priority_factors_response_msg_t *msg);
 extern void slurm_free_forward_data_msg(forward_data_msg_t *msg);
+extern void slurm_free_comp_msg_list(void *x);
+extern void slurm_free_composite_msg(composite_msg_t *msg);
 extern void slurm_free_ping_slurmd_resp(ping_slurmd_resp_msg_t *msg);
 
 #define	slurm_free_timelimit_msg(msg) \
@@ -1169,6 +1237,7 @@ extern void slurm_free_job_launch_msg(batch_job_launch_msg_t * msg);
 
 extern void slurm_free_update_front_end_msg(update_front_end_msg_t * msg);
 extern void slurm_free_update_node_msg(update_node_msg_t * msg);
+extern void slurm_free_update_layout_msg(update_layout_msg_t * msg);
 extern void slurm_free_update_part_msg(update_part_msg_t * msg);
 extern void slurm_free_delete_part_msg(delete_part_msg_t * msg);
 extern void slurm_free_resv_desc_msg(resv_desc_msg_t * msg);
@@ -1216,6 +1285,8 @@ extern void slurm_free_checkpoint_resp_msg(checkpoint_resp_msg_t *msg);
 extern void slurm_free_suspend_msg(suspend_msg_t *msg);
 extern void slurm_free_suspend_int_msg(suspend_int_msg_t *msg);
 extern void slurm_free_update_step_msg(step_update_request_msg_t * msg);
+extern void slurm_free_resource_allocation_response_msg_members (
+	resource_allocation_response_msg_t * msg);
 extern void slurm_free_resource_allocation_response_msg (
 		resource_allocation_response_msg_t * msg);
 extern void slurm_free_job_alloc_info_response_msg (
@@ -1235,6 +1306,8 @@ extern void slurm_free_node_info_msg(node_info_msg_t * msg);
 extern void slurm_free_node_info_members(node_info_t * node);
 extern void slurm_free_partition_info_msg(partition_info_msg_t * msg);
 extern void slurm_free_partition_info_members(partition_info_t * part);
+extern void slurm_free_layout_info_msg(layout_info_msg_t * msg);
+extern void slurm_free_layout_info_request_msg(layout_info_request_msg_t * msg);
 extern void slurm_free_reservation_info_msg(reserve_info_msg_t * msg);
 extern void slurm_free_get_kvs_msg(kvs_get_msg_t *msg);
 extern void slurm_free_will_run_response_msg(will_run_response_msg_t *msg);
@@ -1263,6 +1336,11 @@ extern void slurm_free_requeue_msg(requeue_msg_t *);
 extern int slurm_free_msg_data(slurm_msg_type_t type, void *data);
 extern void slurm_free_license_info_request_msg(license_info_request_msg_t *msg);
 extern uint32_t slurm_get_return_code(slurm_msg_type_t type, void *data);
+extern void slurm_free_network_callerid_msg(network_callerid_msg_t *mesg);
+extern void slurm_free_network_callerid_resp(network_callerid_resp_t *resp);
+
+extern int  slurm_load_sicp(sicp_info_msg_t **sicp_buffer_pptr);
+extern void slurm_free_sicp_msg(sicp_info_msg_t * sicp_buffer_ptr);
 
 extern char *preempt_mode_string(uint16_t preempt_mode);
 extern uint16_t preempt_mode_num(const char *preempt_mode);
@@ -1270,16 +1348,25 @@ extern uint16_t preempt_mode_num(const char *preempt_mode);
 extern char *log_num2string(uint16_t inx);
 extern uint16_t log_string2num(char *name);
 
+/* Translate a burst buffer numeric value to its equivalent state string */
+extern char *bb_state_string(uint16_t state);
+/* Translate a burst buffer state string to its equivalent numeric value */
+extern uint16_t bb_state_num(char *tok);
+
 /* Convert HealthCheckNodeState numeric value to a string.
  * Caller must xfree() the return value */
 extern char *health_check_node_state_str(uint32_t node_state);
 
 extern char *job_reason_string(enum job_state_reason inx);
-extern char *job_state_string(uint16_t inx);
-extern char *job_state_string_compact(uint16_t inx);
-extern int   job_state_num(const char *state_name);
+extern char *job_state_string(uint32_t inx);
+extern char *job_state_string_compact(uint32_t inx);
+extern uint32_t job_state_num(const char *state_name);
 extern char *node_state_string(uint32_t inx);
 extern char *node_state_string_compact(uint32_t inx);
+
+extern uint16_t power_flags_id(char *power_flags);
+extern char    *power_flags_str(uint16_t power_flags);
+
 extern void  private_data_string(uint16_t private_data, char *str, int str_len);
 extern void  accounting_enforce_string(uint16_t enforce,
 				       char *str, int str_len);
@@ -1311,12 +1398,9 @@ extern char *priority_flags_string(uint16_t priority_flags);
 /* user needs to xfree return value */
 extern char *reservation_flags_string(uint32_t flags);
 
-/* Return ctime like string without the newline.
- * Not thread safe */
-extern char *slurm_ctime(const time_t *timep);
-
-/* Return ctime like string without the newline, thread safe. */
-extern char *slurm_ctime_r(const time_t *timep, char *time_str);
+/* Functions to convert burst buffer flags between strings and numbers */
+extern char *   slurm_bb_flags2str(uint32_t bb_flags);
+extern uint32_t slurm_bb_str2flags(char *bb_str);
 
 /* Given a protocol opcode return its string
  * description mapping the slurm_msg_type_t
diff --git a/src/common/slurm_protocol_interface.h b/src/common/slurm_protocol_interface.h
index 3de1a2bf0..72089526b 100644
--- a/src/common/slurm_protocol_interface.h
+++ b/src/common/slurm_protocol_interface.h
@@ -94,44 +94,23 @@ typedef enum slurm_socket_type {
  * for the slurm protocol the general purpose functions just wrap
  * standard socket calls, so if the underlying layer implements a
  * socket like interface, it can be used as a low level transport
- * plugin with slurm the _slurm_recv and _slurm_send functions are
+ * plugin with slurm the slurm_recv and slurm_send functions are
  * also needed
  */
 
 
-/*****************************/
-/* socket creation functions */
-/*****************************/
-
-/* Create a socket of the specified type
- * IN type - SLURM_STREAM or SLURM_MESSAGE
- */
-slurm_fd_t _slurm_create_socket (slurm_socket_type_t type)  ;
-
 /*****************/
 /* msg functions */
 /*****************/
 
-/* _slurm_msg_recvfrom
- * Get message over the given connection, default timeout value
- * IN  fd     - an open file descriptor
- * OUT pbuf   - xmalloc'd buffer, loaded with message data
- * OUT buflen - size of allocated buffer in bytes
- * IN  flags  - communication specific flags
- *
- * RET number of bytes read
- */
-ssize_t _slurm_msg_recvfrom(slurm_fd_t fd, char **pbuf, size_t *buflen,
-		            uint32_t flags);
-
-/* _slurm_msg_recvfrom_timeout reads len bytes from file descriptor fd
+/* slurm_msg_recvfrom_timeout reads len bytes from file descriptor fd
  * timing out after `timeout' milliseconds.
  *
  */
-ssize_t _slurm_msg_recvfrom_timeout(slurm_fd_t fd, char **buf, size_t *len,
-		                    uint32_t flags, int timeout);
+extern ssize_t slurm_msg_recvfrom_timeout(slurm_fd_t fd, char **buf,
+		size_t *len, uint32_t flags, int timeout);
 
-/* _slurm_msg_sendto
+/* slurm_msg_sendto
  * Send message over the given connection, default timeout value
  * IN open_fd - an open file descriptor
  * IN buffer - data to transmit
@@ -139,20 +118,12 @@ ssize_t _slurm_msg_recvfrom_timeout(slurm_fd_t fd, char **buf, size_t *len,
  * IN flags - communication specific flags
  * RET number of bytes written
  */
-ssize_t _slurm_msg_sendto ( slurm_fd_t open_fd, char *buffer ,
-			    size_t size , uint32_t flags ) ;
-/* _slurm_msg_sendto_timeout is identical to _slurm_msg_sendto except
+extern ssize_t slurm_msg_sendto ( slurm_fd_t open_fd, char *buffer ,
+			   size_t size , uint32_t flags ) ;
+/* slurm_msg_sendto_timeout is identical to slurm_msg_sendto except
  * IN timeout - maximum time to wait for a message in milliseconds */
-ssize_t _slurm_msg_sendto_timeout ( slurm_fd_t open_fd, char *buffer,
-				    size_t size, uint32_t flags, int timeout );
-
-/* _slurm_close_accepted_conn
- * In the bsd implmentation maps directly to a close call, to close
- *	the socket that was accepted
- * IN open_fd		- an open file descriptor to close
- * RET int		- the return code
- */
-int _slurm_close_accepted_conn ( slurm_fd_t open_fd ) ;
+extern ssize_t slurm_msg_sendto_timeout ( slurm_fd_t open_fd, char *buffer,
+				   size_t size, uint32_t flags, int timeout );
 
 /********************/
 /* stream functions */
@@ -163,7 +134,7 @@ int _slurm_close_accepted_conn ( slurm_fd_t open_fd ) ;
  * IN slurm_address 	- slurm_addr_t to bind the server stream to
  * RET slurm_fd		- file descriptor of the stream created
  */
-slurm_fd_t slurm_init_msg_engine ( slurm_addr_t * slurm_address ) ;
+extern slurm_fd_t slurm_init_msg_engine ( slurm_addr_t * slurm_address ) ;
 
 /* slurm_accept_msg_conn
  * accepts a incoming stream connection on a stream server slurm_fd
@@ -171,7 +142,7 @@ slurm_fd_t slurm_init_msg_engine ( slurm_addr_t * slurm_address ) ;
  * OUT slurm_address 	- slurm_addr_t of the accepted connection
  * RET slurm_fd		- file descriptor of the accepted connection
  */
-slurm_fd_t slurm_accept_msg_conn ( slurm_fd_t open_fd ,
+extern slurm_fd_t slurm_accept_msg_conn ( slurm_fd_t open_fd ,
 				slurm_addr_t * slurm_address ) ;
 
 /* slurm_open_stream
@@ -181,35 +152,21 @@ slurm_fd_t slurm_accept_msg_conn ( slurm_fd_t open_fd ,
  *                        to avoid socket address collision
  * RET slurm_fd_t         - file descriptor of the connection created
  */
-slurm_fd_t slurm_open_stream ( slurm_addr_t * slurm_address, bool retry ) ;
+extern slurm_fd_t slurm_open_stream ( slurm_addr_t * slurm_address,
+				      bool retry ) ;
 
-/* _slurm_get_stream_addr
+/* slurm_get_stream_addr
  * esentially a encapsilated get_sockname
  * IN open_fd 		- file descriptor to retreive slurm_addr_t for
  * OUT address		- address that open_fd to bound to
  */
-extern int _slurm_get_stream_addr ( slurm_fd_t open_fd ,
-				    slurm_addr_t * address ) ;
-
-/* _slurm_close_stream
- * closes either a server or client stream file_descriptor
- * IN open_fd	- an open file descriptor to close
- * RET int	- the return code
- */
-extern int _slurm_close_stream ( slurm_fd_t open_fd ) ;
-
-/* make an open slurm connection blocking or non-blocking
- *	(i.e. wait or do not wait for i/o completion )
- * IN open_fd	- an open file descriptor to change the effect
- * RET int	- the return code
- */
-extern int _slurm_set_stream_non_blocking ( slurm_fd_t open_fd ) ;
-extern int _slurm_set_stream_blocking ( slurm_fd_t open_fd ) ;
+extern int slurm_get_stream_addr ( slurm_fd_t open_fd ,
+				   slurm_addr_t * address ) ;
 
-int _slurm_send_timeout ( slurm_fd_t open_fd, char *buffer ,
-			  size_t size , uint32_t flags, int timeout ) ;
-int _slurm_recv_timeout ( slurm_fd_t open_fd, char *buffer ,
-			  size_t size , uint32_t flags, int timeout ) ;
+extern int slurm_send_timeout ( slurm_fd_t open_fd, char *buffer ,
+				size_t size , uint32_t flags, int timeout ) ;
+extern int slurm_recv_timeout ( slurm_fd_t open_fd, char *buffer ,
+				size_t size , uint32_t flags, int timeout ) ;
 
 /***************************/
 /* slurm address functions */
@@ -219,21 +176,16 @@ int _slurm_recv_timeout ( slurm_fd_t open_fd, char *buffer ,
  * IN port - port to be used
  * IN ip_address - the IP address to connect with
  */
-extern void _slurm_set_addr_uint ( slurm_addr_t * slurm_address ,
-				   uint16_t port , uint32_t ip_address ) ;
-
-/* resets the address field of a slurm_addr, port and family are unchanged */
-extern void _reset_slurm_addr ( slurm_addr_t * slurm_address ,
-				slurm_addr_t new_address );
-
+extern void slurm_set_addr_uint ( slurm_addr_t * slurm_address ,
+				  uint16_t port , uint32_t ip_address ) ;
 
 /* build a slurm address bassed upon host name and port number
  * OUT slurm_address - the constructed slurm_address
  * IN port - port to be used
  * IN host - name of host to connect with
  */
-extern void _slurm_set_addr_char ( slurm_addr_t * slurm_address ,
-				   uint16_t port , char * host ) ;
+extern void slurm_set_addr_char ( slurm_addr_t * slurm_address ,
+				  uint16_t port , char * host ) ;
 
 /* given a slurm_address it returns its port and hostname
  * IN slurm_address	- slurm_addr_t to be queried
@@ -241,24 +193,37 @@ extern void _slurm_set_addr_char ( slurm_addr_t * slurm_address ,
  * OUT host		- hostname
  * IN buf_len		- length of hostname buffer
  */
-extern void _slurm_get_addr ( slurm_addr_t * slurm_address ,
-			      uint16_t * port , char * host ,
-			      uint32_t buf_len ) ;
+extern void slurm_get_addr ( slurm_addr_t * slurm_address ,
+			     uint16_t * port , char * host ,
+			     uint32_t buf_len ) ;
 
 /* prints a slurm_addr_t into a buf
  * IN address		- slurm_addr_t to print
  * IN buf		- space for string representation of slurm_addr
  * IN n			- max number of bytes to write (including NUL)
  */
-extern void _slurm_print_slurm_addr ( slurm_addr_t * address,
-				      char *buf, size_t n ) ;
+extern void slurm_print_slurm_addr ( slurm_addr_t * address,
+				     char *buf, size_t n ) ;
 
 /*****************************/
 /* slurm addr pack functions */
 /*****************************/
-extern void _slurm_pack_slurm_addr ( slurm_addr_t * slurm_address ,
-				     Buf buffer ) ;
-extern int _slurm_unpack_slurm_addr_no_alloc (
+
+/* slurm_pack_slurm_addr
+ * packs a slurm_addr_t into a buffer to serialization transport
+ * IN slurm_address	- slurm_addr_t to pack
+ * IN/OUT buffer	- buffer to pack the slurm_addr_t into
+ */
+extern void slurm_pack_slurm_addr ( slurm_addr_t * slurm_address ,
+				    Buf buffer ) ;
+
+/* slurm_unpack_slurm_addr_no_alloc
+ * unpacks a buffer into a slurm_addr_t after serialization transport
+ * OUT slurm_address	- slurm_addr_t to unpack to
+ * IN/OUT buffer	- buffer to unpack the slurm_addr_t from
+ * returns 		- SLURM error code
+ */
+extern int slurm_unpack_slurm_addr_no_alloc (
 	slurm_addr_t * slurm_address , Buf buffer ) ;
 
 
@@ -266,110 +231,17 @@ extern int _slurm_unpack_slurm_addr_no_alloc (
  ** BSD LINUX SOCKET FUNCTIONS  **
  \*******************************/
 
-/* Create a new socket of type TYPE in domain DOMAIN, using
- * protocol PROTOCOL.  If PROTOCOL is zero, one is chosen automatically.
- * Returns a file descriptor for the new socket, or -1 for errors.  */
-extern int _slurm_socket (int __domain, int __type, int __protocol)  ;
-
-/* Create two new sockets, of type TYPE in domain DOMAIN and using
- * protocol PROTOCOL, which are connected to each other, and put file
- * descriptors for them in FDS[0] and FDS[1].  If PROTOCOL is zero,
- * one will be chosen automatically.  Returns 0 on success, -1 for errors.  */
-extern int _slurm_socketpair (int __domain, int __type, int __protocol,
-			      int __fds[2]) ;
-
-/* Give the socket FD the local address ADDR (which is LEN bytes long).  */
-extern int _slurm_bind (int __fd, struct sockaddr const * __addr,
-			socklen_t __len) ;
-
-/* Open a connection on socket FD to peer at ADDR (which LEN bytes long).
- * For connectionless socket types, just set the default address to send to
- * and the only address from which to accept transmissions.
- * Return 0 on success, -1 for errors.  */
-extern int _slurm_connect (int __fd, struct sockaddr const * __addr,
-			   socklen_t __len) ;
-
-/* Prepare to accept connections on socket FD.
- * N connection requests will be queued before further requests are refused.
- * Returns 0 on success, -1 for errors.  */
-extern int _slurm_listen (int __fd, int __n) ;
-
-/* Await a connection on socket FD.
- * When a connection arrives, open a new socket to communicate with it,
- * set *ADDR (which is *ADDR_LEN bytes long) to the address of the connecting
- * peer and *ADDR_LEN to the address's actual length, and return the
- * new socket's descriptor, or -1 for errors.  */
-extern int _slurm_accept (int __fd, struct sockaddr * __addr,
-			  socklen_t *__restrict __addr_len) ;
-
-/* Put the local address of FD into *ADDR and its length in *LEN.  */
-extern int _slurm_getsockname (int __fd, struct sockaddr * __addr,
-			       socklen_t *__restrict __len) ;
-
 /* Put the address of the peer connected to socket FD into *ADDR
  * (which is *LEN bytes long), and its actual length into *LEN.  */
-extern int _slurm_getpeername (int __fd, struct sockaddr * __addr,
-			       socklen_t *__restrict __len) ;
-
-/* Send N bytes of BUF to socket FD.  Returns the number sent or -1.  */
-extern ssize_t _slurm_send (int __fd, __const void *__buf,
-			    size_t __n, int __flags) ;
-extern ssize_t _slurm_write (int __fd, __const void *__buf, size_t __n) ;
-
-/* Read N bytes into BUF from socket FD.
- * Returns the number read or -1 for errors.  */
-extern ssize_t _slurm_recv (int __fd, void *__buf, size_t __n, int __flags) ;
-extern ssize_t _slurm_read (int __fd, void *__buf, size_t __n) ;
-
-/* Send N bytes of BUF on socket FD to peer at address ADDR (which is
- * ADDR_LEN bytes long).  Returns the number sent, or -1 for errors.  */
-extern ssize_t _slurm_sendto (int __fd, __const void *__buf, size_t __n,
-			      int __flags, struct sockaddr const * __addr,
-			      socklen_t __addr_len) ;
-
-/* Send a msg described MESSAGE on socket FD.
- * Returns the number of bytes sent, or -1 for errors.  */
-extern ssize_t _slurm_sendmsg (int __fd, __const struct msghdr *__msg,
-			       int __flags)  ;
-
-/* Read N bytes into BUF through socket FD.
- * If ADDR is not NULL, fill in *ADDR_LEN bytes of it with tha address of
- * the sender, and store the actual size of the address in *ADDR_LEN.
- * Returns the number of bytes read or -1 for errors.  */
-extern ssize_t _slurm_recvfrom (int __fd, void *__restrict __buf,
-				size_t __n, int __flags,
-				struct sockaddr * __addr,
-				socklen_t *__restrict __addr_len) ;
-
-/* Send a msg described MESSAGE on socket FD.
- * Returns the number of bytes read or -1 for errors.  */
-extern ssize_t _slurm_recvmsg (int __fd, struct msghdr *__msg,
-			       int __flags)  ;
-
-/* Put the current value for socket FD's option OPTNAME at protocol level LEVEL
- * into OPTVAL (which is *OPTLEN bytes long), and set *OPTLEN to the value's
- * actual length.  Returns 0 on success, -1 for errors.  */
-extern int _slurm_getsockopt (int __fd, int __level, int __optname,
-			      void *__restrict __optval,
-			      socklen_t *__restrict __optlen) ;
+extern int slurm_getpeername (int __fd, struct sockaddr * __addr,
+			      socklen_t *__restrict __len) ;
 
 /* Set socket FD's option OPTNAME at protocol level LEVEL
  * to *OPTVAL (which is OPTLEN bytes long).
  * Returns 0 on success, -1 for errors.  */
-extern int _slurm_setsockopt (int __fd, int __level, int __optname,
-			      __const void *__optval, socklen_t __optlen) ;
-
-/* Shut down all or part of the connection open on socket FD.
- * HOW determines what to shut down:
- * SHUT_RD   = No more receptions;
- * SHUT_WR   = No more transmissions;
- * SHUT_RDWR = No more receptions or transmissions.
- * Returns 0 on success, -1 for errors.  */
-extern int _slurm_shutdown (int __fd, int __how) ;
-extern int _slurm_close (int __fd ) ;
+extern int slurm_setsockopt (int __fd, int __level, int __optname,
+			     __const void *__optval, socklen_t __optlen) ;
 
-extern int _slurm_fcntl(int fd, int cmd, ... );
-extern int _slurm_vfcntl(int fd, int cmd, va_list va );
+extern int slurm_close (int __fd ) ;
 
-extern int _slurm_ioctl(int d, int request, ...);
 #endif /* !_SLURM_PROTOCOL_INTERFACE_H */
diff --git a/src/common/slurm_protocol_pack.c b/src/common/slurm_protocol_pack.c
index a8fcc8747..4d47f63de 100644
--- a/src/common/slurm_protocol_pack.c
+++ b/src/common/slurm_protocol_pack.c
@@ -3,7 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
- *  Portions Copyright (C) 2010 SchedMD <http://www.schedmd.com>.
+ *  Portions Copyright (C) 2010-2015 SchedMD LLC <http://www.schedmd.com>.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Kevin Tew <tew1@llnl.gov>, et. al.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -48,12 +48,14 @@
 #include <string.h>
 
 #include "src/api/slurm_pmi.h"
+#include "src/common/assoc_mgr.h"
 #include "src/common/bitstring.h"
 #include "src/common/forward.h"
 #include "src/common/job_options.h"
 #include "src/common/log.h"
 #include "src/common/node_select.h"
 #include "src/common/pack.h"
+#include "src/common/power.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/slurm_acct_gather_energy.h"
@@ -74,16 +76,20 @@
 #define _pack_job_info_msg(msg,buf)		_pack_buffer_msg(msg,buf)
 #define _pack_job_step_info_msg(msg,buf)	_pack_buffer_msg(msg,buf)
 #define _pack_block_info_resp_msg(msg,buf)	_pack_buffer_msg(msg,buf)
+#define _pack_burst_buffer_info_resp_msg(msg,buf) _pack_buffer_msg(msg,buf)
 #define _pack_front_end_info_msg(msg,buf)	_pack_buffer_msg(msg,buf)
 #define _pack_node_info_msg(msg,buf)		_pack_buffer_msg(msg,buf)
 #define _pack_partition_info_msg(msg,buf)	_pack_buffer_msg(msg,buf)
 #define _pack_stats_response_msg(msg,buf)	_pack_buffer_msg(msg,buf)
 #define _pack_reserve_info_msg(msg,buf)		_pack_buffer_msg(msg,buf)
+#define _pack_sicp_info_msg(msg,buf)		_pack_buffer_msg(msg,buf)
+#define _pack_layout_info_msg(msg,buf)		_pack_buffer_msg(msg,buf)
+#define _pack_assoc_mgr_info_msg(msg,buf)      _pack_buffer_msg(msg,buf)
 
-static void _pack_assoc_shares_object(void *in, Buf buffer,
+static void _pack_assoc_shares_object(void *in, uint32_t tres_cnt, Buf buffer,
 				      uint16_t protocol_version);
-static int _unpack_assoc_shares_object(void **object, Buf buffer,
-				       uint16_t protocol_version);
+static int _unpack_assoc_shares_object(void **object, uint32_t tres_cnt,
+				       Buf buffer, uint16_t protocol_version);
 static void _pack_shares_request_msg(shares_request_msg_t * msg, Buf buffer,
 				     uint16_t protocol_version);
 static int _unpack_shares_request_msg(shares_request_msg_t ** msg, Buf buffer,
@@ -115,6 +121,11 @@ static void _pack_update_node_msg(update_node_msg_t * msg, Buf buffer,
 static int _unpack_update_node_msg(update_node_msg_t ** msg, Buf buffer,
 				   uint16_t protocol_version);
 
+static void _pack_update_layout_msg(update_layout_msg_t * msg, Buf buffer,
+				    uint16_t protocol_version);
+static int _unpack_update_layout_msg(update_layout_msg_t ** msg, Buf buffer,
+				     uint16_t protocol_version);
+
 static void
 _pack_node_registration_status_msg(slurm_node_registration_status_msg_t *
 				   msg, Buf buffer,
@@ -199,6 +210,11 @@ static void _pack_update_partition_msg(update_part_msg_t * msg, Buf buffer,
 static int _unpack_update_partition_msg(update_part_msg_t ** msg, Buf buffer,
 					uint16_t protocol_version);
 
+static void _pack_update_powercap_msg(update_powercap_msg_t * msg, Buf buffer,
+				      uint16_t protocol_version);
+static int _unpack_update_powercap_msg(update_powercap_msg_t ** msg, Buf buffer,
+				       uint16_t protocol_version);
+
 static void _pack_delete_partition_msg(delete_part_msg_t * msg, Buf buffer,
 				       uint16_t protocol_version);
 static int _unpack_delete_partition_msg(delete_part_msg_t ** msg, Buf buffer,
@@ -253,6 +269,13 @@ static int _unpack_partition_info_members(partition_info_t * part,
 					  Buf buffer,
 					  uint16_t protocol_version);
 
+static void _pack_layout_info_request_msg(layout_info_request_msg_t * msg,
+					  Buf buffer, uint16_t protocol_version);
+static int _unpack_layout_info_request_msg(layout_info_request_msg_t ** msg,
+					    Buf buffer, uint16_t protocol_version);
+static int _unpack_layout_info_msg(layout_info_msg_t ** msg, Buf buffer,
+				   uint16_t protocol_version);
+
 static int _unpack_reserve_info_msg(reserve_info_msg_t ** msg,
 				    Buf buffer, uint16_t protocol_version);
 static int _unpack_reserve_info_members(reserve_info_t * resv,
@@ -325,6 +348,14 @@ _unpack_job_alloc_info_msg(job_alloc_info_msg_t **job_desc_buffer_ptr,
 			   Buf buffer,
 			   uint16_t protocol_version);
 
+static void _pack_step_alloc_info_msg(step_alloc_info_msg_t * job_desc_ptr,
+				      Buf buffer,
+				      uint16_t protocol_version);
+static int
+_unpack_step_alloc_info_msg(step_alloc_info_msg_t **job_desc_buffer_ptr,
+			    Buf buffer,
+			    uint16_t protocol_version);
+
 static void _pack_return_code_msg(return_code_msg_t * msg, Buf buffer,
 				  uint16_t protocol_version);
 static int _unpack_return_code_msg(return_code_msg_t ** msg, Buf buffer,
@@ -435,6 +466,9 @@ static int _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr,
 static int _unpack_job_info_msg(job_info_msg_t ** msg, Buf buffer,
 				uint16_t protocol_version);
 
+static int _unpack_sicp_info_msg(sicp_info_msg_t ** msg, Buf buffer,
+				 uint16_t protocol_version);
+
 static void _pack_last_update_msg(last_update_msg_t * msg, Buf buffer,
 				  uint16_t protocol_version);
 static int _unpack_last_update_msg(last_update_msg_t ** msg, Buf buffer,
@@ -620,6 +654,11 @@ static int  _unpack_topo_info_msg(topo_info_response_msg_t **msg,
 				  Buf buffer,
 				  uint16_t protocol_version);
 
+static void _pack_powercap_info_msg(powercap_info_msg_t *msg, Buf buffer,
+				    uint16_t protocol_version);
+static int  _unpack_powercap_info_msg(powercap_info_msg_t **msg,
+				      Buf buffer, uint16_t protocol_version);
+
 static void _pack_job_sbcast_cred_msg(job_sbcast_cred_msg_t *msg, Buf buffer,
 				      uint16_t protocol_version);
 static int  _unpack_job_sbcast_cred_msg(job_sbcast_cred_msg_t **msg,
@@ -660,15 +699,15 @@ static int _unpack_ping_slurmd_resp(ping_slurmd_resp_msg_t **msg_ptr,
 				    Buf buffer, uint16_t protocol_version);
 
 static void _pack_license_info_request_msg(license_info_request_msg_t *msg,
-                                           Buf buffer,
-                                           uint16_t protocol_version);
+					   Buf buffer,
+					   uint16_t protocol_version);
 static int _unpack_license_info_request_msg(license_info_request_msg_t **msg,
-                                            Buf buffer,
-                                            uint16_t protocol_version);
+					    Buf buffer,
+					    uint16_t protocol_version);
 static inline void _pack_license_info_msg(slurm_msg_t *msg, Buf buffer);
 static int _unpack_license_info_msg(license_info_msg_t **msg,
-                                    Buf buffer,
-                                    uint16_t protocol_version);
+				    Buf buffer,
+				    uint16_t protocol_version);
 
 static void _pack_job_requeue_msg(requeue_msg_t *msg, Buf buf,
 				  uint16_t protocol_version);
@@ -680,6 +719,162 @@ static void _pack_job_array_resp_msg(job_array_resp_msg_t *msg, Buf buffer,
 static int  _unpack_job_array_resp_msg(job_array_resp_msg_t **msg, Buf buffer,
 				       uint16_t protocol_version);
 
+static void _pack_composite_msg(composite_msg_t *msg, Buf buffer,
+				uint16_t protocol_version);
+static int  _unpack_composite_msg(composite_msg_t **msg, Buf buffer,
+				  uint16_t protocol_version);
+static int
+_unpack_burst_buffer_info_msg(burst_buffer_info_msg_t **burst_buffer_info,
+			      Buf buffer,
+			      uint16_t protocol_version);
+static void
+_pack_assoc_mgr_info_request_msg(assoc_mgr_info_request_msg_t *,
+			     Buf,
+			     uint16_t);
+static int
+_unpack_assoc_mgr_info_request_msg(assoc_mgr_info_request_msg_t **,
+			       Buf,
+			       uint16_t);
+
+static void
+_pack_network_callerid_msg(network_callerid_msg_t *msg, Buf buffer,
+				uint16_t protocol_version);
+static int
+_unpack_network_callerid_msg(network_callerid_msg_t **msg_ptr, Buf buffer,
+				uint16_t protocol_version);
+
+static void
+_pack_network_callerid_resp_msg(network_callerid_resp_t *msg,
+					    Buf buffer,
+					    uint16_t protocol_version);
+
+static int
+_unpack_network_callerid_resp_msg(network_callerid_resp_t **msg_ptr,
+					    Buf buffer,
+					    uint16_t protocol_version);
+
+/* These functions should be removed when version 14.11 protocol is no longer
+ * supported */
+#define OLD_DIST_CYCLIC		1
+#define OLD_DIST_BLOCK		2
+#define OLD_DIST_ARBITRARY	3
+#define OLD_DIST_PLANE		4
+#define OLD_DIST_CYCLIC_CYCLIC	5
+#define OLD_DIST_CYCLIC_BLOCK	6
+#define OLD_DIST_BLOCK_CYCLIC	7
+#define OLD_DIST_BLOCK_BLOCK	8
+#define OLD_DIST_NO_LLLP	9
+#define OLD_DIST_UNKNOWN	10
+#define OLD_DIST_CYCLIC_CFULL	11
+#define OLD_DIST_BLOCK_CFULL	12
+#define OLD_DIST_STATE_BASE	0x00ff
+#define OLD_DIST_STATE_FLAGS	0xff00
+#define OLD_DIST_PACK_NODES	0x8000
+#define OLD_DIST_NO_PACK_NODES	0x4000
+/* Translate task_dist value from v15.08+ format to v14.11- format */
+extern uint16_t task_dist_new2old(uint32_t new_task_dist)
+{
+	uint16_t old_task_dist = 0;
+
+	if (new_task_dist == NO_VAL)
+		return ((uint16_t) NO_VAL);
+
+	switch (new_task_dist & SLURM_DIST_NODESOCKMASK) {
+	case SLURM_DIST_CYCLIC:
+		old_task_dist = OLD_DIST_CYCLIC;
+		break;
+	case SLURM_DIST_BLOCK:
+		old_task_dist = OLD_DIST_BLOCK;
+		break;
+	case SLURM_DIST_ARBITRARY:
+		old_task_dist = OLD_DIST_ARBITRARY;
+		break;
+	case SLURM_DIST_CYCLIC_CYCLIC:
+		old_task_dist = OLD_DIST_CYCLIC_CYCLIC;
+		break;
+	case SLURM_DIST_CYCLIC_BLOCK:
+		old_task_dist = OLD_DIST_CYCLIC_BLOCK;
+		break;
+	case SLURM_DIST_BLOCK_CYCLIC:
+		old_task_dist = OLD_DIST_BLOCK_CYCLIC;
+		break;
+	case SLURM_DIST_BLOCK_BLOCK:
+		old_task_dist = OLD_DIST_BLOCK_BLOCK;
+		break;
+	case SLURM_DIST_NO_LLLP:
+		old_task_dist = OLD_DIST_NO_LLLP;
+		break;
+	case SLURM_DIST_CYCLIC_CFULL:
+		old_task_dist = OLD_DIST_CYCLIC_CFULL;
+		break;
+	case SLURM_DIST_BLOCK_CFULL:
+		old_task_dist = OLD_DIST_BLOCK_CFULL;
+		break;
+	default:
+		old_task_dist = OLD_DIST_UNKNOWN;
+		break;
+	}
+
+	if (new_task_dist & SLURM_DIST_PACK_NODES)
+		old_task_dist |= OLD_DIST_PACK_NODES;
+	if (new_task_dist & SLURM_DIST_NO_PACK_NODES)
+		old_task_dist |= OLD_DIST_NO_PACK_NODES;
+
+	return old_task_dist;
+}
+/* Translate task_dist value from v14.11- format to v15.08+ format */
+extern uint32_t task_dist_old2new(uint16_t old_task_dist)
+{
+	uint32_t new_task_dist = 0;
+
+	if (old_task_dist == (uint16_t) NO_VAL)
+		return NO_VAL;
+
+	switch (old_task_dist & OLD_DIST_STATE_BASE) {
+	case OLD_DIST_CYCLIC:
+		new_task_dist = SLURM_DIST_CYCLIC;
+		break;
+	case OLD_DIST_BLOCK:
+		new_task_dist = SLURM_DIST_BLOCK;
+		break;
+	case OLD_DIST_ARBITRARY:
+		new_task_dist = SLURM_DIST_ARBITRARY;
+		break;
+	case OLD_DIST_CYCLIC_CYCLIC:
+		new_task_dist = SLURM_DIST_CYCLIC_CYCLIC;
+		break;
+	case OLD_DIST_CYCLIC_BLOCK:
+		new_task_dist = SLURM_DIST_CYCLIC_BLOCK;
+		break;
+	case OLD_DIST_BLOCK_CYCLIC:
+		new_task_dist = SLURM_DIST_BLOCK_CYCLIC;
+		break;
+	case OLD_DIST_BLOCK_BLOCK:
+		new_task_dist = SLURM_DIST_BLOCK_BLOCK;
+		break;
+	case OLD_DIST_NO_LLLP:
+		new_task_dist = SLURM_DIST_NO_LLLP;
+		break;
+	case OLD_DIST_CYCLIC_CFULL:
+		new_task_dist = SLURM_DIST_CYCLIC_CFULL;
+		break;
+	case OLD_DIST_BLOCK_CFULL:
+		new_task_dist = SLURM_DIST_BLOCK_CFULL;
+		break;
+	default:
+		new_task_dist = SLURM_DIST_UNKNOWN;
+		break;
+	}
+
+	if (old_task_dist & OLD_DIST_PACK_NODES)
+		new_task_dist |= SLURM_DIST_PACK_NODES;
+	if (old_task_dist & OLD_DIST_NO_PACK_NODES)
+		new_task_dist |= SLURM_DIST_NO_PACK_NODES;
+
+	return new_task_dist;
+}
+
+
 /* pack_header
  * packs a slurm protocol header that precedes every slurm message
  * IN header - the header structure to pack
@@ -690,20 +885,41 @@ void
 pack_header(header_t * header, Buf buffer)
 {
 	pack16((uint16_t)header->version, buffer);
-	pack16((uint16_t)header->flags, buffer);
-	pack16((uint16_t)header->msg_type, buffer);
-	pack32((uint32_t)header->body_length, buffer);
-	pack16((uint16_t)header->forward.cnt, buffer);
-	if (header->forward.cnt > 0) {
-		packstr(header->forward.nodelist, buffer);
-		pack32((uint32_t)header->forward.timeout, buffer);
-	}
-	pack16((uint16_t)header->ret_cnt, buffer);
-	if (header->ret_cnt > 0) {
-		_pack_ret_list(header->ret_list,
-			       header->ret_cnt, buffer, header->version);
+
+	if (header->version >= SLURM_15_08_PROTOCOL_VERSION) {
+		pack16((uint16_t)header->flags, buffer);
+		pack16((uint16_t)header->msg_index, buffer);
+		pack16((uint16_t)header->msg_type, buffer);
+		pack32((uint32_t)header->body_length, buffer);
+		pack16((uint16_t)header->forward.cnt, buffer);
+		if (header->forward.cnt > 0) {
+			packstr(header->forward.nodelist, buffer);
+			pack32((uint32_t)header->forward.timeout, buffer);
+		}
+		pack16((uint16_t)header->ret_cnt, buffer);
+		if (header->ret_cnt > 0) {
+			_pack_ret_list(header->ret_list,
+				       header->ret_cnt, buffer,
+				       header->version);
+		}
+		slurm_pack_slurm_addr(&header->orig_addr, buffer);
+	} else {
+		pack16((uint16_t)header->flags, buffer);
+		pack16((uint16_t)header->msg_type, buffer);
+		pack32((uint32_t)header->body_length, buffer);
+		pack16((uint16_t)header->forward.cnt, buffer);
+		if (header->forward.cnt > 0) {
+			packstr(header->forward.nodelist, buffer);
+			pack32((uint32_t)header->forward.timeout, buffer);
+		}
+		pack16((uint16_t)header->ret_cnt, buffer);
+		if (header->ret_cnt > 0) {
+			_pack_ret_list(header->ret_list,
+				       header->ret_cnt, buffer,
+				       header->version);
+		}
+		slurm_pack_slurm_addr(&header->orig_addr, buffer);
 	}
-	slurm_pack_slurm_addr(&header->orig_addr, buffer);
 }
 
 /* unpack_header
@@ -722,33 +938,58 @@ unpack_header(header_t * header, Buf buffer)
 	forward_init(&header->forward, NULL);
 	header->ret_list = NULL;
 	safe_unpack16(&header->version, buffer);
-	safe_unpack16(&header->flags, buffer);
-	safe_unpack16(&header->msg_type, buffer);
-	safe_unpack32(&header->body_length, buffer);
-	safe_unpack16(&header->forward.cnt, buffer);
-	if (header->forward.cnt > 0) {
-		safe_unpackstr_xmalloc(&header->forward.nodelist,
-				       &uint32_tmp, buffer);
-		safe_unpack32(&header->forward.timeout, buffer);
-	}
 
-	safe_unpack16(&header->ret_cnt, buffer);
-	if (header->ret_cnt > 0) {
-		if (_unpack_ret_list(&(header->ret_list),
-				     header->ret_cnt, buffer, header->version))
-			goto unpack_error;
+	if (header->version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpack16(&header->flags, buffer);
+		safe_unpack16(&header->msg_index, buffer);
+		safe_unpack16(&header->msg_type, buffer);
+		safe_unpack32(&header->body_length, buffer);
+		safe_unpack16(&header->forward.cnt, buffer);
+		if (header->forward.cnt > 0) {
+			safe_unpackstr_xmalloc(&header->forward.nodelist,
+					       &uint32_tmp, buffer);
+			safe_unpack32(&header->forward.timeout, buffer);
+		}
+
+		safe_unpack16(&header->ret_cnt, buffer);
+		if (header->ret_cnt > 0) {
+			if (_unpack_ret_list(&(header->ret_list),
+					     header->ret_cnt, buffer,
+					     header->version))
+				goto unpack_error;
+		} else {
+			header->ret_list = NULL;
+		}
+		slurm_unpack_slurm_addr_no_alloc(&header->orig_addr, buffer);
 	} else {
-		header->ret_list = NULL;
+		safe_unpack16(&header->flags, buffer);
+		safe_unpack16(&header->msg_type, buffer);
+		safe_unpack32(&header->body_length, buffer);
+		safe_unpack16(&header->forward.cnt, buffer);
+		if (header->forward.cnt > 0) {
+			safe_unpackstr_xmalloc(&header->forward.nodelist,
+					       &uint32_tmp, buffer);
+			safe_unpack32(&header->forward.timeout, buffer);
+		}
+
+		safe_unpack16(&header->ret_cnt, buffer);
+		if (header->ret_cnt > 0) {
+			if (_unpack_ret_list(&(header->ret_list),
+					     header->ret_cnt, buffer,
+					     header->version))
+				goto unpack_error;
+		} else {
+			header->ret_list = NULL;
+		}
+		slurm_unpack_slurm_addr_no_alloc(&header->orig_addr, buffer);
 	}
-	slurm_unpack_slurm_addr_no_alloc(&header->orig_addr, buffer);
 
 	return SLURM_SUCCESS;
 
 unpack_error:
 	error("unpacking header");
 	destroy_forward(&header->forward);
-	if (header->ret_list)
-		list_destroy(header->ret_list);
+	FREE_NULL_LIST(header->ret_list);
 	return SLURM_ERROR;
 }
 
@@ -784,6 +1025,11 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 					    msg->data, buffer,
 					    msg->protocol_version);
 		break;
+	case REQUEST_LAYOUT_INFO:
+		_pack_layout_info_request_msg((layout_info_request_msg_t *)
+					      msg->data, buffer,
+					      msg->protocol_version);
+		break;
 	case REQUEST_BUILD_INFO:
 	case REQUEST_ACCTING_INFO:
 		_pack_last_update_msg((last_update_msg_t *)
@@ -831,11 +1077,15 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 	case REQUEST_JOB_END_TIME:
 	case REQUEST_JOB_ALLOCATION_INFO:
 	case REQUEST_JOB_ALLOCATION_INFO_LITE:
-	case REQUEST_JOB_SBCAST_CRED:
 		_pack_job_alloc_info_msg((job_alloc_info_msg_t *) msg->data,
 					 buffer,
 					 msg->protocol_version);
 		break;
+	case REQUEST_JOB_SBCAST_CRED:
+		_pack_step_alloc_info_msg((step_alloc_info_msg_t *) msg->data,
+					  buffer,
+					  msg->protocol_version);
+		break;
 	case REQUEST_NODE_REGISTRATION_STATUS:
 	case REQUEST_RECONFIGURE:
 	case REQUEST_SHUTDOWN_IMMEDIATE:
@@ -848,6 +1098,9 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 	case ACCOUNTING_FIRST_REG:
 	case ACCOUNTING_REGISTER_CTLD:
 	case REQUEST_TOPO_INFO:
+	case REQUEST_BURST_BUFFER_INFO:
+	case REQUEST_SICP_INFO:
+	case REQUEST_POWERCAP_INFO:
 		/* Message contains no body/information */
 		break;
 	case REQUEST_ACCT_GATHER_ENERGY:
@@ -895,12 +1148,22 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 				      buffer,
 				      msg->protocol_version);
 		break;
+	case REQUEST_UPDATE_LAYOUT:
+		_pack_update_layout_msg((update_layout_msg_t *) msg->data,
+					buffer,
+					msg->protocol_version);
+		break;
 	case REQUEST_CREATE_PARTITION:
 	case REQUEST_UPDATE_PARTITION:
 		_pack_update_partition_msg((update_part_msg_t *) msg->
 					   data, buffer,
 					   msg->protocol_version);
 		break;
+	case REQUEST_UPDATE_POWERCAP:
+		_pack_update_powercap_msg((update_powercap_msg_t *) msg->
+					  data, buffer,
+					  msg->protocol_version);
+		break;
 	case REQUEST_DELETE_PARTITION:
 		_pack_delete_partition_msg((delete_part_msg_t *) msg->
 					   data, buffer,
@@ -915,6 +1178,9 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 	case RESPONSE_RESERVATION_INFO:
 		_pack_reserve_info_msg((slurm_msg_t *) msg, buffer);
 		break;
+	case RESPONSE_LAYOUT_INFO:
+		_pack_layout_info_msg((slurm_msg_t *) msg, buffer);
+		break;
 	case REQUEST_DELETE_RESERVATION:
 	case RESPONSE_CREATE_RESERVATION:
 		_pack_resv_name_msg((reservation_name_msg_t *) msg->
@@ -996,6 +1262,7 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 			msg->protocol_version);
 		break;
 	case REQUEST_STEP_COMPLETE:
+	case REQUEST_STEP_COMPLETE_AGGR:
 		_pack_step_complete_msg((step_complete_msg_t *)msg->data,
 					buffer,
 					msg->protocol_version);
@@ -1176,8 +1443,8 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 
 	case REQUEST_JOB_REQUEUE:
 		_pack_job_requeue_msg((requeue_msg_t *)msg->data,
-		                      buffer,
-		                      msg->protocol_version);
+				      buffer,
+				      msg->protocol_version);
 		break;
 
 	case REQUEST_JOB_USER_INFO:
@@ -1215,6 +1482,9 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 	case RESPONSE_BLOCK_INFO:
 		_pack_block_info_resp_msg((slurm_msg_t *) msg, buffer);
 		break;
+	case RESPONSE_BURST_BUFFER_INFO:
+		_pack_burst_buffer_info_resp_msg((slurm_msg_t *) msg, buffer);
+		break;
 	case REQUEST_FILE_BCAST:
 		_pack_file_bcast((file_bcast_msg_t *) msg->data, buffer,
 				 msg->protocol_version);
@@ -1270,6 +1540,11 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 			(topo_info_response_msg_t *)msg->data, buffer,
 			msg->protocol_version);
 		break;
+	case RESPONSE_POWERCAP_INFO:
+		_pack_powercap_info_msg(
+			(powercap_info_msg_t *)msg->data, buffer,
+			msg->protocol_version);
+		break;
 	case RESPONSE_JOB_SBCAST_CRED:
 		_pack_job_sbcast_cred_msg(
 			(job_sbcast_cred_msg_t *)msg->data, buffer,
@@ -1314,17 +1589,43 @@ pack_msg(slurm_msg_t const *msg, Buf buffer)
 		break;
 	case REQUEST_LICENSE_INFO:
 		 _pack_license_info_request_msg((license_info_request_msg_t *)
-		                                msg->data,
-		                                buffer,
-		                                msg->protocol_version);
+						msg->data,
+						buffer,
+						msg->protocol_version);
 			break;
 	case RESPONSE_LICENSE_INFO:
 		_pack_license_info_msg((slurm_msg_t *) msg, buffer);
 		break;
+	case MESSAGE_COMPOSITE:
+	case RESPONSE_MESSAGE_COMPOSITE:
+		_pack_composite_msg((composite_msg_t *) msg->data, buffer,
+				     msg->protocol_version);
+		break;
 	case RESPONSE_JOB_ARRAY_ERRORS:
 		_pack_job_array_resp_msg((job_array_resp_msg_t *) msg->data,
 					 buffer, msg->protocol_version);
 		break;
+	case REQUEST_ASSOC_MGR_INFO:
+		_pack_assoc_mgr_info_request_msg(
+			(assoc_mgr_info_request_msg_t *)msg->data,
+			buffer, msg->protocol_version);
+		break;
+	case RESPONSE_ASSOC_MGR_INFO:
+		_pack_assoc_mgr_info_msg((slurm_msg_t *) msg, buffer);
+		break;
+	case REQUEST_NETWORK_CALLERID:
+		_pack_network_callerid_msg((network_callerid_msg_t *)
+						  msg->data, buffer,
+						  msg->protocol_version);
+		break;
+	case RESPONSE_NETWORK_CALLERID:
+		_pack_network_callerid_resp_msg((network_callerid_resp_t *)
+						  msg->data, buffer,
+						  msg->protocol_version);
+		break;
+	case RESPONSE_SICP_INFO:
+		_pack_sicp_info_msg((slurm_msg_t *) msg, buffer);
+		break;
 	default:
 		debug("No pack method for msg type %u", msg->msg_type);
 		return EINVAL;
@@ -1368,6 +1669,11 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 						   & (msg->data), buffer,
 						   msg->protocol_version);
 		break;
+	case REQUEST_LAYOUT_INFO:
+		rc = _unpack_layout_info_request_msg((layout_info_request_msg_t **)
+						     & (msg->data), buffer,
+						     msg->protocol_version);
+		break;
 	case REQUEST_BUILD_INFO:
 	case REQUEST_ACCTING_INFO:
 		rc = _unpack_last_update_msg((last_update_msg_t **) &
@@ -1423,11 +1729,15 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 	case REQUEST_JOB_END_TIME:
 	case REQUEST_JOB_ALLOCATION_INFO:
 	case REQUEST_JOB_ALLOCATION_INFO_LITE:
-	case REQUEST_JOB_SBCAST_CRED:
 		rc = _unpack_job_alloc_info_msg((job_alloc_info_msg_t **) &
 						(msg->data), buffer,
 						msg->protocol_version);
 		break;
+	case REQUEST_JOB_SBCAST_CRED:
+		rc = _unpack_step_alloc_info_msg((step_alloc_info_msg_t **) &
+						 (msg->data), buffer,
+						 msg->protocol_version);
+		break;
 	case REQUEST_NODE_REGISTRATION_STATUS:
 	case REQUEST_RECONFIGURE:
 	case REQUEST_SHUTDOWN_IMMEDIATE:
@@ -1440,6 +1750,9 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 	case ACCOUNTING_FIRST_REG:
 	case ACCOUNTING_REGISTER_CTLD:
 	case REQUEST_TOPO_INFO:
+	case REQUEST_BURST_BUFFER_INFO:
+	case REQUEST_SICP_INFO:
+	case REQUEST_POWERCAP_INFO:
 		/* Message contains no body/information */
 		break;
 	case REQUEST_ACCT_GATHER_ENERGY:
@@ -1489,12 +1802,22 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 					     (msg->data), buffer,
 					     msg->protocol_version);
 		break;
+	case REQUEST_UPDATE_LAYOUT:
+		rc = _unpack_update_layout_msg((update_layout_msg_t **) &
+					       (msg->data), buffer,
+					       msg->protocol_version);
+		break;
 	case REQUEST_CREATE_PARTITION:
 	case REQUEST_UPDATE_PARTITION:
 		rc = _unpack_update_partition_msg((update_part_msg_t **) &
 						  (msg->data), buffer,
 						  msg->protocol_version);
 		break;
+	case REQUEST_UPDATE_POWERCAP:
+		rc = _unpack_update_powercap_msg((update_powercap_msg_t **) &
+						 (msg->data), buffer,
+						 msg->protocol_version);
+		break;
 	case REQUEST_DELETE_PARTITION:
 		rc = _unpack_delete_partition_msg((delete_part_msg_t **) &
 						  (msg->data), buffer,
@@ -1522,6 +1845,11 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 					      &(msg->data), buffer,
 					      msg->protocol_version);
 		break;
+	case RESPONSE_LAYOUT_INFO:
+		rc = _unpack_layout_info_msg((layout_info_msg_t **)
+					     &(msg->data), buffer,
+					     msg->protocol_version);
+		break;
 	case REQUEST_LAUNCH_TASKS:
 		rc = _unpack_launch_tasks_request_msg(
 			(launch_tasks_request_msg_t **)
@@ -1598,6 +1926,7 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 			msg->protocol_version);
 		break;
 	case REQUEST_STEP_COMPLETE:
+	case REQUEST_STEP_COMPLETE_AGGR:
 		rc = _unpack_step_complete_msg((step_complete_msg_t
 						**) & (msg->data),
 					       buffer,
@@ -1683,7 +2012,8 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 		break;
 	case REQUEST_LAUNCH_PROLOG:
 		rc = _unpack_prolog_launch_msg((prolog_launch_msg_t **)
-						  & (msg->data), buffer, msg->protocol_version);
+					       & (msg->data),
+					       buffer, msg->protocol_version);
 		break;
 	case RESPONSE_PROLOG_EXECUTING:
 	case RESPONSE_JOB_READY:
@@ -1789,14 +2119,14 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 	case REQUEST_JOB_READY:
 	case REQUEST_JOB_INFO_SINGLE:
 		rc = _unpack_job_ready_msg((job_id_msg_t **)
-		                           & msg->data, buffer,
-		                           msg->protocol_version);
+					   & msg->data, buffer,
+					   msg->protocol_version);
 		break;
 
 	case REQUEST_JOB_REQUEUE:
 		rc = _unpack_job_requeue_msg((requeue_msg_t **)&msg->data,
-		                             buffer,
-		                             msg->protocol_version);
+					     buffer,
+					     msg->protocol_version);
 		break;
 
 	case REQUEST_JOB_USER_INFO:
@@ -1840,6 +2170,11 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 			(block_info_msg_t **) &(msg->data), buffer,
 			msg->protocol_version);
 		break;
+	case RESPONSE_BURST_BUFFER_INFO:
+		rc = _unpack_burst_buffer_info_msg(
+			(burst_buffer_info_msg_t **) &(msg->data), buffer,
+			msg->protocol_version);
+		break;
 	case REQUEST_FILE_BCAST:
 		rc = _unpack_file_bcast( (file_bcast_msg_t **)
 					 & msg->data, buffer,
@@ -1900,6 +2235,11 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 			(topo_info_response_msg_t **)&msg->data, buffer,
 			msg->protocol_version);
 		break;
+	case RESPONSE_POWERCAP_INFO:
+		rc = _unpack_powercap_info_msg(
+			(powercap_info_msg_t **)&msg->data, buffer,
+			msg->protocol_version);
+		break;
 	case RESPONSE_JOB_SBCAST_CRED:
 		rc = _unpack_job_sbcast_cred_msg(
 			(job_sbcast_cred_msg_t **)&msg->data, buffer,
@@ -1950,20 +2290,50 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 		break;
 	case RESPONSE_LICENSE_INFO:
 		rc = _unpack_license_info_msg((license_info_msg_t **)&(msg->data),
-		                              buffer,
-		                              msg->protocol_version);
+					      buffer,
+					      msg->protocol_version);
 		break;
 	case REQUEST_LICENSE_INFO:
 		rc = _unpack_license_info_request_msg((license_info_request_msg_t **)
-		                                      &(msg->data),
-		                                      buffer,
-		                                      msg->protocol_version);
+						      &(msg->data),
+						      buffer,
+						      msg->protocol_version);
+		break;
+	case MESSAGE_COMPOSITE:
+	case RESPONSE_MESSAGE_COMPOSITE:
+		rc = _unpack_composite_msg((composite_msg_t **) &(msg->data),
+					    buffer, msg->protocol_version);
 		break;
 	case RESPONSE_JOB_ARRAY_ERRORS:
 		rc = _unpack_job_array_resp_msg((job_array_resp_msg_t **)
 						&(msg->data), buffer,
 						msg->protocol_version);
 		break;
+	case REQUEST_ASSOC_MGR_INFO:
+		rc = _unpack_assoc_mgr_info_request_msg(
+			(assoc_mgr_info_request_msg_t **)&(msg->data),
+			buffer, msg->protocol_version);
+		break;
+	case RESPONSE_ASSOC_MGR_INFO:
+		rc = assoc_mgr_info_unpack_msg((assoc_mgr_info_msg_t **)
+					       &(msg->data),
+					       buffer,
+					       msg->protocol_version);
+		break;
+	case REQUEST_NETWORK_CALLERID:
+		rc = _unpack_network_callerid_msg((network_callerid_msg_t **)
+						  &(msg->data), buffer,
+						  msg->protocol_version);
+		break;
+	case RESPONSE_NETWORK_CALLERID:
+		rc = _unpack_network_callerid_resp_msg((network_callerid_resp_t **)
+						  &(msg->data), buffer,
+						  msg->protocol_version);
+		break;
+	case RESPONSE_SICP_INFO:
+		rc = _unpack_sicp_info_msg((sicp_info_msg_t **) & (msg->data),
+					   buffer, msg->protocol_version);
+		break;
 	default:
 		debug("No unpack method for msg type %u", msg->msg_type);
 		return EINVAL;
@@ -1977,12 +2347,64 @@ unpack_msg(slurm_msg_t * msg, Buf buffer)
 	return rc;
 }
 
-static void _pack_assoc_shares_object(void *in, Buf buffer,
+static void _pack_assoc_shares_object(void *in, uint32_t tres_cnt, Buf buffer,
 				      uint16_t protocol_version)
 {
-	association_shares_object_t *object = (association_shares_object_t *)in;
+	assoc_shares_object_t *object = (assoc_shares_object_t *)in;
+	uint64_t tmp64;
 
-	if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		if (!object) {
+			pack32(0, buffer);
+
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+
+			packdouble(0, buffer);
+			pack32(0, buffer);
+
+			pack64_array(NULL, 0, buffer);
+			pack64_array(NULL, 0, buffer);
+
+			packdouble(0, buffer);
+			packdouble(0, buffer);
+			pack64(0, buffer);
+			packlongdouble_array(NULL, 0, buffer);
+
+			packdouble(0, buffer);
+			packdouble(0, buffer);
+
+			pack16(0, buffer);
+
+			return;
+		}
+
+		pack32(object->assoc_id, buffer);
+
+		packstr(object->cluster, buffer);
+		packstr(object->name, buffer);
+		packstr(object->parent, buffer);
+		packstr(object->partition, buffer);
+
+		packdouble(object->shares_norm, buffer);
+		pack32(object->shares_raw, buffer);
+
+		pack64_array(object->tres_run_secs, tres_cnt, buffer);
+		pack64_array(object->tres_grp_mins, tres_cnt, buffer);
+
+		packdouble(object->usage_efctv, buffer);
+		packdouble(object->usage_norm, buffer);
+		pack64(object->usage_raw, buffer);
+		packlongdouble_array(object->usage_tres_raw, tres_cnt, buffer);
+
+		packdouble(object->fs_factor, buffer);
+		packdouble(object->level_fs, buffer);
+
+		pack16(object->user, buffer);
+
+	} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
 		if (!object) {
 			pack32(0, buffer);
 
@@ -2019,9 +2441,16 @@ static void _pack_assoc_shares_object(void *in, Buf buffer,
 		packdouble(object->usage_efctv, buffer);
 		packdouble(object->usage_norm, buffer);
 		pack64(object->usage_raw, buffer);
-
-		pack64(object->grp_cpu_mins, buffer);
-		pack64(object->cpu_run_mins, buffer);
+		if (object->tres_grp_mins)
+			tmp64 = object->tres_grp_mins[TRES_ARRAY_CPU];
+		else
+			tmp64 = 0;
+		pack64(tmp64, buffer);
+		if (object->tres_run_secs)
+			tmp64 = object->tres_run_secs[TRES_ARRAY_CPU] / 60;
+		else
+			tmp64 = 0;
+		pack64(tmp64, buffer);
 		packdouble(object->fs_factor, buffer);
 		packdouble(object->level_fs, buffer);
 
@@ -2062,8 +2491,16 @@ static void _pack_assoc_shares_object(void *in, Buf buffer,
 		packdouble(object->usage_norm, buffer);
 		pack64(object->usage_raw, buffer);
 
-		pack64(object->grp_cpu_mins, buffer);
-		pack64(object->cpu_run_mins, buffer);
+		if (object->tres_grp_mins)
+			tmp64 = object->tres_grp_mins[TRES_ARRAY_CPU];
+		else
+			tmp64 = 0;
+		pack64(tmp64, buffer);
+		if (object->tres_run_secs)
+			tmp64 = object->tres_run_secs[TRES_ARRAY_CPU] / 60;
+		else
+			tmp64 = 0;
+		pack64(tmp64, buffer);
 
 		pack16(object->user, buffer);
 	} else {
@@ -2072,16 +2509,16 @@ static void _pack_assoc_shares_object(void *in, Buf buffer,
 	}
 }
 
-static int _unpack_assoc_shares_object(void **object, Buf buffer,
-				       uint16_t protocol_version)
+static int _unpack_assoc_shares_object(void **object, uint32_t tres_cnt,
+				       Buf buffer, uint16_t protocol_version)
 {
 	uint32_t uint32_tmp;
-	association_shares_object_t *object_ptr =
-		xmalloc(sizeof(association_shares_object_t));
+	assoc_shares_object_t *object_ptr =
+		xmalloc(sizeof(assoc_shares_object_t));
 
 	*object = (void *) object_ptr;
 
-	if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack32(&object_ptr->assoc_id, buffer);
 
 		safe_unpackstr_xmalloc(&object_ptr->cluster,
@@ -2089,6 +2526,40 @@ static int _unpack_assoc_shares_object(void **object, Buf buffer,
 		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&object_ptr->parent,
 				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->partition,
+				       &uint32_tmp, buffer);
+
+		safe_unpackdouble(&object_ptr->shares_norm, buffer);
+		safe_unpack32(&object_ptr->shares_raw, buffer);
+
+		safe_unpack64_array(&object_ptr->tres_run_secs,
+				    &uint32_tmp, buffer);
+		if (uint32_tmp != tres_cnt)
+			goto unpack_error;
+		safe_unpack64_array(&object_ptr->tres_grp_mins,
+				    &uint32_tmp, buffer);
+		if (uint32_tmp != tres_cnt)
+			goto unpack_error;
+
+		safe_unpackdouble(&object_ptr->usage_efctv, buffer);
+		safe_unpackdouble(&object_ptr->usage_norm, buffer);
+		safe_unpack64(&object_ptr->usage_raw, buffer);
+		safe_unpacklongdouble_array(&object_ptr->usage_tres_raw,
+					    &uint32_tmp, buffer);
+
+		safe_unpackdouble(&object_ptr->fs_factor, buffer);
+		safe_unpackdouble(&object_ptr->level_fs, buffer);
+
+		safe_unpack16(&object_ptr->user, buffer);
+
+	} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+		safe_unpack32(&object_ptr->assoc_id, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->cluster,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->partition,
+				       &uint32_tmp, buffer);
 
 		safe_unpackdouble(&object_ptr->shares_norm, buffer);
 		safe_unpack32(&object_ptr->shares_raw, buffer);
@@ -2097,8 +2568,24 @@ static int _unpack_assoc_shares_object(void **object, Buf buffer,
 		safe_unpackdouble(&object_ptr->usage_norm, buffer);
 		safe_unpack64(&object_ptr->usage_raw, buffer);
 
-		safe_unpack64(&object_ptr->grp_cpu_mins, buffer);
-		safe_unpack64(&object_ptr->cpu_run_mins, buffer);
+		object_ptr->tres_grp_mins = xmalloc(
+			sizeof(uint64_t) * tres_cnt);
+		safe_unpack64(&object_ptr->tres_grp_mins[TRES_ARRAY_CPU],
+			      buffer);
+		object_ptr->tres_run_secs = xmalloc(
+			sizeof(uint64_t) * tres_cnt);
+		safe_unpack64(&object_ptr->tres_run_secs[TRES_ARRAY_CPU],
+			      buffer);
+		object_ptr->tres_run_secs[TRES_ARRAY_CPU] *= 60;
+
+		safe_unpackdouble(&object_ptr->usage_efctv, buffer);
+		safe_unpackdouble(&object_ptr->usage_norm, buffer);
+		safe_unpack64(&object_ptr->usage_raw, buffer);
+		object_ptr->usage_tres_raw = xmalloc(
+			sizeof(long double) * tres_cnt);
+		object_ptr->usage_tres_raw[TRES_ARRAY_CPU] =
+			(long double)object_ptr->usage_raw;
+
 		safe_unpackdouble(&object_ptr->fs_factor, buffer);
 		safe_unpackdouble(&object_ptr->level_fs, buffer);
 
@@ -2118,9 +2605,20 @@ static int _unpack_assoc_shares_object(void **object, Buf buffer,
 		safe_unpackdouble(&object_ptr->usage_efctv, buffer);
 		safe_unpackdouble(&object_ptr->usage_norm, buffer);
 		safe_unpack64(&object_ptr->usage_raw, buffer);
-
-		safe_unpack64(&object_ptr->grp_cpu_mins, buffer);
-		safe_unpack64(&object_ptr->cpu_run_mins, buffer);
+		object_ptr->usage_tres_raw = xmalloc(
+			sizeof(long double) * tres_cnt);
+		object_ptr->usage_tres_raw[TRES_ARRAY_CPU] =
+			(long double)object_ptr->usage_raw;
+
+		object_ptr->tres_grp_mins = xmalloc(
+			sizeof(uint64_t) * tres_cnt);
+		safe_unpack64(&object_ptr->tres_grp_mins[TRES_ARRAY_CPU],
+			      buffer);
+		object_ptr->tres_run_secs = xmalloc(
+			sizeof(uint64_t) * tres_cnt);
+		safe_unpack64(&object_ptr->tres_run_secs[TRES_ARRAY_CPU],
+			      buffer);
+		object_ptr->tres_run_secs[0] *= 60;
 
 		safe_unpack16(&object_ptr->user, buffer);
 	} else {
@@ -2131,11 +2629,114 @@ static int _unpack_assoc_shares_object(void **object, Buf buffer,
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurm_destroy_association_shares_object(object_ptr);
+	slurm_destroy_assoc_shares_object(object_ptr);
 	*object = NULL;
 	return SLURM_ERROR;
 }
 
+/* _pack_network_callerid_msg()
+ */
+static void
+_pack_network_callerid_msg(network_callerid_msg_t *msg, Buf buffer,
+				uint16_t protocol_version)
+{
+	xassert(msg != NULL);
+
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		packmem((char *)msg->ip_src, 16, buffer);
+		packmem((char *)msg->ip_dst, 16, buffer);
+		pack32(msg->port_src, buffer);
+		pack32(msg->port_dst,	buffer);
+		pack32((uint32_t)msg->af, buffer);
+	} else {
+		error("_pack_network_callerid_msg: protocol_version "
+		      "%hu not supported", protocol_version);
+	}
+}
+
+/* _unpack_network_callerid_msg()
+ */
+static int
+_unpack_network_callerid_msg(network_callerid_msg_t **msg_ptr, Buf buffer,
+			     uint16_t protocol_version)
+{
+	uint32_t uint32_tmp;
+	char *charptr_tmp;
+	network_callerid_msg_t *msg;
+	xassert(msg_ptr != NULL);
+
+	msg = xmalloc(sizeof(network_callerid_msg_t));
+	*msg_ptr = msg;
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpackmem_xmalloc(&charptr_tmp, &uint32_tmp, buffer);
+		memcpy(msg->ip_src, charptr_tmp, uint32_tmp);
+		safe_unpackmem_xmalloc(&charptr_tmp, &uint32_tmp, buffer);
+		memcpy(msg->ip_dst, charptr_tmp, uint32_tmp);
+		safe_unpack32(&msg->port_src,		buffer);
+		safe_unpack32(&msg->port_dst,		buffer);
+		safe_unpack32((uint32_t *)&msg->af,	buffer);
+	} else {
+
+		error("_unpack_network_callerid_msg: protocol_version "
+		      "%hu not supported", protocol_version);
+		goto unpack_error;
+	}
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	info("_unpack_network_callerid_msg error");
+	*msg_ptr = NULL;
+	slurm_free_network_callerid_msg(msg);
+	return SLURM_ERROR;
+}
+
+static void _pack_network_callerid_resp_msg(network_callerid_resp_t *msg,
+					    Buf buffer,
+					    uint16_t protocol_version)
+{
+	xassert(msg != NULL);
+
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		pack32(msg->job_id,		buffer);
+		pack32(msg->return_code,	buffer);
+		packstr(msg->node_name,		buffer);
+	} else {
+		error("_pack_network_callerid_resp_msg: protocol_version "
+		      "%hu not supported", protocol_version);
+	}
+}
+
+static int _unpack_network_callerid_resp_msg(network_callerid_resp_t **msg_ptr,
+					    Buf buffer,
+					    uint16_t protocol_version)
+{
+	uint32_t uint32_tmp;
+	network_callerid_resp_t *msg;
+	xassert(msg_ptr != NULL);
+
+	msg = xmalloc(sizeof(network_callerid_resp_t));
+	*msg_ptr = msg;
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpack32(&msg->job_id,		buffer);
+		safe_unpack32(&msg->return_code,	buffer);
+		safe_unpackmem_xmalloc(&msg->node_name, &uint32_tmp, buffer);
+	} else {
+
+		error("_unpack_network_callerid_msg: protocol_version "
+		      "%hu not supported", protocol_version);
+		goto unpack_error;
+	}
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	info("_unpack_network_callerid_msg error");
+	*msg_ptr = NULL;
+	slurm_free_network_callerid_resp(msg);
+	return SLURM_ERROR;
+}
+
 static void _pack_shares_request_msg(shares_request_msg_t * msg, Buf buffer,
 				     uint16_t protocol_version)
 {
@@ -2215,21 +2816,44 @@ static void _pack_shares_response_msg(shares_response_msg_t * msg, Buf buffer,
 				      uint16_t protocol_version)
 {
 	ListIterator itr = NULL;
-	association_shares_object_t *share = NULL;
+	assoc_shares_object_t *share = NULL;
 	uint32_t count = NO_VAL;
 
 	xassert(msg != NULL);
-	if (msg->assoc_shares_list)
-		count = list_count(msg->assoc_shares_list);
-	pack32(count, buffer);
-	if (count && count != NO_VAL) {
-		itr = list_iterator_create(msg->assoc_shares_list);
-		while ((share = list_next(itr)))
-			_pack_assoc_shares_object(share, buffer,
-						  protocol_version);
-		list_iterator_destroy(itr);
+
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		packstr_array(msg->tres_names, msg->tres_cnt, buffer);
+
+		if (!msg->assoc_shares_list ||
+		    !(count = list_count(msg->assoc_shares_list)))
+			count = NO_VAL;
+
+		pack32(count, buffer);
+		if (count != NO_VAL) {
+			itr = list_iterator_create(msg->assoc_shares_list);
+			while ((share = list_next(itr)))
+				_pack_assoc_shares_object(
+					share, msg->tres_cnt, buffer,
+					protocol_version);
+			list_iterator_destroy(itr);
+		}
+		pack64(msg->tot_shares, buffer);
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		if (!msg->assoc_shares_list ||
+		    !(count = list_count(msg->assoc_shares_list)))
+			count = NO_VAL;
+
+		pack32(count, buffer);
+		if (count != NO_VAL) {
+			itr = list_iterator_create(msg->assoc_shares_list);
+			while ((share = list_next(itr)))
+				_pack_assoc_shares_object(
+					share, msg->tres_cnt, buffer,
+					protocol_version);
+			list_iterator_destroy(itr);
+		}
+		pack64(msg->tot_shares, buffer);
 	}
-	pack64(msg->tot_shares, buffer);
 }
 
 static int _unpack_shares_response_msg(shares_response_msg_t ** msg,
@@ -2240,25 +2864,55 @@ static int _unpack_shares_response_msg(shares_response_msg_t ** msg,
 	int i = 0;
 	void *tmp_info = NULL;
 	shares_response_msg_t *object_ptr = NULL;
-	xassert(msg != NULL);
+
+	xassert(msg);
 
 	object_ptr = xmalloc(sizeof(shares_response_msg_t));
 	*msg = object_ptr;
 
-	safe_unpack32(&count, buffer);
-	if (count != NO_VAL) {
-		object_ptr->assoc_shares_list =
-			list_create(slurm_destroy_association_shares_object);
-		for (i=0; i<count; i++) {
-			if (_unpack_assoc_shares_object(&tmp_info, buffer,
-						       protocol_version)
-			   != SLURM_SUCCESS)
-				goto unpack_error;
-			list_append(object_ptr->assoc_shares_list, tmp_info);
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpackstr_array(&object_ptr->tres_names,
+				     &object_ptr->tres_cnt, buffer);
+
+		safe_unpack32(&count, buffer);
+		if (count != NO_VAL) {
+			object_ptr->assoc_shares_list =
+				list_create(slurm_destroy_assoc_shares_object);
+			for (i=0; i<count; i++) {
+				if (_unpack_assoc_shares_object(
+					    &tmp_info, object_ptr->tres_cnt,
+					    buffer, protocol_version)
+				    != SLURM_SUCCESS)
+					goto unpack_error;
+				list_append(object_ptr->assoc_shares_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack64(&object_ptr->tot_shares, buffer);
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		object_ptr->tres_names = xmalloc(sizeof(char *));
+		object_ptr->tres_names[0] = xstrdup("cpu");
+		object_ptr->tres_cnt = 1;
+
+		safe_unpack32(&count, buffer);
+		if (count != NO_VAL) {
+			object_ptr->assoc_shares_list =
+				list_create(slurm_destroy_assoc_shares_object);
+			for (i=0; i<count; i++) {
+				if (_unpack_assoc_shares_object(
+					    &tmp_info, object_ptr->tres_cnt,
+					    buffer, protocol_version)
+				    != SLURM_SUCCESS)
+					goto unpack_error;
+				list_append(object_ptr->assoc_shares_list,
+					    tmp_info);
+			}
 		}
+
+		safe_unpack64(&object_ptr->tot_shares, buffer);
 	}
 
-	safe_unpack64(&object_ptr->tot_shares, buffer);
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -2273,50 +2927,109 @@ static void _pack_priority_factors_object(void *in, Buf buffer,
 {
 	priority_factors_object_t *object = (priority_factors_object_t *)in;
 
-	if (!object) {
-		pack32(0, buffer);
-		pack32(0, buffer);
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		if (!object) {
+			pack32(0, buffer);
+			pack32(0, buffer);
 
-		packdouble(0, buffer);
-		packdouble(0, buffer);
-		packdouble(0, buffer);
-		packdouble(0, buffer);
-		packdouble(0, buffer);
+			packdouble(0, buffer);
+			packdouble(0, buffer);
+			packdouble(0, buffer);
+			packdouble(0, buffer);
+			packdouble(0, buffer);
 
-		pack16(0, buffer);
+			pack16(0, buffer);
 
-		return;
-	}
+			return;
+		}
+
+		pack32(object->job_id, buffer);
+		pack32(object->user_id, buffer);
+
+		packdouble(object->priority_age, buffer);
+		packdouble(object->priority_fs, buffer);
+		packdouble(object->priority_js, buffer);
+		packdouble(object->priority_part, buffer);
+		packdouble(object->priority_qos, buffer);
 
-	pack32(object->job_id, buffer);
-	pack32(object->user_id, buffer);
+		packdouble_array(object->priority_tres, object->tres_cnt,
+				 buffer);
+		pack32(object->tres_cnt, buffer);
+		packstr_array(assoc_mgr_tres_name_array, object->tres_cnt,
+			      buffer);
+		packdouble_array(object->tres_weights, object->tres_cnt,
+				 buffer);
 
-	packdouble(object->priority_age, buffer);
-	packdouble(object->priority_fs, buffer);
-	packdouble(object->priority_js, buffer);
-	packdouble(object->priority_part, buffer);
-	packdouble(object->priority_qos, buffer);
+		pack16(object->nice, buffer);
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		if (!object) {
+			pack32(0, buffer);
+			pack32(0, buffer);
+
+			packdouble(0, buffer);
+			packdouble(0, buffer);
+			packdouble(0, buffer);
+			packdouble(0, buffer);
+			packdouble(0, buffer);
 
-	pack16(object->nice, buffer);
+			pack16(0, buffer);
+
+			return;
+		}
+
+		pack32(object->job_id, buffer);
+		pack32(object->user_id, buffer);
+
+		packdouble(object->priority_age, buffer);
+		packdouble(object->priority_fs, buffer);
+		packdouble(object->priority_js, buffer);
+		packdouble(object->priority_part, buffer);
+		packdouble(object->priority_qos, buffer);
+
+		pack16(object->nice, buffer);
+	}
 }
 
 static int _unpack_priority_factors_object(void **object, Buf buffer,
 					   uint16_t protocol_version)
 {
+	uint32_t tmp32;
+
 	priority_factors_object_t *object_ptr =
 		xmalloc(sizeof(priority_factors_object_t));
-
 	*object = (void *) object_ptr;
-	safe_unpack32(&object_ptr->job_id, buffer);
-	safe_unpack32(&object_ptr->user_id, buffer);
 
-	safe_unpackdouble(&object_ptr->priority_age, buffer);
-	safe_unpackdouble(&object_ptr->priority_fs, buffer);
-	safe_unpackdouble(&object_ptr->priority_js, buffer);
-	safe_unpackdouble(&object_ptr->priority_part, buffer);
-	safe_unpackdouble(&object_ptr->priority_qos, buffer);
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpack32(&object_ptr->job_id, buffer);
+		safe_unpack32(&object_ptr->user_id, buffer);
+
+		safe_unpackdouble(&object_ptr->priority_age, buffer);
+		safe_unpackdouble(&object_ptr->priority_fs, buffer);
+		safe_unpackdouble(&object_ptr->priority_js, buffer);
+		safe_unpackdouble(&object_ptr->priority_part, buffer);
+		safe_unpackdouble(&object_ptr->priority_qos, buffer);
+
+		safe_unpackdouble_array(&object_ptr->priority_tres, &tmp32,
+					buffer);
+		safe_unpack32(&object_ptr->tres_cnt, buffer);
+		safe_unpackstr_array(&object_ptr->tres_names,
+				     &object_ptr->tres_cnt, buffer);
+		safe_unpackdouble_array(&object_ptr->tres_weights, &tmp32,
+					buffer);
+
+		safe_unpack16(&object_ptr->nice, buffer);
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		safe_unpack32(&object_ptr->job_id, buffer);
+		safe_unpack32(&object_ptr->user_id, buffer);
 
-	safe_unpack16(&object_ptr->nice, buffer);
+		safe_unpackdouble(&object_ptr->priority_age, buffer);
+		safe_unpackdouble(&object_ptr->priority_fs, buffer);
+		safe_unpackdouble(&object_ptr->priority_js, buffer);
+		safe_unpackdouble(&object_ptr->priority_part, buffer);
+		safe_unpackdouble(&object_ptr->priority_qos, buffer);
+
+		safe_unpack16(&object_ptr->nice, buffer);
+	}
 
 	return SLURM_SUCCESS;
 
@@ -2613,32 +3326,107 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+static void
+_pack_update_layout_msg(update_layout_msg_t * msg, Buf buffer,
+			uint16_t protocol_version)
+{
+	xassert(msg != NULL);
+
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		packstr(msg->layout, buffer);
+		packstr(msg->arg, buffer);
+	} else {
+		error("%s: protocol_version %hu not supported",
+		      __func__, protocol_version);
+	}
+}
+
+static int _unpack_update_layout_msg(update_layout_msg_t ** msg, Buf buffer,
+				      uint16_t protocol_version)
+{
+	uint32_t uint32_tmp;
+	update_layout_msg_t *tmp_ptr;
+
+	/* alloc memory for structure */
+	xassert(msg != NULL);
+	tmp_ptr = xmalloc(sizeof(update_layout_msg_t));
+	*msg = tmp_ptr;
+
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&tmp_ptr->layout,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->arg,
+				       &uint32_tmp, buffer);
+	} else {
+		error("%s: protocol_version %hu not supported",
+		      __func__, protocol_version);
+		goto unpack_error;
+	}
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_update_layout_msg(tmp_ptr);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
 static void
 _pack_acct_gather_node_resp_msg(acct_gather_node_resp_msg_t *msg,
 				Buf buffer, uint16_t protocol_version)
 {
+	unsigned int i;
+
 	xassert(msg != NULL);
 
-	packstr(msg->node_name, buffer);
-	acct_gather_energy_pack(msg->energy, buffer, protocol_version);
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		packstr(msg->node_name, buffer);
+		pack16(msg->sensor_cnt, buffer);
+		for (i = 0; i < msg->sensor_cnt; i++)
+			acct_gather_energy_pack(&msg->energy[i],
+						buffer, protocol_version);
+	} else {
+		acct_gather_energy_t *energy = NULL;
+
+		packstr(msg->node_name, buffer);
+		if (msg->sensor_cnt)
+			energy = &msg->energy[0];
+		acct_gather_energy_pack(energy, buffer, protocol_version);
+	}
+
 }
 static int
 _unpack_acct_gather_node_resp_msg(acct_gather_node_resp_msg_t **msg,
 				  Buf buffer, uint16_t protocol_version)
 {
+	unsigned int i;
 	acct_gather_node_resp_msg_t *node_data_ptr;
 	uint32_t uint32_tmp;
+	acct_gather_energy_t *e;
 	/* alloc memory for structure */
 	xassert(msg != NULL);
 	node_data_ptr = xmalloc(sizeof(acct_gather_node_resp_msg_t));
 	*msg = node_data_ptr;
-
-	safe_unpackstr_xmalloc(&node_data_ptr->node_name,
-			       &uint32_tmp, buffer);
-	if (acct_gather_energy_unpack(&node_data_ptr->energy, buffer,
-				      protocol_version) != SLURM_SUCCESS)
-		goto unpack_error;
-
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&node_data_ptr->node_name,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&node_data_ptr->sensor_cnt, buffer);
+		node_data_ptr->energy = xmalloc(sizeof(acct_gather_energy_t)
+						* node_data_ptr->sensor_cnt);
+		for (i = 0; i < node_data_ptr->sensor_cnt; ++i) {
+			e = &node_data_ptr->energy[i];
+			if (acct_gather_energy_unpack(
+				    &e, buffer, protocol_version, 0)
+			    != SLURM_SUCCESS)
+				goto unpack_error;
+		}
+	} else {
+		safe_unpackstr_xmalloc(&node_data_ptr->node_name,
+				       &uint32_tmp, buffer);
+		if (acct_gather_energy_unpack(&node_data_ptr->energy, buffer,
+					      protocol_version, 1)
+		    != SLURM_SUCCESS)
+			goto unpack_error;
+	}
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -2686,7 +3474,7 @@ _pack_node_registration_status_msg(slurm_node_registration_status_msg_t *
 	uint32_t gres_info_size = 0;
 	xassert(msg != NULL);
 
-	if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		pack_time(msg->timestamp, buffer);
 		pack_time(msg->slurmd_start_time, buffer);
 		pack32(msg->status, buffer);
@@ -2704,6 +3492,7 @@ _pack_node_registration_status_msg(slurm_node_registration_status_msg_t *
 		pack32(msg->up_time, buffer);
 		pack32(msg->hash_val, buffer);
 		pack32(msg->cpu_load, buffer);
+		pack32(msg->free_mem, buffer);
 
 		pack32(msg->job_count, buffer);
 		for (i = 0; i < msg->job_count; i++) {
@@ -2725,12 +3514,13 @@ _pack_node_registration_status_msg(slurm_node_registration_status_msg_t *
 		}
 		acct_gather_energy_pack(msg->energy, buffer, protocol_version);
 		packstr(msg->version, buffer);
-	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
 		pack_time(msg->timestamp, buffer);
 		pack_time(msg->slurmd_start_time, buffer);
 		pack32(msg->status, buffer);
 		packstr(msg->node_name, buffer);
 		packstr(msg->arch, buffer);
+		packstr(msg->cpu_spec_list, buffer);
 		packstr(msg->os, buffer);
 		pack16(msg->cpus, buffer);
 		pack16(msg->boards, buffer);
@@ -2763,7 +3553,7 @@ _pack_node_registration_status_msg(slurm_node_registration_status_msg_t *
 		}
 		acct_gather_energy_pack(msg->energy, buffer, protocol_version);
 		packstr(msg->version, buffer);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		pack_time(msg->timestamp, buffer);
 		pack_time(msg->slurmd_start_time, buffer);
 		pack32(msg->status, buffer);
@@ -2800,6 +3590,7 @@ _pack_node_registration_status_msg(slurm_node_registration_status_msg_t *
 				buffer);
 		}
 		acct_gather_energy_pack(msg->energy, buffer, protocol_version);
+		packstr(msg->version, buffer);
 	} else {
 		error("_pack_node_registration_status_msg: protocol_version "
 		      "%hu not supported", protocol_version);
@@ -2821,7 +3612,7 @@ _unpack_node_registration_status_msg(slurm_node_registration_status_msg_t
 	node_reg_ptr = xmalloc(sizeof(slurm_node_registration_status_msg_t));
 	*msg = node_reg_ptr;
 
-	if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		/* unpack timestamp of snapshot */
 		safe_unpack_time(&node_reg_ptr->timestamp, buffer);
 		safe_unpack_time(&node_reg_ptr->slurmd_start_time, buffer);
@@ -2844,6 +3635,7 @@ _unpack_node_registration_status_msg(slurm_node_registration_status_msg_t
 		safe_unpack32(&node_reg_ptr->up_time, buffer);
 		safe_unpack32(&node_reg_ptr->hash_val, buffer);
 		safe_unpack32(&node_reg_ptr->cpu_load, buffer);
+		safe_unpack32(&node_reg_ptr->free_mem, buffer);
 
 		safe_unpack32(&node_reg_ptr->job_count, buffer);
 		node_reg_ptr->job_id =
@@ -2875,12 +3667,12 @@ _unpack_node_registration_status_msg(slurm_node_registration_status_msg_t
 							     gres_info_size);
 		}
 		if (acct_gather_energy_unpack(&node_reg_ptr->energy, buffer,
-					      protocol_version)
+					      protocol_version, 1)
 		    != SLURM_SUCCESS)
 			goto unpack_error;
 		safe_unpackstr_xmalloc(&node_reg_ptr->version,
 				       &uint32_tmp, buffer);
-	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
 		/* unpack timestamp of snapshot */
 		safe_unpack_time(&node_reg_ptr->timestamp, buffer);
 		safe_unpack_time(&node_reg_ptr->slurmd_start_time, buffer);
@@ -2890,6 +3682,8 @@ _unpack_node_registration_status_msg(slurm_node_registration_status_msg_t
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&node_reg_ptr->arch,
 				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&node_reg_ptr->cpu_spec_list,
+				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&node_reg_ptr->os, &uint32_tmp, buffer);
 		safe_unpack16(&node_reg_ptr->cpus, buffer);
 		safe_unpack16(&node_reg_ptr->boards, buffer);
@@ -2932,12 +3726,12 @@ _unpack_node_registration_status_msg(slurm_node_registration_status_msg_t
 							     gres_info_size);
 		}
 		if (acct_gather_energy_unpack(&node_reg_ptr->energy, buffer,
-					      protocol_version)
+					      protocol_version, 1)
 		    != SLURM_SUCCESS)
 			goto unpack_error;
 		safe_unpackstr_xmalloc(&node_reg_ptr->version,
 				       &uint32_tmp, buffer);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		/* unpack timestamp of snapshot */
 		safe_unpack_time(&node_reg_ptr->timestamp, buffer);
 		safe_unpack_time(&node_reg_ptr->slurmd_start_time, buffer);
@@ -2989,9 +3783,11 @@ _unpack_node_registration_status_msg(slurm_node_registration_status_msg_t
 							     gres_info_size);
 		}
 		if (acct_gather_energy_unpack(&node_reg_ptr->energy, buffer,
-					      protocol_version)
+					      protocol_version, 1)
 		    != SLURM_SUCCESS)
 			goto unpack_error;
+		safe_unpackstr_xmalloc(&node_reg_ptr->version,
+				       &uint32_tmp, buffer);
 	} else {
 		error("_unpack_node_registration_status_msg: protocol_version "
 		      "%hu not supported", protocol_version);
@@ -3012,30 +3808,38 @@ _pack_resource_allocation_response_msg(resource_allocation_response_msg_t *msg,
 {
 	xassert(msg != NULL);
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		packstr(msg->account, buffer);
+		packstr(msg->alias_list, buffer);
+		packstr_array(msg->environment, msg->env_size, buffer);
 		pack32(msg->error_code, buffer);
 		pack32(msg->job_id, buffer);
-		pack32(msg->pn_min_memory, buffer);
-		packstr(msg->alias_list, buffer);
+		pack32(msg->node_cnt, buffer);
 		packstr(msg->node_list, buffer);
-		packstr(msg->partition, buffer);
-
 		pack32(msg->num_cpu_groups, buffer);
 		if (msg->num_cpu_groups) {
-			pack16_array(msg->cpus_per_node, msg->num_cpu_groups, buffer);
-			pack32_array(msg->cpu_count_reps, msg->num_cpu_groups, buffer);
+			pack16_array(msg->cpus_per_node,
+				     msg->num_cpu_groups,
+				     buffer);
+			pack32_array(msg->cpu_count_reps,
+				     msg->num_cpu_groups,
+				     buffer);
 		}
-
-		pack32(msg->node_cnt, buffer);
-
-		select_g_select_jobinfo_pack(msg->select_jobinfo, buffer,
+		packstr(msg->partition, buffer);
+		pack32(msg->pn_min_memory, buffer);
+		packstr(msg->qos, buffer);
+		packstr(msg->resv_name, buffer);
+		select_g_select_jobinfo_pack(msg->select_jobinfo,
+					     buffer,
 					     protocol_version);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		pack32(msg->error_code, buffer);
 		pack32(msg->job_id, buffer);
 		pack32(msg->pn_min_memory, buffer);
 		packstr(msg->alias_list, buffer);
 		packstr(msg->node_list, buffer);
+		packstr(msg->partition, buffer);
 
 		pack32(msg->num_cpu_groups, buffer);
 		if (msg->num_cpu_groups) {
@@ -3066,18 +3870,18 @@ _unpack_resource_allocation_response_msg(
 	tmp_ptr = xmalloc(sizeof(resource_allocation_response_msg_t));
 	*msg = tmp_ptr;
 
-	/* load the data values */
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
-		safe_unpack32(&tmp_ptr->error_code, buffer);
-		safe_unpack32(&tmp_ptr->job_id, buffer);
-		safe_unpack32(&tmp_ptr->pn_min_memory, buffer);
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&tmp_ptr->account, &uint32_tmp,
+				       buffer);
 		safe_unpackstr_xmalloc(&tmp_ptr->alias_list, &uint32_tmp,
 				       buffer);
+		safe_unpackstr_array(&tmp_ptr->environment,
+				     &tmp_ptr->env_size, buffer);
+		safe_unpack32(&tmp_ptr->error_code, buffer);
+		safe_unpack32(&tmp_ptr->job_id, buffer);
+		safe_unpack32(&tmp_ptr->node_cnt, buffer);
 		safe_unpackstr_xmalloc(&tmp_ptr->node_list, &uint32_tmp,
 				       buffer);
-		safe_unpackstr_xmalloc(&tmp_ptr->partition, &uint32_tmp,
-				       buffer);
-
 		safe_unpack32(&tmp_ptr->num_cpu_groups, buffer);
 		if (tmp_ptr->num_cpu_groups > 0) {
 			safe_unpack16_array(&tmp_ptr->cpus_per_node,
@@ -3092,12 +3896,17 @@ _unpack_resource_allocation_response_msg(
 			tmp_ptr->cpus_per_node = NULL;
 			tmp_ptr->cpu_count_reps = NULL;
 		}
-
-		safe_unpack32(&tmp_ptr->node_cnt, buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->partition, &uint32_tmp,
+				       buffer);
+		safe_unpack32(&tmp_ptr->pn_min_memory, buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->qos, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->resv_name, &uint32_tmp,
+				       buffer);
 		if (select_g_select_jobinfo_unpack(&tmp_ptr->select_jobinfo,
 						   buffer, protocol_version))
 			goto unpack_error;
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		safe_unpack32(&tmp_ptr->error_code, buffer);
 		safe_unpack32(&tmp_ptr->job_id, buffer);
 		safe_unpack32(&tmp_ptr->pn_min_memory, buffer);
@@ -3105,6 +3914,8 @@ _unpack_resource_allocation_response_msg(
 				       buffer);
 		safe_unpackstr_xmalloc(&tmp_ptr->node_list, &uint32_tmp,
 				       buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->partition, &uint32_tmp,
+				       buffer);
 
 		safe_unpack32(&tmp_ptr->num_cpu_groups, buffer);
 		if (tmp_ptr->num_cpu_groups > 0) {
@@ -3368,7 +4179,7 @@ _unpack_node_info_members(node_info_t * node, Buf buffer,
 
 	tmp_state = node->node_state;
 
-	if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&node->name, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&node->node_hostname, &uint32_tmp,
 				       buffer);
@@ -3385,12 +4196,14 @@ _unpack_node_info_members(node_info_t * node, Buf buffer,
 		safe_unpack32(&node->real_memory, buffer);
 		safe_unpack32(&node->tmp_disk, buffer);
 
+		safe_unpack32(&node->owner, buffer);
 		safe_unpack16(&node->core_spec_cnt, buffer);
 		safe_unpack32(&node->mem_spec_limit, buffer);
 		safe_unpackstr_xmalloc(&node->cpu_spec_list, &uint32_tmp,
 				       buffer);
 
 		safe_unpack32(&node->cpu_load, buffer);
+		safe_unpack32(&node->free_mem, buffer);
 		safe_unpack32(&node->weight, buffer);
 		safe_unpack32(&node->reason_uid, buffer);
 
@@ -3409,19 +4222,25 @@ _unpack_node_info_members(node_info_t * node, Buf buffer,
 		safe_unpackstr_xmalloc(&node->os, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&node->reason, &uint32_tmp, buffer);
 		if (acct_gather_energy_unpack(&node->energy, buffer,
-					      protocol_version)
+					      protocol_version, 1)
 		    != SLURM_SUCCESS)
 			goto unpack_error;
 		if (ext_sensors_data_unpack(&node->ext_sensors, buffer,
 					      protocol_version)
 		    != SLURM_SUCCESS)
 			goto unpack_error;
-	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		if (power_mgmt_data_unpack(&node->power, buffer,
+					   protocol_version) != SLURM_SUCCESS)
+			goto unpack_error;
+
+		safe_unpackstr_xmalloc(&node->tres_fmt_str, &uint32_tmp,
+				       buffer);
+	} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&node->name, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&node->node_hostname, &uint32_tmp,
 				       buffer);
 		safe_unpackstr_xmalloc(&node->node_addr, &uint32_tmp, buffer);
-		safe_unpack16(&tmp_state, buffer);
+		safe_unpack32(&node->node_state, buffer);
 		safe_unpackstr_xmalloc(&node->version, &uint32_tmp, buffer);
 
 		safe_unpack16(&node->cpus, buffer);
@@ -3432,6 +4251,12 @@ _unpack_node_info_members(node_info_t * node, Buf buffer,
 
 		safe_unpack32(&node->real_memory, buffer);
 		safe_unpack32(&node->tmp_disk, buffer);
+
+		safe_unpack16(&node->core_spec_cnt, buffer);
+		safe_unpack32(&node->mem_spec_limit, buffer);
+		safe_unpackstr_xmalloc(&node->cpu_spec_list, &uint32_tmp,
+				       buffer);
+
 		safe_unpack32(&node->cpu_load, buffer);
 		safe_unpack32(&node->weight, buffer);
 		safe_unpack32(&node->reason_uid, buffer);
@@ -3446,22 +4271,26 @@ _unpack_node_info_members(node_info_t * node, Buf buffer,
 		safe_unpackstr_xmalloc(&node->arch, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&node->features, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&node->gres, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&node->gres_drain, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&node->gres_used, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&node->os, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&node->reason, &uint32_tmp, buffer);
 		if (acct_gather_energy_unpack(&node->energy, buffer,
-					      protocol_version)
+					      protocol_version, 1)
 		    != SLURM_SUCCESS)
 			goto unpack_error;
 		if (ext_sensors_data_unpack(&node->ext_sensors, buffer,
 					      protocol_version)
 		    != SLURM_SUCCESS)
 			goto unpack_error;
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&node->name, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&node->node_hostname, &uint32_tmp,
 				       buffer);
 		safe_unpackstr_xmalloc(&node->node_addr, &uint32_tmp, buffer);
 		safe_unpack16(&tmp_state, buffer);
+		safe_unpackstr_xmalloc(&node->version, &uint32_tmp, buffer);
+
 		safe_unpack16(&node->cpus, buffer);
 		safe_unpack16(&node->boards, buffer);
 		safe_unpack16(&node->sockets, buffer);
@@ -3487,7 +4316,7 @@ _unpack_node_info_members(node_info_t * node, Buf buffer,
 		safe_unpackstr_xmalloc(&node->os, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&node->reason, &uint32_tmp, buffer);
 		if (acct_gather_energy_unpack(&node->energy, buffer,
-					      protocol_version)
+					      protocol_version, 1)
 		    != SLURM_SUCCESS)
 			goto unpack_error;
 		if (ext_sensors_data_unpack(&node->ext_sensors, buffer,
@@ -3512,11 +4341,12 @@ _pack_update_partition_msg(update_part_msg_t * msg, Buf buffer,
 {
 	xassert(msg != NULL);
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		packstr(msg->allow_accounts, buffer);
 		packstr(msg->allow_alloc_nodes, buffer);
 		packstr(msg->allow_groups, buffer);
 		packstr(msg->allow_qos,    buffer);
+		packstr(msg->qos_char,     buffer);
 		packstr(msg->alternate,    buffer);
 		packstr(msg->deny_accounts, buffer);
 		packstr(msg->deny_qos,     buffer);
@@ -3537,9 +4367,17 @@ _pack_update_partition_msg(update_part_msg_t * msg, Buf buffer,
 		pack16(msg-> preempt_mode, buffer);
 		pack16(msg-> priority,     buffer);
 		pack16(msg-> state_up,     buffer);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		packstr(msg->allow_accounts, buffer);
+		packstr(msg->allow_alloc_nodes, buffer);
 		packstr(msg->allow_groups, buffer);
+		packstr(msg->allow_qos,    buffer);
 		packstr(msg->alternate,    buffer);
+		packstr(msg->deny_accounts, buffer);
+		packstr(msg->deny_qos,     buffer);
+		packstr(msg->name,         buffer);
+		packstr(msg->nodes,        buffer);
+
 		pack32(msg-> grace_time,   buffer);
 		pack32(msg-> max_time,     buffer);
 		pack32(msg-> default_time, buffer);
@@ -3548,15 +4386,12 @@ _pack_update_partition_msg(update_part_msg_t * msg, Buf buffer,
 		pack32(msg-> max_cpus_per_node, buffer);
 		pack32(msg-> def_mem_per_cpu, buffer);
 		pack32(msg-> max_mem_per_cpu, buffer);
-		packstr(msg->name,         buffer);
-		packstr(msg->nodes,        buffer);
+
 		pack16(msg-> flags,        buffer);
 		pack16(msg-> max_share,    buffer);
 		pack16(msg-> preempt_mode, buffer);
 		pack16(msg-> priority,     buffer);
 		pack16(msg-> state_up,     buffer);
-
-		packstr(msg->allow_alloc_nodes, buffer);
 	} else {
 		error("_pack_update_partition_msg: protocol_version "
 		      "%hu not supported", protocol_version);
@@ -3576,7 +4411,7 @@ _unpack_update_partition_msg(update_part_msg_t ** msg, Buf buffer,
 	tmp_ptr = xmalloc(sizeof(update_part_msg_t));
 	*msg = tmp_ptr;
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&tmp_ptr->allow_accounts,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&tmp_ptr->allow_alloc_nodes,
@@ -3585,6 +4420,8 @@ _unpack_update_partition_msg(update_part_msg_t ** msg, Buf buffer,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&tmp_ptr->allow_qos,
 				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->qos_char,
+				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&tmp_ptr->alternate, &uint32_tmp,
 				       buffer);
 		safe_unpackstr_xmalloc(&tmp_ptr->deny_accounts,
@@ -3608,11 +4445,24 @@ _unpack_update_partition_msg(update_part_msg_t ** msg, Buf buffer,
 		safe_unpack16(&tmp_ptr->preempt_mode, buffer);
 		safe_unpack16(&tmp_ptr->priority,  buffer);
 		safe_unpack16(&tmp_ptr->state_up,  buffer);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&tmp_ptr->allow_accounts,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->allow_alloc_nodes,
+				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&tmp_ptr->allow_groups,
 				       &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&tmp_ptr->alternate,
+		safe_unpackstr_xmalloc(&tmp_ptr->allow_qos,
 				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->alternate, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->deny_accounts,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->deny_qos,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&tmp_ptr->nodes, &uint32_tmp, buffer);
+
 		safe_unpack32(&tmp_ptr->grace_time, buffer);
 		safe_unpack32(&tmp_ptr->max_time, buffer);
 		safe_unpack32(&tmp_ptr->default_time, buffer);
@@ -3621,17 +4471,12 @@ _unpack_update_partition_msg(update_part_msg_t ** msg, Buf buffer,
 		safe_unpack32(&tmp_ptr->max_cpus_per_node, buffer);
 		safe_unpack32(&tmp_ptr->def_mem_per_cpu, buffer);
 		safe_unpack32(&tmp_ptr->max_mem_per_cpu, buffer);
-		safe_unpackstr_xmalloc(&tmp_ptr->name, &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&tmp_ptr->nodes, &uint32_tmp, buffer);
 
 		safe_unpack16(&tmp_ptr->flags,     buffer);
 		safe_unpack16(&tmp_ptr->max_share, buffer);
 		safe_unpack16(&tmp_ptr->preempt_mode, buffer);
 		safe_unpack16(&tmp_ptr->priority,  buffer);
 		safe_unpack16(&tmp_ptr->state_up,  buffer);
-
-		safe_unpackstr_xmalloc(&tmp_ptr->allow_alloc_nodes,
-				       &uint32_tmp, buffer);
 	} else {
 		error("_unpack_update_partition_msg: protocol_version "
 		      "%hu not supported", protocol_version);
@@ -3645,6 +4490,22 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+static void
+_pack_update_powercap_msg(update_powercap_msg_t * msg, Buf buffer,
+			  uint16_t protocol_version)
+{
+	_pack_powercap_info_msg((powercap_info_msg_t *) msg,
+				buffer, protocol_version);
+}
+
+static int
+_unpack_update_powercap_msg(update_powercap_msg_t ** msg, Buf buffer,
+			    uint16_t protocol_version)
+{
+	return _unpack_powercap_info_msg((powercap_info_msg_t **) msg,
+					 buffer, protocol_version);
+}
+
 static void
 _pack_update_resv_msg(resv_desc_msg_t * msg, Buf buffer,
 		      uint16_t protocol_version)
@@ -3652,7 +4513,7 @@ _pack_update_resv_msg(resv_desc_msg_t * msg, Buf buffer,
 	uint32_t array_len;
 	xassert(msg != NULL);
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		packstr(msg->name,         buffer);
 		pack_time(msg->start_time, buffer);
 		pack_time(msg->end_time,   buffer);
@@ -3681,16 +4542,16 @@ _pack_update_resv_msg(resv_desc_msg_t * msg, Buf buffer,
 		packstr(msg->licenses,     buffer);
 		packstr(msg->partition,    buffer);
 
+		pack32(msg->resv_watts,    buffer);
 		packstr(msg->users,        buffer);
 		packstr(msg->accounts,     buffer);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		uint16_t flags;
+		packstr(msg->burst_buffer, buffer);
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		packstr(msg->name,         buffer);
 		pack_time(msg->start_time, buffer);
 		pack_time(msg->end_time,   buffer);
 		pack32(msg->duration,      buffer);
-		flags = (uint16_t) msg->flags;
-		pack16(flags,              buffer);
+		pack32(msg->flags,         buffer);
 		if (msg->node_cnt) {
 			for (array_len = 0; msg->node_cnt[array_len];
 			     array_len++) {
@@ -3735,7 +4596,7 @@ _unpack_update_resv_msg(resv_desc_msg_t ** msg, Buf buffer,
 	tmp_ptr = xmalloc(sizeof(resv_desc_msg_t));
 	*msg = tmp_ptr;
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&tmp_ptr->name, &uint32_tmp, buffer);
 		safe_unpack_time(&tmp_ptr->start_time, buffer);
 		safe_unpack_time(&tmp_ptr->end_time,   buffer);
@@ -3767,19 +4628,20 @@ _unpack_update_resv_msg(resv_desc_msg_t ** msg, Buf buffer,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&tmp_ptr->partition,
 				       &uint32_tmp, buffer);
+		safe_unpack32(&tmp_ptr->resv_watts, buffer);
 
 		safe_unpackstr_xmalloc(&tmp_ptr->users,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&tmp_ptr->accounts,
 				       &uint32_tmp, buffer);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		uint16_t flags;
+		safe_unpackstr_xmalloc(&tmp_ptr->burst_buffer,
+				       &uint32_tmp, buffer);
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&tmp_ptr->name, &uint32_tmp, buffer);
 		safe_unpack_time(&tmp_ptr->start_time, buffer);
 		safe_unpack_time(&tmp_ptr->end_time,   buffer);
 		safe_unpack32(&tmp_ptr->duration,      buffer);
-		safe_unpack16(&flags,                  buffer);
-		tmp_ptr->flags = flags;
+		safe_unpack32(&tmp_ptr->flags,         buffer);
 		safe_unpack32_array(&tmp_ptr->node_cnt, &uint32_tmp, buffer);
 		if (uint32_tmp > 0) {
 			/* Must be zero terminated */
@@ -3806,6 +4668,7 @@ _unpack_update_resv_msg(resv_desc_msg_t ** msg, Buf buffer,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&tmp_ptr->partition,
 				       &uint32_tmp, buffer);
+		tmp_ptr->resv_watts = NO_VAL;
 
 		safe_unpackstr_xmalloc(&tmp_ptr->users,
 				       &uint32_tmp, buffer);
@@ -3915,19 +4778,53 @@ pack_job_step_create_request_msg(job_step_create_request_msg_t * msg,
 {
 	xassert(msg != NULL);
 
-	if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		pack32(msg->job_id, buffer);
 		pack32(msg->user_id, buffer);
 		pack32(msg->min_nodes, buffer);
 		pack32(msg->max_nodes, buffer);
 		pack32(msg->cpu_count, buffer);
-		pack32(msg->cpu_freq, buffer);
+		pack32(msg->cpu_freq_min, buffer);
+		pack32(msg->cpu_freq_max, buffer);
+		pack32(msg->cpu_freq_gov, buffer);
 		pack32(msg->num_tasks, buffer);
 		pack32(msg->pn_min_memory, buffer);
 		pack32(msg->time_limit, buffer);
 
 		pack16(msg->relative, buffer);
-		pack16(msg->task_dist, buffer);
+		pack32(msg->task_dist, buffer);
+		pack16(msg->plane_size, buffer);
+		pack16(msg->port, buffer);
+		pack16(msg->ckpt_interval, buffer);
+		pack16(msg->exclusive, buffer);
+		pack16(msg->immediate, buffer);
+		pack16(msg->resv_port_cnt, buffer);
+
+		packstr(msg->host, buffer);
+		packstr(msg->name, buffer);
+		packstr(msg->network, buffer);
+		packstr(msg->node_list, buffer);
+		packstr(msg->ckpt_dir, buffer);
+		packstr(msg->features, buffer);
+		packstr(msg->gres, buffer);
+
+		pack8(msg->no_kill, buffer);
+		pack8(msg->overcommit, buffer);
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		uint16_t old_task_dist;
+		pack32(msg->job_id, buffer);
+		pack32(msg->user_id, buffer);
+		pack32(msg->min_nodes, buffer);
+		pack32(msg->max_nodes, buffer);
+		pack32(msg->cpu_count, buffer);
+		pack32(msg->cpu_freq_max, buffer);
+		pack32(msg->num_tasks, buffer);
+		pack32(msg->pn_min_memory, buffer);
+		pack32(msg->time_limit, buffer);
+
+		pack16(msg->relative, buffer);
+		old_task_dist = task_dist_new2old(msg->task_dist);
+		pack16(old_task_dist, buffer);
 		pack16(msg->plane_size, buffer);
 		pack16(msg->port, buffer);
 		pack16(msg->ckpt_interval, buffer);
@@ -3964,19 +4861,57 @@ unpack_job_step_create_request_msg(job_step_create_request_msg_t ** msg,
 	tmp_ptr = xmalloc(sizeof(job_step_create_request_msg_t));
 	*msg = tmp_ptr;
 
-	if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack32(&(tmp_ptr->job_id), buffer);
 		safe_unpack32(&(tmp_ptr->user_id), buffer);
 		safe_unpack32(&(tmp_ptr->min_nodes), buffer);
 		safe_unpack32(&(tmp_ptr->max_nodes), buffer);
 		safe_unpack32(&(tmp_ptr->cpu_count), buffer);
-		safe_unpack32(&(tmp_ptr->cpu_freq), buffer);
+		safe_unpack32(&(tmp_ptr->cpu_freq_min), buffer);
+		safe_unpack32(&(tmp_ptr->cpu_freq_max), buffer);
+		safe_unpack32(&(tmp_ptr->cpu_freq_gov), buffer);
 		safe_unpack32(&(tmp_ptr->num_tasks), buffer);
 		safe_unpack32(&(tmp_ptr->pn_min_memory), buffer);
 		safe_unpack32(&(tmp_ptr->time_limit), buffer);
 
 		safe_unpack16(&(tmp_ptr->relative), buffer);
-		safe_unpack16(&(tmp_ptr->task_dist), buffer);
+		safe_unpack32(&(tmp_ptr->task_dist), buffer);
+		safe_unpack16(&(tmp_ptr->plane_size), buffer);
+		safe_unpack16(&(tmp_ptr->port), buffer);
+		safe_unpack16(&(tmp_ptr->ckpt_interval), buffer);
+		safe_unpack16(&(tmp_ptr->exclusive), buffer);
+		safe_unpack16(&(tmp_ptr->immediate), buffer);
+		safe_unpack16(&(tmp_ptr->resv_port_cnt), buffer);
+
+		safe_unpackstr_xmalloc(&(tmp_ptr->host), &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&(tmp_ptr->name), &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&(tmp_ptr->network), &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&(tmp_ptr->node_list), &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&(tmp_ptr->ckpt_dir), &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&(tmp_ptr->features), &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&(tmp_ptr->gres), &uint32_tmp, buffer);
+
+		safe_unpack8(&(tmp_ptr->no_kill), buffer);
+		safe_unpack8(&(tmp_ptr->overcommit), buffer);
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		uint16_t old_task_dist = 0;
+		safe_unpack32(&(tmp_ptr->job_id), buffer);
+		safe_unpack32(&(tmp_ptr->user_id), buffer);
+		safe_unpack32(&(tmp_ptr->min_nodes), buffer);
+		safe_unpack32(&(tmp_ptr->max_nodes), buffer);
+		safe_unpack32(&(tmp_ptr->cpu_count), buffer);
+		safe_unpack32(&(tmp_ptr->cpu_freq_max), buffer);
+		safe_unpack32(&(tmp_ptr->num_tasks), buffer);
+		safe_unpack32(&(tmp_ptr->pn_min_memory), buffer);
+		safe_unpack32(&(tmp_ptr->time_limit), buffer);
+
+		safe_unpack16(&(tmp_ptr->relative), buffer);
+		safe_unpack16(&old_task_dist, buffer);
+		tmp_ptr->task_dist = task_dist_old2new(old_task_dist);
 		safe_unpack16(&(tmp_ptr->plane_size), buffer);
 		safe_unpack16(&(tmp_ptr->port), buffer);
 		safe_unpack16(&(tmp_ptr->ckpt_interval), buffer);
@@ -4017,7 +4952,19 @@ _pack_kill_job_msg(kill_job_msg_t * msg, Buf buffer, uint16_t protocol_version)
 {
 	xassert(msg != NULL);
 
-	if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		pack32(msg->job_id,  buffer);
+		pack32(msg->step_id,  buffer);
+		pack32(msg->job_state, buffer);
+		pack32(msg->job_uid, buffer);
+		pack_time(msg->time, buffer);
+		pack_time(msg->start_time, buffer);
+		packstr(msg->nodes, buffer);
+		select_g_select_jobinfo_pack(msg->select_jobinfo, buffer,
+					     protocol_version);
+		packstr_array(msg->spank_job_env, msg->spank_job_env_size,
+			      buffer);
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		pack32(msg->job_id,  buffer);
 		pack32(msg->step_id,  buffer);
 		pack16(msg->job_state, buffer);
@@ -4040,6 +4987,7 @@ _unpack_kill_job_msg(kill_job_msg_t ** msg, Buf buffer,
 		     uint16_t protocol_version)
 {
 	uint32_t uint32_tmp;
+	uint16_t uint16_tmp;
 	kill_job_msg_t *tmp_ptr;
 
 	/* alloc memory for structure */
@@ -4047,10 +4995,24 @@ _unpack_kill_job_msg(kill_job_msg_t ** msg, Buf buffer,
 	tmp_ptr = xmalloc(sizeof(kill_job_msg_t));
 	*msg = tmp_ptr;
 
-	if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack32(&(tmp_ptr->job_id),  buffer);
 		safe_unpack32(&(tmp_ptr->step_id),  buffer);
-		safe_unpack16(&(tmp_ptr->job_state),  buffer);
+		safe_unpack32(&(tmp_ptr->job_state),  buffer);
+		safe_unpack32(&(tmp_ptr->job_uid), buffer);
+		safe_unpack_time(&(tmp_ptr->time), buffer);
+		safe_unpack_time(&(tmp_ptr->start_time), buffer);
+		safe_unpackstr_xmalloc(&(tmp_ptr->nodes), &uint32_tmp, buffer);
+		if (select_g_select_jobinfo_unpack(&tmp_ptr->select_jobinfo,
+						   buffer, protocol_version))
+			goto unpack_error;
+		safe_unpackstr_array(&(tmp_ptr->spank_job_env),
+				     &tmp_ptr->spank_job_env_size, buffer);
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		safe_unpack32(&(tmp_ptr->job_id),  buffer);
+		safe_unpack32(&(tmp_ptr->step_id),  buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		tmp_ptr->job_state = uint16_tmp;
 		safe_unpack32(&(tmp_ptr->job_uid), buffer);
 		safe_unpack_time(&(tmp_ptr->time), buffer);
 		safe_unpack_time(&(tmp_ptr->start_time), buffer);
@@ -4123,7 +5085,6 @@ _pack_epilog_comp_msg(epilog_complete_msg_t * msg, Buf buffer,
 		      uint16_t protocol_version)
 {
 	xassert(msg != NULL);
-
 	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		pack32((uint32_t)msg->job_id, buffer);
 		pack32((uint32_t)msg->return_code, buffer);
@@ -4147,7 +5108,6 @@ _unpack_epilog_comp_msg(epilog_complete_msg_t ** msg, Buf buffer,
 {
 	epilog_complete_msg_t *tmp_ptr;
 	uint32_t uint32_tmp;
-
 	/* alloc memory for structure */
 	xassert(msg);
 	tmp_ptr = xmalloc(sizeof(epilog_complete_msg_t));
@@ -4170,14 +5130,142 @@ _unpack_epilog_comp_msg(epilog_complete_msg_t ** msg, Buf buffer,
 			switch_g_free_node_info(&switch_nodeinfo);
 			goto unpack_error;
 		}
-		switch_g_free_node_info(&switch_nodeinfo);
+		switch_g_free_node_info(&switch_nodeinfo);
+	}
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_epilog_complete_msg(tmp_ptr);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
+static void
+_pack_composite_msg(composite_msg_t *msg, Buf buffer, uint16_t protocol_version)
+{
+	uint32_t count;
+	slurm_msg_t *tmp_info = NULL;
+	ListIterator itr = NULL;
+	Buf tmp_buf;
+
+	xassert(msg);
+
+	if (msg->msg_list)
+		count = list_count(msg->msg_list);
+	else
+		count = NO_VAL;
+
+	pack32(count, buffer);
+
+	slurm_pack_slurm_addr(&msg->sender, buffer);
+	if (count && count != NO_VAL) {
+		itr = list_iterator_create(msg->msg_list);
+		while ((tmp_info = list_next(itr))) {
+			if (tmp_info->protocol_version == (uint16_t)NO_VAL)
+				tmp_info->protocol_version = protocol_version;
+			pack16(tmp_info->protocol_version, buffer);
+			pack16(tmp_info->msg_type, buffer);
+			pack16(tmp_info->flags, buffer);
+			pack16(tmp_info->msg_index, buffer);
+
+			if (!tmp_info->auth_cred) {
+				char *auth = slurm_get_auth_info();
+				/* FIXME: this should handle the
+				   _global_auth_key() as well.
+				*/
+				tmp_info->auth_cred =
+					g_slurm_auth_create(NULL, 2, auth);
+				xfree(auth);
+			}
+
+			g_slurm_auth_pack(tmp_info->auth_cred, buffer);
+
+			if (!tmp_info->data_size) {
+				pack_msg(tmp_info, buffer);
+				continue;
+			}
+
+			/* If we are here it means we are already
+			 * packed so just add our packed buffer to the
+			 * mix.
+			 */
+			if (remaining_buf(buffer) < tmp_info->data_size) {
+				int new_size = buffer->processed +
+					tmp_info->data_size;
+				new_size += 1024; /* padded for paranoia */
+				xrealloc_nz(buffer->head, new_size);
+				buffer->size = new_size;
+			}
+			tmp_buf = tmp_info->data;
+
+			memcpy(&buffer->head[buffer->processed],
+			       &tmp_buf->head[tmp_buf->processed],
+			       tmp_info->data_size);
+			buffer->processed += tmp_info->data_size;
+		}
+		list_iterator_destroy(itr);
+	}
+}
+
+static int
+_unpack_composite_msg(composite_msg_t **msg, Buf buffer,
+		      uint16_t protocol_version)
+{
+	uint32_t count = NO_VAL;
+	int i, rc;
+	slurm_msg_t *tmp_info;
+	composite_msg_t *object_ptr = NULL;
+	char *auth = slurm_get_auth_info();
+
+	xassert(msg);
+	object_ptr = xmalloc(sizeof(composite_msg_t));
+	*msg = object_ptr;
+	safe_unpack32(&count, buffer);
+	slurm_unpack_slurm_addr_no_alloc(&object_ptr->sender, buffer);
+
+	if (count != NO_VAL) {
+		object_ptr->msg_list = list_create(slurm_free_comp_msg_list);
+		for (i=0; i<count; i++) {
+			tmp_info = xmalloc_nz(sizeof(slurm_msg_t));
+			slurm_msg_t_init(tmp_info);
+			safe_unpack16(&tmp_info->protocol_version, buffer);
+			safe_unpack16(&tmp_info->msg_type, buffer);
+			safe_unpack16(&tmp_info->flags, buffer);
+			safe_unpack16(&tmp_info->msg_index, buffer);
+
+			if (!(tmp_info->auth_cred =
+			      g_slurm_auth_unpack(buffer))) {
+				error("authentication: %s ",
+				      g_slurm_auth_errstr(
+					      g_slurm_auth_errno(NULL)));
+				free_buf(buffer);
+				slurm_seterrno(ESLURM_PROTOCOL_INCOMPLETE_PACKET);
+				goto unpack_error;
+			}
+
+			if (unpack_msg(tmp_info, buffer) != SLURM_SUCCESS)
+				goto unpack_error;
+
+			rc = g_slurm_auth_verify(
+				tmp_info->auth_cred, NULL, 2, auth);
+
+			if (rc != SLURM_SUCCESS) {
+				error("authentication: %s ",
+				      g_slurm_auth_errstr(
+					      g_slurm_auth_errno(
+						      tmp_info->auth_cred)));
+				slurm_free_comp_msg_list(tmp_info);
+			} else
+				list_append(object_ptr->msg_list, tmp_info);
+		}
 	}
-
+	xfree(auth);
 	return SLURM_SUCCESS;
-
 unpack_error:
-	slurm_free_epilog_complete_msg(tmp_ptr);
+	slurm_free_composite_msg(object_ptr);
 	*msg = NULL;
+	xfree(auth);
 	return SLURM_ERROR;
 }
 
@@ -4229,7 +5317,18 @@ pack_job_step_create_response_msg(job_step_create_response_msg_t * msg,
 {
 	xassert(msg != NULL);
 
-	if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		packstr(msg->resv_ports, buffer);
+		pack32(msg->job_step_id, buffer);
+		pack_slurm_step_layout(
+			msg->step_layout, buffer, protocol_version);
+		slurm_cred_pack(msg->cred, buffer, protocol_version);
+		select_g_select_jobinfo_pack(
+			msg->select_jobinfo, buffer, protocol_version);
+		switch_g_pack_jobinfo(msg->switch_job, buffer,
+				      protocol_version);
+		pack16(msg->use_protocol_ver, buffer);
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		packstr(msg->resv_ports, buffer);
 		pack32(msg->job_step_id, buffer);
 		pack_slurm_step_layout(
@@ -4257,7 +5356,32 @@ unpack_job_step_create_response_msg(job_step_create_response_msg_t ** msg,
 	tmp_ptr = xmalloc(sizeof(job_step_create_response_msg_t));
 	*msg = tmp_ptr;
 
-	if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(
+			&tmp_ptr->resv_ports, &uint32_tmp, buffer);
+		safe_unpack32(&tmp_ptr->job_step_id, buffer);
+		if (unpack_slurm_step_layout(&tmp_ptr->step_layout, buffer,
+					     protocol_version))
+			goto unpack_error;
+
+		if (!(tmp_ptr->cred = slurm_cred_unpack(
+			      buffer, protocol_version)))
+			goto unpack_error;
+
+		if (select_g_select_jobinfo_unpack(
+			    &tmp_ptr->select_jobinfo, buffer, protocol_version))
+			goto unpack_error;
+		switch_g_alloc_jobinfo(&tmp_ptr->switch_job, NO_VAL,
+				       tmp_ptr->job_step_id);
+		if (switch_g_unpack_jobinfo(tmp_ptr->switch_job, buffer,
+					    protocol_version)) {
+			error("switch_g_unpack_jobinfo: %m");
+			switch_g_free_jobinfo(tmp_ptr->switch_job);
+			goto unpack_error;
+		}
+		safe_unpack16(&tmp_ptr->use_protocol_ver, buffer);
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		tmp_ptr->use_protocol_ver = protocol_version;
 		safe_unpackstr_xmalloc(
 			&tmp_ptr->resv_ports, &uint32_tmp, buffer);
 		safe_unpack32(&tmp_ptr->job_step_id, buffer);
@@ -4340,7 +5464,7 @@ _unpack_partition_info_members(partition_info_t * part, Buf buffer,
 	uint32_t uint32_tmp;
 	char *node_inx_str = NULL;
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&part->name, &uint32_tmp, buffer);
 		if (part->name == NULL)
 			part->name = xmalloc(1);/* part->name = "" implicit */
@@ -4369,6 +5493,8 @@ _unpack_partition_info_members(partition_info_t * part, Buf buffer,
 				       buffer);
 		safe_unpackstr_xmalloc(&part->allow_qos, &uint32_tmp,
 				       buffer);
+		safe_unpackstr_xmalloc(&part->qos_char, &uint32_tmp,
+				       buffer);
 		safe_unpackstr_xmalloc(&part->alternate, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&part->deny_accounts, &uint32_tmp,
 				       buffer);
@@ -4383,8 +5509,11 @@ _unpack_partition_info_members(partition_info_t * part, Buf buffer,
 			xfree(node_inx_str);
 			node_inx_str = NULL;
 		}
-
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&part->billing_weights_str, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&part->tres_fmt_str, &uint32_tmp,
+				       buffer);
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&part->name, &uint32_tmp, buffer);
 		if (part->name == NULL)
 			part->name = xmalloc(1);/* part->name = "" implicit */
@@ -4405,11 +5534,19 @@ _unpack_partition_info_members(partition_info_t * part, Buf buffer,
 		safe_unpack16(&part->state_up,     buffer);
 		safe_unpack16(&part->cr_type ,     buffer);
 
+		safe_unpackstr_xmalloc(&part->allow_accounts, &uint32_tmp,
+				       buffer);
 		safe_unpackstr_xmalloc(&part->allow_groups, &uint32_tmp,
 				       buffer);
 		safe_unpackstr_xmalloc(&part->allow_alloc_nodes, &uint32_tmp,
 				       buffer);
+		safe_unpackstr_xmalloc(&part->allow_qos, &uint32_tmp,
+				       buffer);
 		safe_unpackstr_xmalloc(&part->alternate, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&part->deny_accounts, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&part->deny_qos, &uint32_tmp,
+				       buffer);
 		safe_unpackstr_xmalloc(&part->nodes, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&node_inx_str, &uint32_tmp, buffer);
 		if (node_inx_str == NULL)
@@ -4419,6 +5556,7 @@ _unpack_partition_info_members(partition_info_t * part, Buf buffer,
 			xfree(node_inx_str);
 			node_inx_str = NULL;
 		}
+
 	} else {
 		error("_unpack_partition_info_members: protocol_version "
 		      "%hu not supported", protocol_version);
@@ -4431,6 +5569,50 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+static int
+_unpack_layout_info_msg(layout_info_msg_t ** msg, Buf buffer,
+			uint16_t protocol_version)
+{
+	int i;
+	char **records;
+	uint32_t utmp32, record_count = 0;
+	char *tmp_str = NULL;
+
+	xassert(msg != NULL);
+
+	if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		*msg = xmalloc(sizeof(layout_info_msg_t));
+		(*msg)->record_count = 0;
+		safe_unpack32(&record_count, buffer);
+		(*msg)->records = xmalloc(sizeof(char*) * record_count);
+		records = (*msg)->records;
+		for (i = 0; i < record_count; i++) {
+			safe_unpackstr_xmalloc(&tmp_str, &utmp32, buffer);
+			if (tmp_str != NULL) {
+				if (tmp_str[0] == '\0') {
+					xfree(tmp_str);
+				} else {
+					records[(*msg)->record_count] = tmp_str;
+					// tmp_str = NULL; /* Nothing to free */
+					((*msg)->record_count)++;
+				}
+			}
+		}
+	} else {
+		error("%s: protocol_version %hu not supported",
+		      __func__, protocol_version);
+		goto unpack_error;
+	}
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_layout_info_msg(*msg);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
+
+
 static int
 _unpack_reserve_info_msg(reserve_info_msg_t ** msg, Buf buffer,
 			 uint16_t protocol_version)
@@ -4476,9 +5658,10 @@ _unpack_reserve_info_members(reserve_info_t * resv, Buf buffer,
 	char *node_inx_str = NULL;
 	uint32_t uint32_tmp;
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&resv->accounts,	&uint32_tmp, buffer);
-		safe_unpack32(&resv->core_cnt,		buffer);
+		safe_unpackstr_xmalloc(&resv->burst_buffer,&uint32_tmp, buffer);
+		safe_unpack32(&resv->core_cnt,          buffer);
 		safe_unpack_time(&resv->end_time,	buffer);
 		safe_unpackstr_xmalloc(&resv->features,	&uint32_tmp, buffer);
 		safe_unpack32(&resv->flags,		buffer);
@@ -4487,7 +5670,10 @@ _unpack_reserve_info_members(reserve_info_t * resv, Buf buffer,
 		safe_unpack32(&resv->node_cnt,		buffer);
 		safe_unpackstr_xmalloc(&resv->node_list, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&resv->partition, &uint32_tmp, buffer);
+		safe_unpack32(&resv->resv_watts,        buffer);
 		safe_unpack_time(&resv->start_time,	buffer);
+
+		safe_unpackstr_xmalloc(&resv->tres_str, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&resv->users,	&uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&node_inx_str,   &uint32_tmp, buffer);
 		if (node_inx_str == NULL)
@@ -4497,14 +5683,14 @@ _unpack_reserve_info_members(reserve_info_t * resv, Buf buffer,
 			xfree(node_inx_str);
 			node_inx_str = NULL;
 		}
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		uint16_t flags;
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&resv->accounts,	&uint32_tmp, buffer);
-		safe_unpack32(&resv->core_cnt,		buffer);
+		safe_unpack32(&resv->core_cnt,          buffer);
+		resv->tres_str = xstrdup_printf(
+			"%d=%u", TRES_CPU, resv->core_cnt);
 		safe_unpack_time(&resv->end_time,	buffer);
 		safe_unpackstr_xmalloc(&resv->features,	&uint32_tmp, buffer);
-		safe_unpack16(&flags,			buffer);
-		resv->flags = flags;
+		safe_unpack32(&resv->flags,		buffer);
 		safe_unpackstr_xmalloc(&resv->licenses, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&resv->name,	&uint32_tmp, buffer);
 		safe_unpack32(&resv->node_cnt,		buffer);
@@ -4544,10 +5730,10 @@ _unpack_job_step_info_members(job_step_info_t * step, Buf buffer,
 			      uint16_t protocol_version)
 {
 	uint32_t uint32_tmp = 0;
-	uint16_t uint16_tmp = 0;
+	uint16_t uint16_tmp;
 	char *node_inx_str;
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack32(&step->array_job_id, buffer);
 		safe_unpack32(&step->array_task_id, buffer);
 		safe_unpack32(&step->job_id, buffer);
@@ -4555,10 +5741,13 @@ _unpack_job_step_info_members(job_step_info_t * step, Buf buffer,
 		safe_unpack16(&step->ckpt_interval, buffer);
 		safe_unpack32(&step->user_id, buffer);
 		safe_unpack32(&step->num_cpus, buffer);
-		safe_unpack32(&step->cpu_freq, buffer);
+		safe_unpack32(&step->cpu_freq_min, buffer);
+		safe_unpack32(&step->cpu_freq_max, buffer);
+		safe_unpack32(&step->cpu_freq_gov, buffer);
 		safe_unpack32(&step->num_tasks, buffer);
+		safe_unpack32(&step->task_dist, buffer);
 		safe_unpack32(&step->time_limit, buffer);
-		safe_unpack16(&step->state, buffer);
+		safe_unpack32(&step->state, buffer);
 
 		safe_unpack_time(&step->start_time, buffer);
 		safe_unpack_time(&step->run_time, buffer);
@@ -4577,25 +5766,28 @@ _unpack_job_step_info_members(job_step_info_t * step, Buf buffer,
 			step->node_inx = bitfmt2int(node_inx_str);
 			xfree(node_inx_str);
 		}
+
 		if (select_g_select_jobinfo_unpack(&step->select_jobinfo,
 						   buffer, protocol_version))
 			goto unpack_error;
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+
+		safe_unpackstr_xmalloc(&step->tres_alloc_str,
+				       &uint32_tmp, buffer);
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		safe_unpack32(&step->array_job_id, buffer);
-		safe_unpack16(&uint16_tmp, buffer);
-		if (uint16_tmp == (uint16_t) NO_VAL)
-			step->array_task_id = NO_VAL;
-		else
-			step->array_task_id = (uint32_t) uint16_tmp;
+		safe_unpack32(&step->array_task_id, buffer);
 		safe_unpack32(&step->job_id, buffer);
 		safe_unpack32(&step->step_id, buffer);
 		safe_unpack16(&step->ckpt_interval, buffer);
 		safe_unpack32(&step->user_id, buffer);
 		safe_unpack32(&step->num_cpus, buffer);
-		safe_unpack32(&step->cpu_freq, buffer);
+		step->tres_alloc_str = xstrdup_printf(
+			"%d=%u", TRES_CPU, step->num_cpus);
+		safe_unpack32(&step->cpu_freq_max, buffer);
 		safe_unpack32(&step->num_tasks, buffer);
 		safe_unpack32(&step->time_limit, buffer);
-		safe_unpack16(&step->state, buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		step->state = uint16_tmp;
 
 		safe_unpack_time(&step->start_time, buffer);
 		safe_unpack_time(&step->run_time, buffer);
@@ -4711,6 +5903,66 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+static int
+_unpack_sicp_info_msg(sicp_info_msg_t ** msg, Buf buffer,
+		      uint16_t protocol_version)
+{
+	int i;
+	sicp_info_t *job = NULL;
+	uint16_t uint16_tmp;
+
+	xassert(msg != NULL);
+	*msg = xmalloc(sizeof(sicp_info_msg_t));
+
+	/* load buffer's header (data structure version and time) */
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpack32(&((*msg)->record_count), buffer);
+
+		if (protocol_version == SLURM_PROTOCOL_VERSION) {
+			job = (*msg)->sicp_array =
+				xmalloc_nz(sizeof(sicp_info_t) *
+					   (*msg)->record_count);
+		} else {
+			job = (*msg)->sicp_array =
+				xmalloc(sizeof(sicp_info_t) *
+					(*msg)->record_count);
+		}
+		/* load individual inter-cluster job info */
+		for (i = 0; i < (*msg)->record_count; i++, job++) {
+			safe_unpack32(&job->job_id, buffer);
+			safe_unpack32(&job->job_state, buffer);
+		}
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		safe_unpack32(&((*msg)->record_count), buffer);
+
+		if (protocol_version == SLURM_PROTOCOL_VERSION) {
+			job = (*msg)->sicp_array =
+				xmalloc_nz(sizeof(sicp_info_t) *
+					   (*msg)->record_count);
+		} else {
+			job = (*msg)->sicp_array =
+				xmalloc(sizeof(sicp_info_t) *
+					(*msg)->record_count);
+		}
+		/* load individual inter-cluster job info */
+		for (i = 0; i < (*msg)->record_count; i++, job++) {
+			safe_unpack32(&job->job_id, buffer);
+			safe_unpack16(&uint16_tmp, buffer);
+			job->job_state = uint16_tmp;
+		}
+	} else {
+		error("_unpack_sicp_info_msg: protocol_version "
+		      "%hu not supported", protocol_version);
+		goto unpack_error;
+	}
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_sicp_msg(*msg);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
 /* Translate bitmap representation from hex to decimal format, replacing
  * array_task_str and store the bitmap in job->array_bitmap. */
 static void _xlate_task_str(job_info_t *job_ptr)
@@ -4813,7 +6065,7 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 
 	job->ntasks_per_node = (uint16_t)NO_VAL;
 
-	if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack32(&job->array_job_id, buffer);
 		safe_unpack32(&job->array_task_id, buffer);
 		/* The array_task_str value is stored in slurmctld and passed
@@ -4830,10 +6082,12 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 		safe_unpack32(&job->group_id, buffer);
 		safe_unpack32(&job->profile,  buffer);
 
-		safe_unpack16(&job->job_state,    buffer);
+		safe_unpack32(&job->job_state,    buffer);
 		safe_unpack16(&job->batch_flag,   buffer);
 		safe_unpack16(&job->state_reason, buffer);
+		safe_unpack8 (&job->power_flags,  buffer);
 		safe_unpack8 (&job->reboot,       buffer);
+		safe_unpack8 (&job->sicp_mode,    buffer);
 		safe_unpack16(&job->restart_cnt,  buffer);
 		safe_unpack16(&job->show_flags,   buffer);
 
@@ -4852,6 +6106,7 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 		safe_unpack_time(&job->resize_time, buffer);
 		safe_unpack_time(&job->preempt_time, buffer);
 		safe_unpack32(&job->priority, buffer);
+		safe_unpackdouble(&job->billable_tres, buffer);
 		safe_unpackstr_xmalloc(&job->nodes, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->sched_nodes, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->partition, &uint32_tmp, buffer);
@@ -4861,6 +6116,7 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 		safe_unpackstr_xmalloc(&job->gres, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->batch_host, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->batch_script, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->burst_buffer, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->qos, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->licenses, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->state_desc, &uint32_tmp, buffer);
@@ -4902,8 +6158,12 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 		safe_unpack16(&job->requeue,     buffer);
 		safe_unpack16(&job->ntasks_per_node, buffer);
 
-		/*** unpack pending job details ***/
 		safe_unpack16(&job->shared,        buffer);
+		safe_unpack32(&job->cpu_freq_min, buffer);
+		safe_unpack32(&job->cpu_freq_max, buffer);
+		safe_unpack32(&job->cpu_freq_gov, buffer);
+
+		/*** unpack pending job details ***/
 		safe_unpack16(&job->contiguous,    buffer);
 		safe_unpack16(&job->core_spec,     buffer);
 		safe_unpack16(&job->cpus_per_task, buffer);
@@ -4946,24 +6206,39 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 			job->ntasks_per_core   = mc_ptr->ntasks_per_core;
 			xfree(mc_ptr);
 		}
-	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		safe_unpack32(&job->bitflags, buffer);
+		safe_unpackstr_xmalloc(&job->tres_alloc_str,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->tres_req_str,
+				       &uint32_tmp, buffer);
+	} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
 		safe_unpack32(&job->array_job_id, buffer);
 		safe_unpack32(&job->array_task_id, buffer);
+		/* The array_task_str value is stored in slurmctld and passed
+		 * here in hex format for best scalability. Its format needs
+		 * to be converted to human readable form by the client. */
+		safe_unpackstr_xmalloc(&job->array_task_str, &uint32_tmp,
+				       buffer);
+		safe_unpack32(&job->array_max_tasks, buffer);
+		_xlate_task_str(job);
+
 		safe_unpack32(&job->assoc_id, buffer);
-		safe_unpack32(&job->job_id, buffer);
-		safe_unpack32(&job->user_id, buffer);
+		safe_unpack32(&job->job_id,   buffer);
+		safe_unpack32(&job->user_id,  buffer);
 		safe_unpack32(&job->group_id, buffer);
-		safe_unpack32(&job->profile, buffer);
+		safe_unpack32(&job->profile,  buffer);
 
-		safe_unpack16(&job->job_state,    buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		job->job_state = uint16_tmp;
 		safe_unpack16(&job->batch_flag,   buffer);
 		safe_unpack16(&job->state_reason, buffer);
-		safe_unpack16(&job->restart_cnt, buffer);
-		safe_unpack16(&job->show_flags, buffer);
+		safe_unpack8 (&job->reboot,       buffer);
+		safe_unpack16(&job->restart_cnt,  buffer);
+		safe_unpack16(&job->show_flags,   buffer);
 
 		safe_unpack32(&job->alloc_sid,    buffer);
 		safe_unpack32(&job->time_limit,   buffer);
-		safe_unpack32(&job->time_min,   buffer);
+		safe_unpack32(&job->time_min,     buffer);
 
 		safe_unpack16(&job->nice, buffer);
 
@@ -4977,6 +6252,7 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 		safe_unpack_time(&job->preempt_time, buffer);
 		safe_unpack32(&job->priority, buffer);
 		safe_unpackstr_xmalloc(&job->nodes, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->sched_nodes, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->partition, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->account, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job->network, &uint32_tmp, buffer);
@@ -5019,6 +6295,9 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 		safe_unpackstr_xmalloc(&job->command,    &uint32_tmp, buffer);
 
 		safe_unpack32(&job->num_cpus, buffer);
+		job->tres_alloc_str = xstrdup_printf(
+			"%d=%u", TRES_CPU, job->num_cpus);
+
 		safe_unpack32(&job->max_cpus, buffer);
 		safe_unpack32(&job->num_nodes,   buffer);
 		safe_unpack32(&job->max_nodes,   buffer);
@@ -5069,20 +6348,17 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 			job->ntasks_per_core   = mc_ptr->ntasks_per_core;
 			xfree(mc_ptr);
 		}
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		safe_unpack32(&job->array_job_id, buffer);
-		safe_unpack16(&uint16_tmp, buffer);
-		if (uint16_tmp == (uint16_t) NO_VAL)
-			job->array_task_id = NO_VAL;
-		else
-			job->array_task_id = (uint32_t) uint16_tmp;
+		safe_unpack32(&job->array_task_id, buffer);
 		safe_unpack32(&job->assoc_id, buffer);
 		safe_unpack32(&job->job_id, buffer);
 		safe_unpack32(&job->user_id, buffer);
 		safe_unpack32(&job->group_id, buffer);
 		safe_unpack32(&job->profile, buffer);
 
-		safe_unpack16(&job->job_state,    buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		job->job_state = uint16_tmp;
 		safe_unpack16(&job->batch_flag,   buffer);
 		safe_unpack16(&job->state_reason, buffer);
 		safe_unpack16(&job->restart_cnt, buffer);
@@ -5146,6 +6422,7 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 		safe_unpackstr_xmalloc(&job->command,    &uint32_tmp, buffer);
 
 		safe_unpack32(&job->num_cpus, buffer);
+
 		safe_unpack32(&job->max_cpus, buffer);
 		safe_unpack32(&job->num_nodes,   buffer);
 		safe_unpack32(&job->max_nodes,   buffer);
@@ -5155,6 +6432,7 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 		/*** unpack pending job details ***/
 		safe_unpack16(&job->shared,        buffer);
 		safe_unpack16(&job->contiguous,    buffer);
+		safe_unpack16(&job->core_spec,     buffer);
 		safe_unpack16(&job->cpus_per_task, buffer);
 		safe_unpack16(&job->pn_min_cpus, buffer);
 
@@ -5178,6 +6456,10 @@ _unpack_job_info_members(job_info_t * job, Buf buffer,
 			xfree(node_inx_str);
 		}
 
+		safe_unpackstr_xmalloc(&job->std_err, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->std_in,  &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job->std_out, &uint32_tmp, buffer);
+
 		if (unpack_multi_core_data(&mc_ptr, buffer, protocol_version))
 			goto unpack_error;
 		if (mc_ptr) {
@@ -5210,7 +6492,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 	uint32_t count = NO_VAL;
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 
-	if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		pack_time(build_ptr->last_update, buffer);
 
 		pack16(build_ptr->accounting_storage_enforce, buffer);
@@ -5218,6 +6500,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		packstr(build_ptr->accounting_storage_host, buffer);
 		packstr(build_ptr->accounting_storage_loc, buffer);
 		pack32(build_ptr->accounting_storage_port, buffer);
+		packstr(build_ptr->accounting_storage_tres, buffer);
 		packstr(build_ptr->accounting_storage_type, buffer);
 		packstr(build_ptr->accounting_storage_user, buffer);
 		pack16(build_ptr->acctng_store_job_comment, buffer);
@@ -5251,7 +6534,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		packstr(build_ptr->backup_controller, buffer);
 		pack16(build_ptr->batch_start_timeout, buffer);
 		pack_time(build_ptr->boot_time, buffer);
-
+		packstr(build_ptr->bb_type, buffer);
 		packstr(build_ptr->checkpoint_type, buffer);
 		packstr(build_ptr->chos_loc, buffer);
 		packstr(build_ptr->cluster_name, buffer);
@@ -5260,13 +6543,14 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		packstr(build_ptr->control_machine, buffer);
 		packstr(build_ptr->core_spec_plugin, buffer);
 		pack32(build_ptr->cpu_freq_def, buffer);
+		pack32(build_ptr->cpu_freq_govs, buffer);
 		packstr(build_ptr->crypto_type, buffer);
 
 		pack32(build_ptr->def_mem_per_cpu, buffer);
 		pack64(build_ptr->debug_flags, buffer);
 		pack16(build_ptr->disable_root_jobs, buffer);
-		pack16(build_ptr->dynalloc_port, buffer);
 
+		pack16(build_ptr->eio_timeout, buffer);
 		pack16(build_ptr->enforce_part_limits, buffer);
 		packstr(build_ptr->epilog, buffer);
 		pack32(build_ptr->epilog_msg_time, buffer);
@@ -5330,6 +6614,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack16(build_ptr->kill_on_bad_exit, buffer);
 		pack16(build_ptr->kill_wait, buffer);
 
+		packstr(build_ptr->launch_params, buffer);
 		packstr(build_ptr->launch_type, buffer);
 		packstr(build_ptr->layouts, buffer);
 		packstr(build_ptr->licenses, buffer);
@@ -5343,9 +6628,10 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack32(build_ptr->max_step_cnt, buffer);
 		pack16(build_ptr->max_tasks_per_node, buffer);
 		pack16(build_ptr->mem_limit_enforce, buffer);
-		pack16(build_ptr->min_job_age, buffer);
+		pack32(build_ptr->min_job_age, buffer);
 		packstr(build_ptr->mpi_default, buffer);
 		packstr(build_ptr->mpi_params, buffer);
+		packstr(build_ptr->msg_aggr_params, buffer);
 		pack16(build_ptr->msg_timeout, buffer);
 
 		pack32(build_ptr->next_job_id, buffer);
@@ -5355,6 +6641,8 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 
 		packstr(build_ptr->plugindir, buffer);
 		packstr(build_ptr->plugstack, buffer);
+		packstr(build_ptr->power_parameters, buffer);
+		packstr(build_ptr->power_plugin, buffer);
 		pack16(build_ptr->preempt_mode, buffer);
 		packstr(build_ptr->preempt_type, buffer);
 
@@ -5371,10 +6659,12 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack32(build_ptr->priority_weight_js, buffer);
 		pack32(build_ptr->priority_weight_part, buffer);
 		pack32(build_ptr->priority_weight_qos, buffer);
+		packstr(build_ptr->priority_weight_tres, buffer);
 
 		pack16(build_ptr->private_data, buffer);
 		packstr(build_ptr->proctrack_type, buffer);
 		packstr(build_ptr->prolog, buffer);
+		pack16(build_ptr->prolog_epilog_timeout, buffer);
 		packstr(build_ptr->prolog_slurmctld, buffer);
 		pack16(build_ptr->prolog_flags, buffer);
 		pack16(build_ptr->propagate_prio_process, buffer);
@@ -5459,8 +6749,9 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		packstr(build_ptr->task_epilog, buffer);
 		packstr(build_ptr->task_prolog, buffer);
 		packstr(build_ptr->task_plugin, buffer);
-		pack16(build_ptr->task_plugin_param, buffer);
+		pack32(build_ptr->task_plugin_param, buffer);
 		packstr(build_ptr->tmp_fs, buffer);
+		packstr(build_ptr->topology_param, buffer);
 		packstr(build_ptr->topology_plugin, buffer);
 		pack16(build_ptr->track_wckey, buffer);
 		pack16(build_ptr->tree_width, buffer);
@@ -5476,7 +6767,8 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack16(build_ptr->z_16, buffer);
 		pack32(build_ptr->z_32, buffer);
 		packstr(build_ptr->z_char, buffer);
-	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+		uint16_t dynalloc_port = 0;
 		pack_time(build_ptr->last_update, buffer);
 
 		pack16(build_ptr->accounting_storage_enforce, buffer);
@@ -5519,17 +6811,19 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack_time(build_ptr->boot_time, buffer);
 
 		packstr(build_ptr->checkpoint_type, buffer);
+		packstr(build_ptr->chos_loc, buffer);
 		packstr(build_ptr->cluster_name, buffer);
 		pack16(build_ptr->complete_wait, buffer);
 		packstr(build_ptr->control_addr, buffer);
 		packstr(build_ptr->control_machine, buffer);
 		packstr(build_ptr->core_spec_plugin, buffer);
+		pack32(build_ptr->cpu_freq_def, buffer);
 		packstr(build_ptr->crypto_type, buffer);
 
 		pack32(build_ptr->def_mem_per_cpu, buffer);
-		pack32((uint32_t)build_ptr->debug_flags, buffer);
+		pack64(build_ptr->debug_flags, buffer);
 		pack16(build_ptr->disable_root_jobs, buffer);
-		pack16(build_ptr->dynalloc_port, buffer);
+		pack16(dynalloc_port, buffer);
 
 		pack16(build_ptr->enforce_part_limits, buffer);
 		packstr(build_ptr->epilog, buffer);
@@ -5595,6 +6889,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack16(build_ptr->kill_wait, buffer);
 
 		packstr(build_ptr->launch_type, buffer);
+		packstr(build_ptr->layouts, buffer);
 		packstr(build_ptr->licenses, buffer);
 		packstr(build_ptr->licenses_used, buffer);
 
@@ -5605,7 +6900,8 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack32(build_ptr->max_mem_per_cpu, buffer);
 		pack32(build_ptr->max_step_cnt, buffer);
 		pack16(build_ptr->max_tasks_per_node, buffer);
-		pack16(build_ptr->min_job_age, buffer);
+		pack16(build_ptr->mem_limit_enforce, buffer);
+		pack16((uint16_t)build_ptr->min_job_age, buffer);
 		packstr(build_ptr->mpi_default, buffer);
 		packstr(build_ptr->mpi_params, buffer);
 		pack16(build_ptr->msg_timeout, buffer);
@@ -5625,6 +6921,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack16(build_ptr->priority_favor_small, buffer);
 		pack16(build_ptr->priority_flags, buffer);
 		pack32(build_ptr->priority_max_age, buffer);
+		packstr(build_ptr->priority_params, buffer);
 		pack16(build_ptr->priority_reset_period, buffer);
 		packstr(build_ptr->priority_type, buffer);
 		pack32(build_ptr->priority_weight_age, buffer);
@@ -5644,6 +6941,8 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 
 		packstr(build_ptr->reboot_program, buffer);
 		pack16(build_ptr->reconfig_flags, buffer);
+		packstr(build_ptr->requeue_exit, buffer);
+		packstr(build_ptr->requeue_exit_hold, buffer);
 		packstr(build_ptr->resume_program, buffer);
 		pack16(build_ptr->resume_rate, buffer);
 		pack16(build_ptr->resume_timeout, buffer);
@@ -5652,6 +6951,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		packstr(build_ptr->resv_prolog, buffer);
 		pack16(build_ptr->ret2service, buffer);
 
+		packstr(build_ptr->route_plugin, buffer);
 		packstr(build_ptr->salloc_default_command, buffer);
 		packstr(build_ptr->sched_params, buffer);
 		pack16(build_ptr->schedport, buffer);
@@ -5702,6 +7002,8 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		packstr(build_ptr->slurmd_spooldir, buffer);
 		pack16(build_ptr->slurmd_timeout, buffer);
 		packstr(build_ptr->srun_epilog, buffer);
+		pack16(build_ptr->srun_port_range[0], buffer);
+		pack16(build_ptr->srun_port_range[1], buffer);
 		packstr(build_ptr->srun_prolog, buffer);
 		packstr(build_ptr->state_save_location, buffer);
 		packstr(build_ptr->suspend_exc_nodes, buffer);
@@ -5722,6 +7024,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack16(build_ptr->tree_width, buffer);
 
 		pack16(build_ptr->use_pam, buffer);
+		pack16(build_ptr->use_spec_resources, buffer);
 		packstr(build_ptr->unkillable_program, buffer);
 		pack16(build_ptr->unkillable_timeout, buffer);
 		packstr(build_ptr->version, buffer);
@@ -5731,7 +7034,8 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack16(build_ptr->z_16, buffer);
 		pack32(build_ptr->z_32, buffer);
 		packstr(build_ptr->z_char, buffer);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		uint16_t dynalloc_port = 0;
 		pack_time(build_ptr->last_update, buffer);
 
 		pack16(build_ptr->accounting_storage_enforce, buffer);
@@ -5742,12 +7046,30 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		packstr(build_ptr->accounting_storage_type, buffer);
 		packstr(build_ptr->accounting_storage_user, buffer);
 		pack16(build_ptr->acctng_store_job_comment, buffer);
+
+		if (build_ptr->acct_gather_conf)
+			count = list_count(build_ptr->acct_gather_conf);
+
+		pack32(count, buffer);
+		if (count && count != NO_VAL) {
+			ListIterator itr = list_iterator_create(
+				(List)build_ptr->acct_gather_conf);
+			config_key_pair_t *key_pair = NULL;
+			while ((key_pair = list_next(itr))) {
+				pack_config_key_pair(key_pair,
+						     protocol_version, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
 		packstr(build_ptr->acct_gather_energy_type, buffer);
 		packstr(build_ptr->acct_gather_filesystem_type, buffer);
 		packstr(build_ptr->acct_gather_infiniband_type, buffer);
 		pack16(build_ptr->acct_gather_node_freq, buffer);
 		packstr(build_ptr->acct_gather_profile_type, buffer);
 
+		packstr(build_ptr->authinfo, buffer);
 		packstr(build_ptr->authtype, buffer);
 
 		packstr(build_ptr->backup_addr, buffer);
@@ -5760,22 +7082,41 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack16(build_ptr->complete_wait, buffer);
 		packstr(build_ptr->control_addr, buffer);
 		packstr(build_ptr->control_machine, buffer);
+		packstr(build_ptr->core_spec_plugin, buffer);
 		packstr(build_ptr->crypto_type, buffer);
 
 		pack32(build_ptr->def_mem_per_cpu, buffer);
 		pack32((uint32_t)build_ptr->debug_flags, buffer);
 		pack16(build_ptr->disable_root_jobs, buffer);
-		pack16(build_ptr->dynalloc_port, buffer);
+		pack16(dynalloc_port, buffer);
 
 		pack16(build_ptr->enforce_part_limits, buffer);
 		packstr(build_ptr->epilog, buffer);
 		pack32(build_ptr->epilog_msg_time, buffer);
 		packstr(build_ptr->epilog_slurmctld, buffer);
+
+		if (build_ptr->ext_sensors_conf)
+			count = list_count(build_ptr->ext_sensors_conf);
+
+		pack32(count, buffer);
+		if (count && count != NO_VAL) {
+			ListIterator itr = list_iterator_create(
+				(List)build_ptr->ext_sensors_conf);
+			config_key_pair_t *key_pair = NULL;
+			while ((key_pair = list_next(itr))) {
+				pack_config_key_pair(key_pair,
+						     protocol_version, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
 		packstr(build_ptr->ext_sensors_type, buffer);
 		pack16(build_ptr->ext_sensors_freq, buffer);
 
 		pack16(build_ptr->fast_schedule, buffer);
 		pack32(build_ptr->first_job_id, buffer);
+		pack16(build_ptr->fs_dampening_factor, buffer);
 
 		pack16(build_ptr->get_env_timeout, buffer);
 		packstr(build_ptr->gres_plugins, buffer);
@@ -5791,6 +7132,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 
 		packstr(build_ptr->job_acct_gather_freq, buffer);
 		packstr(build_ptr->job_acct_gather_type, buffer);
+		packstr(build_ptr->job_acct_gather_params, buffer);
 
 		packstr(build_ptr->job_ckpt_dir, buffer);
 
@@ -5799,6 +7141,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack32((uint32_t)build_ptr->job_comp_port, buffer);
 		packstr(build_ptr->job_comp_type, buffer);
 		packstr(build_ptr->job_comp_user, buffer);
+		packstr(build_ptr->job_container_plugin, buffer);
 
 		packstr(build_ptr->job_credential_private_key, buffer);
 		packstr(build_ptr->job_credential_public_certificate, buffer);
@@ -5814,14 +7157,14 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		packstr(build_ptr->licenses, buffer);
 		packstr(build_ptr->licenses_used, buffer);
 
-		pack16((uint16_t) build_ptr->max_array_sz, buffer);
+		pack32(build_ptr->max_array_sz, buffer);
 		packstr(build_ptr->mail_prog, buffer);
 		pack32(build_ptr->max_job_cnt, buffer);
 		pack32(build_ptr->max_job_id, buffer);
 		pack32(build_ptr->max_mem_per_cpu, buffer);
 		pack32(build_ptr->max_step_cnt, buffer);
 		pack16(build_ptr->max_tasks_per_node, buffer);
-		pack16(build_ptr->min_job_age, buffer);
+		pack16((uint16_t)build_ptr->min_job_age, buffer);
 		packstr(build_ptr->mpi_default, buffer);
 		packstr(build_ptr->mpi_params, buffer);
 		pack16(build_ptr->msg_timeout, buffer);
@@ -5853,6 +7196,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		packstr(build_ptr->proctrack_type, buffer);
 		packstr(build_ptr->prolog, buffer);
 		packstr(build_ptr->prolog_slurmctld, buffer);
+		pack16(build_ptr->prolog_flags, buffer);
 		pack16(build_ptr->propagate_prio_process, buffer);
 		packstr(build_ptr->propagate_rlimits, buffer);
 		packstr(build_ptr->propagate_rlimits_except, buffer);
@@ -5910,6 +7254,7 @@ _pack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t * build_ptr, Buf buffer,
 		pack16(build_ptr->slurmd_debug, buffer);
 		packstr(build_ptr->slurmd_logfile, buffer);
 		packstr(build_ptr->slurmd_pidfile, buffer);
+		packstr(build_ptr->slurmd_plugstack, buffer);
 		if (!(cluster_flags & CLUSTER_FLAG_MULTSD))
 			pack32(build_ptr->slurmd_port, buffer);
 
@@ -5957,7 +7302,6 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 {
 	uint32_t count = NO_VAL;
 	uint32_t uint32_tmp = 0;
-	uint16_t uint16_tmp = 0;
 	slurm_ctl_conf_info_msg_t *build_ptr;
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 
@@ -5970,7 +7314,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 
 	/* load the data values */
 
-	if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		/* unpack timestamp of snapshot */
 		safe_unpack_time(&build_ptr->last_update, buffer);
 
@@ -5983,6 +7327,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpackstr_xmalloc(&build_ptr->accounting_storage_loc,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&build_ptr->accounting_storage_port, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->accounting_storage_tres,
+				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->accounting_storage_type,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->accounting_storage_user,
@@ -5994,7 +7340,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 			List tmp_list = list_create(destroy_config_key_pair);
 			config_key_pair_t *object = NULL;
 			int i;
-			for (i=0; i<count; i++) {
+			for (i = 0; i < count; i++) {
 				if (unpack_config_key_pair(
 					    (void *)&object, protocol_version,
 					    buffer)
@@ -6026,6 +7372,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 				       &uint32_tmp, buffer);
 		safe_unpack16(&build_ptr->batch_start_timeout, buffer);
 		safe_unpack_time(&build_ptr->boot_time, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->bb_type,
+				       &uint32_tmp, buffer);
 
 		safe_unpackstr_xmalloc(&build_ptr->checkpoint_type,
 				       &uint32_tmp, buffer);
@@ -6041,14 +7389,15 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpackstr_xmalloc(&build_ptr->core_spec_plugin,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&build_ptr->cpu_freq_def, buffer);
+		safe_unpack32(&build_ptr->cpu_freq_govs, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->crypto_type, &uint32_tmp,
 				       buffer);
 
 		safe_unpack32(&build_ptr->def_mem_per_cpu, buffer);
 		safe_unpack64(&build_ptr->debug_flags, buffer);
 		safe_unpack16(&build_ptr->disable_root_jobs, buffer);
-		safe_unpack16(&build_ptr->dynalloc_port, buffer);
 
+		safe_unpack16(&build_ptr->eio_timeout, buffer);
 		safe_unpack16(&build_ptr->enforce_part_limits, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->epilog, &uint32_tmp,
 				       buffer);
@@ -6061,7 +7410,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 			List tmp_list = list_create(destroy_config_key_pair);
 			config_key_pair_t *object = NULL;
 			int i;
-			for (i=0; i<count; i++) {
+			for (i = 0; i < count; i++) {
 				if (unpack_config_key_pair(
 					    (void *)&object, protocol_version,
 					    buffer)
@@ -6130,6 +7479,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpack16(&build_ptr->kill_on_bad_exit, buffer);
 		safe_unpack16(&build_ptr->kill_wait, buffer);
 
+		safe_unpackstr_xmalloc(&build_ptr->launch_params,
+				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->launch_type,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->layouts,
@@ -6148,11 +7499,13 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpack32(&build_ptr->max_step_cnt, buffer);
 		safe_unpack16(&build_ptr->max_tasks_per_node, buffer);
 		safe_unpack16(&build_ptr->mem_limit_enforce, buffer);
-		safe_unpack16(&build_ptr->min_job_age, buffer);
+		safe_unpack32(&build_ptr->min_job_age, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->mpi_default,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->mpi_params,
 				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->msg_aggr_params,
+				       &uint32_tmp, buffer);
 		safe_unpack16(&build_ptr->msg_timeout, buffer);
 
 		safe_unpack32(&build_ptr->next_job_id, buffer);
@@ -6165,6 +7518,11 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->plugstack,
 				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->power_parameters,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->power_plugin,
+				       &uint32_tmp, buffer);
+
 		safe_unpack16(&build_ptr->preempt_mode, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->preempt_type,
 				       &uint32_tmp, buffer);
@@ -6184,12 +7542,15 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpack32(&build_ptr->priority_weight_js, buffer);
 		safe_unpack32(&build_ptr->priority_weight_part, buffer);
 		safe_unpack32(&build_ptr->priority_weight_qos, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->priority_weight_tres,
+				       &uint32_tmp, buffer);
 
 		safe_unpack16(&build_ptr->private_data, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->proctrack_type, &uint32_tmp,
 				       buffer);
 		safe_unpackstr_xmalloc(&build_ptr->prolog, &uint32_tmp,
 				       buffer);
+		safe_unpack16(&build_ptr->prolog_epilog_timeout, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->prolog_slurmctld,
 				       &uint32_tmp, buffer);
 		safe_unpack16(&build_ptr->prolog_flags, buffer);
@@ -6240,7 +7601,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 			List tmp_list = list_create(destroy_config_key_pair);
 			config_key_pair_t *object = NULL;
 			int i;
-			for (i=0; i<count; i++) {
+			for (i = 0; i < count; i++) {
 				if (unpack_config_key_pair(
 					    (void *)&object, protocol_version,
 					    buffer)
@@ -6316,9 +7677,11 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->task_plugin,
 				       &uint32_tmp, buffer);
-		safe_unpack16(&build_ptr->task_plugin_param, buffer);
+		safe_unpack32(&build_ptr->task_plugin_param, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->tmp_fs, &uint32_tmp,
 				       buffer);
+		safe_unpackstr_xmalloc(&build_ptr->topology_param,
+				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->topology_plugin,
 				       &uint32_tmp, buffer);
 		safe_unpack16(&build_ptr->track_wckey, buffer);
@@ -6339,7 +7702,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpack32(&build_ptr->z_32, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->z_char, &uint32_tmp,
 				       buffer);
-	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+		uint16_t dynalloc_port = 0;
 		/* unpack timestamp of snapshot */
 		safe_unpack_time(&build_ptr->last_update, buffer);
 
@@ -6398,6 +7762,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 
 		safe_unpackstr_xmalloc(&build_ptr->checkpoint_type,
 				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->chos_loc,
+				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->cluster_name,
 				       &uint32_tmp, buffer);
 		safe_unpack16(&build_ptr->complete_wait, buffer);
@@ -6407,14 +7773,14 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 				       &uint32_tmp,buffer);
 		safe_unpackstr_xmalloc(&build_ptr->core_spec_plugin,
 				       &uint32_tmp, buffer);
+		safe_unpack32(&build_ptr->cpu_freq_def, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->crypto_type, &uint32_tmp,
 				       buffer);
 
 		safe_unpack32(&build_ptr->def_mem_per_cpu, buffer);
-		safe_unpack32(&uint32_tmp, buffer);
-		build_ptr->debug_flags = (uint64_t)uint32_tmp;
+		safe_unpack64(&build_ptr->debug_flags, buffer);
 		safe_unpack16(&build_ptr->disable_root_jobs, buffer);
-		safe_unpack16(&build_ptr->dynalloc_port, buffer);
+		safe_unpack16(&dynalloc_port, buffer);
 
 		safe_unpack16(&build_ptr->enforce_part_limits, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->epilog, &uint32_tmp,
@@ -6499,6 +7865,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 
 		safe_unpackstr_xmalloc(&build_ptr->launch_type,
 				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->layouts,
+				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->licenses,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->licenses_used,
@@ -6512,7 +7880,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpack32(&build_ptr->max_mem_per_cpu, buffer);
 		safe_unpack32(&build_ptr->max_step_cnt, buffer);
 		safe_unpack16(&build_ptr->max_tasks_per_node, buffer);
-		safe_unpack16(&build_ptr->min_job_age, buffer);
+		safe_unpack16(&build_ptr->mem_limit_enforce, buffer);
+		safe_unpack16((uint16_t *)&build_ptr->min_job_age, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->mpi_default,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->mpi_params,
@@ -6529,15 +7898,17 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->plugstack,
 				       &uint32_tmp, buffer);
+
 		safe_unpack16(&build_ptr->preempt_mode, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->preempt_type,
 				       &uint32_tmp, buffer);
-
 		safe_unpack32(&build_ptr->priority_decay_hl, buffer);
 		safe_unpack32(&build_ptr->priority_calc_period, buffer);
 		safe_unpack16(&build_ptr->priority_favor_small, buffer);
 		safe_unpack16(&build_ptr->priority_flags, buffer);
 		safe_unpack32(&build_ptr->priority_max_age, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->priority_params, &uint32_tmp,
+				       buffer);
 		safe_unpack16(&build_ptr->priority_reset_period, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->priority_type, &uint32_tmp,
 				       buffer);
@@ -6564,6 +7935,12 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpackstr_xmalloc(&build_ptr->reboot_program, &uint32_tmp,
 				       buffer);
 		safe_unpack16(&build_ptr->reconfig_flags, buffer);
+
+		safe_unpackstr_xmalloc(&build_ptr->requeue_exit,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->requeue_exit_hold,
+				       &uint32_tmp, buffer);
+
 		safe_unpackstr_xmalloc(&build_ptr->resume_program,
 				       &uint32_tmp, buffer);
 		safe_unpack16(&build_ptr->resume_rate, buffer);
@@ -6575,6 +7952,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 				       buffer);
 		safe_unpack16(&build_ptr->ret2service, buffer);
 
+		safe_unpackstr_xmalloc(&build_ptr->route_plugin,
+				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->salloc_default_command,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->sched_params,
@@ -6643,6 +8022,11 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 
 		safe_unpackstr_xmalloc(&build_ptr->srun_epilog,
 				       &uint32_tmp, buffer);
+
+		build_ptr->srun_port_range = xmalloc(2 * sizeof(uint16_t));
+		safe_unpack16(&build_ptr->srun_port_range[0], buffer);
+		safe_unpack16(&build_ptr->srun_port_range[1], buffer);
+
 		safe_unpackstr_xmalloc(&build_ptr->srun_prolog,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->state_save_location,
@@ -6665,7 +8049,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->task_plugin,
 				       &uint32_tmp, buffer);
-		safe_unpack16(&build_ptr->task_plugin_param, buffer);
+		safe_unpack16((uint16_t *)&build_ptr->task_plugin_param, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->tmp_fs, &uint32_tmp,
 				       buffer);
 		safe_unpackstr_xmalloc(&build_ptr->topology_plugin,
@@ -6674,6 +8058,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpack16(&build_ptr->tree_width, buffer);
 
 		safe_unpack16(&build_ptr->use_pam, buffer);
+		safe_unpack16(&build_ptr->use_spec_resources, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->unkillable_program,
 				       &uint32_tmp, buffer);
 		safe_unpack16(&build_ptr->unkillable_timeout, buffer);
@@ -6687,7 +8072,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpack32(&build_ptr->z_32, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->z_char, &uint32_tmp,
 				       buffer);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		uint16_t dynalloc_port = 0;
 		/* unpack timestamp of snapshot */
 		safe_unpack_time(&build_ptr->last_update, buffer);
 
@@ -6705,6 +8091,23 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpackstr_xmalloc(&build_ptr->accounting_storage_user,
 				       &uint32_tmp, buffer);
 		safe_unpack16(&build_ptr->acctng_store_job_comment, buffer);
+
+		safe_unpack32(&count, buffer);
+		if (count != NO_VAL) {
+			List tmp_list = list_create(destroy_config_key_pair);
+			config_key_pair_t *object = NULL;
+			int i;
+			for (i=0; i<count; i++) {
+				if (unpack_config_key_pair(
+					    (void *)&object, protocol_version,
+					    buffer)
+				    == SLURM_ERROR)
+					goto unpack_error;
+				list_append(tmp_list, object);
+			}
+			build_ptr->acct_gather_conf = (void *)tmp_list;
+		}
+
 		safe_unpackstr_xmalloc(&build_ptr->acct_gather_energy_type,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->acct_gather_filesystem_type,
@@ -6715,6 +8118,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpackstr_xmalloc(&build_ptr->acct_gather_profile_type,
 				       &uint32_tmp, buffer);
 
+		safe_unpackstr_xmalloc(&build_ptr->authinfo,
+				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->authtype,
 				       &uint32_tmp, buffer);
 
@@ -6734,6 +8139,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->control_machine,
 				       &uint32_tmp,buffer);
+		safe_unpackstr_xmalloc(&build_ptr->core_spec_plugin,
+				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->crypto_type, &uint32_tmp,
 				       buffer);
 
@@ -6741,7 +8148,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpack32(&uint32_tmp, buffer);
 		build_ptr->debug_flags = (uint64_t)uint32_tmp;
 		safe_unpack16(&build_ptr->disable_root_jobs, buffer);
-		safe_unpack16(&build_ptr->dynalloc_port, buffer);
+		safe_unpack16(&dynalloc_port, buffer);
 
 		safe_unpack16(&build_ptr->enforce_part_limits, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->epilog, &uint32_tmp,
@@ -6749,12 +8156,30 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpack32(&build_ptr->epilog_msg_time, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->epilog_slurmctld,
 				       &uint32_tmp, buffer);
+
+		safe_unpack32(&count, buffer);
+		if (count != NO_VAL) {
+			List tmp_list = list_create(destroy_config_key_pair);
+			config_key_pair_t *object = NULL;
+			int i;
+			for (i=0; i<count; i++) {
+				if (unpack_config_key_pair(
+					    (void *)&object, protocol_version,
+					    buffer)
+				    == SLURM_ERROR)
+					goto unpack_error;
+				list_append(tmp_list, object);
+			}
+			build_ptr->ext_sensors_conf = (void *)tmp_list;
+		}
+
 		safe_unpackstr_xmalloc(&build_ptr->ext_sensors_type,
 				       &uint32_tmp, buffer);
 		safe_unpack16(&build_ptr->ext_sensors_freq, buffer);
 
 		safe_unpack16(&build_ptr->fast_schedule, buffer);
 		safe_unpack32(&build_ptr->first_job_id, buffer);
+		safe_unpack16(&build_ptr->fs_dampening_factor, buffer);
 
 		safe_unpack16(&build_ptr->get_env_timeout, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->gres_plugins,
@@ -6774,6 +8199,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->job_acct_gather_type,
 				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->job_acct_gather_params,
+				       &uint32_tmp, buffer);
 
 		safe_unpackstr_xmalloc(&build_ptr->job_ckpt_dir,
 				       &uint32_tmp, buffer);
@@ -6787,6 +8214,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->job_comp_user,
 				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&build_ptr->job_container_plugin,
+				       &uint32_tmp, buffer);
 
 		safe_unpackstr_xmalloc(&build_ptr->job_credential_private_key,
 				       &uint32_tmp, buffer);
@@ -6809,11 +8238,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpackstr_xmalloc(&build_ptr->licenses_used,
 				       &uint32_tmp, buffer);
 
-		safe_unpack16(&uint16_tmp, buffer);
-		if (uint16_tmp == (uint16_t) NO_VAL)
-			build_ptr->max_array_sz = NO_VAL;
-		else
-			build_ptr->max_array_sz = (uint32_t) uint16_tmp;
+		safe_unpack32(&build_ptr->max_array_sz, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->mail_prog,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&build_ptr->max_job_cnt, buffer);
@@ -6821,7 +8246,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 		safe_unpack32(&build_ptr->max_mem_per_cpu, buffer);
 		safe_unpack32(&build_ptr->max_step_cnt, buffer);
 		safe_unpack16(&build_ptr->max_tasks_per_node, buffer);
-		safe_unpack16(&build_ptr->min_job_age, buffer);
+		safe_unpack16((uint16_t *)&build_ptr->min_job_age, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->mpi_default,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->mpi_params,
@@ -6863,6 +8288,7 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 				       buffer);
 		safe_unpackstr_xmalloc(&build_ptr->prolog_slurmctld,
 				       &uint32_tmp, buffer);
+		safe_unpack16(&build_ptr->prolog_flags, buffer);
 		safe_unpack16(&build_ptr->propagate_prio_process, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->propagate_rlimits,
 				       &uint32_tmp, buffer);
@@ -6940,6 +8366,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 				       buffer);
 		safe_unpackstr_xmalloc(&build_ptr->slurmd_pidfile, &uint32_tmp,
 				       buffer);
+		safe_unpackstr_xmalloc(&build_ptr->slurmd_plugstack,
+				       &uint32_tmp, buffer);
 		if (!(cluster_flags & CLUSTER_FLAG_MULTSD))
 			safe_unpack32(&build_ptr->slurmd_port, buffer);
 
@@ -6971,7 +8399,8 @@ _unpack_slurm_ctl_conf_msg(slurm_ctl_conf_info_msg_t **build_buffer_ptr,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&build_ptr->task_plugin,
 				       &uint32_tmp, buffer);
-		safe_unpack16(&build_ptr->task_plugin_param, buffer);
+		safe_unpack16((uint16_t *)&build_ptr->task_plugin_param,
+			      buffer);
 		safe_unpackstr_xmalloc(&build_ptr->tmp_fs, &uint32_tmp,
 				       buffer);
 		safe_unpackstr_xmalloc(&build_ptr->topology_plugin,
@@ -7017,10 +8446,11 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer,
 		   uint16_t protocol_version)
 {
 	/* load the data values */
-	if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		packstr(job_desc_ptr->clusters, buffer);
 		pack16(job_desc_ptr->contiguous, buffer);
 		pack16(job_desc_ptr->core_spec, buffer);
-		pack16(job_desc_ptr->task_dist, buffer);
+		pack32(job_desc_ptr->task_dist, buffer);
 		pack16(job_desc_ptr->kill_on_node_fail, buffer);
 		packstr(job_desc_ptr->features, buffer);
 		packstr(job_desc_ptr->gres, buffer);
@@ -7031,9 +8461,15 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer,
 		packstr(job_desc_ptr->alloc_node, buffer);
 		pack32(job_desc_ptr->alloc_sid, buffer);
 		packstr(job_desc_ptr->array_inx, buffer);
+		packstr(job_desc_ptr->burst_buffer, buffer);
 		pack16(job_desc_ptr->pn_min_cpus, buffer);
 		pack32(job_desc_ptr->pn_min_memory, buffer);
 		pack32(job_desc_ptr->pn_min_tmp_disk, buffer);
+		pack8(job_desc_ptr->power_flags, buffer);
+
+		pack32(job_desc_ptr->cpu_freq_min, buffer);
+		pack32(job_desc_ptr->cpu_freq_max, buffer);
+		pack32(job_desc_ptr->cpu_freq_gov, buffer);
 
 		packstr(job_desc_ptr->partition, buffer);
 		pack32(job_desc_ptr->priority, buffer);
@@ -7059,6 +8495,7 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer,
 		packstr(job_desc_ptr->script, buffer);
 		packstr_array(job_desc_ptr->argv, job_desc_ptr->argc, buffer);
 
+		pack8(job_desc_ptr->sicp_mode,   buffer);
 		packstr(job_desc_ptr->std_err, buffer);
 		packstr(job_desc_ptr->std_in, buffer);
 		packstr(job_desc_ptr->std_out, buffer);
@@ -7169,14 +8606,18 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer,
 			job_desc_ptr->select_jobinfo = NULL;
 		}
 		pack16(job_desc_ptr->wait_all_nodes, buffer);
-	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		pack32(job_desc_ptr->bitflags, buffer);
+	} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+		uint16_t old_task_dist;
 		pack16(job_desc_ptr->contiguous, buffer);
 		pack16(job_desc_ptr->core_spec, buffer);
-		pack16(job_desc_ptr->task_dist, buffer);
+		old_task_dist = task_dist_new2old(job_desc_ptr->task_dist);
+		pack16(old_task_dist, buffer);
 		pack16(job_desc_ptr->kill_on_node_fail, buffer);
 		packstr(job_desc_ptr->features, buffer);
 		packstr(job_desc_ptr->gres, buffer);
 		pack32(job_desc_ptr->job_id, buffer);
+		packstr(job_desc_ptr->job_id_str, buffer);
 		packstr(job_desc_ptr->name, buffer);
 
 		packstr(job_desc_ptr->alloc_node, buffer);
@@ -7217,6 +8658,7 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer,
 		packstr(job_desc_ptr->ckpt_dir, buffer);
 
 		pack16(job_desc_ptr->immediate, buffer);
+		pack16(job_desc_ptr->reboot, buffer);
 		pack16(job_desc_ptr->requeue, buffer);
 		pack16(job_desc_ptr->shared, buffer);
 		pack16(job_desc_ptr->cpus_per_task, buffer);
@@ -7319,9 +8761,12 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer,
 			job_desc_ptr->select_jobinfo = NULL;
 		}
 		pack16(job_desc_ptr->wait_all_nodes, buffer);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		uint16_t old_task_dist;
 		pack16(job_desc_ptr->contiguous, buffer);
-		pack16(job_desc_ptr->task_dist, buffer);
+		pack16(job_desc_ptr->core_spec, buffer);
+		old_task_dist = task_dist_new2old(job_desc_ptr->task_dist);
+		pack16(old_task_dist, buffer);
 		pack16(job_desc_ptr->kill_on_node_fail, buffer);
 		packstr(job_desc_ptr->features, buffer);
 		packstr(job_desc_ptr->gres, buffer);
@@ -7404,6 +8849,7 @@ _pack_job_desc_msg(job_desc_msg_t * job_desc_ptr, Buf buffer,
 		pack16(job_desc_ptr->mail_type, buffer);
 		packstr(job_desc_ptr->mail_user, buffer);
 		packstr(job_desc_ptr->reservation, buffer);
+		pack16(job_desc_ptr->warn_flags, buffer);
 		pack16(job_desc_ptr->warn_signal, buffer);
 		pack16(job_desc_ptr->warn_time, buffer);
 		packstr(job_desc_ptr->wckey, buffer);
@@ -7487,14 +8933,16 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer,
 	job_desc_msg_t *job_desc_ptr = NULL;
 
 	/* alloc memory for structure */
-	if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		job_desc_ptr = xmalloc(sizeof(job_desc_msg_t));
 		*job_desc_buffer_ptr = job_desc_ptr;
 
 		/* load the data values */
+		safe_unpackstr_xmalloc(&job_desc_ptr->clusters,
+				       &uint32_tmp, buffer);
 		safe_unpack16(&job_desc_ptr->contiguous, buffer);
 		safe_unpack16(&job_desc_ptr->core_spec, buffer);
-		safe_unpack16(&job_desc_ptr->task_dist, buffer);
+		safe_unpack32(&job_desc_ptr->task_dist, buffer);
 		safe_unpack16(&job_desc_ptr->kill_on_node_fail, buffer);
 		safe_unpackstr_xmalloc(&job_desc_ptr->features,
 				       &uint32_tmp, buffer);
@@ -7511,9 +8959,16 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer,
 		safe_unpack32(&job_desc_ptr->alloc_sid, buffer);
 		safe_unpackstr_xmalloc(&job_desc_ptr->array_inx,
 				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->burst_buffer,
+				       &uint32_tmp, buffer);
 		safe_unpack16(&job_desc_ptr->pn_min_cpus, buffer);
 		safe_unpack32(&job_desc_ptr->pn_min_memory, buffer);
 		safe_unpack32(&job_desc_ptr->pn_min_tmp_disk, buffer);
+		safe_unpack8(&job_desc_ptr->power_flags,   buffer);
+
+		safe_unpack32(&job_desc_ptr->cpu_freq_min, buffer);
+		safe_unpack32(&job_desc_ptr->cpu_freq_max, buffer);
+		safe_unpack32(&job_desc_ptr->cpu_freq_gov, buffer);
 
 		safe_unpackstr_xmalloc(&job_desc_ptr->partition,
 				       &uint32_tmp, buffer);
@@ -7550,6 +9005,7 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer,
 		safe_unpackstr_array(&job_desc_ptr->argv,
 				     &job_desc_ptr->argc, buffer);
 
+		safe_unpack8(&job_desc_ptr->sicp_mode,   buffer);
 		safe_unpackstr_xmalloc(&job_desc_ptr->std_err,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job_desc_ptr->std_in,
@@ -7631,19 +9087,25 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer,
 		job_desc_ptr->mloaderimage = NULL;
 		job_desc_ptr->ramdiskimage = NULL;
 		safe_unpack16(&job_desc_ptr->wait_all_nodes, buffer);
-	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		safe_unpack32(&job_desc_ptr->bitflags, buffer);
+	} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+		uint16_t old_task_dist = 0;
 		job_desc_ptr = xmalloc(sizeof(job_desc_msg_t));
 		*job_desc_buffer_ptr = job_desc_ptr;
 
 		/* load the data values */
 		safe_unpack16(&job_desc_ptr->contiguous, buffer);
 		safe_unpack16(&job_desc_ptr->core_spec, buffer);
-		safe_unpack16(&job_desc_ptr->task_dist, buffer);
+		safe_unpack16(&old_task_dist, buffer);
+		job_desc_ptr->task_dist = task_dist_old2new(old_task_dist);
 		safe_unpack16(&job_desc_ptr->kill_on_node_fail, buffer);
 		safe_unpackstr_xmalloc(&job_desc_ptr->features,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job_desc_ptr->gres, &uint32_tmp,buffer);
 		safe_unpack32(&job_desc_ptr->job_id, buffer);
+		safe_unpackstr_xmalloc(&job_desc_ptr->job_id_str,
+				       &uint32_tmp,
+				       buffer);
 		safe_unpackstr_xmalloc(&job_desc_ptr->name,
 				       &uint32_tmp, buffer);
 
@@ -7703,6 +9165,7 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer,
 				       &uint32_tmp, buffer);
 
 		safe_unpack16(&job_desc_ptr->immediate, buffer);
+		safe_unpack16(&job_desc_ptr->reboot, buffer);
 		safe_unpack16(&job_desc_ptr->requeue, buffer);
 		safe_unpack16(&job_desc_ptr->shared, buffer);
 		safe_unpack16(&job_desc_ptr->cpus_per_task, buffer);
@@ -7765,20 +9228,22 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer,
 		 */
 		job_desc_ptr->geometry[0] = (uint16_t)NO_VAL;
 		job_desc_ptr->conn_type[0] = (uint16_t)NO_VAL;
-		job_desc_ptr->reboot = (uint16_t)NO_VAL;
 		job_desc_ptr->rotate = (uint16_t)NO_VAL;
 		job_desc_ptr->blrtsimage = NULL;
 		job_desc_ptr->linuximage = NULL;
 		job_desc_ptr->mloaderimage = NULL;
 		job_desc_ptr->ramdiskimage = NULL;
 		safe_unpack16(&job_desc_ptr->wait_all_nodes, buffer);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		uint16_t old_task_dist = 0;
 		job_desc_ptr = xmalloc(sizeof(job_desc_msg_t));
 		*job_desc_buffer_ptr = job_desc_ptr;
 
 		/* load the data values */
 		safe_unpack16(&job_desc_ptr->contiguous, buffer);
-		safe_unpack16(&job_desc_ptr->task_dist, buffer);
+		safe_unpack16(&job_desc_ptr->core_spec, buffer);
+		safe_unpack16(&old_task_dist, buffer);
+		job_desc_ptr->task_dist = task_dist_old2new(old_task_dist);
 		safe_unpack16(&job_desc_ptr->kill_on_node_fail, buffer);
 		safe_unpackstr_xmalloc(&job_desc_ptr->features,
 				       &uint32_tmp, buffer);
@@ -7887,6 +9352,7 @@ _unpack_job_desc_msg(job_desc_msg_t ** job_desc_buffer_ptr, Buf buffer,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&job_desc_ptr->reservation,
 				       &uint32_tmp, buffer);
+		safe_unpack16(&job_desc_ptr->warn_flags, buffer);
 		safe_unpack16(&job_desc_ptr->warn_signal, buffer);
 		safe_unpack16(&job_desc_ptr->warn_time, buffer);
 		safe_unpackstr_xmalloc(&job_desc_ptr->wckey,
@@ -7930,7 +9396,7 @@ _pack_job_alloc_info_msg(job_alloc_info_msg_t * job_desc_ptr, Buf buffer,
 			 uint16_t protocol_version)
 {
 	/* load the data values */
-	pack32((uint32_t)job_desc_ptr->job_id, buffer);
+	pack32(job_desc_ptr->job_id, buffer);
 }
 
 static int
@@ -7955,6 +9421,49 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+
+static void
+_pack_step_alloc_info_msg(step_alloc_info_msg_t * job_desc_ptr, Buf buffer,
+			  uint16_t protocol_version)
+{
+	/* load the data values */
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		pack32(job_desc_ptr->job_id, buffer);
+		pack32(job_desc_ptr->step_id, buffer);
+	} else {
+		pack32(job_desc_ptr->job_id, buffer);
+	}
+}
+
+static int
+_unpack_step_alloc_info_msg(step_alloc_info_msg_t **
+			    job_desc_buffer_ptr, Buf buffer,
+			    uint16_t protocol_version)
+{
+	step_alloc_info_msg_t *job_desc_ptr;
+
+	/* alloc memory for structure */
+	assert(job_desc_buffer_ptr != NULL);
+	job_desc_ptr = xmalloc(sizeof(step_alloc_info_msg_t));
+	*job_desc_buffer_ptr = job_desc_ptr;
+
+	/* load the data values */
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpack32(&job_desc_ptr->job_id, buffer);
+		safe_unpack32(&job_desc_ptr->step_id, buffer);
+	} else {
+		safe_unpack32(&job_desc_ptr->job_id, buffer);
+		job_desc_ptr->step_id = NO_VAL;
+	}
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_step_alloc_info_msg(job_desc_ptr);
+	*job_desc_buffer_ptr = NULL;
+	return SLURM_ERROR;
+}
+
 static void
 _pack_last_update_msg(last_update_msg_t * msg, Buf buffer,
 		      uint16_t protocol_version)
@@ -8246,7 +9755,7 @@ _pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer,
 
 	xassert(msg != NULL);
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		pack32(msg->job_id, buffer);
 		pack32(msg->job_step_id, buffer);
 		pack32(msg->ntasks, buffer);
@@ -8259,9 +9768,10 @@ _pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer,
 
 		pack32(msg->nnodes, buffer);
 		pack16(msg->cpus_per_task, buffer);
-		pack16(msg->task_dist, buffer);
+		pack32(msg->task_dist, buffer);
 		pack16(msg->node_cpus, buffer);
 		pack16(msg->job_core_spec, buffer);
+		pack16(msg->accel_bind_type, buffer);
 
 		slurm_cred_pack(msg->cred, buffer, protocol_version);
 		for (i = 0; i < msg->nnodes; i++) {
@@ -8309,7 +9819,9 @@ _pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer,
 		pack8(msg->open_mode, buffer);
 		pack8(msg->pty, buffer);
 		packstr(msg->acctg_freq, buffer);
-		pack32(msg->cpu_freq, buffer);
+		pack32(msg->cpu_freq_min, buffer);
+		pack32(msg->cpu_freq_max, buffer);
+		pack32(msg->cpu_freq_gov, buffer);
 		packstr(msg->ckpt_dir, buffer);
 		packstr(msg->restart_dir, buffer);
 		if (!(cluster_flags & CLUSTER_FLAG_BG)) {
@@ -8323,23 +9835,28 @@ _pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer,
 						     buffer,
 						     protocol_version);
 		}
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		uint16_t old_task_dist;
 		pack32(msg->job_id, buffer);
 		pack32(msg->job_step_id, buffer);
 		pack32(msg->ntasks, buffer);
 		pack32(msg->uid, buffer);
+		packstr(msg->partition, buffer);
+		packstr(msg->user_name, buffer);
 		pack32(msg->gid, buffer);
 		pack32(msg->job_mem_lim, buffer);
 		pack32(msg->step_mem_lim, buffer);
 
 		pack32(msg->nnodes, buffer);
 		pack16(msg->cpus_per_task, buffer);
-		pack16(msg->task_dist, buffer);
+		old_task_dist = task_dist_new2old(msg->task_dist);
+		pack16(old_task_dist, buffer);
+		pack16(msg->node_cpus, buffer);
+		pack16(msg->job_core_spec, buffer);
 
 		slurm_cred_pack(msg->cred, buffer, protocol_version);
 		for (i = 0; i < msg->nnodes; i++) {
 			pack16(msg->tasks_to_launch[i], buffer);
-			pack16(0, buffer);
 			pack32_array(msg->global_task_ids[i],
 				     (uint32_t) msg->tasks_to_launch[i],
 				     buffer);
@@ -8383,7 +9900,7 @@ _pack_launch_tasks_request_msg(launch_tasks_request_msg_t * msg, Buf buffer,
 		pack8(msg->open_mode, buffer);
 		pack8(msg->pty, buffer);
 		packstr(msg->acctg_freq, buffer);
-		pack32(msg->cpu_freq, buffer);
+		pack32(msg->cpu_freq_max, buffer);
 		packstr(msg->ckpt_dir, buffer);
 		packstr(msg->restart_dir, buffer);
 		if (!(cluster_flags & CLUSTER_FLAG_BG)) {
@@ -8410,7 +9927,6 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t **
 {
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 	uint32_t uint32_tmp;
-	uint16_t uint16 = 0;
 	launch_tasks_request_msg_t *msg;
 	int i = 0;
 
@@ -8418,7 +9934,7 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t **
 	msg = xmalloc(sizeof(launch_tasks_request_msg_t));
 	*msg_ptr = msg;
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack32(&msg->job_id, buffer);
 		safe_unpack32(&msg->job_step_id, buffer);
 		safe_unpack32(&msg->ntasks, buffer);
@@ -8431,9 +9947,10 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t **
 
 		safe_unpack32(&msg->nnodes, buffer);
 		safe_unpack16(&msg->cpus_per_task, buffer);
-		safe_unpack16(&msg->task_dist, buffer);
+		safe_unpack32(&msg->task_dist, buffer);
 		safe_unpack16(&msg->node_cpus, buffer);
 		safe_unpack16(&msg->job_core_spec, buffer);
+		safe_unpack16(&msg->accel_bind_type, buffer);
 
 		if (!(msg->cred = slurm_cred_unpack(buffer, protocol_version)))
 			goto unpack_error;
@@ -8511,7 +10028,9 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t **
 		safe_unpack8(&msg->open_mode, buffer);
 		safe_unpack8(&msg->pty, buffer);
 		safe_unpackstr_xmalloc(&msg->acctg_freq, &uint32_tmp, buffer);
-		safe_unpack32(&msg->cpu_freq, buffer);
+		safe_unpack32(&msg->cpu_freq_min, buffer);
+		safe_unpack32(&msg->cpu_freq_max, buffer);
+		safe_unpack32(&msg->cpu_freq_gov, buffer);
 		safe_unpackstr_xmalloc(&msg->ckpt_dir, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&msg->restart_dir, &uint32_tmp, buffer);
 		if (!(cluster_flags & CLUSTER_FLAG_BG)) {
@@ -8519,18 +10038,24 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t **
 						       buffer,
 						       protocol_version);
 		}
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		uint16_t old_task_dist = 0;
 		safe_unpack32(&msg->job_id, buffer);
 		safe_unpack32(&msg->job_step_id, buffer);
 		safe_unpack32(&msg->ntasks, buffer);
 		safe_unpack32(&msg->uid, buffer);
+		safe_unpackstr_xmalloc(&msg->partition, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg->user_name, &uint32_tmp, buffer);
 		safe_unpack32(&msg->gid, buffer);
 		safe_unpack32(&msg->job_mem_lim, buffer);
 		safe_unpack32(&msg->step_mem_lim, buffer);
 
 		safe_unpack32(&msg->nnodes, buffer);
 		safe_unpack16(&msg->cpus_per_task, buffer);
-		safe_unpack16(&msg->task_dist, buffer);
+		safe_unpack16(&old_task_dist, buffer);
+		msg->task_dist = task_dist_old2new(old_task_dist);
+		safe_unpack16(&msg->node_cpus, buffer);
+		safe_unpack16(&msg->job_core_spec, buffer);
 
 		if (!(msg->cred = slurm_cred_unpack(buffer, protocol_version)))
 			goto unpack_error;
@@ -8539,7 +10064,6 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t **
 					       msg->nnodes);
 		for (i = 0; i < msg->nnodes; i++) {
 			safe_unpack16(&msg->tasks_to_launch[i], buffer);
-			safe_unpack16(&uint16, buffer); /* not needed */
 			safe_unpack32_array(&msg->global_task_ids[i],
 					    &uint32_tmp,
 					    buffer);
@@ -8609,7 +10133,7 @@ _unpack_launch_tasks_request_msg(launch_tasks_request_msg_t **
 		safe_unpack8(&msg->open_mode, buffer);
 		safe_unpack8(&msg->pty, buffer);
 		safe_unpackstr_xmalloc(&msg->acctg_freq, &uint32_tmp, buffer);
-		safe_unpack32(&msg->cpu_freq, buffer);
+		safe_unpack32(&msg->cpu_freq_max, buffer);
 		safe_unpackstr_xmalloc(&msg->ckpt_dir, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&msg->restart_dir, &uint32_tmp, buffer);
 		if (!(cluster_flags & CLUSTER_FLAG_BG)) {
@@ -8988,17 +10512,34 @@ _pack_prolog_launch_msg(
 {
 	xassert(msg != NULL);
 
-	pack32(msg->job_id, buffer);
-	pack32(msg->uid, buffer);
-	pack32(msg->gid, buffer);
-
-	packstr(msg->alias_list, buffer);
-	packstr(msg->nodes, buffer);
-	packstr(msg->partition, buffer);
-	packstr(msg->std_err, buffer);
-	packstr(msg->std_out, buffer);
-	packstr(msg->work_dir, buffer);
-	packstr_array(msg->spank_job_env, msg->spank_job_env_size, buffer);
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		pack32(msg->job_id, buffer);
+		pack32(msg->uid, buffer);
+		pack32(msg->gid, buffer);
+
+		packstr(msg->alias_list, buffer);
+		packstr(msg->nodes, buffer);
+		packstr(msg->partition, buffer);
+		packstr(msg->std_err, buffer);
+		packstr(msg->std_out, buffer);
+		packstr(msg->work_dir, buffer);
+		packstr_array(msg->spank_job_env, msg->spank_job_env_size,
+			      buffer);
+		slurm_cred_pack(msg->cred, buffer, protocol_version);
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		pack32(msg->job_id, buffer);
+		pack32(msg->uid, buffer);
+		pack32(msg->gid, buffer);
+
+		packstr(msg->alias_list, buffer);
+		packstr(msg->nodes, buffer);
+		packstr(msg->partition, buffer);
+		packstr(msg->std_err, buffer);
+		packstr(msg->std_out, buffer);
+		packstr(msg->work_dir, buffer);
+		packstr_array(msg->spank_job_env, msg->spank_job_env_size,
+			      buffer);
+	}
 }
 
 static int
@@ -9013,20 +10554,52 @@ _unpack_prolog_launch_msg(
 	launch_msg_ptr = xmalloc(sizeof(prolog_launch_msg_t));
 	*msg = launch_msg_ptr;
 
-	safe_unpack32(&launch_msg_ptr->job_id, buffer);
-	safe_unpack32(&launch_msg_ptr->uid, buffer);
-	safe_unpack32(&launch_msg_ptr->gid, buffer);
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpack32(&launch_msg_ptr->job_id, buffer);
+		safe_unpack32(&launch_msg_ptr->uid, buffer);
+		safe_unpack32(&launch_msg_ptr->gid, buffer);
+
+		safe_unpackstr_xmalloc(&launch_msg_ptr->alias_list, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&launch_msg_ptr->nodes, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&launch_msg_ptr->partition, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&launch_msg_ptr->std_err, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&launch_msg_ptr->std_out, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&launch_msg_ptr->work_dir, &uint32_tmp,
+				       buffer);
 
-	safe_unpackstr_xmalloc(&launch_msg_ptr->alias_list, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&launch_msg_ptr->nodes, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&launch_msg_ptr->partition, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&launch_msg_ptr->std_err, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&launch_msg_ptr->std_out, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&launch_msg_ptr->work_dir, &uint32_tmp, buffer);
+		safe_unpackstr_array(&launch_msg_ptr->spank_job_env,
+				     &launch_msg_ptr->spank_job_env_size,
+				     buffer);
+		if (!(launch_msg_ptr->cred = slurm_cred_unpack(buffer,
+							protocol_version)))
+			goto unpack_error;
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		safe_unpack32(&launch_msg_ptr->job_id, buffer);
+		safe_unpack32(&launch_msg_ptr->uid, buffer);
+		safe_unpack32(&launch_msg_ptr->gid, buffer);
 
-	safe_unpackstr_array(&launch_msg_ptr->spank_job_env,
-			     &launch_msg_ptr->spank_job_env_size,
-			     buffer);
+		safe_unpackstr_xmalloc(&launch_msg_ptr->alias_list, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&launch_msg_ptr->nodes, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&launch_msg_ptr->partition, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&launch_msg_ptr->std_err, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&launch_msg_ptr->std_out, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&launch_msg_ptr->work_dir, &uint32_tmp,
+				       buffer);
+
+		safe_unpackstr_array(&launch_msg_ptr->spank_job_env,
+				     &launch_msg_ptr->spank_job_env_size,
+				     buffer);
+	}
 
 	return SLURM_SUCCESS;
 
@@ -9546,35 +11119,143 @@ extern int slurm_unpack_block_info_msg(
 				    protocol_version))
 				goto unpack_error;
 		}
-	} else {
-		error("slurm_unpack_block_info_msg: protocol_version "
-		      "%hu not supported", protocol_version);
-		goto unpack_error;
+	} else {
+		error("slurm_unpack_block_info_msg: protocol_version "
+		      "%hu not supported", protocol_version);
+		goto unpack_error;
+	}
+	*block_info_msg_pptr = buf;
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_block_info_msg(buf);
+	*block_info_msg_pptr = NULL;
+	return SLURM_ERROR;
+}
+
+static int _unpack_block_info(block_info_t **block_info, Buf buffer,
+			      uint16_t protocol_version)
+{
+	int rc = SLURM_SUCCESS;
+	block_info_t *bg_rec = xmalloc(sizeof(block_info_t));
+
+	if ((rc = slurm_unpack_block_info_members(
+		    bg_rec, buffer, protocol_version))
+	    != SLURM_SUCCESS)
+		xfree(bg_rec);
+	else
+		*block_info = bg_rec;
+	return rc;
+}
+
+static int _unpack_burst_buffer_info_msg(
+			burst_buffer_info_msg_t **burst_buffer_info, Buf buffer,
+			uint16_t protocol_version)
+{
+	int i, j, k;
+	burst_buffer_info_msg_t *bb_msg_ptr = NULL;
+	burst_buffer_info_t *bb_info_ptr;
+	burst_buffer_resv_t *bb_resv_ptr;
+	burst_buffer_use_t  *bb_use_ptr;
+	uint32_t uint32_tmp;
+
+	bb_msg_ptr = xmalloc(sizeof(burst_buffer_info_msg_t));
+	safe_unpack32(&bb_msg_ptr->record_count, buffer);
+	bb_msg_ptr->burst_buffer_array = xmalloc(sizeof(burst_buffer_info_t) *
+						 bb_msg_ptr->record_count);
+	for (i = 0, bb_info_ptr = bb_msg_ptr->burst_buffer_array;
+	     i < bb_msg_ptr->record_count; i++, bb_info_ptr++) {
+		safe_unpackstr_xmalloc(&bb_info_ptr->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&bb_info_ptr->allow_users, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&bb_info_ptr->create_buffer,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&bb_info_ptr->default_pool,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&bb_info_ptr->deny_users, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&bb_info_ptr->destroy_buffer,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&bb_info_ptr->flags, buffer);
+		safe_unpackstr_xmalloc(&bb_info_ptr->get_sys_state, &uint32_tmp,
+				       buffer);
+		safe_unpack64(&bb_info_ptr->granularity, buffer);
+		safe_unpack32(&bb_info_ptr->gres_cnt, buffer);
+		bb_info_ptr->gres_ptr = xmalloc(bb_info_ptr->gres_cnt *
+						sizeof(burst_buffer_gres_t));
+		for (j = 0; j < bb_info_ptr->gres_cnt; j++) {
+			safe_unpackstr_xmalloc(&bb_info_ptr->gres_ptr[j].name,
+					       &uint32_tmp, buffer);
+			safe_unpack64(&bb_info_ptr->gres_ptr[j].avail_cnt,
+				      buffer);
+			safe_unpack64(&bb_info_ptr->gres_ptr[j].used_cnt,
+				      buffer);
+		}
+		safe_unpackstr_xmalloc(&bb_info_ptr->start_stage_in,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&bb_info_ptr->start_stage_out,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&bb_info_ptr->stop_stage_in,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&bb_info_ptr->stop_stage_out,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&bb_info_ptr->stage_in_timeout, buffer);
+		safe_unpack32(&bb_info_ptr->stage_out_timeout, buffer);
+		safe_unpack64(&bb_info_ptr->total_space, buffer);
+		safe_unpack64(&bb_info_ptr->used_space, buffer);
+
+		safe_unpack32(&bb_info_ptr->buffer_count, buffer);
+		bb_info_ptr->burst_buffer_resv_ptr =
+			xmalloc(sizeof(burst_buffer_resv_t) *
+				bb_info_ptr->buffer_count);
+		for (j = 0, bb_resv_ptr = bb_info_ptr->burst_buffer_resv_ptr;
+		     j < bb_info_ptr->buffer_count; j++, bb_resv_ptr++) {
+			safe_unpackstr_xmalloc(&bb_resv_ptr->account,
+					       &uint32_tmp, buffer);
+			safe_unpack32(&bb_resv_ptr->array_job_id, buffer);
+			safe_unpack32(&bb_resv_ptr->array_task_id, buffer);
+			safe_unpack_time(&bb_resv_ptr->create_time, buffer);
+			safe_unpack32(&bb_resv_ptr->gres_cnt, buffer);
+			bb_resv_ptr->gres_ptr = xmalloc(bb_resv_ptr->gres_cnt *
+						sizeof(burst_buffer_gres_t));
+			for (k = 0; k < bb_resv_ptr->gres_cnt; k++) {
+				safe_unpackstr_xmalloc(&bb_resv_ptr->
+						       gres_ptr[k].name,
+						       &uint32_tmp, buffer);
+				safe_unpack64(&bb_resv_ptr->gres_ptr[k].
+					      used_cnt, buffer);
+			}
+			safe_unpack32(&bb_resv_ptr->job_id, buffer);
+			safe_unpackstr_xmalloc(&bb_resv_ptr->name,
+					       &uint32_tmp, buffer);
+			safe_unpackstr_xmalloc(&bb_resv_ptr->partition,
+					       &uint32_tmp, buffer);
+			safe_unpackstr_xmalloc(&bb_resv_ptr->qos,
+					       &uint32_tmp, buffer);
+			safe_unpack64(&bb_resv_ptr->size, buffer);
+			safe_unpack16(&bb_resv_ptr->state, buffer);
+			safe_unpack32(&bb_resv_ptr->user_id, buffer);
+		}
+
+		safe_unpack32(&bb_info_ptr->use_count, buffer);
+		bb_info_ptr->burst_buffer_use_ptr =
+			xmalloc(sizeof(burst_buffer_use_t) *
+				bb_info_ptr->use_count);
+		for (j = 0, bb_use_ptr = bb_info_ptr->burst_buffer_use_ptr;
+		     j < bb_info_ptr->use_count; j++, bb_use_ptr++) {
+			safe_unpack64(&bb_use_ptr->used, buffer);
+			safe_unpack32(&bb_use_ptr->user_id, buffer);
+		}
 	}
-	*block_info_msg_pptr = buf;
+	*burst_buffer_info = bb_msg_ptr;
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurm_free_block_info_msg(buf);
-	*block_info_msg_pptr = NULL;
+	slurm_free_burst_buffer_info_msg(bb_msg_ptr);
+	*burst_buffer_info = NULL;
 	return SLURM_ERROR;
 }
 
-static int _unpack_block_info(block_info_t **block_info, Buf buffer,
-			      uint16_t protocol_version)
-{
-	int rc = SLURM_SUCCESS;
-	block_info_t *bg_rec = xmalloc(sizeof(block_info_t));
-
-	if ((rc = slurm_unpack_block_info_members(
-		    bg_rec, buffer, protocol_version))
-	    != SLURM_SUCCESS)
-		xfree(bg_rec);
-	else
-		*block_info = bg_rec;
-	return rc;
-}
-
 static void
 _pack_job_step_info_req_msg(job_step_info_request_msg_t * msg, Buf buffer,
 			    uint16_t protocol_version)
@@ -9774,25 +11455,6 @@ _unpack_front_end_info_members(front_end_info_t *front_end, Buf buffer,
 		safe_unpack_time(&front_end->slurmd_start_time, buffer);
 		front_end->node_state = tmp_state;
 
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		safe_unpackstr_xmalloc(&front_end->allow_groups, &uint32_tmp,
-				       buffer);
-		safe_unpackstr_xmalloc(&front_end->allow_users, &uint32_tmp,
-				       buffer);
-		safe_unpack_time(&front_end->boot_time, buffer);
-		safe_unpackstr_xmalloc(&front_end->deny_groups, &uint32_tmp,
-				       buffer);
-		safe_unpackstr_xmalloc(&front_end->deny_users, &uint32_tmp,
-				       buffer);
-		safe_unpackstr_xmalloc(&front_end->name, &uint32_tmp, buffer);
-		safe_unpack16(&tmp_state, buffer);
-
-		safe_unpackstr_xmalloc(&front_end->reason, &uint32_tmp, buffer);
-		safe_unpack_time(&front_end->reason_time, buffer);
-		safe_unpack32(&front_end->reason_uid, buffer);
-
-		safe_unpack_time(&front_end->slurmd_start_time, buffer);
-		front_end->node_state = tmp_state;
 	} else {
 		error("_unpack_front_end_info_members: protocol_version "
 		      "%hu not supported", protocol_version);
@@ -9857,6 +11519,38 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+static void
+_pack_layout_info_request_msg(layout_info_request_msg_t * msg, Buf buffer,
+			      uint16_t protocol_version)
+{
+	packstr(msg->layout_type, buffer);
+	packstr(msg->entities, buffer);
+	packstr(msg->type, buffer);
+	pack32(msg->no_relation, buffer);
+}
+
+static int
+_unpack_layout_info_request_msg(layout_info_request_msg_t ** msg, Buf buffer,
+			      uint16_t protocol_version)
+{
+	layout_info_request_msg_t* layout_info;
+	uint32_t uint32_tmp;
+
+	layout_info = xmalloc(sizeof(layout_info_request_msg_t));
+	*msg = layout_info;
+
+	safe_unpackstr_xmalloc(&layout_info->layout_type, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&layout_info->entities, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&layout_info->type, &uint32_tmp, buffer);
+	safe_unpack32(&layout_info->no_relation, buffer);
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_layout_info_request_msg(layout_info);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
 static void
 _pack_slurm_addr_array(slurm_addr_t * slurm_address,
 		       uint32_t size_val, Buf buffer,
@@ -9934,7 +11628,7 @@ unpack_error:
 		error("_unpack_ret_list: message type %u, record %d of %u",
 		      ret_data_info->type, i, size_val);
 	}
-	list_destroy(*ret_list);
+	FREE_NULL_LIST(*ret_list);
 	*ret_list = NULL;
 	return SLURM_ERROR;
 }
@@ -9945,7 +11639,7 @@ _pack_batch_job_launch_msg(batch_job_launch_msg_t * msg, Buf buffer,
 {
 	xassert(msg != NULL);
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		pack32(msg->job_id, buffer);
 		pack32(msg->step_id, buffer);
 		pack32(msg->uid, buffer);
@@ -10001,10 +11695,17 @@ _pack_batch_job_launch_msg(batch_job_launch_msg_t * msg, Buf buffer,
 
 		select_g_select_jobinfo_pack(msg->select_jobinfo, buffer,
 					     protocol_version);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+
+		packstr(msg->account, buffer);
+		packstr(msg->qos, buffer);
+		packstr(msg->resv_name, buffer);
+
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		pack32(msg->job_id, buffer);
 		pack32(msg->step_id, buffer);
 		pack32(msg->uid, buffer);
+		packstr(msg->partition, buffer);
+		packstr(msg->user_name, buffer);
 		pack32(msg->gid, buffer);
 		pack32(msg->ntasks, buffer);
 		pack32(msg->pn_min_memory, buffer);
@@ -10013,12 +11714,13 @@ _pack_batch_job_launch_msg(batch_job_launch_msg_t * msg, Buf buffer,
 		pack8(msg->overcommit, buffer);
 
 		pack32(msg->array_job_id,   buffer);
-		pack16((uint16_t) msg->array_task_id, buffer);
+		pack32(msg->array_task_id,  buffer);
 
 		packstr(msg->acctg_freq,     buffer);
 		pack16(msg->cpu_bind_type,  buffer);
 		pack16(msg->cpus_per_task,  buffer);
 		pack16(msg->restart_cnt,    buffer);
+		pack16(msg->job_core_spec,  buffer);
 
 		pack32(msg->num_cpu_groups, buffer);
 		if (msg->num_cpu_groups) {
@@ -10065,14 +11767,13 @@ _unpack_batch_job_launch_msg(batch_job_launch_msg_t ** msg, Buf buffer,
 			     uint16_t protocol_version)
 {
 	uint32_t uint32_tmp;
-	uint16_t uint16_tmp = 0;
 	batch_job_launch_msg_t *launch_msg_ptr;
 
 	xassert(msg != NULL);
 	launch_msg_ptr = xmalloc(sizeof(batch_job_launch_msg_t));
 	*msg = launch_msg_ptr;
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >=  SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack32(&launch_msg_ptr->job_id, buffer);
 		safe_unpack32(&launch_msg_ptr->step_id, buffer);
 		safe_unpack32(&launch_msg_ptr->uid, buffer);
@@ -10152,10 +11853,25 @@ _unpack_batch_job_launch_msg(batch_job_launch_msg_t ** msg, Buf buffer,
 						   select_jobinfo,
 						   buffer, protocol_version))
 			goto unpack_error;
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+
+		safe_unpackstr_xmalloc(&launch_msg_ptr->account,
+				       &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&launch_msg_ptr->qos,
+				       &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&launch_msg_ptr->resv_name,
+				       &uint32_tmp,
+				       buffer);
+
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		safe_unpack32(&launch_msg_ptr->job_id, buffer);
 		safe_unpack32(&launch_msg_ptr->step_id, buffer);
 		safe_unpack32(&launch_msg_ptr->uid, buffer);
+		safe_unpackstr_xmalloc(&launch_msg_ptr->partition,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&launch_msg_ptr->user_name,
+				       &uint32_tmp, buffer);
 		safe_unpack32(&launch_msg_ptr->gid, buffer);
 		safe_unpack32(&launch_msg_ptr->ntasks, buffer);
 		safe_unpack32(&launch_msg_ptr->pn_min_memory, buffer);
@@ -10164,17 +11880,14 @@ _unpack_batch_job_launch_msg(batch_job_launch_msg_t ** msg, Buf buffer,
 		safe_unpack8(&launch_msg_ptr->overcommit, buffer);
 
 		safe_unpack32(&launch_msg_ptr->array_job_id,   buffer);
-		safe_unpack16(&uint16_tmp,  buffer);
-		if (uint16_tmp == (uint16_t) NO_VAL)
-			launch_msg_ptr->array_task_id = NO_VAL;
-		else
-			launch_msg_ptr->array_task_id = (uint32_t) uint16_tmp;
+		safe_unpack32(&launch_msg_ptr->array_task_id,  buffer);
 
 		safe_unpackstr_xmalloc(&launch_msg_ptr->acctg_freq,
 				       &uint32_tmp, buffer);
 		safe_unpack16(&launch_msg_ptr->cpu_bind_type,  buffer);
 		safe_unpack16(&launch_msg_ptr->cpus_per_task,  buffer);
 		safe_unpack16(&launch_msg_ptr->restart_cnt,    buffer);
+		safe_unpack16(&launch_msg_ptr->job_core_spec,  buffer);
 
 		safe_unpack32(&launch_msg_ptr->num_cpu_groups, buffer);
 		if (launch_msg_ptr->num_cpu_groups) {
@@ -10188,7 +11901,6 @@ _unpack_batch_job_launch_msg(batch_job_launch_msg_t ** msg, Buf buffer,
 				goto unpack_error;
 		}
 
-
 		safe_unpackstr_xmalloc(&launch_msg_ptr->alias_list,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&launch_msg_ptr->cpu_bind, &uint32_tmp,
@@ -10758,7 +12470,12 @@ static void _pack_ping_slurmd_resp(ping_slurmd_resp_msg_t *msg,
 {
 	xassert (msg != NULL);
 
-	pack32(msg->cpu_load, buffer);
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		pack32(msg->cpu_load, buffer);
+		pack32(msg->free_mem, buffer);
+	} else {
+		pack32(msg->cpu_load, buffer);
+	}
 }
 
 static int _unpack_ping_slurmd_resp(ping_slurmd_resp_msg_t **msg_ptr,
@@ -10769,7 +12486,13 @@ static int _unpack_ping_slurmd_resp(ping_slurmd_resp_msg_t **msg_ptr,
 	xassert (msg_ptr != NULL);
 	msg = xmalloc(sizeof(ping_slurmd_resp_msg_t));
 	*msg_ptr = msg;
-	safe_unpack32(&msg->cpu_load, buffer);
+	
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpack32(&msg->cpu_load, buffer);
+		safe_unpack32(&msg->free_mem, buffer);
+	} else {
+		safe_unpack32(&msg->cpu_load, buffer);
+	}
 
 	return SLURM_SUCCESS;
 
@@ -11630,24 +13353,6 @@ static int _unpack_accounting_update_msg(accounting_update_msg_t **msg,
 				goto unpack_error;
 			list_append(msg_ptr->update_list, rec);
 		}
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		/* We need to work off the version sent in the message since
-		   we might not know what the protocol_version is at this
-		   moment (we might not of been updated before other parts of
-		   SLURM).
-		*/
-		uint16_t rpc_version;
-		safe_unpack16(&rpc_version, buffer);
-		safe_unpack32(&count, buffer);
-		msg_ptr->update_list = list_create(
-			slurmdb_destroy_update_object);
-		for (i=0; i<count; i++) {
-			if ((slurmdb_unpack_update_object(
-				    &rec, rpc_version, buffer))
-			   == SLURM_ERROR)
-				goto unpack_error;
-			list_append(msg_ptr->update_list, rec);
-		}
 	} else {
 		error("_unpack_accounting_update_msg: protocol_version "
 		      "%hu not supported", protocol_version);
@@ -11670,9 +13375,9 @@ static void _pack_topo_info_msg(topo_info_response_msg_t *msg, Buf buffer,
 	for (i=0; i<msg->record_count; i++) {
 		pack16(msg->topo_array[i].level,      buffer);
 		pack32(msg->topo_array[i].link_speed, buffer);
-  		packstr(msg->topo_array[i].name,      buffer);
-  		packstr(msg->topo_array[i].nodes,     buffer);
-  		packstr(msg->topo_array[i].switches,  buffer);
+		packstr(msg->topo_array[i].name,      buffer);
+		packstr(msg->topo_array[i].nodes,     buffer);
+		packstr(msg->topo_array[i].switches,  buffer);
 	}
 }
 
@@ -11708,6 +13413,40 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+static void _pack_powercap_info_msg(powercap_info_msg_t *msg, Buf buffer,
+				    uint16_t protocol_version)
+{
+	pack32(msg->power_cap, buffer);
+	pack32(msg->power_floor, buffer);
+	pack32(msg->power_change, buffer);
+	pack32(msg->min_watts, buffer);
+	pack32(msg->cur_max_watts, buffer);
+	pack32(msg->adj_max_watts, buffer);
+	pack32(msg->max_watts, buffer);
+}
+
+static int  _unpack_powercap_info_msg(powercap_info_msg_t **msg, Buf buffer,
+				  uint16_t protocol_version)
+{
+	powercap_info_msg_t *msg_ptr = xmalloc(sizeof(powercap_info_msg_t));
+
+	*msg = msg_ptr;
+	safe_unpack32(&msg_ptr->power_cap, buffer);
+	safe_unpack32(&msg_ptr->power_floor, buffer);
+	safe_unpack32(&msg_ptr->power_change, buffer);
+	safe_unpack32(&msg_ptr->min_watts, buffer);
+	safe_unpack32(&msg_ptr->cur_max_watts, buffer);
+	safe_unpack32(&msg_ptr->adj_max_watts, buffer);
+	safe_unpack32(&msg_ptr->max_watts, buffer);
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_powercap_info_msg(msg_ptr);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
 static void _pack_spank_env_request_msg(spank_env_request_msg_t * msg,
 					Buf buffer, uint16_t protocol_version)
 {
@@ -11808,7 +13547,7 @@ static int  _unpack_stats_response_msg(stats_info_response_msg_t **msg_ptr,
 	msg = xmalloc ( sizeof (stats_info_response_msg_t) );
 	*msg_ptr = msg ;
 
-	if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack32(&msg->parts_packed,	buffer);
 		if (msg->parts_packed) {
 			safe_unpack_time(&msg->req_time,	buffer);
@@ -11831,7 +13570,55 @@ static int  _unpack_stats_response_msg(stats_info_response_msg_t **msg_ptr,
 			safe_unpack32(&msg->bf_backfilled_jobs,	buffer);
 			safe_unpack32(&msg->bf_last_backfilled_jobs, buffer);
 			safe_unpack32(&msg->bf_cycle_counter,	buffer);
-			safe_unpack32(&msg->bf_cycle_sum,	buffer);
+			safe_unpack64(&msg->bf_cycle_sum,	buffer);
+			safe_unpack32(&msg->bf_cycle_last,	buffer);
+			safe_unpack32(&msg->bf_last_depth,	buffer);
+			safe_unpack32(&msg->bf_last_depth_try,	buffer);
+
+			safe_unpack32(&msg->bf_queue_len,	buffer);
+			safe_unpack32(&msg->bf_cycle_max,	buffer);
+			safe_unpack_time(&msg->bf_when_last_cycle, buffer);
+			safe_unpack32(&msg->bf_depth_sum,	buffer);
+			safe_unpack32(&msg->bf_depth_try_sum,	buffer);
+			safe_unpack32(&msg->bf_queue_len_sum,	buffer);
+			safe_unpack32(&msg->bf_active,		buffer);
+		}
+
+		safe_unpack32(&msg->rpc_type_size,		buffer);
+		safe_unpack16_array(&msg->rpc_type_id,   &uint32_tmp, buffer);
+		safe_unpack32_array(&msg->rpc_type_cnt,  &uint32_tmp, buffer);
+		safe_unpack64_array(&msg->rpc_type_time, &uint32_tmp, buffer);
+
+		safe_unpack32(&msg->rpc_user_size,		buffer);
+		safe_unpack32_array(&msg->rpc_user_id,   &uint32_tmp, buffer);
+		safe_unpack32_array(&msg->rpc_user_cnt,  &uint32_tmp, buffer);
+		safe_unpack64_array(&msg->rpc_user_time, &uint32_tmp, buffer);
+	} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+		safe_unpack32(&msg->parts_packed,	buffer);
+
+		if (msg->parts_packed) {
+			safe_unpack_time(&msg->req_time,	buffer);
+			safe_unpack_time(&msg->req_time_start,	buffer);
+			safe_unpack32(&msg->server_thread_count,buffer);
+			safe_unpack32(&msg->agent_queue_size,	buffer);
+			safe_unpack32(&msg->jobs_submitted,	buffer);
+			safe_unpack32(&msg->jobs_started,	buffer);
+			safe_unpack32(&msg->jobs_completed,	buffer);
+			safe_unpack32(&msg->jobs_canceled,	buffer);
+			safe_unpack32(&msg->jobs_failed,	buffer);
+
+			safe_unpack32(&msg->schedule_cycle_max,	buffer);
+			safe_unpack32(&msg->schedule_cycle_last,buffer);
+			safe_unpack32(&msg->schedule_cycle_sum, buffer);
+			safe_unpack32(&msg->schedule_cycle_counter, buffer);
+			safe_unpack32(&msg->schedule_cycle_depth, buffer);
+			safe_unpack32(&msg->schedule_queue_len,	buffer);
+
+			safe_unpack32(&msg->bf_backfilled_jobs,	buffer);
+			safe_unpack32(&msg->bf_last_backfilled_jobs, buffer);
+			safe_unpack32(&msg->bf_cycle_counter,	buffer);
+			safe_unpack32(&uint32_tmp,       	buffer);
+			msg->bf_cycle_sum = (uint64_t)uint32_tmp;
 			safe_unpack32(&msg->bf_cycle_last,	buffer);
 			safe_unpack32(&msg->bf_last_depth,	buffer);
 			safe_unpack32(&msg->bf_last_depth_try,	buffer);
@@ -11869,7 +13656,7 @@ static int  _unpack_stats_response_msg(stats_info_response_msg_t **msg_ptr,
 
 			safe_unpack32(&msg->schedule_cycle_max,	buffer);
 			safe_unpack32(&msg->schedule_cycle_last,buffer);
-			safe_unpack32(&msg->schedule_cycle_sum,	buffer);
+			safe_unpack32(&msg->schedule_cycle_sum, buffer);
 			safe_unpack32(&msg->schedule_cycle_counter, buffer);
 			safe_unpack32(&msg->schedule_cycle_depth, buffer);
 			safe_unpack32(&msg->schedule_queue_len,	buffer);
@@ -11877,7 +13664,8 @@ static int  _unpack_stats_response_msg(stats_info_response_msg_t **msg_ptr,
 			safe_unpack32(&msg->bf_backfilled_jobs,	buffer);
 			safe_unpack32(&msg->bf_last_backfilled_jobs, buffer);
 			safe_unpack32(&msg->bf_cycle_counter,	buffer);
-			safe_unpack32(&msg->bf_cycle_sum,	buffer);
+			safe_unpack32(&uint32_tmp,       	buffer);
+			msg->bf_cycle_sum = (uint64_t)uint32_tmp;
 			safe_unpack32(&msg->bf_cycle_last,	buffer);
 			safe_unpack32(&msg->bf_last_depth,	buffer);
 			safe_unpack32(&msg->bf_last_depth_try,	buffer);
@@ -11909,8 +13697,8 @@ unpack_error:
  */
 static void
 _pack_license_info_request_msg(license_info_request_msg_t *msg,
-                               Buf buffer,
-                               uint16_t protocol_version)
+			       Buf buffer,
+			       uint16_t protocol_version)
 {
 	pack_time(msg->last_update, buffer);
 	pack16((uint16_t)msg->show_flags, buffer);
@@ -11920,8 +13708,8 @@ _pack_license_info_request_msg(license_info_request_msg_t *msg,
  */
 static int
 _unpack_license_info_request_msg(license_info_request_msg_t **msg,
-                                 Buf buffer,
-                                 uint16_t protocol_version)
+				 Buf buffer,
+				 uint16_t protocol_version)
 {
 	*msg = xmalloc(sizeof(license_info_msg_t));
 
@@ -11953,8 +13741,8 @@ _pack_license_info_msg(slurm_msg_t *msg, Buf buffer)
  */
 static int
 _unpack_license_info_msg(license_info_msg_t **msg,
-                         Buf buffer,
-                         uint16_t protocol_version)
+			 Buf buffer,
+			 uint16_t protocol_version)
 {
 	int i;
 	uint32_t zz;
@@ -11970,7 +13758,7 @@ _unpack_license_info_msg(license_info_msg_t **msg,
 		safe_unpack_time(&((*msg)->last_update), buffer);
 
 		(*msg)->lic_array = xmalloc(sizeof(slurm_license_info_t)
-		                            * (*msg)->num_lic);
+					    * (*msg)->num_lic);
 
 		/* Decode individual license data.
 		 */
@@ -12018,7 +13806,7 @@ static void _pack_job_array_resp_msg(job_array_resp_msg_t *msg, Buf buffer,
 	pack32(msg->job_array_count, buffer);
 	for (i = 0; i < msg->job_array_count; i++) {
 		pack32(msg->error_code[i], buffer);
-  		packstr(msg->job_array_id[i], buffer);
+		packstr(msg->job_array_id[i], buffer);
 	}
 }
 static int  _unpack_job_array_resp_msg(job_array_resp_msg_t **msg, Buf buffer,
@@ -12045,6 +13833,111 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+
+/* _pack_assoc_mgr_info_request_msg()
+ */
+static void
+_pack_assoc_mgr_info_request_msg(assoc_mgr_info_request_msg_t *msg,
+				 Buf buffer,
+				 uint16_t protocol_version)
+{
+	uint32_t count = NO_VAL;
+	char *tmp_info = NULL;
+	ListIterator itr = NULL;
+
+	xassert(msg != NULL);
+
+	if (!msg->acct_list || !(count = list_count(msg->acct_list)))
+		count = NO_VAL;
+
+	pack32(count, buffer);
+	if (count != NO_VAL) {
+		itr = list_iterator_create(msg->acct_list);
+		while ((tmp_info = list_next(itr)))
+			packstr(tmp_info, buffer);
+		list_iterator_destroy(itr);
+	}
+
+	pack32(msg->flags, buffer);
+
+	if (!msg->qos_list || !(count = list_count(msg->qos_list)))
+		count = NO_VAL;
+
+	pack32(count, buffer);
+	if (count != NO_VAL) {
+		itr = list_iterator_create(msg->qos_list);
+		while ((tmp_info = list_next(itr)))
+			packstr(tmp_info, buffer);
+		list_iterator_destroy(itr);
+	}
+
+	if (!msg->user_list || !(count = list_count(msg->user_list)))
+		count = NO_VAL;
+
+	pack32(count, buffer);
+	if (count != NO_VAL) {
+		itr = list_iterator_create(msg->user_list);
+		while ((tmp_info = list_next(itr)))
+			packstr(tmp_info, buffer);
+		list_iterator_destroy(itr);
+	}
+}
+
+static int
+_unpack_assoc_mgr_info_request_msg(assoc_mgr_info_request_msg_t **msg,
+				    Buf buffer,
+				    uint16_t protocol_version)
+{
+	uint32_t uint32_tmp;
+	uint32_t count = NO_VAL;
+	int i;
+	char *tmp_info = NULL;
+	assoc_mgr_info_request_msg_t *object_ptr = NULL;
+
+	xassert(msg != NULL);
+
+	object_ptr = xmalloc(sizeof(assoc_mgr_info_request_msg_t));
+	*msg = object_ptr;
+
+	safe_unpack32(&count, buffer);
+	if (count != NO_VAL) {
+		object_ptr->acct_list = list_create(slurm_destroy_char);
+		for (i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info,
+					       &uint32_tmp, buffer);
+			list_append(object_ptr->acct_list, tmp_info);
+		}
+	}
+
+	safe_unpack32(&object_ptr->flags, buffer);
+
+	safe_unpack32(&count, buffer);
+	if (count != NO_VAL) {
+		object_ptr->qos_list = list_create(slurm_destroy_char);
+		for (i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info,
+					       &uint32_tmp, buffer);
+			list_append(object_ptr->qos_list, tmp_info);
+		}
+	}
+
+	safe_unpack32(&count, buffer);
+	if (count != NO_VAL) {
+		object_ptr->user_list = list_create(slurm_destroy_char);
+		for (i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(&tmp_info,
+					       &uint32_tmp, buffer);
+			list_append(object_ptr->user_list, tmp_info);
+		}
+	}
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurm_free_assoc_mgr_info_request_msg(object_ptr);
+	*msg = NULL;
+	return SLURM_ERROR;
+}
+
 /* template
    void pack_ ( * msg , Buf buffer )
    {
diff --git a/src/common/slurm_protocol_pack.h b/src/common/slurm_protocol_pack.h
index d3e9efee8..2b2c3c14f 100644
--- a/src/common/slurm_protocol_pack.h
+++ b/src/common/slurm_protocol_pack.h
@@ -166,4 +166,9 @@ extern int slurm_unpack_block_info_members(block_info_t *block_info, Buf buffer,
 extern int slurm_unpack_block_info_msg(
 	block_info_msg_t **block_info_msg_pptr, Buf buffer,
 	uint16_t protocol_version);
+
+/* Translate task_dist value from v15.08+ format to v14.11- format */
+extern uint16_t task_dist_new2old(uint32_t new_task_dist);
+/* Translate task_dist value from v14.11- format to v15.08+ format */
+extern uint32_t task_dist_old2new(uint16_t old_task_dist);
 #endif
diff --git a/src/common/slurm_protocol_socket_implementation.c b/src/common/slurm_protocol_socket_implementation.c
index 5889687f9..b938f44fc 100644
--- a/src/common/slurm_protocol_socket_implementation.c
+++ b/src/common/slurm_protocol_socket_implementation.c
@@ -90,6 +90,21 @@
  */
 #define MAX_MSG_SIZE     (1024*1024*1024)
 
+
+/* Static functions */
+static int _slurm_connect (int __fd, struct sockaddr const * __addr,
+			   socklen_t __len);
+static slurm_fd_t _slurm_create_socket ( slurm_socket_type_t type );
+static int _slurm_vfcntl(int fd, int cmd, va_list va );
+static int _slurm_fcntl(int fd, int cmd, ... );
+static int _slurm_socket (int __domain, int __type, int __protocol);
+static ssize_t _slurm_send (int __fd, __const void *__buf, size_t __n,
+			    int __flags);
+static ssize_t _slurm_recv (int __fd, void *__buf, size_t __n, int __flags);
+static int _slurm_setsockopt (int __fd, int __level, int __optname,
+			      __const void *__optval, socklen_t __optlen);
+
+
 /****************************************************************
  * MIDDLE LAYER MSG FUNCTIONS
  ****************************************************************/
@@ -125,10 +140,7 @@ static void _sock_bind_wild(int sockfd)
 		srand48((long int) (time(NULL) + getpid()));
 	}
 
-	memset(&sin, 0, sizeof(sin));
-	sin.sin_family = AF_INET;
-	sin.sin_addr.s_addr = htonl(INADDR_ANY);
-	sin.sin_port = htons(RANDOM_USER_PORT);
+	slurm_setup_sockaddr(&sin, RANDOM_USER_PORT);
 
 	for (retry=0; retry < PORT_RETRIES ; retry++) {
 		rc = bind(sockfd, (struct sockaddr *) &sin, sizeof(sin));
@@ -139,29 +151,14 @@ static void _sock_bind_wild(int sockfd)
 	return;
 }
 
-/*
- * This would be a no-op in a message implementation
- */
-int _slurm_close_accepted_conn (slurm_fd_t fd)
-{
-	return _slurm_close (fd);
-}
-
-ssize_t _slurm_msg_recvfrom(slurm_fd_t fd, char **pbuf, size_t *lenp,
-			    uint32_t flags)
-{
-	return _slurm_msg_recvfrom_timeout(fd, pbuf, lenp, flags,
-				(slurm_get_msg_timeout() * 1000));
-}
-
-ssize_t _slurm_msg_recvfrom_timeout(slurm_fd_t fd, char **pbuf, size_t *lenp,
-				    uint32_t flags, int tmout)
+extern ssize_t slurm_msg_recvfrom_timeout(slurm_fd_t fd, char **pbuf,
+		size_t *lenp, uint32_t flags, int tmout)
 {
 	ssize_t  len;
 	uint32_t msglen;
 
-	len = _slurm_recv_timeout( fd, (char *)&msglen,
-				   sizeof(msglen), 0, tmout );
+	len = slurm_recv_timeout( fd, (char *)&msglen,
+				  sizeof(msglen), 0, tmout );
 
 	if (len < ((ssize_t) sizeof(msglen)))
 		return SLURM_ERROR;
@@ -176,7 +173,7 @@ ssize_t _slurm_msg_recvfrom_timeout(slurm_fd_t fd, char **pbuf, size_t *lenp,
 	 */
 	*pbuf = xmalloc_nz(msglen);
 
-	if (_slurm_recv_timeout(fd, *pbuf, msglen, 0, tmout) != msglen) {
+	if (slurm_recv_timeout(fd, *pbuf, msglen, 0, tmout) != msglen) {
 		xfree(*pbuf);
 		*pbuf = NULL;
 		return SLURM_ERROR;
@@ -187,15 +184,15 @@ ssize_t _slurm_msg_recvfrom_timeout(slurm_fd_t fd, char **pbuf, size_t *lenp,
 	return (ssize_t) msglen;
 }
 
-ssize_t _slurm_msg_sendto(slurm_fd_t fd, char *buffer, size_t size,
-			  uint32_t flags)
+extern ssize_t slurm_msg_sendto(slurm_fd_t fd, char *buffer, size_t size,
+				uint32_t flags)
 {
-	return _slurm_msg_sendto_timeout( fd, buffer, size, flags,
+	return slurm_msg_sendto_timeout( fd, buffer, size, flags,
 				(slurm_get_msg_timeout() * 1000));
 }
 
-ssize_t _slurm_msg_sendto_timeout(slurm_fd_t fd, char *buffer, size_t size,
-				  uint32_t flags, int timeout)
+ssize_t slurm_msg_sendto_timeout(slurm_fd_t fd, char *buffer, size_t size,
+				 uint32_t flags, int timeout)
 {
 	int   len;
 	uint32_t usize;
@@ -209,12 +206,12 @@ ssize_t _slurm_msg_sendto_timeout(slurm_fd_t fd, char *buffer, size_t size,
 
 	usize = htonl(size);
 
-	if ((len = _slurm_send_timeout(
+	if ((len = slurm_send_timeout(
 				fd, (char *)&usize, sizeof(usize), 0,
 				timeout)) < 0)
 		goto done;
 
-	if ((len = _slurm_send_timeout(fd, buffer, size, 0, timeout)) < 0)
+	if ((len = slurm_send_timeout(fd, buffer, size, 0, timeout)) < 0)
 		goto done;
 
 
@@ -225,8 +222,8 @@ ssize_t _slurm_msg_sendto_timeout(slurm_fd_t fd, char *buffer, size_t size,
 
 /* Send slurm message with timeout
  * RET message size (as specified in argument) or SLURM_ERROR on error */
-int _slurm_send_timeout(slurm_fd_t fd, char *buf, size_t size,
-			uint32_t flags, int timeout)
+extern int slurm_send_timeout(slurm_fd_t fd, char *buf, size_t size,
+			      uint32_t flags, int timeout)
 {
 	int rc;
 	int sent = 0;
@@ -247,7 +244,7 @@ int _slurm_send_timeout(slurm_fd_t fd, char *buf, size_t size,
 	while (sent < size) {
 		timeleft = timeout - _tot_wait(&tstart);
 		if (timeleft <= 0) {
-			debug("_slurm_send_timeout at %d of %zd, timeout",
+			debug("slurm_send_timeout at %d of %zd, timeout",
 				sent, size);
 			slurm_seterrno(SLURM_PROTOCOL_SOCKET_IMPL_TIMEOUT);
 			sent = SLURM_ERROR;
@@ -258,7 +255,7 @@ int _slurm_send_timeout(slurm_fd_t fd, char *buf, size_t size,
 			if ((rc == 0) || (errno == EINTR) || (errno == EAGAIN))
  				continue;
 			else {
-				debug("_slurm_send_timeout at %d of %zd, "
+				debug("slurm_send_timeout at %d of %zd, "
 					"poll error: %s",
 					sent, size, strerror(errno));
 				slurm_seterrno(SLURM_COMMUNICATIONS_SEND_ERROR);
@@ -275,20 +272,20 @@ int _slurm_send_timeout(slurm_fd_t fd, char *buf, size_t size,
 		 * nonblocking read means just that.
 		 */
 		if (ufds.revents & POLLERR) {
-			debug("_slurm_send_timeout: Socket POLLERR");
+			debug("slurm_send_timeout: Socket POLLERR");
 			slurm_seterrno(ENOTCONN);
 			sent = SLURM_ERROR;
 			goto done;
 		}
 		if ((ufds.revents & POLLHUP) || (ufds.revents & POLLNVAL) ||
 		    (_slurm_recv(fd, &temp, 1, flags) == 0)) {
-			debug2("_slurm_send_timeout: Socket no longer there");
+			debug2("slurm_send_timeout: Socket no longer there");
 			slurm_seterrno(ENOTCONN);
 			sent = SLURM_ERROR;
 			goto done;
 		}
 		if ((ufds.revents & POLLOUT) != POLLOUT) {
-			error("_slurm_send_timeout: Poll failure, revents:%d",
+			error("slurm_send_timeout: Poll failure, revents:%d",
 			      ufds.revents);
 		}
 
@@ -296,7 +293,7 @@ int _slurm_send_timeout(slurm_fd_t fd, char *buf, size_t size,
 		if (rc < 0) {
  			if (errno == EINTR)
 				continue;
-			debug("_slurm_send_timeout at %d of %zd, "
+			debug("slurm_send_timeout at %d of %zd, "
 				"send error: %s",
 				sent, size, strerror(errno));
  			if (errno == EAGAIN) {	/* poll() lied to us */
@@ -308,7 +305,7 @@ int _slurm_send_timeout(slurm_fd_t fd, char *buf, size_t size,
 			goto done;
 		}
 		if (rc == 0) {
-			debug("_slurm_send_timeout at %d of %zd, "
+			debug("slurm_send_timeout at %d of %zd, "
 				"sent zero bytes", sent, size);
 			slurm_seterrno(SLURM_PROTOCOL_SOCKET_ZERO_BYTES_SENT);
 			sent = SLURM_ERROR;
@@ -332,8 +329,8 @@ int _slurm_send_timeout(slurm_fd_t fd, char *buf, size_t size,
 
 /* Get slurm message with timeout
  * RET message size (as specified in argument) or SLURM_ERROR on error */
-int _slurm_recv_timeout(slurm_fd_t fd, char *buffer, size_t size,
-			uint32_t flags, int timeout )
+extern int slurm_recv_timeout(slurm_fd_t fd, char *buffer, size_t size,
+			      uint32_t flags, int timeout )
 {
 	int rc;
 	int recvlen = 0;
@@ -430,12 +427,7 @@ int _slurm_recv_timeout(slurm_fd_t fd, char *buffer, size_t size,
 	return recvlen;
 }
 
-int _slurm_shutdown_msg_engine ( slurm_fd_t open_fd )
-{
-	return _slurm_close ( open_fd ) ;
-}
-
-slurm_fd_t slurm_init_msg_engine(slurm_addr_t *addr)
+extern slurm_fd_t slurm_init_msg_engine(slurm_addr_t *addr)
 {
 	int rc;
 	slurm_fd_t fd;
@@ -453,13 +445,13 @@ slurm_fd_t slurm_init_msg_engine(slurm_addr_t *addr)
 		goto error;
 	}
 
-	rc = _slurm_bind(fd, (struct sockaddr const *) addr, sizeof(*addr));
+	rc = bind(fd, (struct sockaddr const *) addr, sizeof(*addr));
 	if (rc < 0) {
 		error("Error binding slurm stream socket: %m");
 		goto error;
 	}
 
-	if (_slurm_listen(fd, SLURM_PROTOCOL_DEFAULT_LISTEN_BACKLOG) < 0) {
+	if (listen(fd, SLURM_PROTOCOL_DEFAULT_LISTEN_BACKLOG) < 0) {
 		error( "Error listening on slurm stream socket: %m" ) ;
 		rc = SLURM_ERROR;
 		goto error;
@@ -468,19 +460,24 @@ slurm_fd_t slurm_init_msg_engine(slurm_addr_t *addr)
 	return fd;
 
     error:
-	if ((_slurm_close_stream(fd) < 0) && (errno == EINTR))
-		_slurm_close_stream(fd);	/* try again */
+	if ((slurm_close(fd) < 0) && (errno == EINTR))
+		slurm_close(fd);	/* try again */
 	return rc;
 
 }
 
-slurm_fd_t slurm_accept_msg_conn(slurm_fd_t fd, slurm_addr_t *addr)
+/* Await a connection on socket FD.
+ * When a connection arrives, open a new socket to communicate with it,
+ * set *ADDR (which is *ADDR_LEN bytes long) to the address of the connecting
+ * peer and *ADDR_LEN to the address's actual length, and return the
+ * new socket's descriptor, or -1 for errors.  */
+extern slurm_fd_t slurm_accept_msg_conn(slurm_fd_t fd, slurm_addr_t *addr)
 {
 	socklen_t len = sizeof(slurm_addr_t);
-	return _slurm_accept(fd, (struct sockaddr *)addr, &len);
+	return accept(fd, (struct sockaddr *)addr, &len);
 }
 
-slurm_fd_t slurm_open_stream(slurm_addr_t *addr, bool retry)
+extern slurm_fd_t slurm_open_stream(slurm_addr_t *addr, bool retry)
 {
 	int retry_cnt;
 	slurm_fd_t fd;
@@ -519,8 +516,8 @@ slurm_fd_t slurm_open_stream(slurm_addr_t *addr, bool retry)
 			goto error;
 		}
 
-		if ((_slurm_close_stream(fd) < 0) && (errno == EINTR))
-			_slurm_close_stream(fd);	/* try again */
+		if ((slurm_close(fd) < 0) && (errno == EINTR))
+			slurm_close(fd);	/* try again */
 	}
 
 	return fd;
@@ -529,86 +526,48 @@ slurm_fd_t slurm_open_stream(slurm_addr_t *addr, bool retry)
 	slurm_get_ip_str(addr, &port, ip, sizeof(ip));
 	debug2("Error connecting slurm stream socket at %s:%d: %m",
 	       ip, ntohs(port));
-	if ((_slurm_close_stream(fd) < 0) && (errno == EINTR))
-		_slurm_close_stream(fd);	/* try again */
+	if ((slurm_close(fd) < 0) && (errno == EINTR))
+		slurm_close(fd);	/* try again */
 	return SLURM_SOCKET_ERROR;
 }
 
-int _slurm_get_stream_addr(slurm_fd_t fd, slurm_addr_t *addr )
+/* Put the local address of FD into *ADDR and its length in *LEN.  */
+extern int slurm_get_stream_addr(slurm_fd_t fd, slurm_addr_t *addr )
 {
 	socklen_t size = sizeof(addr);
-	return _slurm_getsockname(fd, (struct sockaddr *)addr, &size);
+	return getsockname(fd, (struct sockaddr *)addr, &size);
 }
 
-int _slurm_close_stream ( slurm_fd_t open_fd )
-{
-	return _slurm_close ( open_fd ) ;
-}
-
-
-inline int _slurm_set_stream_non_blocking(slurm_fd_t fd)
-{
-	fd_set_nonblocking(fd);
-	return SLURM_SUCCESS;
-}
-
-inline int _slurm_set_stream_blocking(slurm_fd_t fd)
-{
-	fd_set_blocking(fd);
-	return SLURM_SUCCESS;
-}
-
-extern int _slurm_socket (int __domain, int __type, int __protocol)
+static int _slurm_socket (int __domain, int __type, int __protocol)
 {
 	return socket ( __domain, __type, __protocol ) ;
 }
 
-extern slurm_fd_t _slurm_create_socket ( slurm_socket_type_t type )
+/* Create a socket of the specified type
+ * IN type - SLURM_STREAM or SLURM_MESSAGE
+ */
+static slurm_fd_t _slurm_create_socket ( slurm_socket_type_t type )
 {
 	switch ( type )
 	{
 		case SLURM_STREAM :
 			return _slurm_socket ( AF_INET, SOCK_STREAM,
-						IPPROTO_TCP) ;
+					      IPPROTO_TCP) ;
 			break;
 		case SLURM_MESSAGE :
 			return _slurm_socket ( AF_INET, SOCK_DGRAM,
-						IPPROTO_UDP ) ;
+					      IPPROTO_UDP ) ;
 			break;
 		default :
 			return SLURM_SOCKET_ERROR;
 	}
 }
 
-/* Create two new sockets, of type TYPE in domain DOMAIN and using
- * protocol PROTOCOL, which are connected to each other, and put file
- * descriptors for them in FDS[0] and FDS[1].  If PROTOCOL is zero,
- * one will be chosen automatically.  Returns 0 on success, -1 for errors.  */
-extern int _slurm_socketpair (int __domain, int __type,
-			      int __protocol, int __fds[2])
-{
-	return SLURM_PROTOCOL_FUNCTION_NOT_IMPLEMENTED ;
-}
-
-/* Give the socket FD the local address ADDR (which is LEN bytes long).  */
-extern int _slurm_bind (int __fd, struct sockaddr const * __addr,
-				socklen_t __len)
-{
-	return bind ( __fd , __addr , __len ) ;
-}
-
-/* Put the local address of FD into *ADDR and its length in *LEN.  */
-extern int _slurm_getsockname (int __fd, struct sockaddr * __addr,
-			       socklen_t *__restrict __len)
-{
-	return getsockname ( __fd , __addr , __len ) ;
-}
-
 /* Open a connection on socket FD to peer at ADDR (which LEN bytes long).
  * For connectionless socket types, just set the default address to send to
  * and the only address from which to accept transmissions.
  * Return 0 on success, -1 for errors.  */
-extern int _slurm_connect (int __fd, struct sockaddr const * __addr,
+static int _slurm_connect (int __fd, struct sockaddr const * __addr,
 			   socklen_t __len)
 {
 #if 0
@@ -647,15 +606,15 @@ again:	rc = poll(&ufds, 1, timeout);
 		/* poll failed */
 		if (errno == EINTR) {
 			/* NOTE: connect() is non-interruptible in Linux */
-			debug2("_slurm_connect poll failed: %m");
+			debug2("slurm_connect poll failed: %m");
 			goto again;
 		} else
-			error("_slurm_connect poll failed: %m");
+			error("slurm_connect poll failed: %m");
 		return -1;
 	} else if (rc == 0) {
 		/* poll timed out before any socket events */
 		slurm_seterrno(ETIMEDOUT);
-		debug2("_slurm_connect poll timeout: %m");
+		debug2("slurm_connect poll timeout: %m");
 		return -1;
 	} else {
 		/* poll saw some event on the socket
@@ -676,7 +635,7 @@ done:
 	 * with terminated srun commands. */
 	if (err) {
 		slurm_seterrno(err);
-		debug2("_slurm_connect failed: %m");
+		debug2("slurm_connect failed: %m");
 		slurm_seterrno(err);
 		return -1;
 	}
@@ -687,14 +646,14 @@ done:
 
 /* Put the address of the peer connected to socket FD into *ADDR
  * (which is *LEN bytes long), and its actual length into *LEN.  */
-extern int _slurm_getpeername (int __fd, struct sockaddr * __addr,
-			       socklen_t *__restrict __len)
+extern int slurm_getpeername (int __fd, struct sockaddr * __addr,
+			      socklen_t *__restrict __len)
 {
 	return getpeername ( __fd , __addr , __len ) ;
 }
 
 /* Send N bytes of BUF to socket FD.  Returns the number sent or -1.  */
-extern ssize_t _slurm_send (int __fd, __const void *__buf, size_t __n,
+static ssize_t _slurm_send (int __fd, __const void *__buf, size_t __n,
 			    int __flags)
 {
 	return send ( __fd , __buf , __n , __flags ) ;
@@ -702,113 +661,37 @@ extern ssize_t _slurm_send (int __fd, __const void *__buf, size_t __n,
 
 /* Read N bytes into BUF from socket FD.
  * Returns the number read or -1 for errors.  */
-extern ssize_t _slurm_recv (int __fd, void *__buf, size_t __n, int __flags)
+static ssize_t _slurm_recv (int __fd, void *__buf, size_t __n, int __flags)
 {
 	return recv ( __fd , __buf , __n , __flags ) ;
 }
 
-/* Send N bytes of BUF on socket FD to peer at address ADDR (which is
- * ADDR_LEN bytes long).  Returns the number sent, or -1 for errors.  */
-extern ssize_t _slurm_sendto (int __fd, __const void *__buf, size_t __n,
-			      int __flags, struct sockaddr const * __addr,
-			      socklen_t __addr_len)
-{
-	return sendto ( __fd , __buf , __n , __flags , __addr, __addr_len) ;
-}
-/* Read N bytes into BUF through socket FD.
- * If ADDR is not NULL, fill in *ADDR_LEN bytes of it with tha address of
- * the sender, and store the actual size of the address in *ADDR_LEN.
- * Returns the number of bytes read or -1 for errors.  */
-extern ssize_t _slurm_recvfrom (int __fd, void *__restrict __buf,
-				size_t __n, int __flags,
-				struct sockaddr * __addr,
-				socklen_t *__restrict __addr_len)
-{
-	return recvfrom ( __fd , __buf , __n , __flags , __addr, __addr_len) ;
-}
-
-/* Send a msg described MESSAGE on socket FD.
- * Returns the number of bytes sent, or -1 for errors.  */
-extern ssize_t _slurm_sendmsg (int __fd, __const struct msghdr *__msg,
-				int __flags)
-{
-	return sendmsg ( __fd , __msg , __flags ) ;
-}
-
-/* Send a msg described MESSAGE on socket FD.
- * Returns the number of bytes read or -1 for errors.  */
-extern ssize_t _slurm_recvmsg (int __fd, struct msghdr *__msg, int __flags)
-{
-	return recvmsg ( __fd , __msg , __flags );
-}
-
-/* Put the current value for socket FD's option OPTNAME at protocol level LEVEL
- * into OPTVAL (which is *OPTLEN bytes long), and set *OPTLEN to the value's
- * actual length.  Returns 0 on success, -1 for errors.  */
-extern int _slurm_getsockopt (int __fd, int __level, int __optname,
-				void *__restrict __optval,
-				socklen_t *__restrict __optlen)
-{
-	return getsockopt ( __fd , __level , __optname , __optval , __optlen ) ;
-}
-
 /* Set socket FD's option OPTNAME at protocol level LEVEL
  * to *OPTVAL (which is OPTLEN bytes long).
  * Returns 0 on success, -1 for errors.  */
-extern int _slurm_setsockopt (int __fd, int __level, int __optname,
-				__const void *__optval, socklen_t __optlen)
+static int _slurm_setsockopt (int __fd, int __level, int __optname,
+			      __const void *__optval, socklen_t __optlen)
 {
 	return setsockopt ( __fd , __level , __optname , __optval , __optlen ) ;
 }
 
-
-/* Prepare to accept connections on socket FD.
- * N connection requests will be queued before further requests are refused.
- * Returns 0 on success, -1 for errors.  */
-extern int _slurm_listen (int __fd, int __n)
-{
-	return listen ( __fd , __n ) ;
-}
-
-/* Await a connection on socket FD.
- * When a connection arrives, open a new socket to communicate with it,
- * set *ADDR (which is *ADDR_LEN bytes long) to the address of the connecting
- * peer and *ADDR_LEN to the address's actual length, and return the
- * new socket's descriptor, or -1 for errors.  */
-extern int _slurm_accept (int __fd, struct sockaddr * __addr,
-				socklen_t *__restrict __addr_len)
-{
-	return accept ( __fd , __addr , __addr_len ) ;
-}
-
-/* Shut down all or part of the connection open on socket FD.
- * HOW determines what to shut down:
- * SHUT_RD   = No more receptions;
- * SHUT_WR   = No more transmissions;
- * SHUT_RDWR = No more receptions or transmissions.
- * Returns 0 on success, -1 for errors.  */
-extern int _slurm_shutdown (int __fd, int __how)
-{
-	return shutdown ( __fd , __how );
-}
-
-extern int _slurm_close (int __fd )
+extern int slurm_close (int __fd )
 {
 	return close ( __fd ) ;
 }
 
-extern int _slurm_fcntl(int fd, int cmd, ... )
+static int _slurm_fcntl(int fd, int cmd, ... )
 {
 	int rc ;
 	va_list va ;
 
 	va_start ( va , cmd ) ;
-	rc =_slurm_vfcntl ( fd , cmd , va ) ;
+	rc = _slurm_vfcntl ( fd , cmd , va ) ;
 	va_end ( va ) ;
 	return rc ;
 }
 
-extern int _slurm_vfcntl(int fd, int cmd, va_list va )
+static int _slurm_vfcntl(int fd, int cmd, va_list va )
 {
 	long arg ;
 
@@ -828,20 +711,15 @@ extern int _slurm_vfcntl(int fd, int cmd, va_list va )
 }
 
 /* sets the fields of a slurm_addr_t */
-void _slurm_set_addr_uint (slurm_addr_t *addr, uint16_t port, uint32_t ipaddr)
+extern void slurm_set_addr_uint (slurm_addr_t *addr, uint16_t port,
+				 uint32_t ipaddr)
 {
 	addr->sin_family      = AF_SLURM ;
 	addr->sin_port	= htons(port);
 	addr->sin_addr.s_addr = htonl(ipaddr);
 }
 
-/* resets the address field of a slurm_addr, port and family are unchanged */
-void _reset_slurm_addr (slurm_addr_t *addr, slurm_addr_t new_addr)
-{
-	addr->sin_addr.s_addr = new_addr.sin_addr.s_addr;
-}
-
-void _slurm_set_addr_char (slurm_addr_t * addr, uint16_t port, char *host)
+extern void slurm_set_addr_char (slurm_addr_t * addr, uint16_t port, char *host)
 {
 	struct hostent * he    = NULL;
 	int	   h_err = 0;
@@ -868,8 +746,8 @@ void _slurm_set_addr_char (slurm_addr_t * addr, uint16_t port, char *host)
 	return;
 }
 
-void _slurm_get_addr (slurm_addr_t *addr, uint16_t *port, char *host,
-		      unsigned int buflen )
+extern void slurm_get_addr (slurm_addr_t *addr, uint16_t *port, char *host,
+			    unsigned int buflen )
 {
 	struct hostent *he;
 	char   h_buf[4096];
@@ -891,21 +769,28 @@ void _slurm_get_addr (slurm_addr_t *addr, uint16_t *port, char *host,
 	return;
 }
 
-void _slurm_print_slurm_addr ( slurm_addr_t * address, char *buf, size_t n )
+extern void slurm_print_slurm_addr ( slurm_addr_t * address, char *buf,
+				     size_t n )
 {
 	char addrbuf[INET_ADDRSTRLEN];
+
+	if (!address) {
+		snprintf(buf, n, "NULL");
+		return;
+	}
+
 	inet_ntop(AF_INET, &address->sin_addr, addrbuf, INET_ADDRSTRLEN);
 	/* warning: silently truncates */
 	snprintf(buf, n, "%s:%d", addrbuf, ntohs(address->sin_port));
 }
 
-void _slurm_pack_slurm_addr(slurm_addr_t *addr, Buf buffer)
+extern void slurm_pack_slurm_addr(slurm_addr_t *addr, Buf buffer)
 {
 	pack32( ntohl( addr->sin_addr.s_addr ), buffer );
 	pack16( ntohs( addr->sin_port ), buffer );
 }
 
-int _slurm_unpack_slurm_addr_no_alloc(slurm_addr_t *addr, Buf buffer)
+extern int slurm_unpack_slurm_addr_no_alloc(slurm_addr_t *addr, Buf buffer)
 {
 	addr->sin_family = AF_SLURM ;
 	safe_unpack32(&addr->sin_addr.s_addr, buffer);
@@ -919,6 +804,3 @@ int _slurm_unpack_slurm_addr_no_alloc(slurm_addr_t *addr, Buf buffer)
 	return SLURM_ERROR;
 }
 
-/*
- * vi: tabstop=8 shiftwidth=8 expandtab
- */
diff --git a/src/common/slurm_protocol_util.c b/src/common/slurm_protocol_util.c
index 8991808bc..7c67d4933 100644
--- a/src/common/slurm_protocol_util.c
+++ b/src/common/slurm_protocol_util.c
@@ -50,14 +50,6 @@
 #include "src/common/xmalloc.h"
 #include "src/slurmdbd/read_config.h"
 
-uint16_t _get_slurm_version(uint32_t rpc_version)
-{
-	if (rpc_version >= SLURM_PROTOCOL_VERSION)
-		return SLURM_PROTOCOL_VERSION;
-	else
-		return SLURM_2_6_PROTOCOL_VERSION;
-}
-
 /*
  * check_header_version checks to see that the specified header was sent
  * from a node running the same version of the protocol as the current node
@@ -68,15 +60,13 @@ int check_header_version(header_t * header)
 {
 	uint16_t check_version = SLURM_PROTOCOL_VERSION;
 
-	if (working_cluster_rec) {
-		check_version = _get_slurm_version(
-			working_cluster_rec->rpc_version);
-	}
+	if (working_cluster_rec)
+		check_version = working_cluster_rec->rpc_version;
 
 	if (slurmdbd_conf) {
 		if ((header->version != SLURM_PROTOCOL_VERSION)     &&
-		    (header->version != SLURM_14_03_PROTOCOL_VERSION) &&
-		    (header->version != SLURM_2_6_PROTOCOL_VERSION)) {
+		    (header->version != SLURM_14_11_PROTOCOL_VERSION) &&
+		    (header->version != SLURM_14_03_PROTOCOL_VERSION)) {
 			debug("unsupported RPC version %hu msg type %s(%u)",
 			      header->version, rpc_num2string(header->msg_type),
 			      header->msg_type);
@@ -100,13 +90,14 @@ int check_header_version(header_t * header)
 			}
 		default:
 			if ((header->version != SLURM_PROTOCOL_VERSION)     &&
-			    (header->version != SLURM_14_03_PROTOCOL_VERSION) &&
-			    (header->version != SLURM_2_6_PROTOCOL_VERSION)) {
+			    (header->version != SLURM_14_11_PROTOCOL_VERSION) &&
+			    (header->version != SLURM_14_03_PROTOCOL_VERSION)) {
 				debug("Unsupported RPC version %hu "
 				      "msg type %s(%u)", header->version,
 				      rpc_num2string(header->msg_type),
 				      header->msg_type);
-				slurm_seterrno_ret(SLURM_PROTOCOL_VERSION_ERROR);
+				slurm_seterrno_ret(
+					SLURM_PROTOCOL_VERSION_ERROR);
 			}
 			break;
 
@@ -132,14 +123,13 @@ void init_header(header_t *header, slurm_msg_t *msg, uint16_t flags)
 	if (msg->protocol_version != (uint16_t)NO_VAL)
 		header->version = msg->protocol_version;
 	else if (working_cluster_rec)
-		msg->protocol_version = header->version = _get_slurm_version(
-			working_cluster_rec->rpc_version);
+		msg->protocol_version = header->version =
+			working_cluster_rec->rpc_version;
 	else if ((msg->msg_type == ACCOUNTING_UPDATE_MSG) ||
 	         (msg->msg_type == ACCOUNTING_FIRST_REG)) {
 		uint32_t rpc_version =
 			((accounting_update_msg_t *)msg->data)->rpc_version;
-		msg->protocol_version = header->version =
-			_get_slurm_version(rpc_version);
+		msg->protocol_version = header->version = rpc_version;
 	} else
 		msg->protocol_version = header->version =
 			SLURM_PROTOCOL_VERSION;
@@ -153,6 +143,7 @@ void init_header(header_t *header, slurm_msg_t *msg, uint16_t flags)
 	else
 		header->ret_cnt = 0;
 	header->ret_list = msg->ret_list;
+	header->msg_index = msg->msg_index;
 	header->orig_addr = msg->orig_addr;
 }
 
diff --git a/src/common/slurm_resource_info.c b/src/common/slurm_resource_info.c
index cb932bb48..fb0f13413 100644
--- a/src/common/slurm_resource_info.c
+++ b/src/common/slurm_resource_info.c
@@ -146,6 +146,10 @@ void slurm_sprint_cpu_bind_type(char *str, cpu_bind_type_t cpu_bind_type)
 
 	if (cpu_bind_type & CPU_AUTO_BIND_TO_THREADS)
 		strcat(str, "autobind=threads,");
+	if (cpu_bind_type & CPU_AUTO_BIND_TO_CORES)
+		strcat(str, "autobind=cores,");
+	if (cpu_bind_type & CPU_AUTO_BIND_TO_SOCKETS)
+		strcat(str, "autobind=sockets,");
 
 	if (*str) {
 		str[strlen(str)-1] = '\0';	/* remove trailing ',' */
@@ -244,7 +248,7 @@ int slurm_verify_cpu_bind(const char *arg, char **cpu_bind,
 		CPU_BIND_NONE|CPU_BIND_RANK|CPU_BIND_MAP|CPU_BIND_MASK;
 	int bind_to_bits =
 		CPU_BIND_TO_SOCKETS|CPU_BIND_TO_CORES|CPU_BIND_TO_THREADS;
-	uint16_t task_plugin_param = slurm_get_task_plugin_param();
+	uint32_t task_plugin_param = slurm_get_task_plugin_param();
 	bool have_binding = _have_task_affinity();
 	bool log_binding = true;
 
diff --git a/src/common/slurm_route.c b/src/common/slurm_route.c
index 54ed02527..f7d30297b 100644
--- a/src/common/slurm_route.c
+++ b/src/common/slurm_route.c
@@ -209,12 +209,8 @@ static void _set_collectors(char *this_node_name)
 					info("ROUTE -- message collector backup"
 					     " address is %s", addrbuf);
 				}
-			} else {
-				if (debug_flags & DEBUG_FLAG_ROUTE) {
-					info("ROUTE -- no message collector "
-					     "backup");
-				}
-
+			} else if (debug_flags & DEBUG_FLAG_ROUTE) {
+				info("ROUTE -- no message collector backup");
 			}
 			goto clean;
 		}
@@ -250,15 +246,19 @@ static void _set_collectors(char *this_node_name)
 			backup_port = slurm_conf_get_port(backup);
 		} else
 			backup_port = 0;
-
 	}
 clean:
 	if (debug_flags & DEBUG_FLAG_ROUTE) {
-		if (this_is_collector)
-			info("ROUTE -- %s is a collector node", this_node_name);
-		else
-			info("ROUTE -- %s is a leaf node", this_node_name);
+		char addrbuf2[32];
+		slurm_print_slurm_addr(msg_collect_node,
+				       addrbuf, 32);
+		slurm_print_slurm_addr(msg_collect_backup,
+				       addrbuf2, 32);
+		info("ROUTE -- %s is a %s node (parent: %s, backup: %s)",
+		     this_node_name, this_is_collector ? "collector" : "leaf",
+		     addrbuf, addrbuf2);
 	}
+
 	hostlist_destroy(nodes);
 	if (parent)
 		free(parent);
diff --git a/src/common/slurm_selecttype_info.c b/src/common/slurm_selecttype_info.c
index fa5c845c9..1ccda85df 100644
--- a/src/common/slurm_selecttype_info.c
+++ b/src/common/slurm_selecttype_info.c
@@ -85,7 +85,10 @@ int parse_select_type_param(char *select_type_parameters, uint16_t *param)
 			*param |= CR_OTHER_CONS_RES;
 		} else if (!strcasecmp(str_parameters,
 				       "CR_ALLOCATE_FULL_SOCKET")) {
-			*param |= CR_ALLOCATE_FULL_SOCKET;
+			verbose("CR_ALLOCATE_FULL_SOCKET is deprecated.  "
+				"It is now the default for CR_SOCKET*.  "
+				"It is safe to remove it "
+				"from your slurm.conf");
 		} else if (!strcasecmp(str_parameters,
 				       "CR_ONE_TASK_PER_CORE")) {
 			*param |= CR_ONE_TASK_PER_CORE;
@@ -166,11 +169,6 @@ extern char *select_type_param_string(uint16_t select_type_param)
 			strcat(select_str, ",");
 		strcat(select_str, "CR_CORE_DEFAULT_DIST_BLOCK");
 	}
-	if (select_type_param & CR_ALLOCATE_FULL_SOCKET) {
-		if (select_str[0])
-			strcat(select_str, ",");
-		strcat(select_str, "CR_ALLOCATE_FULL_SOCKET");
-	}
 	if (select_type_param & CR_LLN) {
 		if (select_str[0])
 			strcat(select_str, ",");
diff --git a/src/common/slurm_step_layout.c b/src/common/slurm_step_layout.c
index 15f33564c..38ef01fda 100644
--- a/src/common/slurm_step_layout.c
+++ b/src/common/slurm_step_layout.c
@@ -65,10 +65,11 @@ static int _init_task_layout(slurm_step_layout_t *step_layout,
 			     const char *arbitrary_nodes,
 			     uint16_t *cpus_per_node, uint32_t *cpu_count_reps,
 			     uint16_t cpus_per_task,
-			     uint16_t task_dist, uint16_t plane_size);
+			     uint32_t task_dist, uint16_t plane_size);
 
 static int _task_layout_block(slurm_step_layout_t *step_layout,
-			      uint16_t *cpus, uint16_t cpus_per_task);
+			      uint16_t *cpus, uint32_t task_dist,
+			      uint16_t cpus_per_task);
 static int _task_layout_cyclic(slurm_step_layout_t *step_layout,
 			       uint16_t *cpus);
 static int _task_layout_plane(slurm_step_layout_t *step_layout,
@@ -97,7 +98,7 @@ slurm_step_layout_t *slurm_step_layout_create(
 	uint32_t num_hosts,
 	uint32_t num_tasks,
 	uint16_t cpus_per_task,
-	uint16_t task_dist,
+	uint32_t task_dist,
 	uint16_t plane_size)
 {
 	char *arbitrary_nodes = NULL;
@@ -106,7 +107,7 @@ slurm_step_layout_t *slurm_step_layout_create(
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 
 	step_layout->task_dist = task_dist;
-	if (task_dist == SLURM_DIST_ARBITRARY) {
+	if ((task_dist & SLURM_DIST_STATE_BASE) == SLURM_DIST_ARBITRARY) {
 		hostlist_t hl = NULL;
 		char *buf = NULL;
 		/* set the node list for the task layout later if user
@@ -264,9 +265,9 @@ extern void pack_slurm_step_layout(slurm_step_layout_t *step_layout,
 {
 	uint32_t i = 0;
 
-	if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		if (step_layout)
-			i=1;
+			i = 1;
 
 		pack16(i, buffer);
 		if (!i)
@@ -275,9 +276,29 @@ extern void pack_slurm_step_layout(slurm_step_layout_t *step_layout,
 		packstr(step_layout->node_list, buffer);
 		pack32(step_layout->node_cnt, buffer);
 		pack32(step_layout->task_cnt, buffer);
-		pack16(step_layout->task_dist, buffer);
+		pack32(step_layout->task_dist, buffer);
 
-		for (i=0; i<step_layout->node_cnt; i++) {
+		for (i = 0; i < step_layout->node_cnt; i++) {
+			pack32_array(step_layout->tids[i],
+				     step_layout->tasks[i],
+				     buffer);
+		}
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		uint16_t old_task_dist;
+		if (step_layout)
+			i = 1;
+
+		pack16(i, buffer);
+		if (!i)
+			return;
+		packstr(step_layout->front_end, buffer);
+		packstr(step_layout->node_list, buffer);
+		pack32(step_layout->node_cnt, buffer);
+		pack32(step_layout->task_cnt, buffer);
+		old_task_dist = task_dist_new2old(step_layout->task_dist);
+		pack16(old_task_dist, buffer);
+
+		for (i = 0; i < step_layout->node_cnt; i++) {
 			pack32_array(step_layout->tids[i],
 				     step_layout->tasks[i],
 				     buffer);
@@ -296,7 +317,34 @@ extern int unpack_slurm_step_layout(slurm_step_layout_t **layout, Buf buffer,
 	slurm_step_layout_t *step_layout = NULL;
 	int i;
 
-	if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpack16(&uint16_tmp, buffer);
+		if (!uint16_tmp)
+			return SLURM_SUCCESS;
+
+		step_layout = xmalloc(sizeof(slurm_step_layout_t));
+		*layout = step_layout;
+
+		safe_unpackstr_xmalloc(&step_layout->front_end,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&step_layout->node_list,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&step_layout->node_cnt, buffer);
+		safe_unpack32(&step_layout->task_cnt, buffer);
+		safe_unpack32(&step_layout->task_dist, buffer);
+
+		step_layout->tasks =
+			xmalloc(sizeof(uint32_t) * step_layout->node_cnt);
+		step_layout->tids = xmalloc(sizeof(uint32_t *)
+					    * step_layout->node_cnt);
+		for (i = 0; i < step_layout->node_cnt; i++) {
+			safe_unpack32_array(&(step_layout->tids[i]),
+					    &num_tids,
+					    buffer);
+			step_layout->tasks[i] = num_tids;
+		}
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		uint16_t old_task_dist = 0;
 		safe_unpack16(&uint16_tmp, buffer);
 		if (!uint16_tmp)
 			return SLURM_SUCCESS;
@@ -310,7 +358,8 @@ extern int unpack_slurm_step_layout(slurm_step_layout_t **layout, Buf buffer,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&step_layout->node_cnt, buffer);
 		safe_unpack32(&step_layout->task_cnt, buffer);
-		safe_unpack16(&step_layout->task_dist, buffer);
+		safe_unpack16(&old_task_dist, buffer);
+		step_layout->task_dist = task_dist_old2new(old_task_dist);
 
 		step_layout->tasks =
 			xmalloc(sizeof(uint32_t) * step_layout->node_cnt);
@@ -382,12 +431,12 @@ static int _init_task_layout(slurm_step_layout_t *step_layout,
 			     const char *arbitrary_nodes,
 			     uint16_t *cpus_per_node, uint32_t *cpu_count_reps,
 			     uint16_t cpus_per_task,
-			     uint16_t task_dist, uint16_t plane_size)
+			     uint32_t task_dist, uint16_t plane_size)
 {
 	int cpu_cnt = 0, cpu_inx = 0, i;
 	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
 
-/* 	char *name = NULL; */
+/*	char *name = NULL; */
 	uint16_t cpus[step_layout->node_cnt];
 
 	if (step_layout->node_cnt == 0)
@@ -422,14 +471,14 @@ static int _init_task_layout(slurm_step_layout_t *step_layout,
 	}
 
 	for (i=0; i<step_layout->node_cnt; i++) {
-/* 		name = hostlist_shift(hl); */
-/* 		if (!name) { */
-/* 			error("hostlist incomplete for this job request"); */
-/* 			hostlist_destroy(hl); */
-/* 			return SLURM_ERROR; */
-/* 		} */
-/* 		debug2("host %d = %s", i, name); */
-/* 		free(name); */
+/*		name = hostlist_shift(hl); */
+/*		if (!name) { */
+/*			error("hostlist incomplete for this job request"); */
+/*			hostlist_destroy(hl); */
+/*			return SLURM_ERROR; */
+/*		} */
+/*		debug2("host %d = %s", i, name); */
+/*		free(name); */
 		cpus[i] = (cpus_per_node[cpu_inx] / cpus_per_task);
 		if (cpus[i] == 0) {
 			/* this can be a result of a heterogeneous allocation
@@ -438,8 +487,8 @@ static int _init_task_layout(slurm_step_layout_t *step_layout,
 			cpus[i] = 1;
 		}
 
-		if (plane_size && (plane_size != (uint16_t)NO_VAL)
-		    && (task_dist != SLURM_DIST_PLANE)) {
+		if (plane_size && (plane_size != (uint16_t)NO_VAL) &&
+		    ((task_dist & SLURM_DIST_STATE_BASE) != SLURM_DIST_PLANE)) {
 			/* plane_size when dist != plane is used to
 			   convey ntasks_per_node. Adjust the number
 			   of cpus to reflect that.
@@ -457,18 +506,16 @@ static int _init_task_layout(slurm_step_layout_t *step_layout,
 		}
 	}
 
-        if ((task_dist == SLURM_DIST_CYCLIC) ||
-            (task_dist == SLURM_DIST_CYCLIC_CYCLIC) ||
-            (task_dist == SLURM_DIST_CYCLIC_CFULL) ||
-            (task_dist == SLURM_DIST_CYCLIC_BLOCK))
+	if ((task_dist & SLURM_DIST_NODEMASK) == SLURM_DIST_NODECYCLIC)
 		return _task_layout_cyclic(step_layout, cpus);
-	else if (task_dist == SLURM_DIST_ARBITRARY
-		&& !(cluster_flags & CLUSTER_FLAG_FE))
+	else if (((task_dist & SLURM_DIST_STATE_BASE) == SLURM_DIST_ARBITRARY) &&
+		!(cluster_flags & CLUSTER_FLAG_FE))
 		return _task_layout_hostfile(step_layout, arbitrary_nodes);
-        else if (task_dist == SLURM_DIST_PLANE)
-                return _task_layout_plane(step_layout, cpus);
+	else if ((task_dist & SLURM_DIST_STATE_BASE) == SLURM_DIST_PLANE)
+		return _task_layout_plane(step_layout, cpus);
 	else
-		return _task_layout_block(step_layout, cpus, cpus_per_task);
+		return _task_layout_block(step_layout, cpus, task_dist,
+					  cpus_per_task);
 }
 
 /* use specific set run tasks on each host listed in hostfile
@@ -553,15 +600,24 @@ static int _task_layout_hostfile(slurm_step_layout_t *step_layout,
 }
 
 static int _task_layout_block(slurm_step_layout_t *step_layout, uint16_t *cpus,
-			      uint16_t cpus_per_task)
+			      uint32_t task_dist, uint16_t cpus_per_task)
 {
 	static uint16_t select_params = (uint16_t) NO_VAL;
 	int i, j, task_id = 0;
+	bool pack_nodes;
 
 	if (select_params == (uint16_t) NO_VAL)
 		select_params = slurm_get_select_type_param();
+	if (task_dist & SLURM_DIST_PACK_NODES)
+		pack_nodes = true;
+	else if (task_dist & SLURM_DIST_NO_PACK_NODES)
+		pack_nodes = false;
+	else if (select_params & CR_PACK_NODES)
+		pack_nodes = true;
+	else
+		pack_nodes = false;
 
-	if (select_params & CR_PACK_NODES) {
+	if (pack_nodes) {
 		/* Pass 1: Put one task on each node */
 		for (i = 0; ((i < step_layout->node_cnt) &&
 			     (task_id < step_layout->task_cnt)); i++) {
@@ -575,7 +631,7 @@ static int _task_layout_block(slurm_step_layout_t *step_layout, uint16_t *cpus,
 		for (i = 0; ((i < step_layout->node_cnt) &&
 			     (task_id < step_layout->task_cnt)); i++) {
 			while (((step_layout->tasks[i] * cpus_per_task) <
-			        cpus[i]) &&
+				cpus[i]) &&
 			       (task_id < step_layout->task_cnt)) {
 				step_layout->tasks[i]++;
 				task_id++;
@@ -689,13 +745,14 @@ static int _task_layout_plane(slurm_step_layout_t *step_layout,
 	int i, j, k, taskid = 0;
 	bool over_subscribe = false;
 	uint32_t cur_task[step_layout->node_cnt];
+	int plane_start = 0;
 
 	debug3("_task_layout_plane plane_size %u node_cnt %u task_cnt %u",
 	       step_layout->plane_size,
 	       step_layout->node_cnt, step_layout->task_cnt);
 
 	if (step_layout->plane_size <= 0)
-	        return SLURM_ERROR;
+		return SLURM_ERROR;
 
 	if (step_layout->tasks == NULL)
 		return SLURM_ERROR;
@@ -703,13 +760,31 @@ static int _task_layout_plane(slurm_step_layout_t *step_layout,
 	/* figure out how many tasks go to each node */
 	for (j=0; taskid<step_layout->task_cnt; j++) {   /* cycle counter */
 		bool space_remaining = false;
-		for (i=0; ((i<step_layout->node_cnt)
-			   && (taskid<step_layout->task_cnt)); i++) {
-			if ((j<cpus[i]) || over_subscribe) {
+		/* place one task on each node first */
+		if (j == 0) {
+			for (i = 0; ((i < step_layout->node_cnt) &&
+				     (taskid < step_layout->task_cnt)); i++) {
 				taskid++;
 				step_layout->tasks[i]++;
-				if ((j+1) < cpus[i])
-					space_remaining = true;
+			}
+		}
+		for (i = 0; ((i < step_layout->node_cnt) &&
+			     (taskid < step_layout->task_cnt)); i++) {
+			/* handle placing first task on each node */
+			if (j == 0)
+				plane_start = 1;
+			else
+				plane_start = 0;
+			for (k = plane_start; (k < step_layout->plane_size) &&
+				     (taskid < step_layout->task_cnt); k++) {
+				if ((cpus[i] - step_layout->tasks[i]) ||
+				    over_subscribe) {
+					taskid++;
+					step_layout->tasks[i]++;
+					if (cpus[i] - (step_layout->tasks[i]
+						       + 1) >= 0)
+						space_remaining = true;
+				}
 			}
 		}
 		if (!space_remaining)
@@ -719,9 +794,9 @@ static int _task_layout_plane(slurm_step_layout_t *step_layout,
 	/* now distribute the tasks */
 	taskid = 0;
 	for (i=0; i < step_layout->node_cnt; i++) {
-	    step_layout->tids[i] = xmalloc(sizeof(uint32_t)
-				           * step_layout->tasks[i]);
-	    cur_task[i] = 0;
+		step_layout->tids[i] = xmalloc(sizeof(uint32_t)
+					       * step_layout->tasks[i]);
+		cur_task[i] = 0;
 	}
 	for (j=0; taskid<step_layout->task_cnt; j++) {   /* cycle counter */
 		for (i=0; ((i<step_layout->node_cnt)
@@ -763,49 +838,119 @@ static int _task_layout_plane(slurm_step_layout_t *step_layout,
 
 extern char *slurm_step_layout_type_name(task_dist_states_t task_dist)
 {
-	switch(task_dist) {
+	static char name[64] = "";
+
+	name[0] = '\0';
+	switch (task_dist & SLURM_DIST_STATE_BASE) {
 	case SLURM_DIST_CYCLIC:
-		return "Cyclic";
+		strcat(name, "Cyclic");
 		break;
 	case SLURM_DIST_BLOCK:	/* distribute tasks filling node by node */
-		return "Block";
+		strcat(name, "Block");
 		break;
 	case SLURM_DIST_ARBITRARY:	/* arbitrary task distribution  */
-		return "Arbitrary";
+		strcat(name, "Arbitrary");
 		break;
 	case SLURM_DIST_PLANE:	/* distribute tasks by filling up
 				   planes of lllp first and then by
 				   going across the nodes See
 				   documentation for more
 				   information */
-		return "Plane";
+		strcat(name, "Plane");
 		break;
 	case SLURM_DIST_CYCLIC_CYCLIC:/* distribute tasks 1 per node:
 					 round robin: same for lowest
 					 level of logical processor (lllp) */
-		return "CCyclic";
+		strcat(name, "CCyclic");
 		break;
 	case SLURM_DIST_CYCLIC_BLOCK: /* cyclic for node and block for lllp  */
-		return "CBlock";
+		strcat(name, "CBlock");
 		break;
 	case SLURM_DIST_BLOCK_CYCLIC: /* block for node and cyclic for lllp  */
-		return "BCyclic";
+		strcat(name, "BCyclic");
 		break;
 	case SLURM_DIST_BLOCK_BLOCK:	/* block for node and block for lllp  */
-		return "BBlock";
+		strcat(name, "BBlock");
 		break;
 	case SLURM_DIST_CYCLIC_CFULL:	/* cyclic for node and full
 					 * cyclic for lllp  */
-		return "CFCyclic";
+		strcat(name, "CFCyclic");
 		break;
 	case SLURM_DIST_BLOCK_CFULL:	/* block for node and full
 					 * cyclic for lllp  */
-		return "BFCyclic";
+		strcat(name, "BFCyclic");
+		break;
+	case SLURM_DIST_CYCLIC_CYCLIC_CYCLIC:
+		return "CCyclicCyclic";
+		break;
+	case SLURM_DIST_CYCLIC_CYCLIC_BLOCK:
+		return "CCyclicBlock";
+		break;
+	case SLURM_DIST_CYCLIC_CYCLIC_CFULL:
+		return "CCyclicFCyclic";
 		break;
-	case SLURM_NO_LLLP_DIST:	/* No distribution specified for lllp */
+	case SLURM_DIST_CYCLIC_BLOCK_CYCLIC:
+		return "CBlockCyclic";
+		break;
+	case SLURM_DIST_CYCLIC_BLOCK_BLOCK:
+		return "CBlockBlock";
+		break;
+	case SLURM_DIST_CYCLIC_BLOCK_CFULL:
+		return "CCyclicFCyclic";
+		break;
+	case SLURM_DIST_CYCLIC_CFULL_CYCLIC:
+		return "CFCyclicCyclic";
+		break;
+	case SLURM_DIST_CYCLIC_CFULL_BLOCK:
+		return "CFCyclicBlock";
+		break;
+	case SLURM_DIST_CYCLIC_CFULL_CFULL:
+		return "CFCyclicFCyclic";
+		break;
+	case SLURM_DIST_BLOCK_CYCLIC_CYCLIC:
+		return "BCyclicCyclic";
+		break;
+	case SLURM_DIST_BLOCK_CYCLIC_BLOCK:
+		return "BCyclicBlock";
+		break;
+	case SLURM_DIST_BLOCK_CYCLIC_CFULL:
+		return "BCyclicFCyclic";
+		break;
+	case SLURM_DIST_BLOCK_BLOCK_CYCLIC:
+		return "BBlockCyclic";
+		break;
+	case SLURM_DIST_BLOCK_BLOCK_BLOCK:
+		return "BBlockBlock";
+		break;
+	case SLURM_DIST_BLOCK_BLOCK_CFULL:
+		return "BBlockFCyclic";
+		break;
+	case SLURM_DIST_BLOCK_CFULL_CYCLIC:
+		return "BFCyclicCyclic";
+		break;
+	case SLURM_DIST_BLOCK_CFULL_BLOCK:
+		return "BFCyclicBlock";
+		break;
+	case SLURM_DIST_BLOCK_CFULL_CFULL:
+		return "BFCyclicFCyclic";
+		break;
+	case SLURM_DIST_NO_LLLP:	/* No distribution specified for lllp */
 	case SLURM_DIST_UNKNOWN:
 	default:
-		return "Unknown";
+		strcat(name, "Unknown");
+	}
+
+	if (task_dist & SLURM_DIST_PACK_NODES) {
+		if (name[0])
+			strcat(name, ",");
+		strcat(name, "Pack");
+	}
 
+	if (task_dist & SLURM_DIST_NO_PACK_NODES) {
+		if (name[0])
+			strcat(name, ",");
+		strcat(name, "NoPack");
 	}
+
+	return name;
 }
diff --git a/src/common/slurm_step_layout.h b/src/common/slurm_step_layout.h
index 5afca3aca..3e32e94f5 100644
--- a/src/common/slurm_step_layout.h
+++ b/src/common/slurm_step_layout.h
@@ -68,7 +68,7 @@ extern slurm_step_layout_t *slurm_step_layout_create(const char *tlist,
 						     uint32_t node_cnt,
 						     uint32_t task_cnt,
 						     uint16_t cpus_per_task,
-						     uint16_t task_dist,
+						     uint32_t task_dist,
 						     uint16_t plane_size);
 
 /*
diff --git a/src/common/slurm_time.c b/src/common/slurm_time.c
new file mode 100644
index 000000000..11f461583
--- /dev/null
+++ b/src/common/slurm_time.c
@@ -0,0 +1,178 @@
+/*****************************************************************************\
+ *  time.h - Slurm wrappers for the glibc time functions. Unlike the glibc
+ *  functions, these are re-entrant. If a process is forked while glibc is
+ *  in a lock, the child process will deadlock if it tries to use another
+ *  glibc function, but not with these functions.
+ *
+ *  Based upon glibc version 2.21 and the fork handler logic from Slurm.
+ *****************************************************************************
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <time.h>
+
+#include "src/common/macros.h"
+
+#ifdef WITH_PTHREADS
+#  include <pthread.h>
+
+static pthread_mutex_t  time_lock = PTHREAD_MUTEX_INITIALIZER;
+static void _atfork_child()  { pthread_mutex_init(&time_lock, NULL); }
+static bool at_forked = false;
+
+inline static void _init(void)
+{
+	while (!at_forked) {
+		pthread_atfork(NULL, NULL, _atfork_child);
+		at_forked = true;
+	}
+}
+#else
+
+inline static void _init(void)
+{
+	;
+}
+#endif
+
+extern char *slurm_asctime(const struct tm *tp)
+{
+	char *rc;
+	slurm_mutex_lock(&time_lock);
+	_init();
+	rc = asctime(tp);
+	slurm_mutex_unlock(&time_lock);
+	return rc;
+}
+
+extern char *slurm_asctime_r(const struct tm *tp, char *buf)
+{
+	char *rc;
+	slurm_mutex_lock(&time_lock);
+	_init();
+	rc = asctime_r(tp, buf);
+	slurm_mutex_unlock(&time_lock);
+	return rc;
+}
+
+extern char *slurm_ctime(const time_t *timep)
+{
+	char *rc;
+	slurm_mutex_lock(&time_lock);
+	_init();
+	rc = ctime(timep);
+	slurm_mutex_unlock(&time_lock);
+	return rc;
+}
+
+extern char *slurm_ctime_r(const time_t *timep, char *buf)
+{
+	char *rc;
+	slurm_mutex_lock(&time_lock);
+	_init();
+	rc = ctime_r(timep, buf);
+	slurm_mutex_unlock(&time_lock);
+	return rc;
+}
+
+extern struct tm *slurm_gmtime(const time_t *timep)
+{
+	struct tm *rc;
+	slurm_mutex_lock(&time_lock);
+	_init();
+	rc = gmtime(timep);
+	slurm_mutex_unlock(&time_lock);
+	return rc;
+}
+
+extern struct tm *slurm_gmtime_r(const time_t *timep, struct tm *result)
+{
+	struct tm *rc;
+	slurm_mutex_lock(&time_lock);
+	_init();
+	rc = gmtime_r(timep, result);
+	slurm_mutex_unlock(&time_lock);
+	return rc;
+}
+
+extern struct tm *slurm_localtime(const time_t *timep)
+{
+	struct tm *rc;
+	slurm_mutex_lock(&time_lock);
+	_init();
+	rc = localtime(timep);
+	slurm_mutex_unlock(&time_lock);
+	return rc;
+}
+
+extern struct tm *slurm_localtime_r(const time_t *timep, struct tm *result)
+{
+	struct tm *rc;
+	slurm_mutex_lock(&time_lock);
+	_init();
+	rc = localtime_r(timep, result);
+	slurm_mutex_unlock(&time_lock);
+	return rc;
+}
+
+extern time_t slurm_mktime(struct tm *tp)
+{
+	time_t rc;
+	slurm_mutex_lock(&time_lock);
+	_init();
+	rc = mktime(tp);
+	slurm_mutex_unlock(&time_lock);
+	return rc;
+}
+
+/* Slurm variants of ctime and ctime_r without a trailing new-line */
+extern char *slurm_ctime2(const time_t *timep)
+{
+	static char time_str[25];
+
+	strftime(time_str, sizeof(time_str), "%a %b %d %T %Y",
+		 slurm_localtime(timep));
+
+	return time_str;
+}
+
+extern char *slurm_ctime2_r(const time_t *timep, char *time_str)
+{
+	struct tm newtime;
+	slurm_localtime_r(timep, &newtime);
+
+	strftime(time_str, 25, "%a %b %d %T %Y", &newtime);
+
+	return time_str;
+}
diff --git a/src/common/slurm_time.h b/src/common/slurm_time.h
new file mode 100644
index 000000000..282f20d84
--- /dev/null
+++ b/src/common/slurm_time.h
@@ -0,0 +1,50 @@
+/*****************************************************************************\
+ *  time.h - Slurm versions of glibc time functions. Unlike the glibc
+ *  functions, these are re-entrant. If a process is forked while glibc is
+ *  in a lock, the child process will deadlock if it tries to use another
+ *  glibc function.
+ *
+ *  Based upon glibc version 2.21 and the fork handler logic from Slurm.
+ *****************************************************************************
+ *  Convert `time_t' to `struct tm' in local time zone.
+ *  Copyright (C) 1991-2015 Free Software Foundation, Inc.
+ *  This file is part of the GNU C Library.
+ *
+ *  The GNU C Library is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU Lesser General Public
+ *  License as published by the Free Software Foundation; either
+ *  version 2.1 of the License, or (at your option) any later version.
+ *
+ *  The GNU C Library is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *  Lesser General Public License for more details.
+ *
+ *  You should have received a copy of the GNU Lesser General Public
+ *  License along with the GNU C Library; if not, see
+ *  <http://www.gnu.org/licenses/>.
+\*****************************************************************************/
+#ifndef _HAVE_SLURM_TIME_H
+#define _HAVE_SLURM_TIME_H
+
+#include <time.h>
+
+extern char *slurm_asctime(const struct tm *tp);
+extern char *slurm_asctime_r(const struct tm *tp, char *buf);
+
+extern char *slurm_ctime(const time_t *timep);
+extern char *slurm_ctime_r(const time_t *timep, char *buf);
+
+extern struct tm *slurm_gmtime(const time_t *timep);
+extern struct tm *slurm_gmtime_r(const time_t *timep, struct tm *result);
+
+extern struct tm *slurm_localtime(const time_t *timep);
+extern struct tm *slurm_localtime_r(const time_t *timep, struct tm *result);
+
+extern time_t slurm_mktime(struct tm *tp);
+
+/* Slurm variants of ctime and ctime_r without a trailing new-line */
+extern char *slurm_ctime2(const time_t *timep);
+extern char *slurm_ctime2_r(const time_t *timep, char *time_str);
+
+#endif /* _HAVE_SLURM_TIME_H */
diff --git a/src/common/slurm_topology.c b/src/common/slurm_topology.c
index 1e82c9b96..f8cd2b77e 100644
--- a/src/common/slurm_topology.c
+++ b/src/common/slurm_topology.c
@@ -3,6 +3,7 @@
  *  slurm_topology.c - Topology plugin function setup.
  *****************************************************************************
  *  Copyright (C) 2009-2010 Lawrence Livermore National Security.
+ *  Copyright (C) 2014 Silicon Graphics International Corp. All rights reserved.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
@@ -52,6 +53,12 @@ struct switch_record *switch_record_table = NULL;
 int switch_record_cnt = 0;
 int switch_levels = 0;               /* number of switch levels     */
 
+/* defined here but is really hypercube plugin related */
+int hypercube_dimensions = 0; 
+struct hypercube_switch *hypercube_switch_table = NULL; 
+int hypercube_switch_cnt = 0;
+struct hypercube_switch ***hypercube_switches = NULL; 
+
 /* ************************************************************************ */
 /*  TAG(                        slurm_topo_ops_t                         )  */
 /* ************************************************************************ */
diff --git a/src/common/slurm_topology.h b/src/common/slurm_topology.h
index ddb442eb0..87ed978eb 100644
--- a/src/common/slurm_topology.h
+++ b/src/common/slurm_topology.h
@@ -2,6 +2,7 @@
  *  slurm_topology.h - Define topology plugin functions.
  *****************************************************************************
  *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Copyright (C) 2014 Silicon Graphics International Corp. All rights reserved.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
@@ -47,7 +48,7 @@
  *  defined here but is really tree plugin related
 \*****************************************************************************/
 struct switch_record {
-	uint32_t consumed_energy;	/* consumed energy, in joules */
+	uint64_t consumed_energy;	/* consumed energy, in joules */
 	int level;			/* level in hierarchy, leaf=0 */
 	uint32_t link_speed;		/* link speed, arbitrary units */
 	char *name;			/* switch name */
@@ -65,6 +66,33 @@ extern struct switch_record *switch_record_table;  /* ptr to switch records */
 extern int switch_record_cnt;		/* size of switch_record_table */
 extern int switch_levels;               /* number of switch levels     */
 
+/*****************************************************************************\
+ *  Hypercube SWITCH topology data structures
+ *  defined here but is really hypercube plugin related
+\*****************************************************************************/
+struct hypercube_switch {
+	int switch_index; /* index of this switch in switch_record_table */
+	char *switch_name; /* the name of this switch */
+	bitstr_t *node_bitmap; /* bitmap of nodes connected to this switch */
+	int node_cnt; /* number of nodes connected to this switch */
+	int avail_cnt; /* number of available nodes connected to this switch */
+	int32_t *distance; /*distance to the start (first) switch for each curve */
+	int *node_index; /* index of the connected nodes in the node_record_table */
+};
+
+extern int hypercube_dimensions; /* number of dimensions in hypercube 
+ network topolopy - determined by max number of switch connections*/
+
+/* table of hypercube_switch records */
+extern struct hypercube_switch *hypercube_switch_table; 
+extern int hypercube_switch_cnt; /* size of hypercube_switch_table */
+
+/* An array of hilbert curves, where each hilbert curve
+ * is a list of pointers to the hypercube_switch records in the 
+ * hypercube_switch_table. Each list of pointers is sorted in accordance
+ * with the sorting of the Hilbert curve. */
+extern struct hypercube_switch ***hypercube_switches; 
+
 /*****************************************************************************\
  *  Slurm topology functions
 \*****************************************************************************/
diff --git a/src/common/slurm_xlator.h b/src/common/slurm_xlator.h
index 45c33d3be..339b3e651 100644
--- a/src/common/slurm_xlator.h
+++ b/src/common/slurm_xlator.h
@@ -240,6 +240,8 @@
 #define	unpack_time		slurm_unpack_time
 #define	packdouble		slurm_packdouble
 #define	unpackdouble		slurm_unpackdouble
+#define	packlongdouble		slurm_packlongdouble
+#define	unpacklongdouble	slurm_unpacklongdouble
 #define	pack64			slurm_pack64
 #define	unpack64		slurm_unpack64
 #define	pack32			slurm_pack32
@@ -406,11 +408,22 @@
 #define eio_signal_shutdown		slurm_eio_signal_shutdown
 #define eio_signal_wakeup		slurm_eio_signal_wakeup
 
+/* callerid.[ch] functions */
+#define callerid_get_own_netinfo	slurm_callerid_get_own_netinfo
+
+/* some stepd_api.[ch] functions */
+#define stepd_available			slurm_stepd_available
+#define stepd_connect			slurm_stepd_connect
+#define stepd_get_uid			slurm_stepd_get_uid
+
+
+
 #endif /* USE_ALIAS */
 
 /* Include the function definitions after redefining their names. */
 #include "src/common/arg_desc.h"
 #include "src/common/bitstring.h"
+#include "src/common/callerid.h"
 #include "src/common/eio.h"
 #include "src/common/env.h"
 #include "src/common/hostlist.h"
@@ -426,6 +439,7 @@
 #include "src/common/slurm_route.h"
 #include "src/common/slurm_step_layout.h"
 #include "src/common/strlcpy.h"
+#include "src/common/stepd_api.h"
 #include "src/common/switch.h"
 #include "src/common/working_cluster.h"
 #include "src/common/xassert.h"
diff --git a/src/common/slurmdb_defs.c b/src/common/slurmdb_defs.c
index 4c4171b18..ee387c74e 100644
--- a/src/common/slurmdb_defs.c
+++ b/src/common/slurmdb_defs.c
@@ -38,40 +38,28 @@
 
 #include <stdlib.h>
 
-#include "src/common/slurmdb_defs.h"
 #include "src/common/assoc_mgr.h"
-#include "src/common/xmalloc.h"
-#include "src/common/xstring.h"
-#include "src/common/slurm_strcasestr.h"
-#include "src/common/slurm_protocol_defs.h"
-#include "src/common/parse_time.h"
 #include "src/common/node_select.h"
+#include "src/common/parse_time.h"
 #include "src/common/slurm_auth.h"
+#include "src/common/slurm_strcasestr.h"
+#include "src/common/slurm_protocol_defs.h"
+#include "src/common/slurm_time.h"
+#include "src/common/slurmdb_defs.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
 #include "src/slurmdbd/read_config.h"
 
 #define FORMAT_STRING_SIZE 34
 
 slurmdb_cluster_rec_t *working_cluster_rec = NULL;
 
+static char *local_cluster_name; /* name of local_cluster      */
+
 static void _free_res_cond_members(slurmdb_res_cond_t *res_cond);
 static void _free_res_rec_members(slurmdb_res_rec_t *res);
 
-static void _free_assoc_rec_members(slurmdb_association_rec_t *assoc)
-{
-	if (assoc) {
-		if (assoc->accounting_list)
-			list_destroy(assoc->accounting_list);
-		xfree(assoc->acct);
-		xfree(assoc->cluster);
-		xfree(assoc->parent_acct);
-		xfree(assoc->partition);
-		if (assoc->qos_list)
-			list_destroy(assoc->qos_list);
-		xfree(assoc->user);
-
-		destroy_assoc_mgr_association_usage(assoc->usage);
-	}
-}
+strong_alias(get_qos_complete_str_bitstr, slurmdb_get_qos_complete_str_bitstr);
 
 static void _free_clus_res_rec_members(slurmdb_clus_res_rec_t *clus_res)
 {
@@ -83,33 +71,20 @@ static void _free_clus_res_rec_members(slurmdb_clus_res_rec_t *clus_res)
 static void _free_cluster_rec_members(slurmdb_cluster_rec_t *cluster)
 {
 	if (cluster) {
-		if (cluster->accounting_list)
-			list_destroy(cluster->accounting_list);
+		FREE_NULL_LIST(cluster->accounting_list);
 		xfree(cluster->control_host);
 		xfree(cluster->dim_size);
 		xfree(cluster->name);
 		xfree(cluster->nodes);
-		slurmdb_destroy_association_rec(cluster->root_assoc);
-	}
-}
-
-static void _free_qos_rec_members(slurmdb_qos_rec_t *qos)
-{
-	if (qos) {
-		xfree(qos->description);
-		xfree(qos->name);
-		FREE_NULL_BITMAP(qos->preempt_bitstr);
-		if (qos->preempt_list)
-			list_destroy(qos->preempt_list);
-		destroy_assoc_mgr_qos_usage(qos->usage);
+		slurmdb_destroy_assoc_rec(cluster->root_assoc);
+		xfree(cluster->tres_str);
 	}
 }
 
 static void _free_wckey_rec_members(slurmdb_wckey_rec_t *wckey)
 {
 	if (wckey) {
-		if (wckey->accounting_list)
-			list_destroy(wckey->accounting_list);
+		FREE_NULL_LIST(wckey->accounting_list);
 		xfree(wckey->cluster);
 		xfree(wckey->name);
 		xfree(wckey->user);
@@ -119,8 +94,16 @@ static void _free_wckey_rec_members(slurmdb_wckey_rec_t *wckey)
 static void _free_cluster_cond_members(slurmdb_cluster_cond_t *cluster_cond)
 {
 	if (cluster_cond) {
-		if (cluster_cond->cluster_list)
-			list_destroy(cluster_cond->cluster_list);
+		FREE_NULL_LIST(cluster_cond->cluster_list);
+	}
+}
+
+static void _free_tres_cond_members(slurmdb_tres_cond_t *tres_cond)
+{
+	if (tres_cond) {
+		FREE_NULL_LIST(tres_cond->id_list);
+		FREE_NULL_LIST(tres_cond->name_list);
+		FREE_NULL_LIST(tres_cond->type_list);
 	}
 }
 
@@ -197,14 +180,13 @@ static int _sort_children_list(void *v1, void *v2)
  * returns: -1 assoc_a < assoc_b   0: assoc_a == assoc_b   1: assoc_a > assoc_b
  *
  */
-
 static int _sort_assoc_by_lft_dec(void *v1, void *v2)
 {
-	slurmdb_association_rec_t *assoc_a;
-	slurmdb_association_rec_t *assoc_b;
+	slurmdb_assoc_rec_t *assoc_a;
+	slurmdb_assoc_rec_t *assoc_b;
 
-	assoc_a = *(slurmdb_association_rec_t **)v1;
-	assoc_b = *(slurmdb_association_rec_t **)v2;
+	assoc_a = *(slurmdb_assoc_rec_t **)v1;
+	assoc_b = *(slurmdb_assoc_rec_t **)v2;
 
 	if (assoc_a->lft == assoc_b->lft)
 		return 0;
@@ -369,6 +351,9 @@ static uint32_t _str_2_qos_flags(char *flags)
 	if (slurm_strcasestr(flags, "RequiresReservation"))
 		return QOS_FLAG_REQ_RESV;
 
+	if (slurm_strcasestr(flags, "OverPartQOS"))
+		return QOS_FLAG_OVER_PART_QOS;
+
 	if (slurm_strcasestr(flags, "NoReserve"))
 		return QOS_FLAG_NO_RESERVE;
 
@@ -380,6 +365,111 @@ static uint32_t _str_2_res_flags(char *flags)
 	return 0;
 }
 
+static void _destroy_local_cluster_rec(void *object)
+{
+	xfree(object);
+}
+
+static int _sort_local_cluster(void *v1, void *v2)
+{
+	local_cluster_rec_t* rec_a = *(local_cluster_rec_t**)v1;
+	local_cluster_rec_t* rec_b = *(local_cluster_rec_t**)v2;
+
+	if (rec_a->start_time < rec_b->start_time)
+		return -1;
+	else if (rec_a->start_time > rec_b->start_time)
+		return 1;
+
+	if (rec_a->preempt_cnt < rec_b->preempt_cnt)
+		return -1;
+	else if (rec_a->preempt_cnt > rec_b->preempt_cnt)
+		return 1;
+
+	if (!strcmp(local_cluster_name, rec_a->cluster_rec->name))
+		return -1;
+	else if (!strcmp(local_cluster_name, rec_b->cluster_rec->name))
+		return 1;
+
+	return 0;
+}
+
+static local_cluster_rec_t * _job_will_run (job_desc_msg_t *req)
+{
+	local_cluster_rec_t *local_cluster = NULL;
+	will_run_response_msg_t *will_run_resp;
+	char buf[64];
+	int rc;
+	uint32_t cluster_flags = slurmdb_setup_cluster_flags();
+	char *type = "processors";
+
+	rc = slurm_job_will_run2(req, &will_run_resp);
+
+	if (rc >= 0) {
+		if (cluster_flags & CLUSTER_FLAG_BG)
+			type = "cnodes";
+		slurm_make_time_str(&will_run_resp->start_time,
+				    buf, sizeof(buf));
+		debug("Job %u to start at %s on cluster %s using %u %s on %s",
+		      will_run_resp->job_id, buf, working_cluster_rec->name,
+		      will_run_resp->proc_cnt, type,
+		      will_run_resp->node_list);
+
+		local_cluster = xmalloc(sizeof(local_cluster_rec_t));
+		local_cluster->cluster_rec = working_cluster_rec;
+		local_cluster->start_time = will_run_resp->start_time;
+
+		if (will_run_resp->preemptee_job_id) {
+			ListIterator itr;
+			uint32_t *job_id_ptr;
+			char *job_list = NULL, *sep = "";
+			local_cluster->preempt_cnt = slurm_list_count(
+				will_run_resp->preemptee_job_id);
+			itr = list_iterator_create(will_run_resp->
+						   preemptee_job_id);
+			while ((job_id_ptr = list_next(itr))) {
+				if (job_list)
+					sep = ",";
+				xstrfmtcat(job_list, "%s%u",
+					   sep, *job_id_ptr);
+			}
+			list_iterator_destroy(itr);
+			debug("  Preempts: %s", job_list);
+			xfree(job_list);
+		}
+
+		slurm_free_will_run_response_msg(will_run_resp);
+	}
+
+	return local_cluster;
+}
+
+static int _set_qos_bit_from_string(bitstr_t *valid_qos, char *name)
+{
+	void (*my_function) (bitstr_t *b, bitoff_t bit);
+	bitoff_t bit = 0;
+
+	xassert(valid_qos);
+
+	if (!name)
+		return SLURM_ERROR;
+
+	if (name[0] == '-') {
+		name++;
+		my_function = bit_clear;
+	} else if (name[0] == '+') {
+		name++;
+		my_function = bit_set;
+	} else
+		my_function = bit_set;
+
+	if ((bit = atoi(name)) >= bit_size(valid_qos))
+		return SLURM_ERROR;
+
+	(*(my_function))(valid_qos, bit);
+
+	return SLURM_SUCCESS;
+}
+
 extern slurmdb_job_rec_t *slurmdb_create_job_rec()
 {
 	slurmdb_job_rec_t *job = xmalloc(sizeof(slurmdb_job_rec_t));
@@ -403,7 +493,6 @@ extern slurmdb_step_rec_t *slurmdb_create_step_rec()
 	step->stepid = (uint32_t)NO_VAL;
 	step->state = NO_VAL;
 	step->exitcode = NO_VAL;
-	step->ncpus = (uint32_t)NO_VAL;
 	step->elapsed = (uint32_t)NO_VAL;
 	step->tot_cpu_sec = (uint32_t)NO_VAL;
 	step->tot_cpu_usec = (uint32_t)NO_VAL;
@@ -412,21 +501,89 @@ extern slurmdb_step_rec_t *slurmdb_create_step_rec()
 	return step;
 }
 
+extern slurmdb_assoc_usage_t *slurmdb_create_assoc_usage(int tres_cnt)
+{
+	slurmdb_assoc_usage_t *usage =
+		xmalloc(sizeof(slurmdb_assoc_usage_t));
+
+	usage->level_shares = NO_VAL;
+	usage->shares_norm = NO_VAL64;
+	usage->usage_efctv = 0;
+	usage->usage_norm = (long double)NO_VAL;
+	usage->usage_raw = 0;
+	usage->level_fs = 0;
+	usage->fs_factor = 0;
+
+	if (tres_cnt) {
+		int alloc_size = sizeof(uint64_t) * tres_cnt;
+		usage->tres_cnt = tres_cnt;
+		usage->grp_used_tres = xmalloc(alloc_size);
+		usage->grp_used_tres_run_secs = xmalloc(alloc_size);
+		usage->usage_tres_raw = xmalloc(sizeof(long double) * tres_cnt);
+	}
+
+	return usage;
+}
+
+extern slurmdb_qos_usage_t *slurmdb_create_qos_usage(int tres_cnt)
+{
+	slurmdb_qos_usage_t *usage =
+		xmalloc(sizeof(slurmdb_qos_usage_t));
+
+	if (tres_cnt) {
+		int alloc_size = sizeof(uint64_t) * tres_cnt;
+		usage->tres_cnt = tres_cnt;
+		usage->grp_used_tres_run_secs = xmalloc(alloc_size);
+		usage->grp_used_tres = xmalloc(alloc_size);
+		usage->usage_tres_raw = xmalloc(sizeof(long double) * tres_cnt);
+	}
+
+	return usage;
+}
+
+extern void slurmdb_destroy_assoc_usage(void *object)
+{
+	slurmdb_assoc_usage_t *usage =
+		(slurmdb_assoc_usage_t *)object;
+
+	if (usage) {
+		FREE_NULL_LIST(usage->children_list);
+		FREE_NULL_BITMAP(usage->valid_qos);
+		xfree(usage->grp_used_tres_run_secs);
+		xfree(usage->grp_used_tres);
+		xfree(usage->usage_tres_raw);
+		xfree(usage);
+	}
+}
+
+extern void slurmdb_destroy_qos_usage(void *object)
+{
+	slurmdb_qos_usage_t *usage =
+		(slurmdb_qos_usage_t *)object;
+
+	if (usage) {
+		FREE_NULL_LIST(usage->job_list);
+		FREE_NULL_LIST(usage->user_limit_list);
+		xfree(usage->grp_used_tres_run_secs);
+		xfree(usage->grp_used_tres);
+		xfree(usage->usage_tres_raw);
+		xfree(usage);
+	}
+}
+
+
 extern void slurmdb_destroy_user_rec(void *object)
 {
 	slurmdb_user_rec_t *slurmdb_user = (slurmdb_user_rec_t *)object;
 
 	if (slurmdb_user) {
-		if (slurmdb_user->assoc_list)
-			list_destroy(slurmdb_user->assoc_list);
-		if (slurmdb_user->coord_accts)
-			list_destroy(slurmdb_user->coord_accts);
+		FREE_NULL_LIST(slurmdb_user->assoc_list);
+		FREE_NULL_LIST(slurmdb_user->coord_accts);
 		xfree(slurmdb_user->default_acct);
 		xfree(slurmdb_user->default_wckey);
 		xfree(slurmdb_user->name);
 		xfree(slurmdb_user->old_name);
-		if (slurmdb_user->wckey_list)
-			list_destroy(slurmdb_user->wckey_list);
+		FREE_NULL_LIST(slurmdb_user->wckey_list);
 		xfree(slurmdb_user);
 	}
 }
@@ -437,10 +594,8 @@ extern void slurmdb_destroy_account_rec(void *object)
 		(slurmdb_account_rec_t *)object;
 
 	if (slurmdb_account) {
-		if (slurmdb_account->assoc_list)
-			list_destroy(slurmdb_account->assoc_list);
-		if (slurmdb_account->coordinators)
-			list_destroy(slurmdb_account->coordinators);
+		FREE_NULL_LIST(slurmdb_account->assoc_list);
+		FREE_NULL_LIST(slurmdb_account->coordinators);
 		xfree(slurmdb_account->description);
 		xfree(slurmdb_account->name);
 		xfree(slurmdb_account->organization);
@@ -465,6 +620,8 @@ extern void slurmdb_destroy_cluster_accounting_rec(void *object)
 		(slurmdb_cluster_accounting_rec_t *)object;
 
 	if (clusteracct_rec) {
+		slurmdb_destroy_tres_rec_noalloc(
+			&clusteracct_rec->tres_rec);
 		xfree(clusteracct_rec);
 	}
 }
@@ -497,18 +654,49 @@ extern void slurmdb_destroy_accounting_rec(void *object)
 		(slurmdb_accounting_rec_t *)object;
 
 	if (slurmdb_accounting) {
+		slurmdb_destroy_tres_rec_noalloc(
+			&slurmdb_accounting->tres_rec);
 		xfree(slurmdb_accounting);
 	}
 }
 
-extern void slurmdb_destroy_association_rec(void *object)
+extern void slurmdb_free_assoc_rec_members(slurmdb_assoc_rec_t *assoc)
+{
+	if (assoc) {
+		FREE_NULL_LIST(assoc->accounting_list);
+		xfree(assoc->acct);
+		xfree(assoc->cluster);
+		xfree(assoc->grp_tres);
+		xfree(assoc->grp_tres_ctld);
+		xfree(assoc->grp_tres_mins);
+		xfree(assoc->grp_tres_mins_ctld);
+		xfree(assoc->grp_tres_run_mins);
+		xfree(assoc->grp_tres_run_mins_ctld);
+		xfree(assoc->max_tres_mins_pj);
+		xfree(assoc->max_tres_mins_ctld);
+		xfree(assoc->max_tres_run_mins);
+		xfree(assoc->max_tres_run_mins_ctld);
+		xfree(assoc->max_tres_pj);
+		xfree(assoc->max_tres_ctld);
+		xfree(assoc->max_tres_pn);
+		xfree(assoc->max_tres_pn_ctld);
+		xfree(assoc->parent_acct);
+		xfree(assoc->partition);
+		FREE_NULL_LIST(assoc->qos_list);
+		xfree(assoc->user);
+
+		slurmdb_destroy_assoc_usage(assoc->usage);
+	}
+}
+
+extern void slurmdb_destroy_assoc_rec(void *object)
 {
-	slurmdb_association_rec_t *slurmdb_association =
-		(slurmdb_association_rec_t *)object;
+	slurmdb_assoc_rec_t *slurmdb_assoc =
+		(slurmdb_assoc_rec_t *)object;
 
-	if (slurmdb_association) {
-		_free_assoc_rec_members(slurmdb_association);
-		xfree(slurmdb_association);
+	if (slurmdb_assoc) {
+		slurmdb_free_assoc_rec_members(slurmdb_assoc);
+		xfree(slurmdb_assoc);
 	}
 }
 
@@ -522,6 +710,7 @@ extern void slurmdb_destroy_event_rec(void *object)
 		xfree(slurmdb_event->cluster_nodes);
 		xfree(slurmdb_event->node_name);
 		xfree(slurmdb_event->reason);
+		xfree(slurmdb_event->tres_str);
 
 		xfree(slurmdb_event);
 	}
@@ -542,21 +731,49 @@ extern void slurmdb_destroy_job_rec(void *object)
 		xfree(job->nodes);
 		xfree(job->req_gres);
 		xfree(job->resv_name);
-		if (job->steps) {
-			list_destroy(job->steps);
-			job->steps = NULL;
-		}
+		FREE_NULL_LIST(job->steps);
+		xfree(job->tres_alloc_str);
+		xfree(job->tres_req_str);
 		xfree(job->user);
 		xfree(job->wckey);
 		xfree(job);
 	}
 }
 
+extern void slurmdb_free_qos_rec_members(slurmdb_qos_rec_t *qos)
+{
+	if (qos) {
+		xfree(qos->description);
+		xfree(qos->grp_tres);
+		xfree(qos->grp_tres_ctld);
+		xfree(qos->grp_tres_mins);
+		xfree(qos->grp_tres_mins_ctld);
+		xfree(qos->grp_tres_run_mins);
+		xfree(qos->grp_tres_run_mins_ctld);
+		xfree(qos->max_tres_mins_pj);
+		xfree(qos->max_tres_mins_pj_ctld);
+		xfree(qos->max_tres_run_mins_pu);
+		xfree(qos->max_tres_run_mins_pu_ctld);
+		xfree(qos->max_tres_pj);
+		xfree(qos->max_tres_pj_ctld);
+		xfree(qos->max_tres_pn);
+		xfree(qos->max_tres_pn_ctld);
+		xfree(qos->max_tres_pu);
+		xfree(qos->max_tres_pu_ctld);
+		xfree(qos->min_tres_pj);
+		xfree(qos->min_tres_pj_ctld);
+		xfree(qos->name);
+		FREE_NULL_BITMAP(qos->preempt_bitstr);
+		FREE_NULL_LIST(qos->preempt_list);
+		slurmdb_destroy_qos_usage(qos->usage);
+	}
+}
+
 extern void slurmdb_destroy_qos_rec(void *object)
 {
 	slurmdb_qos_rec_t *slurmdb_qos = (slurmdb_qos_rec_t *)object;
 	if (slurmdb_qos) {
-		_free_qos_rec_members(slurmdb_qos);
+		slurmdb_free_qos_rec_members(slurmdb_qos);
 		xfree(slurmdb_qos);
 	}
 }
@@ -571,6 +788,7 @@ extern void slurmdb_destroy_reservation_rec(void *object)
 		xfree(slurmdb_resv->name);
 		xfree(slurmdb_resv->nodes);
 		xfree(slurmdb_resv->node_inx);
+		xfree(slurmdb_resv->tres_str);
 		xfree(slurmdb_resv);
 	}
 }
@@ -582,6 +800,7 @@ extern void slurmdb_destroy_step_rec(void *object)
 		xfree(step->nodes);
 		xfree(step->pid_str);
 		xfree(step->stepname);
+		xfree(step->tres_alloc_str);
 		xfree(step);
 	}
 }
@@ -632,6 +851,27 @@ extern void slurmdb_destroy_archive_rec(void *object)
 	}
 }
 
+extern void slurmdb_destroy_tres_rec_noalloc(void *object)
+{
+	slurmdb_tres_rec_t *tres_rec = (slurmdb_tres_rec_t *)object;
+
+	if (!tres_rec)
+		return;
+
+	xfree(tres_rec->name);
+	xfree(tres_rec->type);
+}
+
+extern void slurmdb_destroy_tres_rec(void *object)
+{
+	slurmdb_tres_rec_t *tres_rec = (slurmdb_tres_rec_t *)object;
+
+	if (tres_rec) {
+		slurmdb_destroy_tres_rec_noalloc(tres_rec);
+		xfree(tres_rec);
+	}
+}
+
 extern void slurmdb_destroy_report_assoc_rec(void *object)
 {
 	slurmdb_report_assoc_rec_t *slurmdb_report_assoc =
@@ -640,6 +880,7 @@ extern void slurmdb_destroy_report_assoc_rec(void *object)
 		xfree(slurmdb_report_assoc->acct);
 		xfree(slurmdb_report_assoc->cluster);
 		xfree(slurmdb_report_assoc->parent_acct);
+		FREE_NULL_LIST(slurmdb_report_assoc->tres_list);
 		xfree(slurmdb_report_assoc->user);
 		xfree(slurmdb_report_assoc);
 	}
@@ -651,11 +892,10 @@ extern void slurmdb_destroy_report_user_rec(void *object)
 		(slurmdb_report_user_rec_t *)object;
 	if (slurmdb_report_user) {
 		xfree(slurmdb_report_user->acct);
-		if (slurmdb_report_user->acct_list)
-			list_destroy(slurmdb_report_user->acct_list);
-		if (slurmdb_report_user->assoc_list)
-			list_destroy(slurmdb_report_user->assoc_list);
+		FREE_NULL_LIST(slurmdb_report_user->acct_list);
+		FREE_NULL_LIST(slurmdb_report_user->assoc_list);
 		xfree(slurmdb_report_user->name);
+		FREE_NULL_LIST(slurmdb_report_user->tres_list);
 		xfree(slurmdb_report_user);
 	}
 }
@@ -665,11 +905,10 @@ extern void slurmdb_destroy_report_cluster_rec(void *object)
 	slurmdb_report_cluster_rec_t *slurmdb_report_cluster =
 		(slurmdb_report_cluster_rec_t *)object;
 	if (slurmdb_report_cluster) {
-		if (slurmdb_report_cluster->assoc_list)
-			list_destroy(slurmdb_report_cluster->assoc_list);
+		FREE_NULL_LIST(slurmdb_report_cluster->assoc_list);
 		xfree(slurmdb_report_cluster->name);
-		if (slurmdb_report_cluster->user_list)
-			list_destroy(slurmdb_report_cluster->user_list);
+		FREE_NULL_LIST(slurmdb_report_cluster->tres_list);
+		FREE_NULL_LIST(slurmdb_report_cluster->user_list);
 		xfree(slurmdb_report_cluster);
 	}
 }
@@ -679,11 +918,9 @@ extern void slurmdb_destroy_user_cond(void *object)
 	slurmdb_user_cond_t *slurmdb_user = (slurmdb_user_cond_t *)object;
 
 	if (slurmdb_user) {
-		slurmdb_destroy_association_cond(slurmdb_user->assoc_cond);
-		if (slurmdb_user->def_acct_list)
-			list_destroy(slurmdb_user->def_acct_list);
-		if (slurmdb_user->def_wckey_list)
-			list_destroy(slurmdb_user->def_wckey_list);
+		slurmdb_destroy_assoc_cond(slurmdb_user->assoc_cond);
+		FREE_NULL_LIST(slurmdb_user->def_acct_list);
+		FREE_NULL_LIST(slurmdb_user->def_wckey_list);
 		xfree(slurmdb_user);
 	}
 }
@@ -694,11 +931,9 @@ extern void slurmdb_destroy_account_cond(void *object)
 		(slurmdb_account_cond_t *)object;
 
 	if (slurmdb_account) {
-		slurmdb_destroy_association_cond(slurmdb_account->assoc_cond);
-		if (slurmdb_account->description_list)
-			list_destroy(slurmdb_account->description_list);
-		if (slurmdb_account->organization_list)
-			list_destroy(slurmdb_account->organization_list);
+		slurmdb_destroy_assoc_cond(slurmdb_account->assoc_cond);
+		FREE_NULL_LIST(slurmdb_account->description_list);
+		FREE_NULL_LIST(slurmdb_account->organization_list);
 		xfree(slurmdb_account);
 	}
 }
@@ -714,70 +949,32 @@ extern void slurmdb_destroy_cluster_cond(void *object)
 	}
 }
 
-extern void slurmdb_destroy_association_cond(void *object)
-{
-	slurmdb_association_cond_t *slurmdb_association =
-		(slurmdb_association_cond_t *)object;
-
-	if (slurmdb_association) {
-		if (slurmdb_association->acct_list)
-			list_destroy(slurmdb_association->acct_list);
-		if (slurmdb_association->cluster_list)
-			list_destroy(slurmdb_association->cluster_list);
-		if (slurmdb_association->def_qos_id_list)
-			list_destroy(slurmdb_association->def_qos_id_list);
-
-		if (slurmdb_association->fairshare_list)
-			list_destroy(slurmdb_association->fairshare_list);
-
-		if (slurmdb_association->grp_cpu_mins_list)
-			list_destroy(slurmdb_association->grp_cpu_mins_list);
-		if (slurmdb_association->grp_cpu_run_mins_list)
-			list_destroy(slurmdb_association->
-				     grp_cpu_run_mins_list);
-		if (slurmdb_association->grp_cpus_list)
-			list_destroy(slurmdb_association->grp_cpus_list);
-		if (slurmdb_association->grp_jobs_list)
-			list_destroy(slurmdb_association->grp_jobs_list);
-		if (slurmdb_association->grp_mem_list)
-			list_destroy(slurmdb_association->grp_mem_list);
-		if (slurmdb_association->grp_nodes_list)
-			list_destroy(slurmdb_association->grp_nodes_list);
-		if (slurmdb_association->grp_submit_jobs_list)
-			list_destroy(slurmdb_association->grp_submit_jobs_list);
-		if (slurmdb_association->grp_wall_list)
-			list_destroy(slurmdb_association->grp_wall_list);
-
-		if (slurmdb_association->id_list)
-			list_destroy(slurmdb_association->id_list);
-
-		if (slurmdb_association->max_cpu_mins_pj_list)
-			list_destroy(slurmdb_association->max_cpu_mins_pj_list);
-		if (slurmdb_association->max_cpu_run_mins_list)
-			list_destroy(slurmdb_association->
-				     max_cpu_run_mins_list);
-		if (slurmdb_association->max_cpus_pj_list)
-			list_destroy(slurmdb_association->max_cpus_pj_list);
-		if (slurmdb_association->max_jobs_list)
-			list_destroy(slurmdb_association->max_jobs_list);
-		if (slurmdb_association->max_nodes_pj_list)
-			list_destroy(slurmdb_association->max_nodes_pj_list);
-		if (slurmdb_association->max_submit_jobs_list)
-			list_destroy(slurmdb_association->max_submit_jobs_list);
-		if (slurmdb_association->max_wall_pj_list)
-			list_destroy(slurmdb_association->max_wall_pj_list);
-
-		if (slurmdb_association->partition_list)
-			list_destroy(slurmdb_association->partition_list);
-
-		if (slurmdb_association->parent_acct_list)
-			list_destroy(slurmdb_association->parent_acct_list);
-
-		if (slurmdb_association->qos_list)
-			list_destroy(slurmdb_association->qos_list);
-		if (slurmdb_association->user_list)
-			list_destroy(slurmdb_association->user_list);
-		xfree(slurmdb_association);
+extern void slurmdb_destroy_tres_cond(void *object)
+{
+	slurmdb_tres_cond_t *slurmdb_tres =
+		(slurmdb_tres_cond_t *)object;
+
+	if (slurmdb_tres) {
+		_free_tres_cond_members(slurmdb_tres);
+		xfree(slurmdb_tres);
+	}
+}
+
+extern void slurmdb_destroy_assoc_cond(void *object)
+{
+	slurmdb_assoc_cond_t *slurmdb_assoc =
+		(slurmdb_assoc_cond_t *)object;
+
+	if (slurmdb_assoc) {
+		FREE_NULL_LIST(slurmdb_assoc->acct_list);
+		FREE_NULL_LIST(slurmdb_assoc->cluster_list);
+		FREE_NULL_LIST(slurmdb_assoc->def_qos_id_list);
+		FREE_NULL_LIST(slurmdb_assoc->id_list);
+		FREE_NULL_LIST(slurmdb_assoc->partition_list);
+		FREE_NULL_LIST(slurmdb_assoc->parent_acct_list);
+		FREE_NULL_LIST(slurmdb_assoc->qos_list);
+		FREE_NULL_LIST(slurmdb_assoc->user_list);
+		xfree(slurmdb_assoc);
 	}
 }
 
@@ -787,16 +984,11 @@ extern void slurmdb_destroy_event_cond(void *object)
 		(slurmdb_event_cond_t *)object;
 
 	if (slurmdb_event) {
-		if (slurmdb_event->cluster_list)
-			list_destroy(slurmdb_event->cluster_list);
-		if (slurmdb_event->node_list)
-			list_destroy(slurmdb_event->node_list);
-		if (slurmdb_event->reason_list)
-			list_destroy(slurmdb_event->reason_list);
-		if (slurmdb_event->reason_uid_list)
-			list_destroy(slurmdb_event->reason_uid_list);
-		if (slurmdb_event->state_list)
-			list_destroy(slurmdb_event->state_list);
+		FREE_NULL_LIST(slurmdb_event->cluster_list);
+		FREE_NULL_LIST(slurmdb_event->node_list);
+		FREE_NULL_LIST(slurmdb_event->reason_list);
+		FREE_NULL_LIST(slurmdb_event->reason_uid_list);
+		FREE_NULL_LIST(slurmdb_event->state_list);
 		xfree(slurmdb_event);
 	}
 }
@@ -807,33 +999,20 @@ extern void slurmdb_destroy_job_cond(void *object)
 		(slurmdb_job_cond_t *)object;
 
 	if (job_cond) {
-		if (job_cond->acct_list)
-			list_destroy(job_cond->acct_list);
-		if (job_cond->associd_list)
-			list_destroy(job_cond->associd_list);
-		if (job_cond->cluster_list)
-			list_destroy(job_cond->cluster_list);
-		if (job_cond->groupid_list)
-			list_destroy(job_cond->groupid_list);
-		if (job_cond->jobname_list)
-			list_destroy(job_cond->jobname_list);
-		if (job_cond->partition_list)
-			list_destroy(job_cond->partition_list);
-		if (job_cond->qos_list)
-			list_destroy(job_cond->qos_list);
-		if (job_cond->resv_list)
-			list_destroy(job_cond->resv_list);
-		if (job_cond->resvid_list)
-			list_destroy(job_cond->resvid_list);
-		if (job_cond->step_list)
-			list_destroy(job_cond->step_list);
-		if (job_cond->state_list)
-			list_destroy(job_cond->state_list);
+		FREE_NULL_LIST(job_cond->acct_list);
+		FREE_NULL_LIST(job_cond->associd_list);
+		FREE_NULL_LIST(job_cond->cluster_list);
+		FREE_NULL_LIST(job_cond->groupid_list);
+		FREE_NULL_LIST(job_cond->jobname_list);
+		FREE_NULL_LIST(job_cond->partition_list);
+		FREE_NULL_LIST(job_cond->qos_list);
+		FREE_NULL_LIST(job_cond->resv_list);
+		FREE_NULL_LIST(job_cond->resvid_list);
+		FREE_NULL_LIST(job_cond->step_list);
+		FREE_NULL_LIST(job_cond->state_list);
 		xfree(job_cond->used_nodes);
-		if (job_cond->userid_list)
-			list_destroy(job_cond->userid_list);
-		if (job_cond->wckey_list)
-			list_destroy(job_cond->wckey_list);
+		FREE_NULL_LIST(job_cond->userid_list);
+		FREE_NULL_LIST(job_cond->wckey_list);
 		xfree(job_cond);
 	}
 }
@@ -853,10 +1032,8 @@ extern void slurmdb_destroy_qos_cond(void *object)
 {
 	slurmdb_qos_cond_t *slurmdb_qos = (slurmdb_qos_cond_t *)object;
 	if (slurmdb_qos) {
-		if (slurmdb_qos->id_list)
-			list_destroy(slurmdb_qos->id_list);
-		if (slurmdb_qos->name_list)
-			list_destroy(slurmdb_qos->name_list);
+		FREE_NULL_LIST(slurmdb_qos->id_list);
+		FREE_NULL_LIST(slurmdb_qos->name_list);
 		xfree(slurmdb_qos);
 	}
 }
@@ -876,12 +1053,9 @@ extern void slurmdb_destroy_reservation_cond(void *object)
 	slurmdb_reservation_cond_t *slurmdb_resv =
 		(slurmdb_reservation_cond_t *)object;
 	if (slurmdb_resv) {
-		if (slurmdb_resv->cluster_list)
-			list_destroy(slurmdb_resv->cluster_list);
-		if (slurmdb_resv->id_list)
-			list_destroy(slurmdb_resv->id_list);
-		if (slurmdb_resv->name_list)
-			list_destroy(slurmdb_resv->name_list);
+		FREE_NULL_LIST(slurmdb_resv->cluster_list);
+		FREE_NULL_LIST(slurmdb_resv->id_list);
+		FREE_NULL_LIST(slurmdb_resv->name_list);
 		xfree(slurmdb_resv->nodes);
 		xfree(slurmdb_resv);
 	}
@@ -891,22 +1065,14 @@ extern void slurmdb_destroy_txn_cond(void *object)
 {
 	slurmdb_txn_cond_t *slurmdb_txn = (slurmdb_txn_cond_t *)object;
 	if (slurmdb_txn) {
-		if (slurmdb_txn->acct_list)
-			list_destroy(slurmdb_txn->acct_list);
-		if (slurmdb_txn->action_list)
-			list_destroy(slurmdb_txn->action_list);
-		if (slurmdb_txn->actor_list)
-			list_destroy(slurmdb_txn->actor_list);
-		if (slurmdb_txn->cluster_list)
-			list_destroy(slurmdb_txn->cluster_list);
-		if (slurmdb_txn->id_list)
-			list_destroy(slurmdb_txn->id_list);
-		if (slurmdb_txn->info_list)
-			list_destroy(slurmdb_txn->info_list);
-		if (slurmdb_txn->name_list)
-			list_destroy(slurmdb_txn->name_list);
-		if (slurmdb_txn->user_list)
-			list_destroy(slurmdb_txn->user_list);
+		FREE_NULL_LIST(slurmdb_txn->acct_list);
+		FREE_NULL_LIST(slurmdb_txn->action_list);
+		FREE_NULL_LIST(slurmdb_txn->actor_list);
+		FREE_NULL_LIST(slurmdb_txn->cluster_list);
+		FREE_NULL_LIST(slurmdb_txn->id_list);
+		FREE_NULL_LIST(slurmdb_txn->info_list);
+		FREE_NULL_LIST(slurmdb_txn->name_list);
+		FREE_NULL_LIST(slurmdb_txn->user_list);
 		xfree(slurmdb_txn);
 	}
 }
@@ -916,14 +1082,10 @@ extern void slurmdb_destroy_wckey_cond(void *object)
 	slurmdb_wckey_cond_t *wckey = (slurmdb_wckey_cond_t *)object;
 
 	if (wckey) {
-		if (wckey->cluster_list)
-			list_destroy(wckey->cluster_list);
-		if (wckey->id_list)
-			list_destroy(wckey->id_list);
-		if (wckey->name_list)
-			list_destroy(wckey->name_list);
-		if (wckey->user_list)
-			list_destroy(wckey->user_list);
+		FREE_NULL_LIST(wckey->cluster_list);
+		FREE_NULL_LIST(wckey->id_list);
+		FREE_NULL_LIST(wckey->name_list);
+		FREE_NULL_LIST(wckey->user_list);
 		xfree(wckey);
 	}
 }
@@ -947,9 +1109,7 @@ extern void slurmdb_destroy_update_object(void *object)
 		(slurmdb_update_object_t *) object;
 
 	if (slurmdb_update) {
-		if (slurmdb_update->objects)
-			list_destroy(slurmdb_update->objects);
-
+		FREE_NULL_LIST(slurmdb_update->objects);
 		xfree(slurmdb_update);
 	}
 }
@@ -960,6 +1120,8 @@ extern void slurmdb_destroy_used_limits(void *object)
 		(slurmdb_used_limits_t *)object;
 
 	if (slurmdb_used_limits) {
+		xfree(slurmdb_used_limits->tres);
+		xfree(slurmdb_used_limits->tres_run_mins);
 		xfree(slurmdb_used_limits);
 	}
 }
@@ -990,9 +1152,7 @@ extern void slurmdb_destroy_hierarchical_rec(void *object)
 	slurmdb_hierarchical_rec_t *slurmdb_hierarchical_rec =
 		(slurmdb_hierarchical_rec_t *)object;
 	if (slurmdb_hierarchical_rec) {
-		if (slurmdb_hierarchical_rec->children) {
-			list_destroy(slurmdb_hierarchical_rec->children);
-		}
+		FREE_NULL_LIST(slurmdb_hierarchical_rec->children);
 		xfree(slurmdb_hierarchical_rec);
 	}
 }
@@ -1010,8 +1170,8 @@ extern void slurmdb_destroy_report_job_grouping(void *object)
 	slurmdb_report_job_grouping_t *job_grouping =
 		(slurmdb_report_job_grouping_t *)object;
 	if (job_grouping) {
-		if (job_grouping->jobs)
-			list_destroy(job_grouping->jobs);
+		FREE_NULL_LIST(job_grouping->jobs);
+		FREE_NULL_LIST(job_grouping->tres_list);
 		xfree(job_grouping);
 	}
 }
@@ -1022,8 +1182,8 @@ extern void slurmdb_destroy_report_acct_grouping(void *object)
 		(slurmdb_report_acct_grouping_t *)object;
 	if (acct_grouping) {
 		xfree(acct_grouping->acct);
-		if (acct_grouping->groups)
-			list_destroy(acct_grouping->groups);
+		FREE_NULL_LIST(acct_grouping->groups);
+		FREE_NULL_LIST(acct_grouping->tres_list);
 		xfree(acct_grouping);
 	}
 }
@@ -1034,8 +1194,8 @@ extern void slurmdb_destroy_report_cluster_grouping(void *object)
 		(slurmdb_report_cluster_grouping_t *)object;
 	if (cluster_grouping) {
 		xfree(cluster_grouping->cluster);
-		if (cluster_grouping->acct_list)
-			list_destroy(cluster_grouping->acct_list);
+		FREE_NULL_LIST(cluster_grouping->acct_list);
+		FREE_NULL_LIST(cluster_grouping->tres_list);
 		xfree(cluster_grouping);
 	}
 }
@@ -1099,37 +1259,33 @@ extern List slurmdb_get_info_cluster(char *cluster_names)
 	list_iterator_destroy(itr);
 
 end_it:
-	if (cluster_cond.cluster_list)
-		list_destroy(cluster_cond.cluster_list);
+	FREE_NULL_LIST(cluster_cond.cluster_list);
 	acct_storage_g_close_connection(&db_conn);
 
 	if (temp_list && !list_count(temp_list)) {
-		list_destroy(temp_list);
-		temp_list = NULL;
+		FREE_NULL_LIST(temp_list);
 	}
 
 	return temp_list;
 }
 
-extern void slurmdb_init_association_rec(slurmdb_association_rec_t *assoc,
+extern void slurmdb_init_assoc_rec(slurmdb_assoc_rec_t *assoc,
 					 bool free_it)
 {
 	if (!assoc)
 		return;
 
 	if (free_it)
-		_free_assoc_rec_members(assoc);
-	memset(assoc, 0, sizeof(slurmdb_association_rec_t));
+		slurmdb_free_assoc_rec_members(assoc);
+	memset(assoc, 0, sizeof(slurmdb_assoc_rec_t));
 
 	assoc->def_qos_id = NO_VAL;
 	assoc->is_def = (uint16_t)NO_VAL;
 
-	assoc->grp_cpu_mins = (uint64_t)NO_VAL;
-	assoc->grp_cpu_run_mins = (uint64_t)NO_VAL;
-	assoc->grp_cpus = NO_VAL;
+	/* assoc->grp_tres_mins = NULL; */
+	/* assoc->grp_tres_run_mins = NULL; */
+	/* assoc->grp_tres = NULL; */
 	assoc->grp_jobs = NO_VAL;
-	assoc->grp_mem = NO_VAL;
-	assoc->grp_nodes = NO_VAL;
 	assoc->grp_submit_jobs = NO_VAL;
 	assoc->grp_wall = NO_VAL;
 
@@ -1137,15 +1293,14 @@ extern void slurmdb_init_association_rec(slurmdb_association_rec_t *assoc,
 	assoc->rgt = NO_VAL;
 	/* assoc->level_shares = NO_VAL; */
 
-	assoc->max_cpu_mins_pj = (uint64_t)NO_VAL;
-	assoc->max_cpu_run_mins = (uint64_t)NO_VAL;
-	assoc->max_cpus_pj = NO_VAL;
+	/* assoc->max_tres_mins_pj = NULL; */
+	/* assoc->max_tres_run_mins = NULL; */
+	/* assoc->max_tres_pj = NULL; */
 	assoc->max_jobs = NO_VAL;
-	assoc->max_nodes_pj = NO_VAL;
 	assoc->max_submit_jobs = NO_VAL;
 	assoc->max_wall_pj = NO_VAL;
 
-	/* assoc->shares_norm = (double)NO_VAL; */
+	/* assoc->shares_norm = NO_VAL64; */
 	assoc->shares_raw = NO_VAL;
 
 	/* assoc->usage_efctv = 0; */
@@ -1177,44 +1332,41 @@ extern void slurmdb_init_cluster_rec(slurmdb_cluster_rec_t *cluster,
 	cluster->flags = NO_VAL;
 }
 
-extern void slurmdb_init_qos_rec(slurmdb_qos_rec_t *qos, bool free_it)
+extern void slurmdb_init_qos_rec(slurmdb_qos_rec_t *qos, bool free_it,
+				 uint32_t init_val)
 {
 	if (!qos)
 		return;
 
 	if (free_it)
-		_free_qos_rec_members(qos);
+		slurmdb_free_qos_rec_members(qos);
 	memset(qos, 0, sizeof(slurmdb_qos_rec_t));
 
 	qos->flags = QOS_FLAG_NOTSET;
 
-	qos->grace_time = NO_VAL;
-	qos->preempt_mode = (uint16_t)NO_VAL;
-	qos->priority = NO_VAL;
+	qos->grace_time = init_val;
+	qos->preempt_mode = (uint16_t)init_val;
+	qos->priority = init_val;
 
-	qos->grp_cpu_mins = (uint64_t)NO_VAL;
-	qos->grp_cpu_run_mins = (uint64_t)NO_VAL;
-	qos->grp_cpus = NO_VAL;
-	qos->grp_jobs = NO_VAL;
-	qos->grp_mem = NO_VAL;
-	qos->grp_nodes = NO_VAL;
-	qos->grp_submit_jobs = NO_VAL;
-	qos->grp_wall = NO_VAL;
+	/* qos->grp_tres_mins = NULL; */
+	/* qos->grp_tres_run_mins = NULL; */
+	/* qos->grp_tres = NULL; */
+	qos->grp_jobs = init_val;
+	qos->grp_submit_jobs = init_val;
+	qos->grp_wall = init_val;
 
-	qos->max_cpu_mins_pj = (uint64_t)NO_VAL;
-	qos->max_cpu_run_mins_pu = (uint64_t)NO_VAL;
-	qos->max_cpus_pj = NO_VAL;
-	qos->max_cpus_pu = NO_VAL;
-	qos->max_jobs_pu = NO_VAL;
-	qos->max_nodes_pj = NO_VAL;
-	qos->max_nodes_pu = NO_VAL;
-	qos->max_submit_jobs_pu = NO_VAL;
-	qos->max_wall_pj = NO_VAL;
+	/* qos->max_tres_mins_pj = NULL; */
+	/* qos->max_tres_run_mins_pu = NULL; */
+	/* qos->max_tres_pj = NULL; */
+	/* qos->max_tres_pu = NULL; */
+	qos->max_jobs_pu = init_val;
+	qos->max_submit_jobs_pu = init_val;
+	qos->max_wall_pj = init_val;
 
-	qos->min_cpus_pj = NO_VAL;
+	/* qos->min_tres_pj = NULL; */
 
-	qos->usage_factor = (double)NO_VAL;
-	qos->usage_thres = (double)NO_VAL;
+	qos->usage_factor = (double)init_val;
+	qos->usage_thres = (double)init_val;
 }
 
 extern void slurmdb_init_res_rec(slurmdb_res_rec_t *res,
@@ -1244,6 +1396,18 @@ extern void slurmdb_init_wckey_rec(slurmdb_wckey_rec_t *wckey, bool free_it)
 	wckey->is_def = (uint16_t)NO_VAL;
 }
 
+extern void slurmdb_init_tres_cond(slurmdb_tres_cond_t *tres,
+				    bool free_it)
+{
+	if (!tres)
+		return;
+
+	if (free_it)
+		_free_tres_cond_members(tres);
+	memset(tres, 0, sizeof(slurmdb_tres_cond_t));
+	tres->count = NO_VAL;
+}
+
 extern void slurmdb_init_cluster_cond(slurmdb_cluster_cond_t *cluster,
 				      bool free_it)
 {
@@ -1344,6 +1508,8 @@ extern char *slurmdb_qos_flags_str(uint32_t flags)
 		xstrcat(qos_flags, "PartitionMaxNodes,");
 	if (flags & QOS_FLAG_PART_MIN_NODE)
 		xstrcat(qos_flags, "PartitionMinNodes,");
+	if (flags & QOS_FLAG_OVER_PART_QOS)
+		xstrcat(qos_flags, "OverPartQOS,");
 	if (flags & QOS_FLAG_PART_TIME_LIMIT)
 		xstrcat(qos_flags, "PartitionTimeLimit,");
 	if (flags & QOS_FLAG_REQ_RESV)
@@ -1507,7 +1673,7 @@ extern List slurmdb_get_hierarchical_sorted_assoc_list(List assoc_list)
 
 	_append_hierarchical_children_ret_list(ret_list,
 					       slurmdb_hierarchical_rec_list);
-	list_destroy(slurmdb_hierarchical_rec_list);
+	FREE_NULL_LIST(slurmdb_hierarchical_rec_list);
 
 	return ret_list;
 }
@@ -1525,7 +1691,7 @@ extern void slurmdb_sort_hierarchical_assoc_list(List assoc_list)
 
 	_append_hierarchical_children_ret_list(assoc_list,
 					       slurmdb_hierarchical_rec_list);
-	list_destroy(slurmdb_hierarchical_rec_list);
+	FREE_NULL_LIST(slurmdb_hierarchical_rec_list);
 }
 
 extern List slurmdb_get_acct_hierarchical_rec_list(List assoc_list)
@@ -1534,7 +1700,7 @@ extern List slurmdb_get_acct_hierarchical_rec_list(List assoc_list)
 	slurmdb_hierarchical_rec_t *last_acct_parent = NULL;
 	slurmdb_hierarchical_rec_t *last_parent = NULL;
 	slurmdb_hierarchical_rec_t *arch_rec = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	List total_assoc_list = list_create(NULL);
 	List arch_rec_list =
 		list_create(slurmdb_destroy_hierarchical_rec);
@@ -1610,7 +1776,7 @@ extern List slurmdb_get_acct_hierarchical_rec_list(List assoc_list)
 	list_iterator_destroy(itr);
 	list_iterator_destroy(itr2);
 
-	list_destroy(total_assoc_list);
+	FREE_NULL_LIST(total_assoc_list);
 //	info("got %d", list_count(arch_rec_list));
 	_sort_slurmdb_hierarchical_rec_list(arch_rec_list);
 
@@ -1670,10 +1836,8 @@ extern char *slurmdb_tree_name_get(char *name, char *parent, List tree_list)
 extern int set_qos_bitstr_from_list(bitstr_t *valid_qos, List qos_list)
 {
 	ListIterator itr = NULL;
-	bitoff_t bit = 0;
 	int rc = SLURM_SUCCESS;
 	char *temp_char = NULL;
-	void (*my_function) (bitstr_t *b, bitoff_t bit);
 
 	xassert(valid_qos);
 
@@ -1681,23 +1845,51 @@ extern int set_qos_bitstr_from_list(bitstr_t *valid_qos, List qos_list)
 		return SLURM_ERROR;
 
 	itr = list_iterator_create(qos_list);
-	while((temp_char = list_next(itr))) {
-		if (temp_char[0] == '-') {
-			temp_char++;
-			my_function = bit_clear;
-		} else if (temp_char[0] == '+') {
-			temp_char++;
-			my_function = bit_set;
-		} else
-			my_function = bit_set;
-		bit = atoi(temp_char);
-		if (bit >= bit_size(valid_qos)) {
-			rc = SLURM_ERROR;
-			break;
+	while((temp_char = list_next(itr)))
+		_set_qos_bit_from_string(valid_qos, temp_char);
+	list_iterator_destroy(itr);
+
+	return rc;
+}
+
+extern int set_qos_bitstr_from_string(bitstr_t *valid_qos, char *names)
+{
+	int rc = SLURM_SUCCESS;
+	int i=0, start=0;
+	char *name = NULL;
+
+	xassert(valid_qos);
+
+	if (!names)
+		return SLURM_ERROR;
+
+	/* skip the first comma if it is one */
+	if (names[i] == ',')
+		i++;
+
+	start = i;
+	while (names[i]) {
+		//info("got %d - %d = %d", i, start, i-start);
+		if (names[i] == ',') {
+			/* If there is a comma at the end just
+			   ignore it */
+			if (!names[i+1])
+				break;
+
+			name = xstrndup(names+start, (i-start));
+			/* info("got %s %d", name, i-start); */
+			_set_qos_bit_from_string(valid_qos, name);
+			xfree(name);
+			i++;
+			start = i;
 		}
-		(*(my_function))(valid_qos, bit);
+		i++;
 	}
-	list_iterator_destroy(itr);
+
+	name = xstrndup(names+start, (i-start));
+	/* info("got %s %d", name, i-start); */
+	_set_qos_bit_from_string(valid_qos, name);
+	xfree(name);
 
 	return rc;
 }
@@ -1731,7 +1923,7 @@ extern char *get_qos_complete_str_bitstr(List qos_list, bitstr_t *valid_qos)
 			print_this = xstrdup(temp_char);
 	}
 	list_iterator_destroy(itr);
-	list_destroy(temp_list);
+	FREE_NULL_LIST(temp_list);
 
 	if (!print_this)
 		return xstrdup("");
@@ -1779,7 +1971,7 @@ extern char *get_qos_complete_str(List qos_list, List num_qos_list)
 			print_this = xstrdup(temp_char);
 	}
 	list_iterator_destroy(itr);
-	list_destroy(temp_list);
+	FREE_NULL_LIST(temp_list);
 
 	if (!print_this)
 		return xstrdup("");
@@ -1877,11 +2069,11 @@ extern uint16_t str_2_slurmdb_problem(char *problem)
 	if (!problem)
 		return type;
 
-	if (slurm_strcasestr(problem, "account no associations"))
+	if (slurm_strcasestr(problem, "account no assocs"))
 		type = SLURMDB_PROBLEM_USER_NO_ASSOC;
 	else if (slurm_strcasestr(problem, "account no users"))
 		type = SLURMDB_PROBLEM_ACCT_NO_USERS;
-	else if (slurm_strcasestr(problem, "user no associations"))
+	else if (slurm_strcasestr(problem, "user no assocs"))
 		type = SLURMDB_PROBLEM_USER_NO_ASSOC;
 	else if (slurm_strcasestr(problem, "user no uid"))
 		type = SLURMDB_PROBLEM_USER_NO_UID;
@@ -1889,7 +2081,7 @@ extern uint16_t str_2_slurmdb_problem(char *problem)
 	return type;
 }
 
-extern void log_assoc_rec(slurmdb_association_rec_t *assoc_ptr,
+extern void log_assoc_rec(slurmdb_assoc_rec_t *assoc_ptr,
 			  List qos_list)
 {
 	xassert(assoc_ptr);
@@ -1909,38 +2101,21 @@ extern void log_assoc_rec(slurmdb_association_rec_t *assoc_ptr,
 	else
 		debug2("  Default QOS      : NONE");
 
-	if (assoc_ptr->grp_cpu_mins == INFINITE)
-		debug2("  GrpCPUMins       : NONE");
-	else if (assoc_ptr->grp_cpu_mins != NO_VAL)
-		debug2("  GrpCPUMins       : %"PRIu64"",
-		       assoc_ptr->grp_cpu_mins);
-
-	if (assoc_ptr->grp_cpu_run_mins == INFINITE)
-		debug2("  GrpCPURunMins    : NONE");
-	else if (assoc_ptr->grp_cpu_run_mins != NO_VAL)
-		debug2("  GrpCPURunMins    : %"PRIu64"",
-		       assoc_ptr->grp_cpu_run_mins);
-
-	if (assoc_ptr->grp_cpus == INFINITE)
-		debug2("  GrpCPUs          : NONE");
-	else if (assoc_ptr->grp_cpus != NO_VAL)
-		debug2("  GrpCPUs          : %u", assoc_ptr->grp_cpus);
+	debug2("  GrpTRESMins      : %s",
+	       assoc_ptr->grp_tres_mins ?
+	       assoc_ptr->grp_tres_mins : "NONE");
+	debug2("  GrpTRESRunMins   : %s",
+	       assoc_ptr->grp_tres_run_mins ?
+	       assoc_ptr->grp_tres_run_mins : "NONE");
+	debug2("  GrpTRES          : %s",
+	       assoc_ptr->grp_tres ?
+	       assoc_ptr->grp_tres : "NONE");
 
 	if (assoc_ptr->grp_jobs == INFINITE)
 		debug2("  GrpJobs          : NONE");
 	else if (assoc_ptr->grp_jobs != NO_VAL)
 		debug2("  GrpJobs          : %u", assoc_ptr->grp_jobs);
 
-	if (assoc_ptr->grp_mem == INFINITE)
-		debug2("  GrpMemory        : NONE");
-	else if (assoc_ptr->grp_mem != NO_VAL)
-		debug2("  GrpMemory        : %u", assoc_ptr->grp_mem);
-
-	if (assoc_ptr->grp_nodes == INFINITE)
-		debug2("  GrpNodes         : NONE");
-	else if (assoc_ptr->grp_nodes != NO_VAL)
-		debug2("  GrpNodes         : %u", assoc_ptr->grp_nodes);
-
 	if (assoc_ptr->grp_submit_jobs == INFINITE)
 		debug2("  GrpSubmitJobs    : NONE");
 	else if (assoc_ptr->grp_submit_jobs != NO_VAL)
@@ -1955,33 +2130,24 @@ extern void log_assoc_rec(slurmdb_association_rec_t *assoc_ptr,
 		debug2("  GrpWall          : %s", time_buf);
 	}
 
-	if (assoc_ptr->max_cpu_mins_pj == INFINITE)
-		debug2("  MaxCPUMins       : NONE");
-	else if (assoc_ptr->max_cpu_mins_pj != NO_VAL)
-		debug2("  MaxCPUMins       : %"PRIu64"",
-		       assoc_ptr->max_cpu_mins_pj);
-
-	if (assoc_ptr->max_cpu_run_mins == INFINITE)
-		debug2("  MaxCPURunMins    : NONE");
-	else if (assoc_ptr->max_cpu_run_mins != NO_VAL)
-		debug2("  MaxCPURunMins    : %"PRIu64"",
-		       assoc_ptr->max_cpu_run_mins);
-
-	if (assoc_ptr->max_cpus_pj == INFINITE)
-		debug2("  MaxCPUs          : NONE");
-	else if (assoc_ptr->max_cpus_pj != NO_VAL)
-		debug2("  MaxCPUs          : %u", assoc_ptr->max_cpus_pj);
+	debug2("  MaxTRESMins      : %s",
+	       assoc_ptr->max_tres_mins_pj ?
+	       assoc_ptr->max_tres_mins_pj : "NONE");
+	debug2("  MaxTRESRunMins   : %s",
+	       assoc_ptr->max_tres_run_mins ?
+	       assoc_ptr->max_tres_run_mins : "NONE");
+	debug2("  MaxTRESPerJob    : %s",
+	       assoc_ptr->max_tres_pj ?
+	       assoc_ptr->max_tres_pj : "NONE");
+	debug2("  MaxTRESPerNode   : %s",
+	       assoc_ptr->max_tres_pn ?
+	       assoc_ptr->max_tres_pn : "NONE");
 
 	if (assoc_ptr->max_jobs == INFINITE)
 		debug2("  MaxJobs          : NONE");
 	else if (assoc_ptr->max_jobs != NO_VAL)
 		debug2("  MaxJobs          : %u", assoc_ptr->max_jobs);
 
-	if (assoc_ptr->max_nodes_pj == INFINITE)
-		debug2("  MaxNodes         : NONE");
-	else if (assoc_ptr->max_nodes_pj != NO_VAL)
-		debug2("  MaxNodes         : %u", assoc_ptr->max_nodes_pj);
-
 	if (assoc_ptr->max_submit_jobs == INFINITE)
 		debug2("  MaxSubmitJobs    : NONE");
 	else if (assoc_ptr->max_submit_jobs != NO_VAL)
@@ -2047,16 +2213,16 @@ extern int slurmdb_report_set_start_end_time(time_t *start, time_t *end)
 //	info("now got %d and %d sent", (*start), (*end));
 	/* Default is going to be the last day */
 	if (!sent_end) {
-		if (!localtime_r(&my_time, &end_tm)) {
+		if (!slurm_localtime_r(&my_time, &end_tm)) {
 			error("Couldn't get localtime from end %ld",
 			      (long)my_time);
 			return SLURM_ERROR;
 		}
 		end_tm.tm_hour = 0;
-		//(*end) = mktime(&end_tm);
+		//(*end) = slurm_mktime(&end_tm);
 	} else {
 		temp_time = sent_end;
-		if (!localtime_r(&temp_time, &end_tm)) {
+		if (!slurm_localtime_r(&temp_time, &end_tm)) {
 			error("Couldn't get localtime from user end %ld",
 			      (long)my_time);
 			return SLURM_ERROR;
@@ -2070,20 +2236,20 @@ extern int slurmdb_report_set_start_end_time(time_t *start, time_t *end)
 	end_tm.tm_sec = 0;
 	end_tm.tm_min = 0;
 	end_tm.tm_isdst = -1;
-	(*end) = mktime(&end_tm);
+	(*end) = slurm_mktime(&end_tm);
 
 	if (!sent_start) {
-		if (!localtime_r(&my_time, &start_tm)) {
+		if (!slurm_localtime_r(&my_time, &start_tm)) {
 			error("Couldn't get localtime from start %ld",
 			      (long)my_time);
 			return SLURM_ERROR;
 		}
 		start_tm.tm_hour = 0;
 		start_tm.tm_mday--;
-		//(*start) = mktime(&start_tm);
+		//(*start) = slurm_mktime(&start_tm);
 	} else {
 		temp_time = sent_start;
-		if (!localtime_r(&temp_time, &start_tm)) {
+		if (!slurm_localtime_r(&temp_time, &start_tm)) {
 			error("Couldn't get localtime from user start %ld",
 			      (long)my_time);
 			return SLURM_ERROR;
@@ -2096,7 +2262,7 @@ extern int slurmdb_report_set_start_end_time(time_t *start, time_t *end)
 	start_tm.tm_sec = 0;
 	start_tm.tm_min = 0;
 	start_tm.tm_isdst = -1;
-	(*start) = mktime(&start_tm);
+	(*start) = slurm_mktime(&start_tm);
 
 	if ((*end)-(*start) < 3600)
 		(*end) = (*start) + 3600;
@@ -2461,7 +2627,8 @@ extern slurmdb_report_cluster_rec_t *slurmdb_cluster_rec_2_report(
 {
 	slurmdb_report_cluster_rec_t *slurmdb_report_cluster;
 	slurmdb_cluster_accounting_rec_t *accting = NULL;
-	ListIterator cluster_itr = NULL;
+	slurmdb_tres_rec_t *tres_rec;
+	ListIterator itr = NULL;
 	int count;
 
 	xassert(cluster);
@@ -2471,19 +2638,18 @@ extern slurmdb_report_cluster_rec_t *slurmdb_cluster_rec_2_report(
 	if (!(count = list_count(cluster->accounting_list)))
 		return slurmdb_report_cluster;
 
-	/* get the amount of time and the average cpu count
+	/* get the amount of time and the average count
 	   during the time we are looking at */
-	cluster_itr = list_iterator_create(cluster->accounting_list);
-	while((accting = list_next(cluster_itr))) {
-		slurmdb_report_cluster->cpu_secs += accting->alloc_secs
-			+ accting->down_secs + accting->idle_secs
-			+ accting->resv_secs + accting->pdown_secs;
-		slurmdb_report_cluster->cpu_count += accting->cpu_count;
-		slurmdb_report_cluster->consumed_energy += accting->consumed_energy;
-	}
-	list_iterator_destroy(cluster_itr);
+	itr = list_iterator_create(cluster->accounting_list);
+	while ((accting = list_next(itr)))
+		slurmdb_add_cluster_accounting_to_tres_list(
+			accting, &slurmdb_report_cluster->tres_list);
+	list_iterator_destroy(itr);
 
-	slurmdb_report_cluster->cpu_count /= count;
+	itr = list_iterator_create(slurmdb_report_cluster->tres_list);
+	while ((tres_rec = list_next(itr)))
+		tres_rec->count /= tres_rec->rec_count;
+	list_iterator_destroy(itr);
 
 	return slurmdb_report_cluster;
 }
@@ -2514,3 +2680,928 @@ extern char *slurmdb_get_selected_step_id(
 
 	return job_id_str;
 }
+
+extern int slurmdb_get_first_avail_cluster(job_desc_msg_t *req,
+	char *cluster_names, slurmdb_cluster_rec_t **cluster_rec)
+{
+	local_cluster_rec_t *local_cluster = NULL;
+	int rc = SLURM_SUCCESS;
+	char buf[64];
+	bool host_set = false;
+	ListIterator itr;
+	List cluster_list = NULL;
+	List ret_list = NULL;
+
+	*cluster_rec = NULL;
+	cluster_list = slurmdb_get_info_cluster(cluster_names);
+
+	/* return if we only have 1 or less clusters here */
+	if (!cluster_list || !list_count(cluster_list)) {
+		rc = SLURM_ERROR;
+		goto end_it;
+	} else if (list_count(cluster_list) == 1) {
+		*cluster_rec = list_pop(cluster_list);
+		goto end_it;
+	}
+
+	if ((req->alloc_node == NULL) &&
+	    (gethostname_short(buf, sizeof(buf)) == 0)) {
+		req->alloc_node = buf;
+		host_set = true;
+	}
+
+	if (working_cluster_rec)
+		*cluster_rec = working_cluster_rec;
+
+	ret_list = list_create(_destroy_local_cluster_rec);
+	itr = list_iterator_create(cluster_list);
+	while ((working_cluster_rec = list_next(itr))) {
+		if ((local_cluster = _job_will_run(req)))
+			list_append(ret_list, local_cluster);
+		else
+			error("Problem with submit to cluster %s: %m",
+			      working_cluster_rec->name);
+	}
+	list_iterator_destroy(itr);
+
+	/* restore working_cluster_rec in case it was already set */
+	if (*cluster_rec) {
+		working_cluster_rec = *cluster_rec;
+		*cluster_rec = NULL;
+	}
+
+	if (host_set)
+		req->alloc_node = NULL;
+
+	if (!list_count(ret_list)) {
+		error("Can't run on any of the specified clusters");
+		rc = SLURM_ERROR;
+		goto end_it;
+	}
+
+	/* sort the list so the first spot is on top */
+	local_cluster_name = slurm_get_cluster_name();
+	list_sort(ret_list, (ListCmpF)_sort_local_cluster);
+	xfree(local_cluster_name);
+	local_cluster = list_peek(ret_list);
+
+	/* prevent cluster_rec from being freed when cluster_list is destroyed */
+	itr = list_iterator_create(cluster_list);
+	while ((*cluster_rec = list_next(itr))) {
+		if (*cluster_rec == local_cluster->cluster_rec) {
+			list_remove(itr);
+			break;
+		}
+	}
+	list_iterator_destroy(itr);
+end_it:
+	FREE_NULL_LIST(ret_list);
+	FREE_NULL_LIST(cluster_list);
+
+	return rc;
+}
+
+extern void slurmdb_copy_assoc_rec_limits(slurmdb_assoc_rec_t *out,
+					  slurmdb_assoc_rec_t *in)
+{
+	out->grp_jobs = in->grp_jobs;
+	out->grp_submit_jobs = in->grp_submit_jobs;
+	xfree(out->grp_tres);
+	out->grp_tres = xstrdup(in->grp_tres);
+	xfree(out->grp_tres_mins);
+	out->grp_tres_mins = xstrdup(in->grp_tres_mins);
+	xfree(out->grp_tres_run_mins);
+	out->grp_tres_run_mins = xstrdup(in->grp_tres_run_mins);
+	out->grp_wall = in->grp_wall;
+
+	out->max_jobs = in->max_jobs;
+	out->max_submit_jobs = in->max_submit_jobs;
+	xfree(out->max_tres_pj);
+	out->max_tres_pj = xstrdup(in->max_tres_pj);
+	xfree(out->max_tres_pn);
+	out->max_tres_pn = xstrdup(in->max_tres_pn);
+	xfree(out->max_tres_mins_pj);
+	out->max_tres_mins_pj =	xstrdup(in->max_tres_mins_pj);
+	xfree(out->max_tres_run_mins);
+	out->max_tres_run_mins = xstrdup(in->max_tres_run_mins);
+	out->max_wall_pj = in->max_wall_pj;
+
+	FREE_NULL_LIST(out->qos_list);
+	out->qos_list = slurm_copy_char_list(in->qos_list);
+}
+
+extern void slurmdb_copy_qos_rec_limits(slurmdb_qos_rec_t *out,
+					slurmdb_qos_rec_t *in)
+{
+	out->flags = in->flags;
+	out->grace_time = in->grace_time;
+	out->grp_jobs = in->grp_jobs;
+	out->grp_submit_jobs = in->grp_submit_jobs;
+	xfree(out->grp_tres);
+	out->grp_tres = xstrdup(in->grp_tres);
+	xfree(out->grp_tres_mins);
+	out->grp_tres_mins = xstrdup(in->grp_tres_mins);
+	xfree(out->grp_tres_run_mins);
+	out->grp_tres_run_mins = xstrdup(in->grp_tres_run_mins);
+	out->grp_wall = in->grp_wall;
+
+	out->max_jobs_pu = in->max_jobs_pu;
+	out->max_submit_jobs_pu = in->max_submit_jobs_pu;
+	xfree(out->max_tres_mins_pj);
+	out->max_tres_mins_pj =	xstrdup(in->max_tres_mins_pj);
+	xfree(out->max_tres_pj);
+	out->max_tres_pj = xstrdup(in->max_tres_pj);
+	xfree(out->max_tres_pn);
+	out->max_tres_pn = xstrdup(in->max_tres_pn);
+	xfree(out->max_tres_pu);
+	out->max_tres_pu = xstrdup(in->max_tres_pu);
+	xfree(out->max_tres_run_mins_pu);
+	out->max_tres_run_mins_pu = xstrdup(in->max_tres_run_mins_pu);
+	out->max_wall_pj = in->max_wall_pj;
+	xfree(out->min_tres_pj);
+	out->min_tres_pj = xstrdup(in->min_tres_pj);
+
+	FREE_NULL_LIST(out->preempt_list);
+	out->preempt_list = slurm_copy_char_list(in->preempt_list);
+
+	out->preempt_mode = in->preempt_mode;
+
+	out->priority = in->priority;
+
+	out->usage_factor = in->usage_factor;
+	out->usage_thres = in->usage_thres;
+
+}
+
+extern slurmdb_tres_rec_t *slurmdb_copy_tres_rec(slurmdb_tres_rec_t *tres)
+{
+	slurmdb_tres_rec_t *tres_out = NULL;
+
+	if (!tres)
+		return tres_out;
+
+	tres_out = xmalloc_nz(sizeof(slurmdb_tres_rec_t));
+	memcpy(tres_out, tres, sizeof(slurmdb_tres_rec_t));
+	tres_out->name = xstrdup(tres->name);
+	tres_out->type = xstrdup(tres->type);
+	tres_out->count = tres->count;
+
+	return tres_out;
+}
+
+extern List slurmdb_copy_tres_list(List tres)
+{
+	slurmdb_tres_rec_t *tres_rec = NULL;
+	ListIterator itr;
+	List tres_out;
+
+	if (!tres)
+		return NULL;
+
+	tres_out = list_create(slurmdb_destroy_tres_rec);
+
+	itr = list_iterator_create(tres);
+	while ((tres_rec = list_next(itr)))
+		list_append(tres_out, slurmdb_copy_tres_rec(tres_rec));
+	list_iterator_destroy(itr);
+
+	return tres_out;
+}
+
+extern List slurmdb_diff_tres_list(List tres_list_old, List tres_list_new)
+{
+	slurmdb_tres_rec_t *tres_rec = NULL, *tres_rec_old;
+	ListIterator itr;
+	List tres_out;
+
+	if (!tres_list_new || !list_count(tres_list_new))
+		return NULL;
+
+	tres_out = slurmdb_copy_tres_list(tres_list_new);
+
+	itr = list_iterator_create(tres_out);
+	while ((tres_rec = list_next(itr))) {
+		if (!(tres_rec_old = list_find_first(tres_list_old,
+						     slurmdb_find_tres_in_list,
+						     &tres_rec->id)))
+			continue;
+		if (tres_rec_old->count == tres_rec->count)
+			list_delete_item(itr);
+	}
+	list_iterator_destroy(itr);
+
+	return tres_out;
+}
+
+extern char *slurmdb_tres_string_combine_lists(
+	List tres_list_old, List tres_list_new)
+{
+	slurmdb_tres_rec_t *tres_rec = NULL, *tres_rec_old;
+	ListIterator itr;
+	char *tres_str = NULL;
+
+	if (!tres_list_new || !list_count(tres_list_new))
+		return NULL;
+
+	itr = list_iterator_create(tres_list_new);
+	while ((tres_rec = list_next(itr))) {
+		if (!(tres_rec_old = list_find_first(tres_list_old,
+						     slurmdb_find_tres_in_list,
+						     &tres_rec->id))
+		    || (tres_rec_old->count == INFINITE64))
+			continue;
+		if (tres_str)
+			xstrcat(tres_str, ",");
+		xstrfmtcat(tres_str, "%u=%"PRIu64,
+			   tres_rec->id, tres_rec->count);
+	}
+	list_iterator_destroy(itr);
+
+	return tres_str;
+}
+
+/* caller must xfree this char * returned */
+extern char *slurmdb_make_tres_string(List tres, uint32_t flags)
+{
+	char *tres_str = NULL;
+	ListIterator itr;
+	slurmdb_tres_rec_t *tres_rec;
+
+	if (!tres)
+		return tres_str;
+
+	itr = list_iterator_create(tres);
+	while ((tres_rec = list_next(itr))) {
+		if ((flags & TRES_STR_FLAG_SIMPLE) || !tres_rec->type)
+			xstrfmtcat(tres_str, "%s%u=%"PRIu64,
+				   (tres_str ||
+				    (flags & TRES_STR_FLAG_COMMA1)) ? "," : "",
+				   tres_rec->id, tres_rec->count);
+
+		else
+			xstrfmtcat(tres_str, "%s%s%s%s=%"PRIu64,
+				   (tres_str ||
+				    (flags & TRES_STR_FLAG_COMMA1)) ? "," : "",
+				   tres_rec->type,
+				   tres_rec->name ? "/" : "",
+				   tres_rec->name ? tres_rec->name : "",
+				   tres_rec->count);
+	}
+	list_iterator_destroy(itr);
+
+	return tres_str;
+}
+
+extern char *slurmdb_make_tres_string_from_arrays(char **tres_names,
+						  uint64_t *tres_cnts,
+						  uint32_t tres_cnt,
+						  uint32_t flags)
+{
+	char *tres_str = NULL;
+	int i;
+
+	if (!tres_names || !tres_cnts)
+		return tres_str;
+
+	for (i=0; i<tres_cnt; i++) {
+		if ((tres_cnts[i] == INFINITE64) &&
+		    (flags & TRES_STR_FLAG_REMOVE))
+			continue;
+		xstrfmtcat(tres_str, "%s%s=%"PRIu64,
+			   tres_str ? "," : "", tres_names[i], tres_cnts[i]);
+	}
+
+	return tres_str;
+}
+
+extern char *slurmdb_make_tres_string_from_simple(
+	char *tres_in, List full_tres_list)
+{
+	char *tres_str = NULL;
+	char *tmp_str = tres_in;
+	int id;
+	uint64_t count;
+	slurmdb_tres_rec_t *tres_rec;
+
+	if (!full_tres_list || !tmp_str || !tmp_str[0]
+	    || tmp_str[0] < '0' || tmp_str[0] > '9')
+		return tres_str;
+
+	while (tmp_str) {
+		id = atoi(tmp_str);
+		if (id <= 0) {
+			error("slurmdb_make_tres_string_from_simple: no id "
+			      "found at %s instead", tmp_str);
+			goto get_next;
+		}
+
+		if (!(tres_rec = list_find_first(
+			      full_tres_list, slurmdb_find_tres_in_list,
+			      &id))) {
+			debug("No tres known by id %d", id);
+			goto get_next;
+		}
+
+		if (!(tmp_str = strchr(tmp_str, '='))) {
+			error("slurmdb_make_tres_string_from_simple: "
+			      "no value found");
+			break;
+		}
+		count = slurm_atoull(++tmp_str);
+
+		if (tres_str)
+			xstrcat(tres_str, ",");
+		if (!tres_rec->type)
+			xstrfmtcat(tres_str, "%u=", tres_rec->id);
+
+		else
+			xstrfmtcat(tres_str, "%s%s%s=",
+				   tres_rec->type,
+				   tres_rec->name ? "/" : "",
+				   tres_rec->name ? tres_rec->name : "");
+		if (count != INFINITE64)
+			xstrfmtcat(tres_str, "%"PRIu64, count);
+		else
+			xstrfmtcat(tres_str, "NONE");
+
+
+	get_next:
+		if (!(tmp_str = strchr(tmp_str, ',')))
+			break;
+		tmp_str++;
+	}
+
+	return tres_str;
+}
+
+extern char *slurmdb_format_tres_str(
+	char *tres_in, List full_tres_list, bool simple)
+{
+	char *tres_str = NULL;
+	char *tmp_str = tres_in;
+	uint64_t count;
+	slurmdb_tres_rec_t *tres_rec;
+
+	if (!full_tres_list || !tmp_str || !tmp_str[0])
+		return tres_str;
+
+	if (tmp_str[0] == ',')
+		tmp_str++;
+
+	while (tmp_str) {
+		if (tmp_str[0] >= '0' && tmp_str[0] <= '9') {
+			int id = atoi(tmp_str);
+			if (id <= 0) {
+				error("slurmdb_format_tres_str: "
+				      "no id found at %s instead", tmp_str);
+				goto get_next;
+			}
+			if (!(tres_rec = list_find_first(
+				      full_tres_list, slurmdb_find_tres_in_list,
+				      &id))) {
+				debug("slurmdb_format_tres_str: "
+				      "No tres known by id %d", id);
+				goto get_next;
+			}
+		} else {
+			int end = 0;
+			char *tres_name;
+
+			while (tmp_str[end]) {
+				if (tmp_str[end] == '=')
+					break;
+				end++;
+			}
+			if (!tmp_str[end]) {
+				error("slurmdb_format_tres_str: "
+				      "no id found at %s instead", tmp_str);
+				goto get_next;
+			}
+			tres_name = xstrndup(tmp_str, end);
+			if (!(tres_rec = list_find_first(
+				      full_tres_list,
+				      slurmdb_find_tres_in_list_by_type,
+				      tres_name))) {
+				debug("slurmdb_format_tres_str: "
+				      "No tres known by type %s", tres_name);
+				xfree(tres_name);
+				goto get_next;
+			}
+			xfree(tres_name);
+		}
+
+		if (!(tmp_str = strchr(tmp_str, '='))) {
+			error("slurmdb_format_tres_str: "
+			      "no value found");
+			break;
+		}
+		count = slurm_atoull(++tmp_str);
+
+		if (tres_str)
+			xstrcat(tres_str, ",");
+		if (simple || !tres_rec->type)
+			xstrfmtcat(tres_str, "%u=%"PRIu64"",
+				   tres_rec->id, count);
+
+		else
+			xstrfmtcat(tres_str, "%s%s%s=%"PRIu64"",
+				   tres_rec->type,
+				   tres_rec->name ? "/" : "",
+				   tres_rec->name ? tres_rec->name : "",
+				   count);
+	get_next:
+		if (!(tmp_str = strchr(tmp_str, ',')))
+			break;
+		tmp_str++;
+	}
+
+	return tres_str;
+}
+
+/*
+ * Comparator used for sorting tres by id
+ *
+ * returns: -1 tres_a < tres_b   0: tres_a == tres_b   1: tres_a > tres_b
+ *
+ */
+extern int slurmdb_sort_tres_by_id_asc(void *v1, void *v2)
+{
+	slurmdb_tres_rec_t *tres_a = *(slurmdb_tres_rec_t **)v1;
+	slurmdb_tres_rec_t *tres_b = *(slurmdb_tres_rec_t **)v2;
+
+	if (tres_a->id < tres_b->id)
+		return -1;
+	else if (tres_a->id > tres_b->id)
+		return 1;
+
+	return 0;
+}
+
+/* This only works on a simple id=count list, not on a formatted list */
+extern void slurmdb_tres_list_from_string(
+	List *tres_list, char *tres, uint32_t flags)
+{
+	char *tmp_str = tres;
+	int id;
+	uint64_t count;
+	slurmdb_tres_rec_t *tres_rec;
+	int remove_found = 0;
+	xassert(tres_list);
+
+	if (!tres || !tres[0])
+		return;
+
+	if (tmp_str[0] == ',')
+		tmp_str++;
+
+	while (tmp_str) {
+		id = atoi(tmp_str);
+		/* 0 isn't a valid tres id */
+		if (id <= 0) {
+			error("slurmdb_tres_list_from_string: no id "
+			      "found at %s instead", tmp_str);
+			break;
+		}
+		if (!(tmp_str = strchr(tmp_str, '='))) {
+			error("slurmdb_tres_list_from_string: "
+			      "no value found %s", tres);
+			break;
+		}
+		count = slurm_atoull(++tmp_str);
+
+		if (!*tres_list)
+			*tres_list = list_create(slurmdb_destroy_tres_rec);
+
+		if (!(tres_rec = list_find_first(
+			      *tres_list, slurmdb_find_tres_in_list, &id))) {
+			tres_rec = xmalloc(sizeof(slurmdb_tres_rec_t));
+			tres_rec->id = id;
+			tres_rec->count = count;
+			list_append(*tres_list, tres_rec);
+			if (count == INFINITE64)
+				remove_found++;
+		} else if (flags & TRES_STR_FLAG_REPLACE) {
+			debug2("TRES %u was already here with count %"PRIu64", "
+			       "replacing with %"PRIu64,
+			      tres_rec->id, tres_rec->count, count);
+			tres_rec->count = count;
+		}
+
+		if (!(tmp_str = strchr(tmp_str, ',')))
+			break;
+		tmp_str++;
+	}
+
+	if (remove_found && (flags & TRES_STR_FLAG_REMOVE)) {
+		/* here we will remove the tres we don't want in the
+		   string */
+		uint64_t inf64 = INFINITE64;
+		int removed;
+
+		if ((removed = list_delete_all(
+			     *tres_list,
+			     slurmdb_find_tres_in_list_by_count,
+			     &inf64)) != remove_found)
+			debug("slurmdb_tres_list_from_string: "
+			      "was expecting to remove %d, but removed %d",
+			      remove_found, removed);
+	}
+
+	if (flags & TRES_STR_FLAG_SORT_ID)
+		list_sort(*tres_list, (ListCmpF)slurmdb_sort_tres_by_id_asc);
+
+	return;
+}
+
+extern char *slurmdb_combine_tres_strings(
+	char **tres_str_old, char *tres_str_new, uint32_t flags)
+{
+	List tres_list = NULL;
+
+	xassert(tres_str_old);
+
+	/* If a new string is being added concat it onto the old
+	 * string, then send it to slurmdb_tres_list_from_string which
+	 * will make it a unique list if flags doesn't contain
+	 * TRES_STR_FLAG_ONLY_CONCAT.
+	 */
+	if (tres_str_new && tres_str_new[0])
+		xstrfmtcat(*tres_str_old, "%s%s%s",
+			   (flags & (TRES_STR_FLAG_COMMA1 |
+				     TRES_STR_FLAG_ONLY_CONCAT)) ? "," : "",
+			   (*tres_str_old && tres_str_new[0] != ',') ? "," : "",
+			   tres_str_new);
+
+	if (flags & TRES_STR_FLAG_ONLY_CONCAT)
+		goto endit;
+
+	slurmdb_tres_list_from_string(&tres_list, *tres_str_old, flags);
+	xfree(*tres_str_old);
+
+	/* Always make it a simple string */
+	flags |= TRES_STR_FLAG_SIMPLE;
+
+	/* Make a new string from the combined */
+	*tres_str_old = slurmdb_make_tres_string(tres_list, flags);
+
+	FREE_NULL_LIST(tres_list);
+endit:
+	/* Send back a blank string instead of NULL. */
+	if (!*tres_str_old && (flags & TRES_STR_FLAG_NO_NULL))
+		*tres_str_old = xstrdup("");
+
+	return *tres_str_old;
+}
+
+extern slurmdb_tres_rec_t *slurmdb_find_tres_in_string(
+	char *tres_str_in, int id)
+{
+	slurmdb_tres_rec_t *tres_rec = NULL;
+	char *tmp_str = tres_str_in;
+
+	if (!tmp_str || !tmp_str[0])
+		return tres_rec;
+
+	while (tmp_str) {
+		if (id == atoi(tmp_str)) {
+			if (!(tmp_str = strchr(tmp_str, '='))) {
+				error("%s: no value found", __func__);
+				break;
+			}
+			tres_rec = xmalloc(sizeof(slurmdb_tres_rec_t));
+			tres_rec->id = id;
+			tres_rec->count = slurm_atoull(++tmp_str);
+			return tres_rec;
+		}
+
+		if (!(tmp_str = strchr(tmp_str, ',')))
+			break;
+		tmp_str++;
+	}
+
+	return tres_rec;
+}
+
+extern uint64_t slurmdb_find_tres_count_in_string(char *tres_str_in, int id)
+{
+	char *tmp_str = tres_str_in;
+
+	if (!tmp_str || !tmp_str[0])
+		return INFINITE64;
+
+	while (tmp_str) {
+		if (id == atoi(tmp_str)) {
+			if (!(tmp_str = strchr(tmp_str, '='))) {
+				error("slurmdb_find_tres_count_in_string: "
+				      "no value found");
+				break;
+			}
+			return slurm_atoull(++tmp_str);
+		}
+
+		if (!(tmp_str = strchr(tmp_str, ',')))
+			break;
+		tmp_str++;
+	}
+
+	return INFINITE64;
+}
+
+extern int slurmdb_find_qos_in_list_by_name(void *x, void *key)
+{
+	slurmdb_qos_rec_t *qos_rec = (slurmdb_qos_rec_t *)x;
+	char *name = (char *)key;
+
+	if (!xstrcmp(qos_rec->name, name))
+		return 1;
+
+	return 0;
+}
+
+extern int slurmdb_find_tres_in_list(void *x, void *key)
+{
+	slurmdb_tres_rec_t *tres_rec = (slurmdb_tres_rec_t *)x;
+	uint32_t tres_id = *(uint32_t *)key;
+
+	if (tres_rec->id == tres_id)
+		return 1;
+
+	return 0;
+}
+
+extern int slurmdb_find_tres_in_list_by_count(void *x, void *key)
+{
+	slurmdb_tres_rec_t *tres_rec = (slurmdb_tres_rec_t *)x;
+	uint64_t count = *(uint64_t *)key;
+
+	if (tres_rec->count == count)
+		return 1;
+
+	return 0;
+}
+
+extern int slurmdb_find_tres_in_list_by_type(void *x, void *key)
+{
+	slurmdb_tres_rec_t *tres_rec = (slurmdb_tres_rec_t *)x;
+	char *type = (char *)key;
+	int end = 0;
+	bool found = false;
+
+	while (type[end]) {
+		if (type[end] == '/') {
+			found = true;
+			break;
+		}
+		end++;
+	}
+
+	if (!xstrncmp(tres_rec->type, type, end)) {
+		if ((!found && !tres_rec->name) ||
+		    (found && !xstrcmp(tres_rec->name, type + end + 1))) {
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+extern int slurmdb_find_cluster_accting_tres_in_list(void *x, void *key)
+{
+	slurmdb_cluster_accounting_rec_t *object =
+		(slurmdb_cluster_accounting_rec_t *)x;
+	uint32_t tres_id = *(uint32_t *)key;
+
+	if (object->tres_rec.id == tres_id)
+		return 1;
+
+	return 0;
+}
+
+extern int slurmdb_add_cluster_accounting_to_tres_list(
+	slurmdb_cluster_accounting_rec_t *accting,
+	List *tres)
+{
+	slurmdb_tres_rec_t *tres_rec = NULL;
+
+	if (!*tres)
+		*tres = list_create(slurmdb_destroy_tres_rec);
+	else
+		tres_rec = list_find_first(*tres,
+					   slurmdb_find_tres_in_list,
+					   &accting->tres_rec.id);
+
+	if (!tres_rec) {
+		tres_rec = slurmdb_copy_tres_rec(&accting->tres_rec);
+		if (!tres_rec) {
+			error("slurmdb_copy_tres_rec returned NULL");
+			return SLURM_ERROR;
+		}
+		list_push(*tres, tres_rec);
+	}
+
+	tres_rec->alloc_secs += accting->alloc_secs
+		+ accting->down_secs + accting->idle_secs
+		+ accting->resv_secs + accting->pdown_secs;
+	tres_rec->count += accting->tres_rec.count;
+	tres_rec->rec_count++;
+
+	return SLURM_SUCCESS;
+}
+
+extern int slurmdb_add_accounting_to_tres_list(
+	slurmdb_accounting_rec_t *accting,
+	List *tres)
+{
+	slurmdb_tres_rec_t *tres_rec = NULL;
+
+	if (!*tres)
+		*tres = list_create(slurmdb_destroy_tres_rec);
+	else
+		tres_rec = list_find_first(*tres,
+					   slurmdb_find_tres_in_list,
+					   &accting->tres_rec.id);
+
+	if (!tres_rec) {
+		tres_rec = slurmdb_copy_tres_rec(&accting->tres_rec);
+		if (!tres_rec) {
+			error("slurmdb_copy_tres_rec returned NULL");
+			return SLURM_ERROR;
+		}
+		list_push(*tres, tres_rec);
+	}
+
+	tres_rec->alloc_secs += accting->alloc_secs;
+
+	return SLURM_SUCCESS;
+}
+
+extern int slurmdb_add_time_from_count_to_tres_list(
+	slurmdb_tres_rec_t *tres_in, List *tres, time_t elapsed)
+{
+	slurmdb_tres_rec_t *tres_rec = NULL;
+
+	if (!elapsed)
+		return SLURM_SUCCESS;
+
+	if (!*tres)
+		*tres = list_create(slurmdb_destroy_tres_rec);
+	else
+		tres_rec = list_find_first(*tres,
+					   slurmdb_find_tres_in_list,
+					   &tres_in->id);
+
+	if (!tres_rec) {
+		tres_rec = slurmdb_copy_tres_rec(tres_in);
+		if (!tres_rec) {
+			error("slurmdb_copy_tres_rec returned NULL");
+			return SLURM_ERROR;
+		}
+		list_push(*tres, tres_rec);
+	}
+
+	tres_rec->alloc_secs +=
+		((uint64_t)tres_in->count * (uint64_t)elapsed);
+
+	return SLURM_SUCCESS;
+}
+
+extern int slurmdb_sum_accounting_list(
+	slurmdb_cluster_accounting_rec_t *accting,
+	List *total_tres_acct)
+{
+	slurmdb_cluster_accounting_rec_t *total_acct = NULL;
+
+	if (!*total_tres_acct)
+		*total_tres_acct = list_create(
+			slurmdb_destroy_cluster_accounting_rec);
+	else
+		total_acct = list_find_first(
+			*total_tres_acct,
+			slurmdb_find_cluster_accting_tres_in_list,
+			&accting->tres_rec.id);
+
+	if (!total_acct) {
+		total_acct = xmalloc(sizeof(slurmdb_cluster_accounting_rec_t));
+		total_acct->tres_rec.id = accting->tres_rec.id;
+		list_push(*total_tres_acct, total_acct);
+	}
+
+	total_acct->alloc_secs += accting->alloc_secs;
+	total_acct->down_secs  += accting->down_secs;
+	total_acct->idle_secs  += accting->idle_secs;
+	total_acct->resv_secs  += accting->resv_secs;
+	total_acct->over_secs  += accting->over_secs;
+	total_acct->pdown_secs += accting->pdown_secs;
+	total_acct->tres_rec.count += accting->tres_rec.count;
+	total_acct->tres_rec.rec_count++;
+
+	return SLURM_SUCCESS;
+}
+
+extern void slurmdb_transfer_acct_list_2_tres(
+	List accounting_list, List *tres)
+{
+	ListIterator itr;
+	slurmdb_accounting_rec_t *accting = NULL;
+
+	xassert(accounting_list);
+	xassert(tres);
+
+	/* get the amount of time this assoc used
+	   during the time we are looking at */
+	itr = list_iterator_create(accounting_list);
+	while ((accting = list_next(itr)))
+		slurmdb_add_accounting_to_tres_list(accting, tres);
+	list_iterator_destroy(itr);
+}
+
+extern void slurmdb_transfer_tres_time(
+	List *tres_list_out, char *tres_str, int elapsed)
+{
+	ListIterator itr;
+	slurmdb_tres_rec_t *tres_rec = NULL;
+	List job_tres_list = NULL;
+
+	xassert(tres_list_out);
+
+	slurmdb_tres_list_from_string(&job_tres_list, tres_str,
+				      TRES_STR_FLAG_NONE);
+
+	if (!job_tres_list)
+		return;
+
+	/* get the amount of time this assoc used
+	   during the time we are looking at */
+	itr = list_iterator_create(job_tres_list);
+	while ((tres_rec = list_next(itr)))
+		slurmdb_add_time_from_count_to_tres_list(
+			tres_rec, tres_list_out, elapsed);
+	list_iterator_destroy(itr);
+	FREE_NULL_LIST(job_tres_list);
+}
+
+extern int slurmdb_get_new_tres_pos(slurmdb_tres_rec_t **new_array,
+				    slurmdb_tres_rec_t **old_array,
+				    int cur_pos, int max_cnt)
+{
+	int j, pos = NO_VAL;
+
+	/* This means the tres didn't change order */
+	if (new_array[cur_pos]->id == old_array[cur_pos]->id)
+		pos = cur_pos;
+	else {
+		/* This means we might have changed the location or it
+		 * wasn't there before so break
+		 */
+		for (j=0; j<max_cnt; j++)
+			if (new_array[cur_pos]->id == old_array[j]->id) {
+				pos = j;
+				break;
+			}
+	}
+
+	return pos;
+}
+
+extern void slurmdb_set_new_tres_cnt(uint64_t **tres_cnt_in,
+				     slurmdb_tres_rec_t **new_array,
+				     slurmdb_tres_rec_t **old_array,
+				     int cur_cnt, int max_cnt)
+{
+	int i, pos;
+	uint64_t tres_cnt[cur_cnt];
+	bool changed = false;
+
+	/* This means we don't have to redo the tres */
+	if (!old_array || !new_array || !tres_cnt_in || !*tres_cnt_in)
+		return;
+
+	for (i=0; i<cur_cnt; i++) {
+		/* Done! */
+		if (!new_array[i])
+			break;
+
+		pos = slurmdb_get_new_tres_pos(
+			new_array, old_array, i, max_cnt);
+
+		if (pos != i)
+			changed = true;
+
+		tres_cnt[i] = (pos == NO_VAL) ?
+			0 : (*tres_cnt_in)[pos];
+	}
+
+	if (!changed)
+		return;
+
+	/* get the array the correct size */
+	i = sizeof(uint64_t) * cur_cnt;
+	if (cur_cnt != max_cnt)
+		xrealloc(*tres_cnt_in, i);
+
+	/* copy the data from tres_cnt which should contain
+	 * the new ordered tres values */
+	memcpy(*tres_cnt_in, tres_cnt, i);
+
+	return;
+}
diff --git a/src/common/slurmdb_defs.h b/src/common/slurmdb_defs.h
index 908f80a8c..7ec715783 100644
--- a/src/common/slurmdb_defs.h
+++ b/src/common/slurmdb_defs.h
@@ -52,8 +52,56 @@
 #define SLURMDB_PURGE_IN_MONTHS(_X) \
 	(_X != NO_VAL && _X & SLURMDB_PURGE_MONTHS)
 
-extern slurmdb_step_rec_t *slurmdb_create_step_rec();
+/* This is used to point out constants that exist in the
+ * TRES records.  This should be the same order as
+ * the enum pointing out the order in the array that is defined in
+ * src/slurmctld/slurmctld.h
+ */
+typedef enum {
+	TRES_CPU = 1,
+	TRES_MEM,
+	TRES_ENERGY,
+	TRES_NODE,
+} tres_types_t;
+
+/* These #defines are for the tres_str functions below and should be
+ * sent when flags are allowed in the functions.
+ */
+#define TRES_STR_FLAG_NONE        0x00000000 /* No flags, meaning by
+					      * default the string
+					      * will contain -1 and
+					      * be unique honoring
+					      * the first value found
+					      * in an incoming string */
+#define TRES_STR_FLAG_ONLY_CONCAT 0x00000001 /* Only concat the
+					      * string, this will
+					      * most likely trump the
+					      * other flags below. */
+#define TRES_STR_FLAG_REPLACE     0x00000002 /* Replace previous count
+					      * values found, if this
+					      * is not set duplicate
+					      * entries will be skipped. */
+#define TRES_STR_FLAG_REMOVE      0x00000004 /* If -1 entries are
+					      * found remove them, by
+					      * default they will be
+					      * added to the string
+					      */
+#define TRES_STR_FLAG_SORT_ID     0x00000008 /* sort string by ID */
+#define TRES_STR_FLAG_SIMPLE      0x00000010 /* make a simple string */
+#define TRES_STR_FLAG_COMMA1      0x00000020 /* make a first char a comma */
+#define TRES_STR_FLAG_NO_NULL     0x00000040 /* return blank string
+					      * instead of NULL */
+
+typedef struct {
+	slurmdb_cluster_rec_t *cluster_rec;
+	int preempt_cnt;
+	time_t start_time;
+} local_cluster_rec_t;
+
 extern slurmdb_job_rec_t *slurmdb_create_job_rec();
+extern slurmdb_step_rec_t *slurmdb_create_step_rec();
+extern slurmdb_assoc_usage_t *slurmdb_create_assoc_usage(int tres_cnt);
+extern slurmdb_qos_usage_t *slurmdb_create_qos_usage(int tres_cnt);
 
 extern char *slurmdb_qos_str(List qos_list, uint32_t level);
 extern uint32_t str_2_slurmdb_qos(List qos_list, char *level);
@@ -80,6 +128,7 @@ extern void slurmdb_sort_hierarchical_assoc_list(List assoc_list);
 /* IN/OUT: tree_list a list of slurmdb_print_tree_t's */
 extern char *slurmdb_tree_name_get(char *name, char *parent, List tree_list);
 
+extern int set_qos_bitstr_from_string(bitstr_t *valid_qos, char *names);
 extern int set_qos_bitstr_from_list(bitstr_t *valid_qos, List qos_list);
 extern char *get_qos_complete_str_bitstr(List qos_list, bitstr_t *valid_qos);
 extern char *get_qos_complete_str(List qos_list, List num_qos_list);
@@ -90,7 +139,7 @@ extern uint16_t str_2_classification(char *classification);
 extern char *slurmdb_problem_str_get(uint16_t problem);
 extern uint16_t str_2_slurmdb_problem(char *problem);
 
-extern void log_assoc_rec(slurmdb_association_rec_t *assoc_ptr, List qos_list);
+extern void log_assoc_rec(slurmdb_assoc_rec_t *assoc_ptr, List qos_list);
 
 extern int slurmdb_report_set_start_end_time(time_t *start, time_t *end);
 
@@ -110,5 +159,104 @@ extern slurmdb_report_cluster_rec_t *slurmdb_cluster_rec_2_report(
 extern char *slurmdb_get_selected_step_id(
 	char *job_id_str, int len,
 	slurmdb_selected_step_t *selected_step);
+/* OUT: out - copy grp/max/qos limits from in
+ * IN:  in  - what to copy from
+ */
+extern void slurmdb_copy_assoc_rec_limits(slurmdb_assoc_rec_t *out,
+					  slurmdb_assoc_rec_t *in);
+extern void slurmdb_copy_qos_rec_limits(slurmdb_qos_rec_t *out,
+					slurmdb_qos_rec_t *in);
+extern slurmdb_tres_rec_t *slurmdb_copy_tres_rec(slurmdb_tres_rec_t *tres);
+extern List slurmdb_copy_tres_list(List tres);
+extern List slurmdb_diff_tres_list(List tres_list_old, List tres_list_new);
+extern char *slurmdb_tres_string_combine_lists(
+	List tres_list_old, List tres_list_new);
+/* make a tres_string from a given list
+ * IN tres - list of slurmdb_tres_rec_t's
+ * IN flags - see the TRES_STR_FLAGS above
+ *                 Meaningful flags are TRES_STR_FLAG_SIMPLE
+ *                                      TRES_STR_FLAG_COMMA1
+ * RET char * of tres_str
+ */
+extern char *slurmdb_make_tres_string(List tres, uint32_t flags);
+extern char *slurmdb_format_tres_str(
+	char *tres_in, List full_tres_list, bool simple);
+/*
+ * Comparator used for sorting tres by id
+ *
+ * returns: -1 tres_a < tres_b   0: tres_a == tres_b   1: tres_a > tres_b
+ *
+ */
+extern int slurmdb_sort_tres_by_id_asc(void *v1, void *v2);
+
+/* Used to turn a tres string into a list containing
+ * slurmdb_tres_rec_t's with only id's and counts filled in, no
+ * formatted types or names.
+ *
+ * IN/OUT: tres_list - list created from the simple tres string
+ * IN    : tres - simple string you want convert
+ * IN    : flags - see the TRES_STR_FLAGS above
+ *                 Meaningful flags are TRES_STR_FLAG_REPLACE
+ *                                      TRES_STR_FLAG_REMOVE
+ *                                      TRES_STR_FLAG_SORT_ID
+ */
+extern void slurmdb_tres_list_from_string(
+	List *tres_list, char *tres, uint32_t flags);
+
+/* combine a name array and count array into a string */
+extern char *slurmdb_make_tres_string_from_arrays(char **tres_names,
+						  uint64_t *tres_cnts,
+						  uint32_t tres_cnt,
+						  uint32_t flags);
+
+extern char *slurmdb_make_tres_string_from_simple(
+	char *tres_in, List full_tres_list);
+/* Used to combine 2 different TRES strings together
+ *
+ * IN/OUT: tres_str_old - original simple tres string
+ * IN    : tres_str_new - string you want added
+ * IN    : flags - see the TRES_STR_FLAGS above
+ *                 Meaningful flags are TRES_STR_FLAG_ONLY_CONCAT
+ *                                      TRES_STR_FLAG_REPLACE
+ *                                      TRES_STR_FLAG_REMOVE
+ *                                      TRES_STR_FLAG_SORT_ID
+ *                                      TRES_STR_FLAG_SIMPLE
+ *                                      TRES_STR_FLAG_COMMA1
+ *                                      TRES_STR_FLAG_NO_NULL
+ * RET   : new tres_str_old - the new string (also sent out)
+ */
+extern char *slurmdb_combine_tres_strings(
+	char **tres_str_old, char *tres_str_new, uint32_t flags);
+extern slurmdb_tres_rec_t *slurmdb_find_tres_in_string(
+	char *tres_str_in, int id);
+extern uint64_t slurmdb_find_tres_count_in_string(char *tres_str_in, int id);
+extern int slurmdb_find_qos_in_list_by_name(void *x, void *key);
+extern int slurmdb_find_tres_in_list(void *x, void *key);
+extern int slurmdb_find_tres_in_list_by_count(void *x, void *key);
+extern int slurmdb_find_tres_in_list_by_type(void *x, void *key);
+extern int slurmdb_find_cluster_accting_tres_in_list(void *x, void *key);
+extern int slurmdb_add_cluster_accounting_to_tres_list(
+	slurmdb_cluster_accounting_rec_t *accting,
+	List *tres);
+extern int slurmdb_add_accounting_to_tres_list(
+	slurmdb_accounting_rec_t *accting,
+	List *tres);
+extern int slurmdb_add_time_from_count_to_tres_list(
+	slurmdb_tres_rec_t *tres_in, List *tres, time_t elapsed);
+extern int slurmdb_sum_accounting_list(
+	slurmdb_cluster_accounting_rec_t *accting,
+	List *total_tres_acct);
+extern void slurmdb_transfer_acct_list_2_tres(
+	List accounting_list, List *tres);
+extern void slurmdb_transfer_tres_time(
+	List *tres_list_out, char *tres_str, int elapsed);
+
+extern int slurmdb_get_new_tres_pos(slurmdb_tres_rec_t **new_array,
+				    slurmdb_tres_rec_t **old_array,
+				    int cur_pos, int max_cnt);
+extern void slurmdb_set_new_tres_cnt(uint64_t **tres_cnt_in,
+				     slurmdb_tres_rec_t **new_array,
+				     slurmdb_tres_rec_t **old_array,
+				     int cur_cnt, int max_cnt);
 
 #endif
diff --git a/src/common/slurmdb_pack.c b/src/common/slurmdb_pack.c
index 969775a5e..d8d776316 100644
--- a/src/common/slurmdb_pack.c
+++ b/src/common/slurmdb_pack.c
@@ -1,9 +1,10 @@
 /*****************************************************************************\
  *  slurmdb_pack.h - un/pack definitions used by slurmdb api
  ******************************************************************************
+ *  Copyright (C) 2011-2015 SchedMD LLC.
  *  Copyright (C) 2010 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble da@llnl.gov, et. al.
+ *  Written by Danny Auble da@schedmd.com, et. al.
  *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
@@ -80,48 +81,6 @@ static void _pack_slurmdb_stats(slurmdb_stats_t *stats,
 		packdouble(stats->disk_write_max, buffer);
 		packdouble(stats->disk_write_ave, buffer);
 
-		pack32(stats->vsize_max_nodeid, buffer);
-		pack32(stats->vsize_max_taskid, buffer);
-		pack32(stats->rss_max_nodeid, buffer);
-		pack32(stats->rss_max_taskid, buffer);
-		pack32(stats->pages_max_nodeid, buffer);
-		pack32(stats->pages_max_taskid, buffer);
-		pack32(stats->cpu_min_nodeid, buffer);
-		pack32(stats->cpu_min_taskid, buffer);
-		pack32(stats->disk_read_max_nodeid, buffer);
-		pack32(stats->disk_read_max_taskid, buffer);
-		pack32(stats->disk_write_max_nodeid, buffer);
-		pack32(stats->disk_write_max_taskid, buffer);
-	} else if (rpc_version >= SLURMDBD_2_6_VERSION) {
-		if (!stats) {
-			for (i=0; i<4; i++)
-				pack32((uint32_t) 0, buffer);
-
-			for (i=0; i<10; i++)
-				packdouble(0, buffer);
-
-			for (i=0; i<12; i++) {
-				pack32(0, buffer);
-			}
-			return;
-		}
-
-		pack32((uint32_t)stats->vsize_max, buffer);
-		pack32((uint32_t)stats->rss_max, buffer);
-		pack32((uint32_t)stats->pages_max, buffer);
-		pack32(stats->cpu_min, buffer);
-
-		packdouble(stats->vsize_ave, buffer);
-		packdouble(stats->rss_ave, buffer);
-		packdouble(stats->pages_ave, buffer);
-		packdouble(stats->cpu_ave, buffer);
-		packdouble(stats->act_cpufreq, buffer);
-		packdouble(stats->consumed_energy, buffer);
-		packdouble(stats->disk_read_max, buffer);
-		packdouble(stats->disk_read_ave, buffer);
-		packdouble(stats->disk_write_max, buffer);
-		packdouble(stats->disk_write_ave, buffer);
-
 		pack32(stats->vsize_max_nodeid, buffer);
 		pack32(stats->vsize_max_taskid, buffer);
 		pack32(stats->rss_max_nodeid, buffer);
@@ -157,35 +116,6 @@ static int _unpack_slurmdb_stats(slurmdb_stats_t *stats,
 		safe_unpackdouble(&stats->disk_write_max, buffer);
 		safe_unpackdouble(&stats->disk_write_ave, buffer);
 
-		safe_unpack32(&stats->vsize_max_nodeid, buffer);
-		safe_unpack32(&stats->vsize_max_taskid, buffer);
-		safe_unpack32(&stats->rss_max_nodeid, buffer);
-		safe_unpack32(&stats->rss_max_taskid, buffer);
-		safe_unpack32(&stats->pages_max_nodeid, buffer);
-		safe_unpack32(&stats->pages_max_taskid, buffer);
-		safe_unpack32(&stats->cpu_min_nodeid, buffer);
-		safe_unpack32(&stats->cpu_min_taskid, buffer);
-		safe_unpack32(&stats->disk_read_max_nodeid, buffer);
-		safe_unpack32(&stats->disk_read_max_taskid, buffer);
-		safe_unpack32(&stats->disk_write_max_nodeid, buffer);
-		safe_unpack32(&stats->disk_write_max_taskid, buffer);
-	} else if (rpc_version >= SLURMDBD_2_6_VERSION) {
-		safe_unpack32((uint32_t *)&stats->vsize_max, buffer);
-		safe_unpack32((uint32_t *)&stats->rss_max, buffer);
-		safe_unpack32((uint32_t *)&stats->pages_max, buffer);
-		safe_unpack32(&stats->cpu_min, buffer);
-
-		safe_unpackdouble(&stats->vsize_ave, buffer);
-		safe_unpackdouble(&stats->rss_ave, buffer);
-		safe_unpackdouble(&stats->pages_ave, buffer);
-		safe_unpackdouble(&stats->cpu_ave, buffer);
-		safe_unpackdouble(&stats->act_cpufreq, buffer);
-		safe_unpackdouble(&stats->consumed_energy, buffer);
-		safe_unpackdouble(&stats->disk_read_max, buffer);
-		safe_unpackdouble(&stats->disk_read_ave, buffer);
-		safe_unpackdouble(&stats->disk_write_max, buffer);
-		safe_unpackdouble(&stats->disk_write_ave, buffer);
-
 		safe_unpack32(&stats->vsize_max_nodeid, buffer);
 		safe_unpack32(&stats->vsize_max_taskid, buffer);
 		safe_unpack32(&stats->rss_max_nodeid, buffer);
@@ -214,10 +144,10 @@ extern void slurmdb_pack_user_rec(void *in, uint16_t rpc_version, Buf buffer)
 	slurmdb_user_rec_t *object = (slurmdb_user_rec_t *)in;
 	uint32_t count = NO_VAL;
 	slurmdb_coord_rec_t *coord = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	slurmdb_wckey_rec_t *wckey = NULL;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!object) {
 			pack16(0, buffer);
 			pack32(NO_VAL, buffer);
@@ -240,8 +170,8 @@ extern void slurmdb_pack_user_rec(void *in, uint16_t rpc_version, Buf buffer)
 		if (count && count != NO_VAL) {
 			itr = list_iterator_create(object->assoc_list);
 			while ((assoc = list_next(itr))) {
-				slurmdb_pack_association_rec(assoc, rpc_version,
-							     buffer);
+				slurmdb_pack_assoc_rec(assoc, rpc_version,
+						       buffer);
 			}
 			list_iterator_destroy(itr);
 		}
@@ -290,20 +220,20 @@ extern int slurmdb_unpack_user_rec(void **object, uint16_t rpc_version,
 	slurmdb_user_rec_t *object_ptr = xmalloc(sizeof(slurmdb_user_rec_t));
 	uint32_t count = NO_VAL;
 	slurmdb_coord_rec_t *coord = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	slurmdb_wckey_rec_t *wckey = NULL;
 	int i;
 
 	*object = object_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpack16(&object_ptr->admin_level, buffer);
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
 			object_ptr->assoc_list =
-				list_create(slurmdb_destroy_association_rec);
+				list_create(slurmdb_destroy_assoc_rec);
 			for(i=0; i<count; i++) {
-				if (slurmdb_unpack_association_rec(
+				if (slurmdb_unpack_assoc_rec(
 					    (void *)&assoc, rpc_version, buffer)
 				    == SLURM_ERROR)
 					goto unpack_error;
@@ -353,46 +283,46 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void slurmdb_pack_used_limits(void *in, uint16_t rpc_version, Buf buffer)
+extern void slurmdb_pack_used_limits(void *in, uint32_t tres_cnt,
+				     uint16_t rpc_version, Buf buffer)
 {
 	slurmdb_used_limits_t *object = (slurmdb_used_limits_t *)in;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
-		if (!object) {
-			pack64(0, buffer);
-			pack32(0, buffer);
-			pack32(0, buffer);
-			pack32(0, buffer);
-			pack32(0, buffer);
-			pack32(0, buffer);
-			return;
-		}
-
-		pack64(object->cpu_run_mins, buffer);
-		pack32(object->cpus, buffer);
-		pack32(object->jobs, buffer);
-		pack32(object->nodes, buffer);
-		pack32(object->submit_jobs, buffer);
-		pack32(object->uid, buffer);
+	if (!object) {
+		pack32(0, buffer);
+		pack32(0, buffer);
+		pack64_array(NULL, 0, buffer);
+		pack64_array(NULL, 0, buffer);
+		pack32(0, buffer);
+		return;
 	}
+
+	pack32(object->jobs, buffer);
+	pack32(object->submit_jobs, buffer);
+	pack64_array(object->tres, tres_cnt, buffer);
+	pack64_array(object->tres_run_mins, tres_cnt, buffer);
+	pack32(object->uid, buffer);
 }
 
-extern int slurmdb_unpack_used_limits(void **object,
+extern int slurmdb_unpack_used_limits(void **object, uint32_t tres_cnt,
 				      uint16_t rpc_version, Buf buffer)
 {
 	slurmdb_used_limits_t *object_ptr =
 		xmalloc(sizeof(slurmdb_used_limits_t));
+	uint32_t tmp32;
 
 	*object = (void *)object_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
-		safe_unpack64(&object_ptr->cpu_run_mins, buffer);
-		safe_unpack32(&object_ptr->cpus, buffer);
-		safe_unpack32(&object_ptr->jobs, buffer);
-		safe_unpack32(&object_ptr->nodes, buffer);
-		safe_unpack32(&object_ptr->submit_jobs, buffer);
-		safe_unpack32(&object_ptr->uid, buffer);
-	}
+	safe_unpack32(&object_ptr->jobs, buffer);
+	safe_unpack32(&object_ptr->submit_jobs, buffer);
+	safe_unpack64_array(&object_ptr->tres, &tmp32, buffer);
+	if (tmp32 != tres_cnt)
+		goto unpack_error;
+	safe_unpack64_array(&object_ptr->tres_run_mins, &tmp32, buffer);
+	if (tmp32 != tres_cnt)
+		goto unpack_error;
+
+	safe_unpack32(&object_ptr->uid, buffer);
 
 	return SLURM_SUCCESS;
 
@@ -408,9 +338,9 @@ extern void slurmdb_pack_account_rec(void *in, uint16_t rpc_version, Buf buffer)
 	ListIterator itr = NULL;
 	uint32_t count = NO_VAL;
 	slurmdb_account_rec_t *object = (slurmdb_account_rec_t *)in;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!object) {
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
@@ -427,8 +357,8 @@ extern void slurmdb_pack_account_rec(void *in, uint16_t rpc_version, Buf buffer)
 		if (count && count != NO_VAL) {
 			itr = list_iterator_create(object->assoc_list);
 			while ((assoc = list_next(itr))) {
-				slurmdb_pack_association_rec(assoc, rpc_version,
-							     buffer);
+				slurmdb_pack_assoc_rec(assoc, rpc_version,
+						       buffer);
 			}
 			list_iterator_destroy(itr);
 		}
@@ -460,19 +390,19 @@ extern int slurmdb_unpack_account_rec(void **object, uint16_t rpc_version,
 	int i;
 	uint32_t count;
 	slurmdb_coord_rec_t *coord = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	slurmdb_account_rec_t *object_ptr =
 		xmalloc(sizeof(slurmdb_account_rec_t));
 
 	*object = object_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
 			object_ptr->assoc_list =
-				list_create(slurmdb_destroy_association_rec);
+				list_create(slurmdb_destroy_assoc_rec);
 			for(i=0; i<count; i++) {
-				if (slurmdb_unpack_association_rec(
+				if (slurmdb_unpack_assoc_rec(
 					    (void *)&assoc, rpc_version, buffer)
 				    == SLURM_ERROR)
 					goto unpack_error;
@@ -543,11 +473,11 @@ extern void slurmdb_pack_cluster_accounting_rec(void *in, uint16_t rpc_version,
 	slurmdb_cluster_accounting_rec_t *object =
 		(slurmdb_cluster_accounting_rec_t *)in;
 
-	if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		if (!object) {
 			pack64(0, buffer);
+			slurmdb_pack_tres_rec(NULL, rpc_version, buffer);
 			pack64(0, buffer);
-			pack32(0, buffer);
 			pack64(0, buffer);
 			pack64(0, buffer);
 			pack64(0, buffer);
@@ -558,16 +488,19 @@ extern void slurmdb_pack_cluster_accounting_rec(void *in, uint16_t rpc_version,
 		}
 
 		pack64(object->alloc_secs, buffer);
-		pack64(object->consumed_energy, buffer);
-		pack32(object->cpu_count, buffer);
+		slurmdb_pack_tres_rec(&object->tres_rec, rpc_version, buffer);
 		pack64(object->down_secs, buffer);
 		pack64(object->idle_secs, buffer);
 		pack64(object->over_secs, buffer);
 		pack64(object->pdown_secs, buffer);
 		pack_time(object->period_start, buffer);
 		pack64(object->resv_secs, buffer);
-	} else if (rpc_version >= SLURMDBD_2_6_VERSION) {
-		if (!object) {
+	} else if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		/* We only want to send the CPU tres to older
+		   versions of SLURM.
+		*/
+		if (!object || (object->tres_rec.id != TRES_CPU)) {
+			pack64(0, buffer);
 			pack64(0, buffer);
 			pack32(0, buffer);
 			pack64(0, buffer);
@@ -580,7 +513,9 @@ extern void slurmdb_pack_cluster_accounting_rec(void *in, uint16_t rpc_version,
 		}
 
 		pack64(object->alloc_secs, buffer);
-		pack32(object->cpu_count, buffer);
+		pack64(0, buffer); /* consumed energy doesn't exist
+				      anymore */
+		pack32(object->tres_rec.count, buffer);
 		pack64(object->down_secs, buffer);
 		pack64(object->idle_secs, buffer);
 		pack64(object->over_secs, buffer);
@@ -599,19 +534,30 @@ extern int slurmdb_unpack_cluster_accounting_rec(void **object,
 
 	*object = object_ptr;
 
-	if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack64(&object_ptr->alloc_secs, buffer);
-		safe_unpack64(&object_ptr->consumed_energy, buffer);
-		safe_unpack32(&object_ptr->cpu_count, buffer);
+		if (slurmdb_unpack_tres_rec_noalloc(
+			    &object_ptr->tres_rec, rpc_version, buffer)
+		    != SLURM_SUCCESS)
+			goto unpack_error;
 		safe_unpack64(&object_ptr->down_secs, buffer);
 		safe_unpack64(&object_ptr->idle_secs, buffer);
 		safe_unpack64(&object_ptr->over_secs, buffer);
 		safe_unpack64(&object_ptr->pdown_secs, buffer);
 		safe_unpack_time(&object_ptr->period_start, buffer);
 		safe_unpack64(&object_ptr->resv_secs, buffer);
-	} else if (rpc_version >= SLURMDBD_2_6_VERSION) {
+	} else if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		uint64_t tmp_64;
+		object_ptr->tres_rec.id = TRES_CPU;
+		object_ptr->tres_rec.name = xstrdup("cpu");
+
 		safe_unpack64(&object_ptr->alloc_secs, buffer);
-		safe_unpack32(&object_ptr->cpu_count, buffer);
+
+		/* consumed_energy has to be thrown away here, this
+		 * unpack shouldn't ever happen in practice.
+		 */
+		safe_unpack64(&tmp_64, buffer);
+		safe_unpack32((uint32_t *)&object_ptr->tres_rec.count, buffer);
 		safe_unpack64(&object_ptr->down_secs, buffer);
 		safe_unpack64(&object_ptr->idle_secs, buffer);
 		safe_unpack64(&object_ptr->over_secs, buffer);
@@ -670,7 +616,59 @@ extern void slurmdb_pack_cluster_rec(void *in, uint16_t rpc_version, Buf buffer)
 	uint32_t count = NO_VAL;
 	slurmdb_cluster_rec_t *object = (slurmdb_cluster_rec_t *)in;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		if (!object) {
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			packnull(buffer);
+			pack32(0, buffer);
+			pack16(1, buffer);
+			pack32(NO_VAL, buffer);
+
+			packnull(buffer);
+			packnull(buffer);
+
+			pack32(NO_VAL, buffer);
+
+			slurmdb_pack_assoc_rec(NULL, rpc_version, buffer);
+
+			pack16(0, buffer);
+			packnull(buffer);
+			return;
+		}
+
+		if (!object->accounting_list ||
+		    !(count = list_count(object->accounting_list)))
+			count = NO_VAL;
+
+		pack32(count, buffer);
+
+		if (count != NO_VAL) {
+			itr = list_iterator_create(object->accounting_list);
+			while ((slurmdb_info = list_next(itr))) {
+				slurmdb_pack_cluster_accounting_rec(
+					slurmdb_info, rpc_version, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+
+		pack16(object->classification, buffer);
+		packstr(object->control_host, buffer);
+		pack32(object->control_port, buffer);
+		pack16(object->dimensions, buffer);
+		pack32(object->flags, buffer);
+
+		packstr(object->name, buffer);
+		packstr(object->nodes, buffer);
+
+		pack32(object->plugin_id_select, buffer);
+
+		slurmdb_pack_assoc_rec(object->root_assoc,
+				       rpc_version, buffer);
+
+		pack16(object->rpc_version, buffer);
+		packstr(object->tres_str, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!object) {
 			pack32(NO_VAL, buffer);
 			pack16(0, buffer);
@@ -685,7 +683,7 @@ extern void slurmdb_pack_cluster_rec(void *in, uint16_t rpc_version, Buf buffer)
 
 			pack32(NO_VAL, buffer);
 
-			slurmdb_pack_association_rec(NULL, rpc_version, buffer);
+			slurmdb_pack_assoc_rec(NULL, rpc_version, buffer);
 
 			pack16(0, buffer);
 			return;
@@ -693,6 +691,8 @@ extern void slurmdb_pack_cluster_rec(void *in, uint16_t rpc_version, Buf buffer)
 
 		if (object->accounting_list)
 			count = list_count(object->accounting_list);
+		else
+			count = NO_VAL;
 
 		pack32(count, buffer);
 
@@ -708,7 +708,9 @@ extern void slurmdb_pack_cluster_rec(void *in, uint16_t rpc_version, Buf buffer)
 		pack16(object->classification, buffer);
 		packstr(object->control_host, buffer);
 		pack32(object->control_port, buffer);
-		pack32(object->cpu_count, buffer);
+		count = (uint32_t)slurmdb_find_tres_count_in_string(
+			object->tres_str, TRES_CPU);
+		pack32(count, buffer);
 		pack16(object->dimensions, buffer);
 		pack32(object->flags, buffer);
 
@@ -717,8 +719,8 @@ extern void slurmdb_pack_cluster_rec(void *in, uint16_t rpc_version, Buf buffer)
 
 		pack32(object->plugin_id_select, buffer);
 
-		slurmdb_pack_association_rec(object->root_assoc,
-					     rpc_version, buffer);
+		slurmdb_pack_assoc_rec(object->root_assoc,
+				       rpc_version, buffer);
 
 		pack16(object->rpc_version, buffer);
 	}
@@ -737,12 +739,48 @@ extern int slurmdb_unpack_cluster_rec(void **object, uint16_t rpc_version,
 	*object = object_ptr;
 
 	slurmdb_init_cluster_rec(object_ptr, 0);
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
 			object_ptr->accounting_list = list_create(
 				slurmdb_destroy_cluster_accounting_rec);
-			for(i=0; i<count; i++) {
+			for (i=0; i<count; i++) {
+				if (slurmdb_unpack_cluster_accounting_rec(
+					    (void *)&slurmdb_info,
+					    rpc_version, buffer) == SLURM_ERROR)
+					goto unpack_error;
+				list_append(object_ptr->accounting_list,
+					    slurmdb_info);
+			}
+		}
+
+		safe_unpack16(&object_ptr->classification, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->control_host,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->control_port, buffer);
+		safe_unpack16(&object_ptr->dimensions, buffer);
+		safe_unpack32(&object_ptr->flags, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->nodes, &uint32_tmp, buffer);
+
+		safe_unpack32(&object_ptr->plugin_id_select, buffer);
+
+		if (slurmdb_unpack_assoc_rec(
+			    (void **)&object_ptr->root_assoc,
+			    rpc_version, buffer)
+		    == SLURM_ERROR)
+			goto unpack_error;
+
+		safe_unpack16(&object_ptr->rpc_version, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->tres_str,
+				       &uint32_tmp, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		safe_unpack32(&count, buffer);
+		if (count != NO_VAL) {
+			object_ptr->accounting_list = list_create(
+				slurmdb_destroy_cluster_accounting_rec);
+			for (i=0; i<count; i++) {
 				if (slurmdb_unpack_cluster_accounting_rec(
 					    (void *)&slurmdb_info,
 					    rpc_version, buffer) == SLURM_ERROR)
@@ -756,7 +794,9 @@ extern int slurmdb_unpack_cluster_rec(void **object, uint16_t rpc_version,
 		safe_unpackstr_xmalloc(&object_ptr->control_host,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&object_ptr->control_port, buffer);
-		safe_unpack32(&object_ptr->cpu_count, buffer);
+		safe_unpack32(&count, buffer);
+		object_ptr->tres_str = xstrdup_printf("%d=%u", TRES_CPU, count);
+
 		safe_unpack16(&object_ptr->dimensions, buffer);
 		safe_unpack32(&object_ptr->flags, buffer);
 
@@ -765,7 +805,7 @@ extern int slurmdb_unpack_cluster_rec(void **object, uint16_t rpc_version,
 
 		safe_unpack32(&object_ptr->plugin_id_select, buffer);
 
-		if (slurmdb_unpack_association_rec(
+		if (slurmdb_unpack_assoc_rec(
 			    (void **)&object_ptr->root_assoc,
 			    rpc_version, buffer)
 		    == SLURM_ERROR)
@@ -787,21 +827,22 @@ extern void slurmdb_pack_accounting_rec(void *in, uint16_t rpc_version,
 {
 	slurmdb_accounting_rec_t *object = (slurmdb_accounting_rec_t *)in;
 
-	if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		if (!object) {
 			pack64(0, buffer);
-			pack64(0, buffer);
+			slurmdb_pack_tres_rec(NULL, rpc_version, buffer);
 			pack32(0, buffer);
 			pack_time(0, buffer);
 			return;
 		}
 
 		pack64(object->alloc_secs, buffer);
-		pack64(object->consumed_energy, buffer);
+		slurmdb_pack_tres_rec(&object->tres_rec, rpc_version, buffer);
 		pack32(object->id, buffer);
 		pack_time(object->period_start, buffer);
-	} else if (rpc_version >= SLURMDBD_2_6_VERSION) {
+	} else if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		if (!object) {
+			pack64(0, buffer);
 			pack64(0, buffer);
 			pack32(0, buffer);
 			pack_time(0, buffer);
@@ -809,6 +850,8 @@ extern void slurmdb_pack_accounting_rec(void *in, uint16_t rpc_version,
 		}
 
 		pack64(object->alloc_secs, buffer);
+		pack64(0, buffer); /* consumed energy doesn't exist
+				      anymore */
 		pack32(object->id, buffer);
 		pack_time(object->period_start, buffer);
 	}
@@ -822,13 +865,24 @@ extern int slurmdb_unpack_accounting_rec(void **object, uint16_t rpc_version,
 
 	*object = object_ptr;
 
-	if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack64(&object_ptr->alloc_secs, buffer);
-		safe_unpack64(&object_ptr->consumed_energy, buffer);
+		if (slurmdb_unpack_tres_rec_noalloc(
+			    &object_ptr->tres_rec, rpc_version, buffer)
+		    != SLURM_SUCCESS)
+			goto unpack_error;
 		safe_unpack32(&object_ptr->id, buffer);
 		safe_unpack_time(&object_ptr->period_start, buffer);
-	} else if (rpc_version >= SLURMDBD_2_6_VERSION) {
+	} else if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		uint64_t tmp_64;
+
+		object_ptr->tres_rec.id = TRES_CPU;
+		object_ptr->tres_rec.name = xstrdup("cpu");
 		safe_unpack64(&object_ptr->alloc_secs, buffer);
+		/* consumed_energy has to be thrown away here, this
+		 * unpack shouldn't ever happen in practice.
+		 */
+		safe_unpack64(&tmp_64, buffer);
 		safe_unpack32(&object_ptr->id, buffer);
 		safe_unpack_time(&object_ptr->period_start, buffer);
 	}
@@ -841,16 +895,16 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void slurmdb_pack_association_rec(void *in, uint16_t rpc_version,
-					 Buf buffer)
+extern void slurmdb_pack_assoc_rec(void *in, uint16_t rpc_version,
+				   Buf buffer)
 {
 	slurmdb_accounting_rec_t *slurmdb_info = NULL;
 	ListIterator itr = NULL;
 	uint32_t count = NO_VAL;
 	char *tmp_info = NULL;
-	slurmdb_association_rec_t *object = (slurmdb_association_rec_t *)in;
+	slurmdb_assoc_rec_t *object = (slurmdb_assoc_rec_t *)in;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		if (!object) {
 			pack32(NO_VAL, buffer);
 			packnull(buffer);
@@ -860,11 +914,9 @@ extern void slurmdb_pack_association_rec(void *in, uint16_t rpc_version,
 
 			pack32(NO_VAL, buffer);
 
-			pack64(NO_VAL, buffer);
-			pack64(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
@@ -873,10 +925,10 @@ extern void slurmdb_pack_association_rec(void *in, uint16_t rpc_version,
 			pack16(0, buffer);
 			pack32(0, buffer);
 
-			pack64(NO_VAL, buffer);
-			pack64(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
@@ -894,12 +946,13 @@ extern void slurmdb_pack_association_rec(void *in, uint16_t rpc_version,
 			return;
 		}
 
-		if (object->accounting_list)
-			count = list_count(object->accounting_list);
+		if (!object->accounting_list ||
+		    !(count = list_count(object->accounting_list)))
+			count = NO_VAL;
 
 		pack32(count, buffer);
 
-		if (count && count != NO_VAL) {
+		if (count != NO_VAL) {
 			itr = list_iterator_create(object->accounting_list);
 			while ((slurmdb_info = list_next(itr))) {
 				slurmdb_pack_accounting_rec(slurmdb_info,
@@ -908,7 +961,7 @@ extern void slurmdb_pack_association_rec(void *in, uint16_t rpc_version,
 			}
 			list_iterator_destroy(itr);
 		}
-		count = NO_VAL;
+
 
 		packstr(object->acct, buffer);
 		packstr(object->cluster, buffer);
@@ -920,12 +973,10 @@ extern void slurmdb_pack_association_rec(void *in, uint16_t rpc_version,
 		   just renamed it and called it good */
 		pack32(object->shares_raw, buffer);
 
-		pack64(object->grp_cpu_mins, buffer);
-		pack64(object->grp_cpu_run_mins, buffer);
-		pack32(object->grp_cpus, buffer);
+		packstr(object->grp_tres_mins, buffer);
+		packstr(object->grp_tres_run_mins, buffer);
+		packstr(object->grp_tres, buffer);
 		pack32(object->grp_jobs, buffer);
-		pack32(object->grp_mem, buffer);
-		pack32(object->grp_nodes, buffer);
 		pack32(object->grp_submit_jobs, buffer);
 		pack32(object->grp_wall, buffer);
 
@@ -933,11 +984,11 @@ extern void slurmdb_pack_association_rec(void *in, uint16_t rpc_version,
 		pack16(object->is_def, buffer);
 		pack32(object->lft, buffer);
 
-		pack64(object->max_cpu_mins_pj, buffer);
-		pack64(object->max_cpu_run_mins, buffer);
-		pack32(object->max_cpus_pj, buffer);
+		packstr(object->max_tres_mins_pj, buffer);
+		packstr(object->max_tres_run_mins, buffer);
+		packstr(object->max_tres_pj, buffer);
+		packstr(object->max_tres_pn, buffer);
 		pack32(object->max_jobs, buffer);
-		pack32(object->max_nodes_pj, buffer);
 		pack32(object->max_submit_jobs, buffer);
 		pack32(object->max_wall_pj, buffer);
 
@@ -962,54 +1013,190 @@ extern void slurmdb_pack_association_rec(void *in, uint16_t rpc_version,
 		pack32(object->uid, buffer);
 
 		packstr(object->user, buffer);
-	}
-}
-
-extern int slurmdb_unpack_association_rec(void **object, uint16_t rpc_version,
-					  Buf buffer)
-{
-	uint32_t uint32_tmp;
-	int i;
-	uint32_t count;
-	char *tmp_info = NULL;
-	slurmdb_association_rec_t *object_ptr =
-		xmalloc(sizeof(slurmdb_association_rec_t));
-	slurmdb_accounting_rec_t *slurmdb_info = NULL;
-
-	*object = object_ptr;
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		uint64_t uint64_tmp;
+		if (!object) {
+			pack32(NO_VAL, buffer);
+			packnull(buffer);
+			packnull(buffer);
 
-	slurmdb_init_association_rec(object_ptr, 0);
+			pack32(NO_VAL, buffer);
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
-		safe_unpack32(&count, buffer);
-		if (count != NO_VAL) {
-			object_ptr->accounting_list =
-				list_create(slurmdb_destroy_accounting_rec);
-			for(i=0; i<count; i++) {
-				if (slurmdb_unpack_accounting_rec(
-					    (void **)&slurmdb_info,
-					    rpc_version,
-					    buffer) == SLURM_ERROR)
-					goto unpack_error;
-				list_append(object_ptr->accounting_list,
-					    slurmdb_info);
-			}
-		}
+			pack32(NO_VAL, buffer);
 
-		safe_unpackstr_xmalloc(&object_ptr->acct, &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp,
-				       buffer);
+			pack64(NO_VAL, buffer);
+			pack64(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
 
-		safe_unpack32(&object_ptr->def_qos_id, buffer);
+			pack32(0, buffer);
+			pack16(0, buffer);
+			pack32(0, buffer);
 
-		safe_unpack32(&object_ptr->shares_raw, buffer);
+			pack64(NO_VAL, buffer);
+			pack64(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			packnull(buffer);
+			pack32(0, buffer);
+			packnull(buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack32(0, buffer);
+			pack32(0, buffer);
+
+			packnull(buffer);
+			return;
+		}
+
+		if (object->accounting_list)
+			count = list_count(object->accounting_list);
+
+		pack32(count, buffer);
+
+		if (count && count != NO_VAL) {
+			itr = list_iterator_create(object->accounting_list);
+			while ((slurmdb_info = list_next(itr))) {
+				slurmdb_pack_accounting_rec(slurmdb_info,
+							    rpc_version,
+							    buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		count = NO_VAL;
+
+		packstr(object->acct, buffer);
+		packstr(object->cluster, buffer);
+
+		pack32(object->def_qos_id, buffer);
+
+		/* this used to be named fairshare to not have to redo
+		   the order of things just to be in alpha order we
+		   just renamed it and called it good */
+		pack32(object->shares_raw, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->grp_tres_mins, TRES_CPU);
+		if (uint64_tmp == INFINITE64)
+			uint64_tmp = (uint64_t)INFINITE;
+		pack64(uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->grp_tres_run_mins, TRES_CPU);
+		if (uint64_tmp == INFINITE64)
+			uint64_tmp = (uint64_t)INFINITE;
+		pack64(uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->grp_tres, TRES_CPU);
+		pack32((uint32_t)uint64_tmp, buffer);
+		pack32(object->grp_jobs, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->grp_tres, TRES_MEM);
+		pack32((uint32_t)uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->grp_tres, TRES_NODE);
+		pack32((uint32_t)uint64_tmp, buffer);
+		pack32(object->grp_submit_jobs, buffer);
+		pack32(object->grp_wall, buffer);
+
+		pack32(object->id, buffer);
+		pack16(object->is_def, buffer);
+		pack32(object->lft, buffer);
 
-		safe_unpack64(&object_ptr->grp_cpu_mins, buffer);
-		safe_unpack64(&object_ptr->grp_cpu_run_mins, buffer);
-		safe_unpack32(&object_ptr->grp_cpus, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_mins_pj, TRES_CPU);
+		if (uint64_tmp == INFINITE64)
+			uint64_tmp = (uint64_t)INFINITE;
+		pack64(uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_run_mins, TRES_CPU);
+		if (uint64_tmp == INFINITE64)
+			uint64_tmp = (uint64_t)INFINITE;
+		pack64(uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_pj, TRES_CPU);
+		pack32((uint32_t)uint64_tmp, buffer);
+		pack32(object->max_jobs, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_pj, TRES_NODE);
+		pack32((uint32_t)uint64_tmp, buffer);
+		pack32(object->max_submit_jobs, buffer);
+		pack32(object->max_wall_pj, buffer);
+
+		packstr(object->parent_acct, buffer);
+		pack32(object->parent_id, buffer);
+		packstr(object->partition, buffer);
+
+		if (object->qos_list)
+			count = list_count(object->qos_list);
+
+		pack32(count, buffer);
+
+		if (count && count != NO_VAL) {
+			itr = list_iterator_create(object->qos_list);
+			while ((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+
+		pack32(object->rgt, buffer);
+		pack32(object->uid, buffer);
+
+		packstr(object->user, buffer);
+	}
+}
+
+extern int slurmdb_unpack_assoc_rec_members(slurmdb_assoc_rec_t *object_ptr,
+					    uint16_t rpc_version,
+					    Buf buffer)
+{
+	uint32_t uint32_tmp;
+	int i;
+	uint32_t count;
+	char *tmp_info = NULL;
+	slurmdb_accounting_rec_t *slurmdb_info = NULL;
+
+	slurmdb_init_assoc_rec(object_ptr, 0);
+
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpack32(&count, buffer);
+		if (count != NO_VAL) {
+			object_ptr->accounting_list =
+				list_create(slurmdb_destroy_accounting_rec);
+			for(i=0; i<count; i++) {
+				if (slurmdb_unpack_accounting_rec(
+					    (void **)&slurmdb_info,
+					    rpc_version,
+					    buffer) == SLURM_ERROR)
+					goto unpack_error;
+				list_append(object_ptr->accounting_list,
+					    slurmdb_info);
+			}
+		}
+
+		safe_unpackstr_xmalloc(&object_ptr->acct, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp,
+				       buffer);
+
+		safe_unpack32(&object_ptr->def_qos_id, buffer);
+
+		safe_unpack32(&object_ptr->shares_raw, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->grp_tres_mins,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->grp_tres_run_mins,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->grp_tres,
+				       &uint32_tmp, buffer);
 		safe_unpack32(&object_ptr->grp_jobs, buffer);
-		safe_unpack32(&object_ptr->grp_mem, buffer);
-		safe_unpack32(&object_ptr->grp_nodes, buffer);
 		safe_unpack32(&object_ptr->grp_submit_jobs, buffer);
 		safe_unpack32(&object_ptr->grp_wall, buffer);
 
@@ -1017,11 +1204,15 @@ extern int slurmdb_unpack_association_rec(void **object, uint16_t rpc_version,
 		safe_unpack16(&object_ptr->is_def, buffer);
 		safe_unpack32(&object_ptr->lft, buffer);
 
-		safe_unpack64(&object_ptr->max_cpu_mins_pj, buffer);
-		safe_unpack64(&object_ptr->max_cpu_run_mins, buffer);
-		safe_unpack32(&object_ptr->max_cpus_pj, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->max_tres_mins_pj,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->max_tres_run_mins,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->max_tres_pj,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->max_tres_pn,
+				       &uint32_tmp, buffer);
 		safe_unpack32(&object_ptr->max_jobs, buffer);
-		safe_unpack32(&object_ptr->max_nodes_pj, buffer);
 		safe_unpack32(&object_ptr->max_submit_jobs, buffer);
 		safe_unpack32(&object_ptr->max_wall_pj, buffer);
 
@@ -1047,65 +1238,397 @@ extern int slurmdb_unpack_association_rec(void **object, uint16_t rpc_version,
 		safe_unpack32(&object_ptr->uid, buffer);
 
 		safe_unpackstr_xmalloc(&object_ptr->user, &uint32_tmp, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		uint64_t uint64_tmp;
+		safe_unpack32(&count, buffer);
+		if (count != NO_VAL) {
+			object_ptr->accounting_list =
+				list_create(slurmdb_destroy_accounting_rec);
+			for(i=0; i<count; i++) {
+				if (slurmdb_unpack_accounting_rec(
+					    (void **)&slurmdb_info,
+					    rpc_version,
+					    buffer) == SLURM_ERROR)
+					goto unpack_error;
+				list_append(object_ptr->accounting_list,
+					    slurmdb_info);
+			}
+		}
+
+		safe_unpackstr_xmalloc(&object_ptr->acct, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp,
+				       buffer);
+
+		safe_unpack32(&object_ptr->def_qos_id, buffer);
+
+		safe_unpack32(&object_ptr->shares_raw, buffer);
+
+		safe_unpack64(&uint64_tmp, buffer);
+		if (uint64_tmp == (uint64_t)INFINITE)
+			uint64_tmp = INFINITE64;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->grp_tres_mins =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack64(&uint64_tmp, buffer);
+		if (uint64_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->grp_tres_run_mins =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->grp_tres =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack32(&object_ptr->grp_jobs, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			xstrfmtcat(object_ptr->grp_tres, "%s%u=%"PRIu64,
+				   object_ptr->grp_tres ? "," : "",
+				   TRES_MEM, uint64_tmp);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			xstrfmtcat(object_ptr->grp_tres, "%s%u=%"PRIu64,
+				   object_ptr->grp_tres ? "," : "",
+				   TRES_NODE, uint64_tmp);
+		safe_unpack32(&object_ptr->grp_submit_jobs, buffer);
+		safe_unpack32(&object_ptr->grp_wall, buffer);
+
+		safe_unpack32(&object_ptr->id, buffer);
+		safe_unpack16(&object_ptr->is_def, buffer);
+		safe_unpack32(&object_ptr->lft, buffer);
+
+		safe_unpack64(&uint64_tmp, buffer);
+		if (uint64_tmp == (uint64_t)INFINITE)
+			uint64_tmp = INFINITE64;
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->max_tres_mins_pj =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack64(&uint64_tmp, buffer);
+		if (uint64_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->max_tres_run_mins =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->max_tres_pj =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack32(&object_ptr->max_jobs, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			xstrfmtcat(object_ptr->max_tres_pj, "%s%u=%"PRIu64,
+				   object_ptr->max_tres_pj ? "," : "",
+				   TRES_NODE, uint64_tmp);
+		safe_unpack32(&object_ptr->max_submit_jobs, buffer);
+		safe_unpack32(&object_ptr->max_wall_pj, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->parent_acct, &uint32_tmp,
+				       buffer);
+		safe_unpack32(&object_ptr->parent_id, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->partition, &uint32_tmp,
+				       buffer);
+
+		safe_unpack32(&count, buffer);
+		/* This needs to look for zero to tell if something
+		   has changed */
+		if (count != NO_VAL) {
+			object_ptr->qos_list = list_create(slurm_destroy_char);
+			for(i=0; i<count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->qos_list, tmp_info);
+			}
+		}
+
+		safe_unpack32(&object_ptr->rgt, buffer);
+		safe_unpack32(&object_ptr->uid, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->user, &uint32_tmp, buffer);
+	}
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+
+	return SLURM_ERROR;
+}
+
+extern int slurmdb_unpack_assoc_rec(void **object, uint16_t rpc_version,
+				    Buf buffer)
+{
+	int rc;
+	slurmdb_assoc_rec_t *object_ptr = xmalloc(sizeof(slurmdb_assoc_rec_t));
+
+	*object = object_ptr;
+
+	slurmdb_init_assoc_rec(object_ptr, 0);
+
+	if ((rc = slurmdb_unpack_assoc_rec_members(
+		     object_ptr, rpc_version, buffer)) != SLURM_SUCCESS) {
+		slurmdb_destroy_assoc_rec(object_ptr);
+		*object = NULL;
 	}
+	return rc;
+}
+
+extern void slurmdb_pack_assoc_usage(void *in, uint16_t rpc_version, Buf buffer)
+{
+	slurmdb_assoc_usage_t *usage = (slurmdb_assoc_usage_t *)in;
+
+	pack64_array(usage->grp_used_tres, usage->tres_cnt, buffer);
+	pack64_array(usage->grp_used_tres_run_secs, usage->tres_cnt, buffer);
+	packdouble(usage->grp_used_wall, buffer);
+	packdouble(usage->fs_factor, buffer);
+	pack32(usage->level_shares, buffer);
+	packdouble(usage->shares_norm, buffer);
+	packlongdouble(usage->usage_efctv, buffer);
+	packlongdouble(usage->usage_norm, buffer);
+	packlongdouble(usage->usage_raw, buffer);
+	packlongdouble_array(usage->usage_tres_raw, usage->tres_cnt, buffer);
+	pack32(usage->used_jobs, buffer);
+	pack32(usage->used_submit_jobs, buffer);
+	packlongdouble(usage->level_fs, buffer);
+	pack_bit_str_hex(usage->valid_qos, buffer);
+}
+
+extern int slurmdb_unpack_assoc_usage(void **object, uint16_t rpc_version,
+				      Buf buffer)
+{
+	slurmdb_assoc_usage_t *object_ptr =
+		xmalloc(sizeof(slurmdb_assoc_usage_t));
+	uint32_t tmp32;
+	*object = object_ptr;
+
+	safe_unpack64_array(&object_ptr->grp_used_tres, &tmp32, buffer);
+	object_ptr->tres_cnt = tmp32;
+	safe_unpack64_array(&object_ptr->grp_used_tres_run_secs,
+			    &tmp32, buffer);
+	safe_unpackdouble(&object_ptr->grp_used_wall, buffer);
+	safe_unpackdouble(&object_ptr->fs_factor, buffer);
+	safe_unpack32(&object_ptr->level_shares, buffer);
+	safe_unpackdouble(&object_ptr->shares_norm, buffer);
+	safe_unpacklongdouble(&object_ptr->usage_efctv, buffer);
+	safe_unpacklongdouble(&object_ptr->usage_norm, buffer);
+	safe_unpacklongdouble(&object_ptr->usage_raw, buffer);
+	safe_unpacklongdouble_array(&object_ptr->usage_tres_raw,
+				    &tmp32, buffer);
+
+	safe_unpack32(&object_ptr->used_jobs, buffer);
+	safe_unpack32(&object_ptr->used_submit_jobs, buffer);
+	safe_unpacklongdouble(&object_ptr->level_fs, buffer);
+	unpack_bit_str_hex(&object_ptr->valid_qos, buffer);
 
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdb_destroy_association_rec(object_ptr);
+	slurmdb_destroy_assoc_usage(object_ptr);
+	*object = NULL;
+
+	return SLURM_ERROR;
+}
+
+extern void slurmdb_pack_assoc_rec_with_usage(void *in, uint16_t rpc_version,
+					      Buf buffer)
+{
+	slurmdb_assoc_rec_t *object = (slurmdb_assoc_rec_t *)in;
+
+	slurmdb_pack_assoc_rec(in, rpc_version, buffer);
+	slurmdb_pack_assoc_usage(object->usage, rpc_version, buffer);
+
+	pack64_array(object->grp_tres_mins_ctld,
+		     object->usage->tres_cnt, buffer);
+	pack64_array(object->grp_tres_run_mins_ctld,
+		     object->usage->tres_cnt, buffer);
+	pack64_array(object->grp_tres_ctld,
+		     object->usage->tres_cnt, buffer);
+
+	pack64_array(object->max_tres_mins_ctld,
+		     object->usage->tres_cnt, buffer);
+	pack64_array(object->max_tres_run_mins_ctld,
+		     object->usage->tres_cnt, buffer);
+	pack64_array(object->max_tres_ctld,
+		     object->usage->tres_cnt, buffer);
+	pack64_array(object->max_tres_pn_ctld,
+		     object->usage->tres_cnt, buffer);
+
+}
+
+extern int slurmdb_unpack_assoc_rec_with_usage(void **object,
+					       uint16_t rpc_version,
+					       Buf buffer)
+{
+	int rc;
+	uint32_t uint32_tmp;
+	slurmdb_assoc_rec_t *object_ptr;
+
+	if ((rc = slurmdb_unpack_assoc_rec(object, rpc_version, buffer))
+	    != SLURM_SUCCESS)
+		return rc;
+
+	object_ptr = *object;
+
+	rc = slurmdb_unpack_assoc_usage((void **)&object_ptr->usage,
+					rpc_version, buffer);
+
+	safe_unpack64_array(&object_ptr->grp_tres_mins_ctld,
+			    &uint32_tmp, buffer);
+	safe_unpack64_array(&object_ptr->grp_tres_run_mins_ctld,
+			    &uint32_tmp, buffer);
+	safe_unpack64_array(&object_ptr->grp_tres_ctld,
+			    &uint32_tmp, buffer);
+
+	safe_unpack64_array(&object_ptr->max_tres_mins_ctld,
+			    &uint32_tmp, buffer);
+	safe_unpack64_array(&object_ptr->max_tres_run_mins_ctld,
+			    &uint32_tmp, buffer);
+	safe_unpack64_array(&object_ptr->max_tres_ctld,
+			    &uint32_tmp, buffer);
+	safe_unpack64_array(&object_ptr->max_tres_pn_ctld,
+			    &uint32_tmp, buffer);
+
+	return rc;
+
+unpack_error:
+	slurmdb_destroy_assoc_rec(object_ptr);
 	*object = NULL;
 	return SLURM_ERROR;
 }
 
 extern void slurmdb_pack_event_rec(void *in, uint16_t rpc_version, Buf buffer)
 {
+	uint32_t count = NO_VAL;
 	slurmdb_event_rec_t *object = (slurmdb_event_rec_t *)in;
 
-	if (!object) {
-		packnull(buffer);
-		packnull(buffer);
-		pack32(NO_VAL, buffer);
-		pack16(0, buffer);
-		packnull(buffer);
-		pack_time(0, buffer);
-		pack_time(0, buffer);
-		packnull(buffer);
-		pack32(NO_VAL, buffer);
-		pack16((uint16_t)NO_VAL, buffer);
-		return;
-	}
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		if (!object) {
+			packnull(buffer);
+			packnull(buffer);
+			pack16(0, buffer);
+			packnull(buffer);
+			pack_time(0, buffer);
+			pack_time(0, buffer);
+			packnull(buffer);
+			pack32(NO_VAL, buffer);
+			pack16((uint16_t)NO_VAL, buffer);
+			packnull(buffer);
+			return;
+		}
 
-	packstr(object->cluster, buffer);
-	packstr(object->cluster_nodes, buffer);
-	pack32(object->cpu_count, buffer);
-	pack16(object->event_type, buffer);
-	packstr(object->node_name, buffer);
-	pack_time(object->period_start, buffer);
-	pack_time(object->period_end, buffer);
-	packstr(object->reason, buffer);
-	pack32(object->reason_uid, buffer);
-	pack16(object->state, buffer);
+		packstr(object->cluster, buffer);
+		packstr(object->cluster_nodes, buffer);
+		pack16(object->event_type, buffer);
+		packstr(object->node_name, buffer);
+		pack_time(object->period_start, buffer);
+		pack_time(object->period_end, buffer);
+		packstr(object->reason, buffer);
+		pack32(object->reason_uid, buffer);
+		pack16(object->state, buffer);
+		packstr(object->tres_str, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		if (!object) {
+			packnull(buffer);
+			packnull(buffer);
+			pack32(NO_VAL, buffer);
+			pack16(0, buffer);
+			packnull(buffer);
+			pack_time(0, buffer);
+			pack_time(0, buffer);
+			packnull(buffer);
+			pack32(NO_VAL, buffer);
+			pack16((uint16_t)NO_VAL, buffer);
+			return;
+		}
+
+		packstr(object->cluster, buffer);
+		packstr(object->cluster_nodes, buffer);
+		count = (uint32_t)slurmdb_find_tres_count_in_string(
+			object->tres_str, TRES_CPU);
+		pack32(count, buffer);
+		pack16(object->event_type, buffer);
+		packstr(object->node_name, buffer);
+		pack_time(object->period_start, buffer);
+		pack_time(object->period_end, buffer);
+		packstr(object->reason, buffer);
+		pack32(object->reason_uid, buffer);
+		pack16(object->state, buffer);
+	}
 }
 
 extern int slurmdb_unpack_event_rec(void **object, uint16_t rpc_version,
 				    Buf buffer)
 {
 	uint32_t uint32_tmp;
+	uint32_t count;
 	slurmdb_event_rec_t *object_ptr = xmalloc(sizeof(slurmdb_event_rec_t));
 
 	*object = object_ptr;
 
-	safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->cluster_nodes, &uint32_tmp, buffer);
-	safe_unpack32(&object_ptr->cpu_count, buffer);
-	safe_unpack16(&object_ptr->event_type, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->node_name, &uint32_tmp, buffer);
-	safe_unpack_time(&object_ptr->period_start, buffer);
-	safe_unpack_time(&object_ptr->period_end, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->reason, &uint32_tmp, buffer);
-	safe_unpack32(&object_ptr->reason_uid, buffer);
-	safe_unpack16(&object_ptr->state, buffer);
-
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&object_ptr->cluster,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->cluster_nodes,
+				       &uint32_tmp, buffer);
+		safe_unpack16(&object_ptr->event_type, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->node_name,
+				       &uint32_tmp, buffer);
+		safe_unpack_time(&object_ptr->period_start, buffer);
+		safe_unpack_time(&object_ptr->period_end, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->reason,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->reason_uid, buffer);
+		safe_unpack16(&object_ptr->state, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->tres_str,
+				       &uint32_tmp, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&object_ptr->cluster,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->cluster_nodes,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&count, buffer);
+		object_ptr->tres_str = xstrdup_printf("%d=%u", TRES_CPU, count);
+		safe_unpack16(&object_ptr->event_type, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->node_name,
+				       &uint32_tmp, buffer);
+		safe_unpack_time(&object_ptr->period_start, buffer);
+		safe_unpack_time(&object_ptr->period_end, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->reason,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->reason_uid, buffer);
+		safe_unpack16(&object_ptr->state, buffer);
+	}
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -1120,8 +1643,93 @@ extern void slurmdb_pack_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 	slurmdb_qos_rec_t *object = (slurmdb_qos_rec_t *)in;
 	uint32_t count = NO_VAL;
 	char *tmp_info = NULL;
+	uint64_t uint64_tmp;
 
-	if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		if (!object) {
+			packnull(buffer);
+			pack32(0, buffer);
+
+			pack32(QOS_FLAG_NOTSET, buffer);
+
+			pack32(NO_VAL, buffer);
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			packnull(buffer);
+
+			packnull(buffer);
+
+			pack_bit_str_hex(NULL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack16(0, buffer);
+			pack32(0, buffer);
+
+			packdouble(NO_VAL64, buffer);
+			packdouble(NO_VAL64, buffer);
+			return;
+		}
+		packstr(object->description, buffer);
+		pack32(object->id, buffer);
+
+		pack32(object->flags, buffer);
+
+		pack32(object->grace_time, buffer);
+		packstr(object->grp_tres_mins, buffer);
+		packstr(object->grp_tres_run_mins, buffer);
+		packstr(object->grp_tres, buffer);
+		pack32(object->grp_jobs, buffer);
+		pack32(object->grp_submit_jobs, buffer);
+		pack32(object->grp_wall, buffer);
+
+		packstr(object->max_tres_mins_pj, buffer);
+		packstr(object->max_tres_run_mins_pu, buffer);
+		packstr(object->max_tres_pj, buffer);
+		packstr(object->max_tres_pn, buffer);
+		packstr(object->max_tres_pu, buffer);
+		pack32(object->max_jobs_pu, buffer);
+		pack32(object->max_submit_jobs_pu, buffer);
+		pack32(object->max_wall_pj, buffer);
+		packstr(object->min_tres_pj, buffer);
+
+		packstr(object->name, buffer);
+
+		pack_bit_str_hex(object->preempt_bitstr, buffer);
+
+		if (object->preempt_list)
+			count = list_count(object->preempt_list);
+
+		pack32(count, buffer);
+
+		if (count && count != NO_VAL) {
+			itr = list_iterator_create(object->preempt_list);
+			while ((tmp_info = list_next(itr))) {
+				packstr(tmp_info, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+
+		pack16(object->preempt_mode, buffer);
+		pack32(object->priority, buffer);
+
+		packdouble(object->usage_factor, buffer);
+		packdouble(object->usage_thres, buffer);
+	} else if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
 		if (!object) {
 			packnull(buffer);
 			pack32(0, buffer);
@@ -1157,8 +1765,8 @@ extern void slurmdb_pack_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 			pack16(0, buffer);
 			pack32(0, buffer);
 
-			packdouble((double)NO_VAL, buffer);
-			packdouble((double)NO_VAL, buffer);
+			packdouble(NO_VAL, buffer);
+			packdouble(NO_VAL, buffer);
 			return;
 		}
 		packstr(object->description, buffer);
@@ -1167,25 +1775,57 @@ extern void slurmdb_pack_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 		pack32(object->flags, buffer);
 
 		pack32(object->grace_time, buffer);
-		pack64(object->grp_cpu_mins, buffer);
-		pack64(object->grp_cpu_run_mins, buffer);
-		pack32(object->grp_cpus, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->grp_tres_mins, TRES_CPU);
+		if (uint64_tmp == INFINITE64)
+			uint64_tmp = (uint64_t)INFINITE;
+		pack64(uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->grp_tres_run_mins, TRES_CPU);
+		if (uint64_tmp == INFINITE64)
+			uint64_tmp = (uint64_t)INFINITE;
+		pack64(uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->grp_tres, TRES_CPU);
+		pack32((uint32_t)uint64_tmp, buffer);
 		pack32(object->grp_jobs, buffer);
-		pack32(object->grp_mem, buffer);
-		pack32(object->grp_nodes, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->grp_tres, TRES_MEM);
+		pack32((uint32_t)uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->grp_tres, TRES_NODE);
+		pack32((uint32_t)uint64_tmp, buffer);
 		pack32(object->grp_submit_jobs, buffer);
 		pack32(object->grp_wall, buffer);
 
-		pack64(object->max_cpu_mins_pj, buffer);
-		pack64(object->max_cpu_run_mins_pu, buffer);
-		pack32(object->max_cpus_pj, buffer);
-		pack32(object->max_cpus_pu, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_mins_pj, TRES_CPU);
+		if (uint64_tmp == INFINITE64)
+			uint64_tmp = (uint64_t)INFINITE;
+		pack64(uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_run_mins_pu, TRES_CPU);
+		if (uint64_tmp == INFINITE64)
+			uint64_tmp = (uint64_t)INFINITE;
+		pack64(uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_pj, TRES_CPU);
+		pack32((uint32_t)uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_pu, TRES_CPU);
+		pack32((uint32_t)uint64_tmp, buffer);
 		pack32(object->max_jobs_pu, buffer);
-		pack32(object->max_nodes_pj, buffer);
-		pack32(object->max_nodes_pu, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_pj, TRES_NODE);
+		pack32((uint32_t)uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_pu, TRES_NODE);
+		pack32((uint32_t)uint64_tmp, buffer);
 		pack32(object->max_submit_jobs_pu, buffer);
 		pack32(object->max_wall_pj, buffer);
-		pack32(object->min_cpus_pj, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->min_tres_pj, TRES_CPU);
+		pack32((uint32_t)uint64_tmp, buffer);
 
 		packstr(object->name, buffer);
 
@@ -1209,7 +1849,7 @@ extern void slurmdb_pack_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 
 		packdouble(object->usage_factor, buffer);
 		packdouble(object->usage_thres, buffer);
-	} else if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!object) {
 			packnull(buffer);
 			pack32(0, buffer);
@@ -1244,8 +1884,8 @@ extern void slurmdb_pack_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 			pack16(0, buffer);
 			pack32(0, buffer);
 
-			packdouble((double)NO_VAL, buffer);
-			packdouble((double)NO_VAL, buffer);
+			packdouble(NO_VAL, buffer);
+			packdouble(NO_VAL, buffer);
 			return;
 		}
 		packstr(object->description, buffer);
@@ -1254,22 +1894,52 @@ extern void slurmdb_pack_qos_rec(void *in, uint16_t rpc_version, Buf buffer)
 		pack32(object->flags, buffer);
 
 		pack32(object->grace_time, buffer);
-		pack64(object->grp_cpu_mins, buffer);
-		pack64(object->grp_cpu_run_mins, buffer);
-		pack32(object->grp_cpus, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->grp_tres_mins, TRES_CPU);
+		if (uint64_tmp == INFINITE64)
+			uint64_tmp = (uint64_t)INFINITE;
+		pack64(uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->grp_tres_run_mins, TRES_CPU);
+		if (uint64_tmp == INFINITE64)
+			uint64_tmp = (uint64_t)INFINITE;
+		pack64(uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->grp_tres, TRES_CPU);
+		pack32((uint32_t)uint64_tmp, buffer);
 		pack32(object->grp_jobs, buffer);
-		pack32(object->grp_mem, buffer);
-		pack32(object->grp_nodes, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->grp_tres, TRES_MEM);
+		pack32((uint32_t)uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->grp_tres, TRES_NODE);
+		pack32((uint32_t)uint64_tmp, buffer);
 		pack32(object->grp_submit_jobs, buffer);
 		pack32(object->grp_wall, buffer);
 
-		pack64(object->max_cpu_mins_pj, buffer);
-		pack64(object->max_cpu_run_mins_pu, buffer);
-		pack32(object->max_cpus_pj, buffer);
-		pack32(object->max_cpus_pu, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_mins_pj, TRES_CPU);
+		if (uint64_tmp == INFINITE64)
+			uint64_tmp = (uint64_t)INFINITE;
+		pack64(uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_run_mins_pu, TRES_CPU);
+		if (uint64_tmp == INFINITE64)
+			uint64_tmp = (uint64_t)INFINITE;
+		pack64(uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_pj, TRES_CPU);
+		pack32((uint32_t)uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_pu, TRES_CPU);
+		pack32((uint32_t)uint64_tmp, buffer);
 		pack32(object->max_jobs_pu, buffer);
-		pack32(object->max_nodes_pj, buffer);
-		pack32(object->max_nodes_pu, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_pj, TRES_NODE);
+		pack32((uint32_t)uint64_tmp, buffer);
+		uint64_tmp = slurmdb_find_tres_count_in_string(
+			object->max_tres_pu, TRES_NODE);
+		pack32((uint32_t)uint64_tmp, buffer);
 		pack32(object->max_submit_jobs_pu, buffer);
 		pack32(object->max_wall_pj, buffer);
 
@@ -1302,6 +1972,7 @@ extern int slurmdb_unpack_qos_rec(void **object, uint16_t rpc_version,
 				  Buf buffer)
 {
 	uint32_t uint32_tmp;
+	uint64_t uint64_tmp;
 	int i;
 	slurmdb_qos_rec_t *object_ptr = xmalloc(sizeof(slurmdb_qos_rec_t));
 	uint32_t count = NO_VAL;
@@ -1309,9 +1980,64 @@ extern int slurmdb_unpack_qos_rec(void **object, uint16_t rpc_version,
 
 	*object = object_ptr;
 
-	slurmdb_init_qos_rec(object_ptr, 0);
+	slurmdb_init_qos_rec(object_ptr, 0, NO_VAL);
 
-	if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&object_ptr->description,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->id, buffer);
+
+		safe_unpack32(&object_ptr->flags, buffer);
+
+		safe_unpack32(&object_ptr->grace_time, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->grp_tres_mins,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->grp_tres_run_mins,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->grp_tres,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->grp_jobs, buffer);
+		safe_unpack32(&object_ptr->grp_submit_jobs, buffer);
+		safe_unpack32(&object_ptr->grp_wall, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->max_tres_mins_pj,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->max_tres_run_mins_pu,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->max_tres_pj,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->max_tres_pn,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->max_tres_pu,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&object_ptr->max_jobs_pu, buffer);
+		safe_unpack32(&object_ptr->max_submit_jobs_pu, buffer);
+		safe_unpack32(&object_ptr->max_wall_pj, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->min_tres_pj,
+				       &uint32_tmp, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+
+		unpack_bit_str_hex(&object_ptr->preempt_bitstr, buffer);
+
+		safe_unpack32(&count, buffer);
+		if (count != NO_VAL) {
+			object_ptr->preempt_list =
+				list_create(slurm_destroy_char);
+			for (i = 0; i < count; i++) {
+				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
+						       buffer);
+				list_append(object_ptr->preempt_list,
+					    tmp_info);
+			}
+		}
+
+		safe_unpack16(&object_ptr->preempt_mode, buffer);
+		safe_unpack32(&object_ptr->priority, buffer);
+
+		safe_unpackdouble(&object_ptr->usage_factor, buffer);
+		safe_unpackdouble(&object_ptr->usage_thres, buffer);
+	} else if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&object_ptr->description,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&object_ptr->id, buffer);
@@ -1319,25 +2045,121 @@ extern int slurmdb_unpack_qos_rec(void **object, uint16_t rpc_version,
 		safe_unpack32(&object_ptr->flags, buffer);
 
 		safe_unpack32(&object_ptr->grace_time, buffer);
-		safe_unpack64(&object_ptr->grp_cpu_mins, buffer);
-		safe_unpack64(&object_ptr->grp_cpu_run_mins, buffer);
-		safe_unpack32(&object_ptr->grp_cpus, buffer);
+		safe_unpack64(&uint64_tmp, buffer);
+		if (uint64_tmp == (uint64_t)INFINITE)
+			uint64_tmp = INFINITE64;
+		if (uint64_tmp != (uint32_t)NO_VAL)
+			object_ptr->grp_tres_mins =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack64(&uint64_tmp, buffer);
+		if (uint64_tmp == (uint64_t)INFINITE)
+			uint64_tmp = INFINITE64;
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->grp_tres_run_mins =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->grp_tres =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
 		safe_unpack32(&object_ptr->grp_jobs, buffer);
-		safe_unpack32(&object_ptr->grp_mem, buffer);
-		safe_unpack32(&object_ptr->grp_nodes, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			xstrfmtcat(object_ptr->grp_tres, "%s%u=%"PRIu64,
+				   object_ptr->grp_tres ? "," : "",
+				   TRES_MEM, uint64_tmp);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			xstrfmtcat(object_ptr->grp_tres, "%s%u=%"PRIu64,
+				   object_ptr->grp_tres ? "," : "",
+				   TRES_NODE, uint64_tmp);
 		safe_unpack32(&object_ptr->grp_submit_jobs, buffer);
 		safe_unpack32(&object_ptr->grp_wall, buffer);
 
-		safe_unpack64(&object_ptr->max_cpu_mins_pj, buffer);
-		safe_unpack64(&object_ptr->max_cpu_run_mins_pu, buffer);
-		safe_unpack32(&object_ptr->max_cpus_pj, buffer);
-		safe_unpack32(&object_ptr->max_cpus_pu, buffer);
+		safe_unpack64(&uint64_tmp, buffer);
+		if (uint64_tmp == (uint64_t)INFINITE)
+			uint64_tmp = INFINITE64;
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->max_tres_mins_pj =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack64(&uint64_tmp, buffer);
+		if (uint64_tmp == (uint64_t)INFINITE)
+			uint64_tmp = INFINITE64;
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->max_tres_run_mins_pu =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->max_tres_pj =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->max_tres_pu =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
 		safe_unpack32(&object_ptr->max_jobs_pu, buffer);
-		safe_unpack32(&object_ptr->max_nodes_pj, buffer);
-		safe_unpack32(&object_ptr->max_nodes_pu, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			xstrfmtcat(object_ptr->max_tres_pj, "%s%u=%"PRIu64,
+				   object_ptr->max_tres_pj ? "," : "",
+				   TRES_NODE, uint64_tmp);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			xstrfmtcat(object_ptr->max_tres_pu, "%s%u=%"PRIu64,
+				   object_ptr->max_tres_pu ? "," : "",
+				   TRES_NODE, uint64_tmp);
 		safe_unpack32(&object_ptr->max_submit_jobs_pu, buffer);
 		safe_unpack32(&object_ptr->max_wall_pj, buffer);
-		safe_unpack32(&object_ptr->min_cpus_pj, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			xstrfmtcat(object_ptr->min_tres_pj, "%s%u=%"PRIu64,
+				   object_ptr->min_tres_pj ? "," : "",
+				   TRES_CPU, uint64_tmp);
 
 		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
 
@@ -1360,7 +2182,7 @@ extern int slurmdb_unpack_qos_rec(void **object, uint16_t rpc_version,
 
 		safe_unpackdouble(&object_ptr->usage_factor, buffer);
 		safe_unpackdouble(&object_ptr->usage_thres, buffer);
-	} else if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&object_ptr->description,
 				       &uint32_tmp, buffer);
 		safe_unpack32(&object_ptr->id, buffer);
@@ -1368,22 +2190,109 @@ extern int slurmdb_unpack_qos_rec(void **object, uint16_t rpc_version,
 		safe_unpack32(&object_ptr->flags, buffer);
 
 		safe_unpack32(&object_ptr->grace_time, buffer);
-		safe_unpack64(&object_ptr->grp_cpu_mins, buffer);
-		safe_unpack64(&object_ptr->grp_cpu_run_mins, buffer);
-		safe_unpack32(&object_ptr->grp_cpus, buffer);
+		safe_unpack64(&uint64_tmp, buffer);
+		if (uint64_tmp == (uint64_t)INFINITE)
+			uint64_tmp = INFINITE64;
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->grp_tres_mins =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack64(&uint64_tmp, buffer);
+		if (uint64_tmp == (uint64_t)INFINITE)
+			uint64_tmp = INFINITE64;
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->grp_tres_run_mins =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->grp_tres =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
 		safe_unpack32(&object_ptr->grp_jobs, buffer);
-		safe_unpack32(&object_ptr->grp_mem, buffer);
-		safe_unpack32(&object_ptr->grp_nodes, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			xstrfmtcat(object_ptr->grp_tres, "%s%u=%"PRIu64,
+				   object_ptr->grp_tres ? "," : "",
+				   TRES_MEM, uint64_tmp);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			xstrfmtcat(object_ptr->grp_tres, "%s%u=%"PRIu64,
+				   object_ptr->grp_tres ? "," : "",
+				   TRES_NODE, uint64_tmp);
 		safe_unpack32(&object_ptr->grp_submit_jobs, buffer);
 		safe_unpack32(&object_ptr->grp_wall, buffer);
 
-		safe_unpack64(&object_ptr->max_cpu_mins_pj, buffer);
-		safe_unpack64(&object_ptr->max_cpu_run_mins_pu, buffer);
-		safe_unpack32(&object_ptr->max_cpus_pj, buffer);
-		safe_unpack32(&object_ptr->max_cpus_pu, buffer);
+		safe_unpack64(&uint64_tmp, buffer);
+		if (uint64_tmp == (uint64_t)INFINITE)
+			uint64_tmp = INFINITE64;
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->max_tres_mins_pj =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack64(&uint64_tmp, buffer);
+		if (uint64_tmp == (uint64_t)INFINITE)
+			uint64_tmp = INFINITE64;
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->max_tres_run_mins_pu =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->max_tres_pj =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			object_ptr->max_tres_pu =
+				xstrdup_printf("%u=%"PRIu64, TRES_CPU,
+					       uint64_tmp);
 		safe_unpack32(&object_ptr->max_jobs_pu, buffer);
-		safe_unpack32(&object_ptr->max_nodes_pj, buffer);
-		safe_unpack32(&object_ptr->max_nodes_pu, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			xstrfmtcat(object_ptr->max_tres_pj, "%s%u=%"PRIu64,
+				   object_ptr->max_tres_pj ? "," : "",
+				   TRES_NODE, uint64_tmp);
+		safe_unpack32(&uint32_tmp, buffer);
+		if (uint32_tmp == INFINITE)
+			uint64_tmp = INFINITE64;
+		else
+			uint64_tmp = uint32_tmp;
+
+		if (uint64_tmp != (uint64_t)NO_VAL)
+			xstrfmtcat(object_ptr->max_tres_pu, "%s%u=%"PRIu64,
+				   object_ptr->max_tres_pu ? "," : "",
+				   TRES_NODE, uint64_tmp);
 		safe_unpack32(&object_ptr->max_submit_jobs_pu, buffer);
 		safe_unpack32(&object_ptr->max_wall_pj, buffer);
 
@@ -1418,34 +2327,185 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+extern void slurmdb_pack_qos_usage(void *in, uint16_t rpc_version, Buf buffer)
+{
+	slurmdb_qos_usage_t *usage = (slurmdb_qos_usage_t *)in;
+	uint32_t count;
+	ListIterator itr;
+	void *used_limits;
+
+	pack32(usage->grp_used_jobs, buffer);
+	pack32(usage->grp_used_submit_jobs, buffer);
+	pack64_array(usage->grp_used_tres, usage->tres_cnt, buffer);
+	pack64_array(usage->grp_used_tres_run_secs, usage->tres_cnt, buffer);
+	packdouble(usage->grp_used_wall, buffer);
+	packdouble(usage->norm_priority, buffer);
+	packlongdouble(usage->usage_raw, buffer);
+	packlongdouble_array(usage->usage_tres_raw, usage->tres_cnt, buffer);
+
+	if (!usage->user_limit_list ||
+	    !(count = list_count(usage->user_limit_list)))
+		count = NO_VAL;
+
+	pack32(count, buffer);
+	if (count != NO_VAL) {
+		itr = list_iterator_create(usage->user_limit_list);
+		while ((used_limits = list_next(itr)))
+			slurmdb_pack_used_limits(used_limits, usage->tres_cnt,
+						 rpc_version, buffer);
+		list_iterator_destroy(itr);
+	}
+}
+
+extern int slurmdb_unpack_qos_usage(void **object, uint16_t rpc_version,
+				    Buf buffer)
+{
+	slurmdb_qos_usage_t *object_ptr = xmalloc(sizeof(slurmdb_qos_usage_t));
+
+	uint32_t count;
+	void *used_limits;
+	int i;
+
+	*object = object_ptr;
+
+	safe_unpack32(&object_ptr->grp_used_jobs, buffer);
+	safe_unpack32(&object_ptr->grp_used_submit_jobs, buffer);
+	safe_unpack64_array(&object_ptr->grp_used_tres,
+			    &object_ptr->tres_cnt, buffer);
+	safe_unpack64_array(&object_ptr->grp_used_tres_run_secs,
+			    &object_ptr->tres_cnt, buffer);
+	safe_unpackdouble(&object_ptr->grp_used_wall, buffer);
+	safe_unpackdouble(&object_ptr->norm_priority, buffer);
+	safe_unpacklongdouble(&object_ptr->usage_raw, buffer);
+	safe_unpacklongdouble_array(&object_ptr->usage_tres_raw,
+				    &count, buffer);
+
+	safe_unpack32(&count, buffer);
+	if (count != NO_VAL) {
+		object_ptr->user_limit_list =
+			list_create(slurmdb_destroy_used_limits);
+		for (i = 0; i < count; i++) {
+			if (slurmdb_unpack_used_limits(&used_limits,
+						       object_ptr->tres_cnt,
+						       rpc_version, buffer)
+			    != SLURM_SUCCESS)
+				goto unpack_error;
+			list_append(object_ptr->user_limit_list, used_limits);
+		}
+	}
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurmdb_destroy_qos_usage(object_ptr);
+	*object = NULL;
+
+	return SLURM_ERROR;
+}
+
+extern void slurmdb_pack_qos_rec_with_usage(void *in, uint16_t rpc_version,
+					    Buf buffer)
+{
+	slurmdb_qos_rec_t *object = (slurmdb_qos_rec_t *)in;
+
+	slurmdb_pack_qos_rec(in, rpc_version, buffer);
+
+	pack64_array(object->grp_tres_mins_ctld,
+		     object->usage->tres_cnt, buffer);
+	pack64_array(object->grp_tres_run_mins_ctld,
+		     object->usage->tres_cnt, buffer);
+	pack64_array(object->grp_tres_ctld,
+		     object->usage->tres_cnt, buffer);
+
+	pack64_array(object->max_tres_mins_pj_ctld,
+		     object->usage->tres_cnt, buffer);
+	pack64_array(object->max_tres_run_mins_pu_ctld,
+		     object->usage->tres_cnt, buffer);
+	pack64_array(object->max_tres_pj_ctld,
+		     object->usage->tres_cnt, buffer);
+	pack64_array(object->max_tres_pn_ctld,
+		     object->usage->tres_cnt, buffer);
+	pack64_array(object->max_tres_pu_ctld,
+		     object->usage->tres_cnt, buffer);
+	pack64_array(object->min_tres_pj_ctld,
+		     object->usage->tres_cnt, buffer);
+
+	slurmdb_pack_qos_usage(object->usage,
+			       rpc_version, buffer);
+}
+
+extern int slurmdb_unpack_qos_rec_with_usage(void **object,
+					     uint16_t rpc_version,
+					     Buf buffer)
+{
+	int rc;
+	slurmdb_qos_rec_t *object_ptr;
+	uint32_t uint32_tmp;
+
+	if ((rc = slurmdb_unpack_qos_rec(object, rpc_version, buffer))
+	    != SLURM_SUCCESS)
+		return rc;
+
+	object_ptr = *object;
+
+	safe_unpack64_array(&object_ptr->grp_tres_mins_ctld,
+			    &uint32_tmp, buffer);
+	safe_unpack64_array(&object_ptr->grp_tres_run_mins_ctld,
+			    &uint32_tmp, buffer);
+	safe_unpack64_array(&object_ptr->grp_tres_ctld,
+			    &uint32_tmp, buffer);
+
+	safe_unpack64_array(&object_ptr->max_tres_mins_pj_ctld,
+			    &uint32_tmp, buffer);
+	safe_unpack64_array(&object_ptr->max_tres_run_mins_pu_ctld,
+			    &uint32_tmp, buffer);
+	safe_unpack64_array(&object_ptr->max_tres_pj_ctld,
+			    &uint32_tmp, buffer);
+	safe_unpack64_array(&object_ptr->max_tres_pn_ctld,
+			    &uint32_tmp, buffer);
+	safe_unpack64_array(&object_ptr->max_tres_pu_ctld,
+			    &uint32_tmp, buffer);
+	safe_unpack64_array(&object_ptr->min_tres_pj_ctld,
+			    &uint32_tmp, buffer);
+
+	rc = slurmdb_unpack_qos_usage((void **)&object_ptr->usage,
+				      rpc_version, buffer);
+
+	return rc;
+
+unpack_error:
+	slurmdb_destroy_qos_rec(object_ptr);
+	*object = NULL;
+	return SLURM_ERROR;
+}
+
 extern void slurmdb_pack_reservation_rec(void *in, uint16_t rpc_version,
 					 Buf buffer)
 {
 	slurmdb_reservation_rec_t *object = (slurmdb_reservation_rec_t *)in;
+	uint32_t count = NO_VAL;
+	ListIterator itr;
+	slurmdb_tres_rec_t *tres_rec;
 
-	if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		if (!object) {
-			pack64(0, buffer);
 			packnull(buffer);
 			packnull(buffer);
 			pack32((uint32_t)NO_VAL, buffer);
-			pack64(0, buffer);
 			pack32((uint32_t)NO_VAL, buffer);
-			pack32(0, buffer);
 			packnull(buffer);
 			packnull(buffer);
 			packnull(buffer);
 			pack_time(0, buffer);
 			pack_time(0, buffer);
 			pack_time(0, buffer);
+			packnull(buffer);
+			pack32((uint32_t)NO_VAL, buffer);
 			return;
 		}
 
-		pack64(object->alloc_secs, buffer);
 		packstr(object->assocs, buffer);
 		packstr(object->cluster, buffer);
-		pack32(object->cpus, buffer);
-		pack64(object->down_secs, buffer);
 		pack32(object->flags, buffer);
 		pack32(object->id, buffer);
 		packstr(object->name, buffer);
@@ -1454,14 +2514,34 @@ extern void slurmdb_pack_reservation_rec(void *in, uint16_t rpc_version,
 		pack_time(object->time_end, buffer);
 		pack_time(object->time_start, buffer);
 		pack_time(object->time_start_prev, buffer);
-	} else if (rpc_version >= SLURMDBD_2_6_VERSION) {
+		packstr(object->tres_str, buffer);
+
+		if (object->tres_list)
+			count = list_count(object->tres_list);
+		else
+			count = NO_VAL;
+
+		pack32(count, buffer);
+
+		if (count && count != NO_VAL) {
+			itr = list_iterator_create(object->tres_list);
+			while ((tres_rec = list_next(itr))) {
+				slurmdb_pack_tres_rec(
+					tres_rec, rpc_version, buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+	} else if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		slurmdb_tres_rec_t *tres_rec = NULL;
+		int tres_id = TRES_CPU;
+
 		if (!object) {
 			pack64(0, buffer);
 			packnull(buffer);
 			packnull(buffer);
 			pack32((uint32_t)NO_VAL, buffer);
 			pack64(0, buffer);
-			pack16((uint16_t)NO_VAL, buffer);
+			pack32((uint32_t)NO_VAL, buffer);
 			pack32(0, buffer);
 			packnull(buffer);
 			packnull(buffer);
@@ -1472,12 +2552,26 @@ extern void slurmdb_pack_reservation_rec(void *in, uint16_t rpc_version,
 			return;
 		}
 
-		pack64(object->alloc_secs, buffer);
+		if (object->tres_list)
+			tres_rec = list_find_first(
+				object->tres_list,
+				slurmdb_find_tres_in_list,
+				&tres_id);
+		if (tres_rec)
+			pack64(tres_rec->alloc_secs, buffer);
+		else
+			pack64(0, buffer);
+
 		packstr(object->assocs, buffer);
 		packstr(object->cluster, buffer);
-		pack32(object->cpus, buffer);
-		pack64(object->down_secs, buffer);
-		pack16((uint16_t)object->flags, buffer);
+		if (tres_rec)
+			count = (uint32_t)tres_rec->count;
+		else
+			count = (uint32_t)slurmdb_find_tres_count_in_string(
+				object->tres_str, TRES_CPU);
+		pack32(count, buffer);
+		pack64(0, buffer);
+		pack32(object->flags, buffer);
 		pack32(object->id, buffer);
 		packstr(object->name, buffer);
 		packstr(object->nodes, buffer);
@@ -1491,20 +2585,19 @@ extern void slurmdb_pack_reservation_rec(void *in, uint16_t rpc_version,
 extern int slurmdb_unpack_reservation_rec(void **object, uint16_t rpc_version,
 					  Buf buffer)
 {
-	uint32_t uint32_tmp;
+	uint32_t uint32_tmp, count;
+	int i;
+	void *tmp_info;
 	slurmdb_reservation_rec_t *object_ptr =
 		xmalloc(sizeof(slurmdb_reservation_rec_t));
 
 	*object = object_ptr;
 
-	if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
-		safe_unpack64(&object_ptr->alloc_secs, buffer);
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&object_ptr->assocs, &uint32_tmp,
 				       buffer);
 		safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp,
 				       buffer);
-		safe_unpack32(&object_ptr->cpus, buffer);
-		safe_unpack64(&object_ptr->down_secs, buffer);
 		safe_unpack32(&object_ptr->flags, buffer);
 		safe_unpack32(&object_ptr->id, buffer);
 		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
@@ -1514,17 +2607,34 @@ extern int slurmdb_unpack_reservation_rec(void **object, uint16_t rpc_version,
 		safe_unpack_time(&object_ptr->time_end, buffer);
 		safe_unpack_time(&object_ptr->time_start, buffer);
 		safe_unpack_time(&object_ptr->time_start_prev, buffer);
-	} else if (rpc_version >= SLURMDBD_2_6_VERSION) {
-		uint16_t flags;
-		safe_unpack64(&object_ptr->alloc_secs, buffer);
+		safe_unpackstr_xmalloc(&object_ptr->tres_str,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&count, buffer);
+		if (count != NO_VAL) {
+			object_ptr->tres_list =
+				list_create(slurmdb_destroy_tres_rec);
+			for (i=0; i<count; i++) {
+				if (slurmdb_unpack_tres_rec(
+					    &tmp_info, rpc_version, buffer)
+				    != SLURM_SUCCESS)
+					goto unpack_error;
+				list_append(object_ptr->tres_list, tmp_info);
+			}
+		}
+	} else if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		uint64_t uint64_tmp;
+		safe_unpack64(&uint64_tmp, buffer); /* not needed (alloc_secs) */
 		safe_unpackstr_xmalloc(&object_ptr->assocs, &uint32_tmp,
 				       buffer);
 		safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp,
 				       buffer);
-		safe_unpack32(&object_ptr->cpus, buffer);
-		safe_unpack64(&object_ptr->down_secs, buffer);
-		safe_unpack16(&flags, buffer);
-		object_ptr->flags = flags;
+
+		safe_unpack32(&uint32_tmp, buffer);
+		object_ptr->tres_str = xstrdup_printf(
+			"%d=%u", TRES_CPU, uint32_tmp);
+
+		safe_unpack64(&uint64_tmp, buffer); /* not needed (down_secs) */
+		safe_unpack32(&object_ptr->flags, buffer);
 		safe_unpack32(&object_ptr->id, buffer);
 		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&object_ptr->nodes, &uint32_tmp, buffer);
@@ -1654,7 +2764,7 @@ extern void slurmdb_pack_txn_rec(void *in, uint16_t rpc_version, Buf buffer)
 {
 	slurmdb_txn_rec_t *object = (slurmdb_txn_rec_t *)in;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!object) {
 			packnull(buffer);
 			pack16(0, buffer);
@@ -1687,7 +2797,7 @@ extern int slurmdb_unpack_txn_rec(
 	slurmdb_txn_rec_t *object_ptr = xmalloc(sizeof(slurmdb_txn_rec_t));
 
 	*object = object_ptr;
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&object_ptr->accts,
 				       &uint32_tmp, buffer);
 		safe_unpack16(&object_ptr->action, buffer);
@@ -1720,7 +2830,7 @@ extern void slurmdb_pack_wckey_rec(void *in, uint16_t rpc_version, Buf buffer)
 	uint32_t count = NO_VAL;
 	slurmdb_wckey_rec_t *object = (slurmdb_wckey_rec_t *)in;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!object) {
 			pack32(NO_VAL, buffer);
 
@@ -1750,105 +2860,288 @@ extern void slurmdb_pack_wckey_rec(void *in, uint16_t rpc_version, Buf buffer)
 			list_iterator_destroy(itr);
 		}
 
-		packstr(object->cluster, buffer);
+		packstr(object->cluster, buffer);
+
+		pack32(object->id, buffer);
+
+		pack16(object->is_def, buffer);
+
+		packstr(object->name, buffer);
+
+		pack32(object->uid, buffer);
+
+		packstr(object->user, buffer);
+	}
+}
+
+extern int slurmdb_unpack_wckey_rec(void **object, uint16_t rpc_version,
+				    Buf buffer)
+{
+	uint32_t uint32_tmp;
+	int i;
+	uint32_t count;
+	slurmdb_wckey_rec_t *object_ptr =
+		xmalloc(sizeof(slurmdb_wckey_rec_t));
+	slurmdb_accounting_rec_t *slurmdb_info = NULL;
+
+	*object = object_ptr;
+
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		safe_unpack32(&count, buffer);
+		if (count != NO_VAL) {
+			object_ptr->accounting_list =
+				list_create(slurmdb_destroy_accounting_rec);
+			for(i=0; i<count; i++) {
+				if (slurmdb_unpack_accounting_rec(
+					    (void **)&slurmdb_info,
+					    rpc_version,
+					    buffer) == SLURM_ERROR)
+					goto unpack_error;
+				list_append(object_ptr->accounting_list,
+					    slurmdb_info);
+			}
+		}
+
+		safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp,
+				       buffer);
+
+		safe_unpack32(&object_ptr->id, buffer);
+
+		safe_unpack16(&object_ptr->is_def, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+
+		safe_unpack32(&object_ptr->uid, buffer);
+
+		safe_unpackstr_xmalloc(&object_ptr->user, &uint32_tmp, buffer);
+	}
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurmdb_destroy_wckey_rec(object_ptr);
+	*object = NULL;
+	return SLURM_ERROR;
+}
+
+extern void slurmdb_pack_archive_rec(void *in, uint16_t rpc_version, Buf buffer)
+{
+	slurmdb_archive_rec_t *object = (slurmdb_archive_rec_t *)in;
+
+	if (!object) {
+		packnull(buffer);
+		packnull(buffer);
+		return;
+	}
+
+	packstr(object->archive_file, buffer);
+	packstr(object->insert, buffer);
+}
+
+extern int slurmdb_unpack_archive_rec(void **object, uint16_t rpc_version,
+				      Buf buffer)
+{
+	uint32_t uint32_tmp;
+	slurmdb_archive_rec_t *object_ptr =
+		xmalloc(sizeof(slurmdb_archive_rec_t));
+
+	*object = object_ptr;
+
+	safe_unpackstr_xmalloc(&object_ptr->archive_file, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&object_ptr->insert, &uint32_tmp, buffer);
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurmdb_destroy_archive_rec(object_ptr);
+	*object = NULL;
+	return SLURM_ERROR;
+
+}
+
+extern void slurmdb_pack_tres_cond(void *in, uint16_t rpc_version, Buf buffer)
+{
+	slurmdb_tres_cond_t *object = (slurmdb_tres_cond_t *)in;
+	ListIterator itr = NULL;
+	uint32_t count;
+	char *tmp_info = NULL;
+
+	if (!object) {
+		pack64(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack16(0, buffer);
+		return;
+	}
+
+	pack64(object->count, buffer);
 
-		pack32(object->id, buffer);
+	if (object->id_list)
+		count = list_count(object->id_list);
+	else
+		count = NO_VAL;
+	pack32(count, buffer);
 
-		pack16(object->is_def, buffer);
+	if (count && count != NO_VAL) {
+		itr = list_iterator_create(object->id_list);
+		while ((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
 
-		packstr(object->name, buffer);
+	if (object->name_list)
+		count = list_count(object->name_list);
+	else
+		count = NO_VAL;
+	pack32(count, buffer);
 
-		pack32(object->uid, buffer);
+	if (count && count != NO_VAL) {
+		itr = list_iterator_create(object->name_list);
+		while ((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
+	}
 
-		packstr(object->user, buffer);
+	if (object->type_list)
+		count = list_count(object->type_list);
+	else
+		count = NO_VAL;
+	pack32(count, buffer);
+
+	if (count && count != NO_VAL) {
+		itr = list_iterator_create(object->type_list);
+		while ((tmp_info = list_next(itr))) {
+			packstr(tmp_info, buffer);
+		}
+		list_iterator_destroy(itr);
 	}
+
+	safe_pack16(object->with_deleted, buffer);
 }
 
-extern int slurmdb_unpack_wckey_rec(void **object, uint16_t rpc_version,
+extern int slurmdb_unpack_tres_cond(void **object, uint16_t rpc_version,
 				    Buf buffer)
 {
 	uint32_t uint32_tmp;
 	int i;
 	uint32_t count;
-	slurmdb_wckey_rec_t *object_ptr =
-		xmalloc(sizeof(slurmdb_wckey_rec_t));
-	slurmdb_accounting_rec_t *slurmdb_info = NULL;
+	char *tmp_info = NULL;
+	slurmdb_tres_cond_t *object_ptr =
+		xmalloc(sizeof(slurmdb_tres_cond_t));
 
 	*object = object_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
-		safe_unpack32(&count, buffer);
-		if (count != NO_VAL) {
-			object_ptr->accounting_list =
-				list_create(slurmdb_destroy_accounting_rec);
-			for(i=0; i<count; i++) {
-				if (slurmdb_unpack_accounting_rec(
-					    (void **)&slurmdb_info,
-					    rpc_version,
-					    buffer) == SLURM_ERROR)
-					goto unpack_error;
-				list_append(object_ptr->accounting_list,
-					    slurmdb_info);
-			}
-		}
-
-		safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp,
-				       buffer);
-
-		safe_unpack32(&object_ptr->id, buffer);
-
-		safe_unpack16(&object_ptr->is_def, buffer);
-
-		safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+	safe_unpack64(&object_ptr->count, buffer);
 
-		safe_unpack32(&object_ptr->uid, buffer);
+	safe_unpack32(&count, buffer);
+	if (count != NO_VAL) {
+		if (!object_ptr->id_list)
+			object_ptr->id_list =
+				list_create(slurm_destroy_char);
+		for (i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(
+				&tmp_info, &uint32_tmp, buffer);
+			list_append(object_ptr->id_list,
+				    tmp_info);
+		}
+	}
 
-		safe_unpackstr_xmalloc(&object_ptr->user, &uint32_tmp, buffer);
+	safe_unpack32(&count, buffer);
+	if (count != NO_VAL) {
+		if (!object_ptr->name_list)
+			object_ptr->name_list =
+				list_create(slurm_destroy_char);
+		for (i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(
+				&tmp_info, &uint32_tmp, buffer);
+			list_append(object_ptr->name_list,
+				    tmp_info);
+		}
+	}
+	safe_unpack32(&count, buffer);
+	if (count != NO_VAL) {
+		if (!object_ptr->type_list)
+			object_ptr->type_list =
+				list_create(slurm_destroy_char);
+		for (i=0; i<count; i++) {
+			safe_unpackstr_xmalloc(
+				&tmp_info, &uint32_tmp, buffer);
+			list_append(object_ptr->type_list,
+				    tmp_info);
+		}
 	}
 
+	safe_unpack16(&object_ptr->with_deleted, buffer);
+
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdb_destroy_wckey_rec(object_ptr);
+	slurmdb_destroy_tres_cond(object_ptr);
 	*object = NULL;
+
 	return SLURM_ERROR;
 }
 
-extern void slurmdb_pack_archive_rec(void *in, uint16_t rpc_version, Buf buffer)
+extern void slurmdb_pack_tres_rec(void *in, uint16_t rpc_version, Buf buffer)
 {
-	slurmdb_archive_rec_t *object = (slurmdb_archive_rec_t *)in;
+	slurmdb_tres_rec_t *object = (slurmdb_tres_rec_t *)in;
 
 	if (!object) {
+		pack64(0, buffer);
+		pack64(0, buffer);
+		pack32(0, buffer);
 		packnull(buffer);
 		packnull(buffer);
 		return;
 	}
 
-	packstr(object->archive_file, buffer);
-	packstr(object->insert, buffer);
+	pack64(object->alloc_secs, buffer);
+	pack64(object->count, buffer);
+	pack32(object->id, buffer);
+	packstr(object->name, buffer);
+	packstr(object->type, buffer);
 }
 
-extern int slurmdb_unpack_archive_rec(void **object, uint16_t rpc_version,
-				      Buf buffer)
+extern int slurmdb_unpack_tres_rec_noalloc(
+	slurmdb_tres_rec_t *object_ptr, uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
-	slurmdb_archive_rec_t *object_ptr =
-		xmalloc(sizeof(slurmdb_archive_rec_t));
-
-	*object = object_ptr;
 
-	safe_unpackstr_xmalloc(&object_ptr->archive_file, &uint32_tmp, buffer);
-	safe_unpackstr_xmalloc(&object_ptr->insert, &uint32_tmp, buffer);
+	safe_unpack64(&object_ptr->alloc_secs, buffer);
+	safe_unpack64(&object_ptr->count, buffer);
+	safe_unpack32(&object_ptr->id, buffer);
+	safe_unpackstr_xmalloc(&object_ptr->name, &uint32_tmp, buffer);
+	safe_unpackstr_xmalloc(&object_ptr->type, &uint32_tmp, buffer);
 
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdb_destroy_archive_rec(object_ptr);
-	*object = NULL;
 	return SLURM_ERROR;
 
 }
 
+extern int slurmdb_unpack_tres_rec(void **object, uint16_t rpc_version,
+				    Buf buffer)
+{
+	int rc;
+	slurmdb_tres_rec_t *object_ptr =
+		xmalloc(sizeof(slurmdb_tres_rec_t));
+
+	*object = object_ptr;
+
+	rc = slurmdb_unpack_tres_rec_noalloc(object_ptr, rpc_version, buffer);
+
+	if (rc != SLURM_SUCCESS) {
+		slurmdb_destroy_tres_rec(object_ptr);
+		*object = NULL;
+	}
+
+	return rc;
+}
+
 extern void slurmdb_pack_user_cond(void *in, uint16_t rpc_version, Buf buffer)
 {
 	char *tmp_info = NULL;
@@ -1856,10 +3149,10 @@ extern void slurmdb_pack_user_cond(void *in, uint16_t rpc_version, Buf buffer)
 	slurmdb_user_cond_t *object = (slurmdb_user_cond_t *)in;
 	uint32_t count = NO_VAL;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!object) {
 			pack16(0, buffer);
-			slurmdb_pack_association_cond(
+			slurmdb_pack_assoc_cond(
 				NULL, rpc_version, buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
@@ -1872,8 +3165,8 @@ extern void slurmdb_pack_user_cond(void *in, uint16_t rpc_version, Buf buffer)
 
 		pack16(object->admin_level, buffer);
 
-		slurmdb_pack_association_cond(object->assoc_cond,
-					      rpc_version, buffer);
+		slurmdb_pack_assoc_cond(object->assoc_cond,
+					rpc_version, buffer);
 
 		if (object->def_acct_list)
 			count = list_count(object->def_acct_list);
@@ -1920,10 +3213,10 @@ extern int slurmdb_unpack_user_cond(void **object, uint16_t rpc_version,
 
 	*object = object_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpack16(&object_ptr->admin_level, buffer);
 
-		if (slurmdb_unpack_association_cond(
+		if (slurmdb_unpack_assoc_cond(
 			    (void **)&object_ptr->assoc_cond,
 			    rpc_version, buffer) == SLURM_ERROR)
 			goto unpack_error;
@@ -1983,10 +3276,10 @@ extern void slurmdb_pack_account_cond(void *in, uint16_t rpc_version,
 	slurmdb_account_cond_t *object = (slurmdb_account_cond_t *)in;
 	uint32_t count = NO_VAL;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!object) {
-			slurmdb_pack_association_cond(NULL, rpc_version,
-						      buffer);
+			slurmdb_pack_assoc_cond(NULL, rpc_version,
+						buffer);
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
 			pack16(0, buffer);
@@ -1994,8 +3287,8 @@ extern void slurmdb_pack_account_cond(void *in, uint16_t rpc_version,
 			pack16(0, buffer);
 			return;
 		}
-		slurmdb_pack_association_cond(object->assoc_cond,
-					      rpc_version, buffer);
+		slurmdb_pack_assoc_cond(object->assoc_cond,
+					rpc_version, buffer);
 
 		count = NO_VAL;
 		if (object->description_list)
@@ -2043,8 +3336,8 @@ extern int slurmdb_unpack_account_cond(void **object, uint16_t rpc_version,
 
 	*object = object_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
-		if (slurmdb_unpack_association_cond(
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		if (slurmdb_unpack_assoc_cond(
 			    (void **)&object_ptr->assoc_cond,
 			    rpc_version, buffer) == SLURM_ERROR)
 			goto unpack_error;
@@ -2092,7 +3385,7 @@ extern void slurmdb_pack_cluster_cond(void *in, uint16_t rpc_version,
 	slurmdb_cluster_cond_t *object = (slurmdb_cluster_cond_t *)in;
 	uint32_t count = NO_VAL;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!object) {
 			pack16(0, buffer);
 			pack32(NO_VAL, buffer);
@@ -2173,7 +3466,7 @@ extern int slurmdb_unpack_cluster_cond(void **object, uint16_t rpc_version,
 	*object = object_ptr;
 
 	slurmdb_init_cluster_cond(object_ptr, 0);
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpack16(&object_ptr->classification, buffer);
 		safe_unpack32(&count, buffer);
 		if (count && count != NO_VAL) {
@@ -2227,16 +3520,16 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
-extern void slurmdb_pack_association_cond(void *in, uint16_t rpc_version,
-					  Buf buffer)
+extern void slurmdb_pack_assoc_cond(void *in, uint16_t rpc_version,
+				    Buf buffer)
 {
 	char *tmp_info = NULL;
 	uint32_t count = NO_VAL;
 
 	ListIterator itr = NULL;
-	slurmdb_association_cond_t *object = (slurmdb_association_cond_t *)in;
+	slurmdb_assoc_cond_t *object = (slurmdb_assoc_cond_t *)in;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		if (!object) {
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
@@ -2245,25 +3538,6 @@ extern void slurmdb_pack_association_cond(void *in, uint16_t rpc_version,
 
 			pack32(NO_VAL, buffer);
 
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-
-			pack32(NO_VAL, buffer);
-
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-			pack32(NO_VAL, buffer);
-
 			pack16(0, buffer);
 
 			pack32(NO_VAL, buffer);
@@ -2287,6 +3561,8 @@ extern void slurmdb_pack_association_cond(void *in, uint16_t rpc_version,
 
 		if (object->acct_list)
 			count = list_count(object->acct_list);
+		else
+			count = NO_VAL;
 
 		pack32(count, buffer);
 		if (count && count != NO_VAL) {
@@ -2296,10 +3572,11 @@ extern void slurmdb_pack_association_cond(void *in, uint16_t rpc_version,
 			}
 			list_iterator_destroy(itr);
 		}
-		count = NO_VAL;
 
 		if (object->cluster_list)
 			count = list_count(object->cluster_list);
+		else
+			count = NO_VAL;
 
 		pack32(count, buffer);
 		if (count && count != NO_VAL) {
@@ -2309,10 +3586,11 @@ extern void slurmdb_pack_association_cond(void *in, uint16_t rpc_version,
 			}
 			list_iterator_destroy(itr);
 		}
-		count = NO_VAL;
 
 		if (object->def_qos_id_list)
 			count = list_count(object->def_qos_id_list);
+		else
+			count = NO_VAL;
 
 		pack32(count, buffer);
 		if (count && count != NO_VAL) {
@@ -2322,229 +3600,209 @@ extern void slurmdb_pack_association_cond(void *in, uint16_t rpc_version,
 			}
 			list_iterator_destroy(itr);
 		}
-		count = NO_VAL;
-
-		if (object->fairshare_list)
-			count = list_count(object->fairshare_list);
-
-		pack32(count, buffer);
-		if (count && count != NO_VAL) {
-			itr = list_iterator_create(object->fairshare_list);
-			while ((tmp_info = list_next(itr))) {
-				packstr(tmp_info, buffer);
-			}
-			list_iterator_destroy(itr);
-		}
-		count = NO_VAL;
-
-		if (object->grp_cpu_mins_list)
-			count = list_count(object->grp_cpu_mins_list);
-
-		pack32(count, buffer);
-		if (count && count != NO_VAL) {
-			itr = list_iterator_create(object->grp_cpu_mins_list);
-			while ((tmp_info = list_next(itr))) {
-				packstr(tmp_info, buffer);
-			}
-			list_iterator_destroy(itr);
-		}
-		count = NO_VAL;
-
-		if (object->grp_cpu_run_mins_list)
-			count = list_count(object->grp_cpu_run_mins_list);
-
-		pack32(count, buffer);
-		if (count && count != NO_VAL) {
-			itr = list_iterator_create(
-				object->grp_cpu_run_mins_list);
-			while ((tmp_info = list_next(itr))) {
-				packstr(tmp_info, buffer);
-			}
-			list_iterator_destroy(itr);
-		}
-		count = NO_VAL;
-
-		if (object->grp_cpus_list)
-			count = list_count(object->grp_cpus_list);
-
-		pack32(count, buffer);
-		if (count && count != NO_VAL) {
-			itr = list_iterator_create(object->grp_cpus_list);
-			while ((tmp_info = list_next(itr))) {
-				packstr(tmp_info, buffer);
-			}
-			list_iterator_destroy(itr);
-		}
-		count = NO_VAL;
-
-		if (object->grp_jobs_list)
-			count = list_count(object->grp_jobs_list);
-
-		pack32(count, buffer);
-		if (count && count != NO_VAL) {
-			itr = list_iterator_create(object->grp_jobs_list);
-			while ((tmp_info = list_next(itr))) {
-				packstr(tmp_info, buffer);
-			}
-			list_iterator_destroy(itr);
-		}
-		count = NO_VAL;
 
-		if (object->grp_mem_list)
-			count = list_count(object->grp_mem_list);
-
-		pack32(count, buffer);
-		if (count && count != NO_VAL) {
-			itr = list_iterator_create(object->grp_mem_list);
-			while ((tmp_info = list_next(itr))) {
-				packstr(tmp_info, buffer);
-			}
-			list_iterator_destroy(itr);
-		}
-		count = NO_VAL;
 
-		if (object->grp_nodes_list)
-			count = list_count(object->grp_nodes_list);
+		if (object->id_list)
+			count = list_count(object->id_list);
+		else
+			count = NO_VAL;
 
 		pack32(count, buffer);
 		if (count && count != NO_VAL) {
-			itr = list_iterator_create(object->grp_nodes_list);
+			itr = list_iterator_create(object->id_list);
 			while ((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
-			list_iterator_destroy(itr);
 		}
-		count = NO_VAL;
 
-		if (object->grp_submit_jobs_list)
-			count = list_count(object->grp_submit_jobs_list);
-
-		pack32(count, buffer);
-		if (count && count != NO_VAL) {
-			itr = list_iterator_create(
-				object->grp_submit_jobs_list);
-			while ((tmp_info = list_next(itr))) {
-				packstr(tmp_info, buffer);
-			}
-			list_iterator_destroy(itr);
-		}
-		count = NO_VAL;
+		pack16(object->only_defs, buffer);
 
-		if (object->grp_wall_list)
-			count = list_count(object->grp_wall_list);
+		if (object->partition_list)
+			count = list_count(object->partition_list);
+		else
+			count = NO_VAL;
 
 		pack32(count, buffer);
 		if (count && count != NO_VAL) {
-			itr = list_iterator_create(object->grp_wall_list);
+			itr = list_iterator_create(object->partition_list);
 			while ((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
 			list_iterator_destroy(itr);
 		}
-		count = NO_VAL;
-
-		if (object->id_list)
-			count = list_count(object->id_list);
-
-		pack32(count, buffer);
-		if (count && count != NO_VAL) {
-			itr = list_iterator_create(object->id_list);
-			while ((tmp_info = list_next(itr))) {
-				packstr(tmp_info, buffer);
-			}
-		}
-		count = NO_VAL;
 
-		if (object->max_cpu_mins_pj_list)
-			count = list_count(object->max_cpu_mins_pj_list);
+		if (object->parent_acct_list)
+			count = list_count(object->parent_acct_list);
+		else
+			count = NO_VAL;
 
 		pack32(count, buffer);
 		if (count && count != NO_VAL) {
-			itr = list_iterator_create(
-				object->max_cpu_mins_pj_list);
+			itr = list_iterator_create(object->parent_acct_list);
 			while ((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
 			list_iterator_destroy(itr);
 		}
-		count = NO_VAL;
 
-		if (object->max_cpu_run_mins_list)
-			count = list_count(object->max_cpu_run_mins_list);
+		if (object->qos_list)
+			count = list_count(object->qos_list);
+		else
+			count = NO_VAL;
 
 		pack32(count, buffer);
+
 		if (count && count != NO_VAL) {
-			itr = list_iterator_create(
-				object->max_cpu_run_mins_list);
+			itr = list_iterator_create(object->qos_list);
 			while ((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
 			list_iterator_destroy(itr);
 		}
-		count = NO_VAL;
 
-		if (object->max_cpus_pj_list)
-			count = list_count(object->max_cpus_pj_list);
+		pack_time(object->usage_end, buffer);
+		pack_time(object->usage_start, buffer);
+
+		if (object->user_list)
+			count = list_count(object->user_list);
+		else
+			count = NO_VAL;
 
 		pack32(count, buffer);
 		if (count && count != NO_VAL) {
-			itr = list_iterator_create(object->max_cpus_pj_list);
+			itr = list_iterator_create(object->user_list);
 			while ((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
 			list_iterator_destroy(itr);
 		}
-		count = NO_VAL;
-		if (object->max_jobs_list)
-			count = list_count(object->max_jobs_list);
+
+		pack16(object->with_usage, buffer);
+		pack16(object->with_deleted, buffer);
+		pack16(object->with_raw_qos, buffer);
+		pack16(object->with_sub_accts, buffer);
+		pack16(object->without_parent_info, buffer);
+		pack16(object->without_parent_limits, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		if (!object) {
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack16(0, buffer);
+
+			pack32(NO_VAL, buffer);
+			pack32(NO_VAL, buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack_time(0, buffer);
+			pack_time(0, buffer);
+
+			pack32(NO_VAL, buffer);
+
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			pack16(0, buffer);
+			return;
+		}
+
+		if (object->acct_list)
+			count = list_count(object->acct_list);
 
 		pack32(count, buffer);
 		if (count && count != NO_VAL) {
-			itr = list_iterator_create(object->max_jobs_list);
+			itr = list_iterator_create(object->acct_list);
 			while ((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
-		if (object->max_nodes_pj_list)
-			count = list_count(object->max_nodes_pj_list);
+
+		if (object->cluster_list)
+			count = list_count(object->cluster_list);
 
 		pack32(count, buffer);
 		if (count && count != NO_VAL) {
-			itr = list_iterator_create(object->max_nodes_pj_list);
+			itr = list_iterator_create(object->cluster_list);
 			while ((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
 			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
-		if (object->max_submit_jobs_list)
-			count = list_count(object->max_submit_jobs_list);
+
+		if (object->def_qos_id_list)
+			count = list_count(object->def_qos_id_list);
 
 		pack32(count, buffer);
 		if (count && count != NO_VAL) {
-			itr = list_iterator_create(
-				object->max_submit_jobs_list);
+			itr = list_iterator_create(object->def_qos_id_list);
 			while ((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
 			list_iterator_destroy(itr);
 		}
+
+		pack32(NO_VAL, buffer);
+
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+
 		count = NO_VAL;
-		if (object->max_wall_pj_list)
-			count = list_count(object->max_wall_pj_list);
+
+		if (object->id_list)
+			count = list_count(object->id_list);
 
 		pack32(count, buffer);
 		if (count && count != NO_VAL) {
-			itr = list_iterator_create(object->max_wall_pj_list);
+			itr = list_iterator_create(object->id_list);
 			while ((tmp_info = list_next(itr))) {
 				packstr(tmp_info, buffer);
 			}
-			list_iterator_destroy(itr);
 		}
 		count = NO_VAL;
 
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+		pack32(NO_VAL, buffer);
+
 		pack16(object->only_defs, buffer);
 
 		if (object->partition_list)
@@ -2611,18 +3869,18 @@ extern void slurmdb_pack_association_cond(void *in, uint16_t rpc_version,
 	}
 }
 
-extern int slurmdb_unpack_association_cond(void **object,
-					   uint16_t rpc_version, Buf buffer)
+extern int slurmdb_unpack_assoc_cond(void **object,
+				     uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
 	int i;
 	uint32_t count;
-	slurmdb_association_cond_t *object_ptr =
-		xmalloc(sizeof(slurmdb_association_cond_t));
+	slurmdb_assoc_cond_t *object_ptr =
+		xmalloc(sizeof(slurmdb_assoc_cond_t));
 	char *tmp_info = NULL;
 	*object = object_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
 			object_ptr->acct_list =
@@ -2659,104 +3917,114 @@ extern int slurmdb_unpack_association_cond(void **object,
 
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
-			object_ptr->fairshare_list =
-				list_create(slurm_destroy_char);
+			object_ptr->id_list = list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->fairshare_list,
-					    tmp_info);
+				list_append(object_ptr->id_list, tmp_info);
 			}
 		}
 
+		safe_unpack16(&object_ptr->only_defs, buffer);
+
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
-			object_ptr->grp_cpu_mins_list =
+			object_ptr->partition_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->grp_cpu_mins_list,
+				list_append(object_ptr->partition_list,
 					    tmp_info);
 			}
 		}
+
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
-			object_ptr->grp_cpu_run_mins_list =
+			object_ptr->parent_acct_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->grp_cpu_run_mins_list,
+				list_append(object_ptr->parent_acct_list,
 					    tmp_info);
 			}
 		}
+
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
-			object_ptr->grp_cpus_list =
-				list_create(slurm_destroy_char);
+			object_ptr->qos_list = list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->grp_cpus_list,
-					    tmp_info);
+				list_append(object_ptr->qos_list, tmp_info);
 			}
 		}
+
+		safe_unpack_time(&object_ptr->usage_end, buffer);
+		safe_unpack_time(&object_ptr->usage_start, buffer);
+
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
-			object_ptr->grp_jobs_list =
+			object_ptr->user_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->grp_jobs_list,
-					    tmp_info);
+				list_append(object_ptr->user_list, tmp_info);
 			}
 		}
+
+		safe_unpack16(&object_ptr->with_usage, buffer);
+		safe_unpack16(&object_ptr->with_deleted, buffer);
+		safe_unpack16(&object_ptr->with_raw_qos, buffer);
+		safe_unpack16(&object_ptr->with_sub_accts, buffer);
+		safe_unpack16(&object_ptr->without_parent_info, buffer);
+		safe_unpack16(&object_ptr->without_parent_limits, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
-			object_ptr->grp_mem_list =
+			object_ptr->acct_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->grp_mem_list,
-					    tmp_info);
+				list_append(object_ptr->acct_list, tmp_info);
 			}
 		}
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
-			object_ptr->grp_nodes_list =
+			object_ptr->cluster_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->grp_nodes_list,
+				list_append(object_ptr->cluster_list,
 					    tmp_info);
 			}
 		}
+
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
-			object_ptr->grp_submit_jobs_list =
+			object_ptr->def_qos_id_list =
 				list_create(slurm_destroy_char);
 			for(i=0; i<count; i++) {
 				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
 						       buffer);
-				list_append(object_ptr->grp_submit_jobs_list,
+				list_append(object_ptr->def_qos_id_list,
 					    tmp_info);
 			}
 		}
+
+		safe_unpack32(&count, buffer);
+		safe_unpack32(&count, buffer);
+		safe_unpack32(&count, buffer);
+		safe_unpack32(&count, buffer);
+		safe_unpack32(&count, buffer);
+		safe_unpack32(&count, buffer);
+		safe_unpack32(&count, buffer);
+		safe_unpack32(&count, buffer);
 		safe_unpack32(&count, buffer);
-		if (count != NO_VAL) {
-			object_ptr->grp_wall_list =
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->grp_wall_list,
-					    tmp_info);
-			}
-		}
 
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
@@ -2769,82 +4037,12 @@ extern int slurmdb_unpack_association_cond(void **object,
 		}
 
 		safe_unpack32(&count, buffer);
-		if (count != NO_VAL) {
-			object_ptr->max_cpu_mins_pj_list =
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->max_cpu_mins_pj_list,
-					    tmp_info);
-			}
-		}
 		safe_unpack32(&count, buffer);
-		if (count != NO_VAL) {
-			object_ptr->max_cpu_run_mins_list =
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->max_cpu_run_mins_list,
-					    tmp_info);
-			}
-		}
 		safe_unpack32(&count, buffer);
-		if (count != NO_VAL) {
-			object_ptr->max_cpus_pj_list =
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->max_cpus_pj_list,
-					    tmp_info);
-			}
-		}
 		safe_unpack32(&count, buffer);
-		if (count != NO_VAL) {
-			object_ptr->max_jobs_list =
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->max_jobs_list,
-					    tmp_info);
-			}
-		}
 		safe_unpack32(&count, buffer);
-		if (count != NO_VAL) {
-			object_ptr->max_nodes_pj_list =
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->max_nodes_pj_list,
-					    tmp_info);
-			}
-		}
 		safe_unpack32(&count, buffer);
-		if (count != NO_VAL) {
-			object_ptr->max_submit_jobs_list =
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->max_submit_jobs_list,
-					    tmp_info);
-			}
-		}
 		safe_unpack32(&count, buffer);
-		if (count != NO_VAL) {
-			object_ptr->max_wall_pj_list =
-				list_create(slurm_destroy_char);
-			for(i=0; i<count; i++) {
-				safe_unpackstr_xmalloc(&tmp_info, &uint32_tmp,
-						       buffer);
-				list_append(object_ptr->max_wall_pj_list,
-					    tmp_info);
-			}
-		}
 
 		safe_unpack16(&object_ptr->only_defs, buffer);
 
@@ -2907,7 +4105,7 @@ extern int slurmdb_unpack_association_cond(void **object,
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdb_destroy_association_cond(object_ptr);
+	slurmdb_destroy_assoc_cond(object_ptr);
 	*object = NULL;
 	return SLURM_ERROR;
 }
@@ -3091,7 +4289,7 @@ extern void slurmdb_pack_job_cond(void *in, uint16_t rpc_version, Buf buffer)
 	ListIterator itr = NULL;
 	slurmdb_job_cond_t *object = (slurmdb_job_cond_t *)in;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!object) {
 			pack32(NO_VAL, buffer);	/* count(acct_list) */
 			pack32(NO_VAL, buffer);	/* count(associd_list) */
@@ -3323,7 +4521,7 @@ extern int slurmdb_unpack_job_cond(void **object, uint16_t rpc_version,
 
 	*object = object_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
 			object_ptr->acct_list = list_create(slurm_destroy_char);
@@ -3504,7 +4702,7 @@ extern void slurmdb_pack_job_modify_cond(void *in, uint16_t rpc_version,
 {
 	slurmdb_job_modify_cond_t *cond = (slurmdb_job_modify_cond_t *)in;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!cond) {
 			packnull(buffer);
 			pack32(NO_VAL, buffer);
@@ -3524,29 +4722,103 @@ extern int slurmdb_unpack_job_modify_cond(void **object, uint16_t rpc_version,
 
 	*object = object_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
-		safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp,
-				       buffer);
-		safe_unpack32(&object_ptr->job_id, buffer);
-	}
-	return SLURM_SUCCESS;
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&object_ptr->cluster, &uint32_tmp,
+				       buffer);
+		safe_unpack32(&object_ptr->job_id, buffer);
+	}
+	return SLURM_SUCCESS;
+
+unpack_error:
+	slurmdb_destroy_job_modify_cond(object_ptr);
+	*object = NULL;
+	return SLURM_ERROR;
+}
+
+extern void slurmdb_pack_job_rec(void *object, uint16_t rpc_version, Buf buffer)
+{
+	slurmdb_job_rec_t *job = (slurmdb_job_rec_t *)object;
+	ListIterator itr = NULL;
+	slurmdb_step_rec_t *step = NULL;
+	uint32_t count = 0;
+
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		packstr(job->account, buffer);
+		packstr(job->alloc_gres, buffer);
+		pack32(job->alloc_nodes, buffer);
+		pack32(job->array_job_id, buffer);
+		pack32(job->array_max_tasks, buffer);
+		pack32(job->array_task_id, buffer);
+		packstr(job->array_task_str, buffer);
+
+		pack32(job->associd, buffer);
+		packstr(job->blockid, buffer);
+		packstr(job->cluster, buffer);
+		pack32((uint32_t)job->derived_ec, buffer);
+		packstr(job->derived_es, buffer);
+		pack32(job->elapsed, buffer);
+		pack_time(job->eligible, buffer);
+		pack_time(job->end, buffer);
+		pack32((uint32_t)job->exitcode, buffer);
+		/* the first_step_ptr
+		   is set up on the client side so does
+		   not need to be packed */
+		pack32(job->gid, buffer);
+		pack32(job->jobid, buffer);
+		packstr(job->jobname, buffer);
+		pack32(job->lft, buffer);
+		packstr(job->nodes, buffer);
+		packstr(job->partition, buffer);
+		pack32(job->priority, buffer);
+		pack32(job->qosid, buffer);
+		pack32(job->req_cpus, buffer);
+		packstr(job->req_gres, buffer);
+		pack32(job->req_mem, buffer);
+		pack32(job->requid, buffer);
+		packstr(job->resv_name, buffer);
+		pack32(job->resvid, buffer);
+		pack32(job->show_full, buffer);
+		pack_time(job->start, buffer);
+		pack32(job->state, buffer);
+		_pack_slurmdb_stats(&job->stats, rpc_version, buffer);
+
+		if (job->steps)
+			count = list_count(job->steps);
+		else
+			count = 0;
 
-unpack_error:
-	slurmdb_destroy_job_modify_cond(object_ptr);
-	*object = NULL;
-	return SLURM_ERROR;
-}
+		pack32(count, buffer);
+		if (count) {
+			itr = list_iterator_create(job->steps);
+			while ((step = list_next(itr))) {
+				slurmdb_pack_step_rec(step, rpc_version,
+						      buffer);
+			}
+			list_iterator_destroy(itr);
+		}
+		pack_time(job->submit, buffer);
+		pack32(job->suspended, buffer);
+		pack32(job->sys_cpu_sec, buffer);
+		pack32(job->sys_cpu_usec, buffer);
+		pack32(job->timelimit, buffer);
+		pack32(job->tot_cpu_sec, buffer);
+		pack32(job->tot_cpu_usec, buffer);
+		pack16(job->track_steps, buffer);
 
-extern void slurmdb_pack_job_rec(void *object, uint16_t rpc_version, Buf buffer)
-{
-	slurmdb_job_rec_t *job = (slurmdb_job_rec_t *)object;
-	ListIterator itr = NULL;
-	slurmdb_step_rec_t *step = NULL;
-	uint32_t count = 0;
+		packstr(job->tres_alloc_str, buffer);
+		packstr(job->tres_req_str, buffer);
 
-	if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
+		pack32(job->uid, buffer);
+		packstr(job->user, buffer);
+		pack32(job->user_cpu_sec, buffer);
+		pack32(job->user_cpu_usec, buffer);
+		packstr(job->wckey, buffer); /* added for rpc_version 4 */
+		pack32(job->wckeyid, buffer); /* added for rpc_version 4 */
+	} else if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
 		packstr(job->account, buffer);
-		pack32(job->alloc_cpus, buffer);
+		count = (uint32_t)slurmdb_find_tres_count_in_string(
+			job->tres_alloc_str, TRES_CPU);
+		pack32(count, buffer);
 		packstr(job->alloc_gres, buffer);
 		pack32(job->alloc_nodes, buffer);
 		pack32(job->array_job_id, buffer);
@@ -3585,6 +4857,8 @@ extern void slurmdb_pack_job_rec(void *object, uint16_t rpc_version, Buf buffer)
 		_pack_slurmdb_stats(&job->stats, rpc_version, buffer);
 		if (job->steps)
 			count = list_count(job->steps);
+		else
+			count = 0;
 		pack32(count, buffer);
 		if (count) {
 			itr = list_iterator_create(job->steps);
@@ -3608,9 +4882,11 @@ extern void slurmdb_pack_job_rec(void *object, uint16_t rpc_version, Buf buffer)
 		pack32(job->user_cpu_usec, buffer);
 		packstr(job->wckey, buffer); /* added for rpc_version 4 */
 		pack32(job->wckeyid, buffer); /* added for rpc_version 4 */
-	} else if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		packstr(job->account, buffer);
-		pack32(job->alloc_cpus, buffer);
+		count = (uint32_t)slurmdb_find_tres_count_in_string(
+			job->tres_alloc_str, TRES_CPU);
+		pack32(count, buffer);
 		pack32(job->alloc_nodes, buffer);
 		pack32(job->associd, buffer);
 		packstr(job->blockid, buffer);
@@ -3642,6 +4918,8 @@ extern void slurmdb_pack_job_rec(void *object, uint16_t rpc_version, Buf buffer)
 		_pack_slurmdb_stats(&job->stats, rpc_version, buffer);
 		if (job->steps)
 			count = list_count(job->steps);
+		else
+			count = 0;
 		pack32(count, buffer);
 		if (count) {
 			itr = list_iterator_create(job->steps);
@@ -3682,9 +4960,88 @@ extern int slurmdb_unpack_job_rec(void **job, uint16_t rpc_version, Buf buffer)
 	job_ptr->array_job_id = 0;
 	job_ptr->array_task_id = NO_VAL;
 
-	if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&job_ptr->account, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->alloc_gres, &uint32_tmp,
+				       buffer);
+		safe_unpack32(&job_ptr->alloc_nodes, buffer);
+		safe_unpack32(&job_ptr->array_job_id, buffer);
+		safe_unpack32(&job_ptr->array_max_tasks, buffer);
+		safe_unpack32(&job_ptr->array_task_id, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->array_task_str,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&job_ptr->associd, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->blockid, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->cluster, &uint32_tmp, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		job_ptr->derived_ec = (int32_t)uint32_tmp;
+		safe_unpackstr_xmalloc(&job_ptr->derived_es, &uint32_tmp,
+				       buffer);
+		safe_unpack32(&job_ptr->elapsed, buffer);
+		safe_unpack_time(&job_ptr->eligible, buffer);
+		safe_unpack_time(&job_ptr->end, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		job_ptr->exitcode = (int32_t)uint32_tmp;
+		safe_unpack32(&job_ptr->gid, buffer);
+		safe_unpack32(&job_ptr->jobid, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->jobname, &uint32_tmp, buffer);
+		safe_unpack32(&job_ptr->lft, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->partition, &uint32_tmp,
+				       buffer);
+		safe_unpack32(&job_ptr->priority, buffer);
+		safe_unpack32(&job_ptr->qosid, buffer);
+		safe_unpack32(&job_ptr->req_cpus, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->req_gres, &uint32_tmp, buffer);
+		safe_unpack32(&job_ptr->req_mem, buffer);
+		safe_unpack32(&job_ptr->requid, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->resv_name, &uint32_tmp,
+				       buffer);
+		safe_unpack32(&job_ptr->resvid, buffer);
+		safe_unpack32(&job_ptr->show_full, buffer);
+		safe_unpack_time(&job_ptr->start, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		job_ptr->state = uint32_tmp;
+		if (_unpack_slurmdb_stats(&job_ptr->stats, rpc_version, buffer)
+		    != SLURM_SUCCESS)
+			goto unpack_error;
+
+		safe_unpack32(&count, buffer);
+		job_ptr->steps = list_create(slurmdb_destroy_step_rec);
+		for (i=0; i<count; i++) {
+			if (slurmdb_unpack_step_rec(&step, rpc_version, buffer)
+			    == SLURM_ERROR)
+				goto unpack_error;
+
+			step->job_ptr = job_ptr;
+			if (!job_ptr->first_step_ptr)
+				job_ptr->first_step_ptr = step;
+			list_append(job_ptr->steps, step);
+		}
+
+		safe_unpack_time(&job_ptr->submit, buffer);
+		safe_unpack32(&job_ptr->suspended, buffer);
+		safe_unpack32(&job_ptr->sys_cpu_sec, buffer);
+		safe_unpack32(&job_ptr->sys_cpu_usec, buffer);
+		safe_unpack32(&job_ptr->timelimit, buffer);
+		safe_unpack32(&job_ptr->tot_cpu_sec, buffer);
+		safe_unpack32(&job_ptr->tot_cpu_usec, buffer);
+		safe_unpack16(&job_ptr->track_steps, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->tres_alloc_str,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->tres_req_str,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&job_ptr->uid, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->user, &uint32_tmp, buffer);
+		safe_unpack32(&job_ptr->user_cpu_sec, buffer);
+		safe_unpack32(&job_ptr->user_cpu_usec, buffer);
+		safe_unpackstr_xmalloc(&job_ptr->wckey, &uint32_tmp, buffer);
+		safe_unpack32(&job_ptr->wckeyid, buffer);
+	} else if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&job_ptr->account, &uint32_tmp, buffer);
-		safe_unpack32(&job_ptr->alloc_cpus, buffer);
+		safe_unpack32(&count, buffer);
+		job_ptr->tres_alloc_str = xstrdup_printf(
+			"%d=%u", TRES_CPU, count);
 		safe_unpackstr_xmalloc(&job_ptr->alloc_gres, &uint32_tmp,
 				       buffer);
 		safe_unpack32(&job_ptr->alloc_nodes, buffer);
@@ -3756,9 +5113,11 @@ extern int slurmdb_unpack_job_rec(void **job, uint16_t rpc_version, Buf buffer)
 		safe_unpack32(&job_ptr->user_cpu_usec, buffer);
 		safe_unpackstr_xmalloc(&job_ptr->wckey, &uint32_tmp, buffer);
 		safe_unpack32(&job_ptr->wckeyid, buffer);
-	} else if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&job_ptr->account, &uint32_tmp, buffer);
-		safe_unpack32(&job_ptr->alloc_cpus, buffer);
+		safe_unpack32(&count, buffer);
+		job_ptr->tres_alloc_str = xstrdup_printf(
+			"%d=%u", TRES_CPU, count);
 		safe_unpack32(&job_ptr->alloc_nodes, buffer);
 		safe_unpack32(&job_ptr->associd, buffer);
 		safe_unpackstr_xmalloc(&job_ptr->blockid, &uint32_tmp, buffer);
@@ -3837,7 +5196,7 @@ extern void slurmdb_pack_qos_cond(void *in, uint16_t rpc_version, Buf buffer)
 	ListIterator itr = NULL;
 	slurmdb_qos_cond_t *object = (slurmdb_qos_cond_t *)in;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!object) {
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
@@ -3901,7 +5260,7 @@ extern int slurmdb_unpack_qos_cond(void **object, uint16_t rpc_version,
 
 	*object = object_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
 			object_ptr->description_list =
@@ -4072,7 +5431,7 @@ extern void slurmdb_pack_selected_step(slurmdb_selected_step_t *step,
 		pack32(step->array_task_id, buffer);
 		pack32(step->jobid, buffer);
 		pack32(step->stepid, buffer);
-	} else if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		pack32(step->jobid, buffer);
 		pack32(step->stepid, buffer);
 	}
@@ -4092,7 +5451,7 @@ extern int slurmdb_unpack_selected_step(slurmdb_selected_step_t **step,
 		safe_unpack32(&step_ptr->array_task_id, buffer);
 		safe_unpack32(&step_ptr->jobid, buffer);
 		safe_unpack32(&step_ptr->stepid, buffer);
-	} else if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpack32(&step_ptr->jobid, buffer);
 		safe_unpack32(&step_ptr->stepid, buffer);
 	}
@@ -4108,17 +5467,45 @@ unpack_error:
 extern void slurmdb_pack_step_rec(slurmdb_step_rec_t *step,
 				  uint16_t rpc_version, Buf buffer)
 {
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	uint32_t count = 0;
+
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		pack32(step->elapsed, buffer);
+		pack_time(step->end, buffer);
+		pack32((uint32_t)step->exitcode, buffer);
+		pack32(step->nnodes, buffer);
+		packstr(step->nodes, buffer);
+		pack32(step->ntasks, buffer);
+		pack32(step->req_cpufreq_min, buffer);
+		pack32(step->req_cpufreq_max, buffer);
+		pack32(step->req_cpufreq_gov, buffer);
+		pack32(step->requid, buffer);
+		_pack_slurmdb_stats(&step->stats, rpc_version, buffer);
+		pack_time(step->start, buffer);
+		pack16(step->state, buffer);
+		pack32(step->stepid, buffer);	/* job's step number */
+		packstr(step->stepname, buffer);
+		pack32(step->suspended, buffer);
+		pack32(step->sys_cpu_sec, buffer);
+		pack32(step->sys_cpu_usec, buffer);
+		pack32(step->task_dist, buffer);
+		pack32(step->tot_cpu_sec, buffer);
+		pack32(step->tot_cpu_usec, buffer);
+		packstr(step->tres_alloc_str, buffer);
+		pack32(step->user_cpu_sec, buffer);
+		pack32(step->user_cpu_usec, buffer);
+	} else	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		uint16_t old_task_dist;
 		pack32(step->elapsed, buffer);
 		pack_time(step->end, buffer);
 		pack32((uint32_t)step->exitcode, buffer);
-		/* the job_ptr is set up on the client side so does
-		 * not need to be packed */
-		pack32(step->ncpus, buffer);
+		count = (uint32_t)slurmdb_find_tres_count_in_string(
+			step->tres_alloc_str, TRES_CPU);
+		pack32(count, buffer);
 		pack32(step->nnodes, buffer);
 		packstr(step->nodes, buffer);
 		pack32(step->ntasks, buffer);
-		pack32(step->req_cpufreq, buffer);
+		pack32(step->req_cpufreq_min, buffer);
 		pack32(step->requid, buffer);
 		_pack_slurmdb_stats(&step->stats, rpc_version, buffer);
 		pack_time(step->start, buffer);
@@ -4128,7 +5515,8 @@ extern void slurmdb_pack_step_rec(slurmdb_step_rec_t *step,
 		pack32(step->suspended, buffer);
 		pack32(step->sys_cpu_sec, buffer);
 		pack32(step->sys_cpu_usec, buffer);
-		pack16(step->task_dist, buffer);
+		old_task_dist = task_dist_new2old(step->task_dist);
+		pack16(old_task_dist, buffer);
 		pack32(step->tot_cpu_sec, buffer);
 		pack32(step->tot_cpu_usec, buffer);
 		pack32(step->user_cpu_sec, buffer);
@@ -4145,16 +5533,50 @@ extern int slurmdb_unpack_step_rec(slurmdb_step_rec_t **step,
 
 	*step = step_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpack32(&step_ptr->elapsed, buffer);
+		safe_unpack_time(&step_ptr->end, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		step_ptr->exitcode = (int32_t)uint32_tmp;
+		safe_unpack32(&step_ptr->nnodes, buffer);
+		safe_unpackstr_xmalloc(&step_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpack32(&step_ptr->ntasks, buffer);
+		safe_unpack32(&step_ptr->req_cpufreq_min, buffer);
+		safe_unpack32(&step_ptr->req_cpufreq_max, buffer);
+		safe_unpack32(&step_ptr->req_cpufreq_gov, buffer);
+		safe_unpack32(&step_ptr->requid, buffer);
+		if (_unpack_slurmdb_stats(&step_ptr->stats, rpc_version, buffer)
+		    != SLURM_SUCCESS)
+			goto unpack_error;
+		safe_unpack_time(&step_ptr->start, buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		step_ptr->state = uint16_tmp;
+		safe_unpack32(&step_ptr->stepid, buffer);
+		safe_unpackstr_xmalloc(&step_ptr->stepname,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&step_ptr->suspended, buffer);
+		safe_unpack32(&step_ptr->sys_cpu_sec, buffer);
+		safe_unpack32(&step_ptr->sys_cpu_usec, buffer);
+		safe_unpack32(&step_ptr->task_dist, buffer);
+		safe_unpack32(&step_ptr->tot_cpu_sec, buffer);
+		safe_unpack32(&step_ptr->tot_cpu_usec, buffer);
+		safe_unpackstr_xmalloc(&step_ptr->tres_alloc_str,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&step_ptr->user_cpu_sec, buffer);
+		safe_unpack32(&step_ptr->user_cpu_usec, buffer);
+	} else 	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		uint16_t old_task_dist = 0;
 		safe_unpack32(&step_ptr->elapsed, buffer);
 		safe_unpack_time(&step_ptr->end, buffer);
 		safe_unpack32(&uint32_tmp, buffer);
 		step_ptr->exitcode = (int32_t)uint32_tmp;
-		safe_unpack32(&step_ptr->ncpus, buffer);
+		safe_unpack32(&uint32_tmp, buffer);
+		step_ptr->tres_alloc_str = xstrdup_printf(
+			"%d=%u", TRES_CPU, uint32_tmp);
 		safe_unpack32(&step_ptr->nnodes, buffer);
 		safe_unpackstr_xmalloc(&step_ptr->nodes, &uint32_tmp, buffer);
 		safe_unpack32(&step_ptr->ntasks, buffer);
-		safe_unpack32(&step_ptr->req_cpufreq, buffer);
+		safe_unpack32(&step_ptr->req_cpufreq_min, buffer);
 		safe_unpack32(&step_ptr->requid, buffer);
 		if (_unpack_slurmdb_stats(&step_ptr->stats, rpc_version, buffer)
 		    != SLURM_SUCCESS)
@@ -4168,7 +5590,8 @@ extern int slurmdb_unpack_step_rec(slurmdb_step_rec_t **step,
 		safe_unpack32(&step_ptr->suspended, buffer);
 		safe_unpack32(&step_ptr->sys_cpu_sec, buffer);
 		safe_unpack32(&step_ptr->sys_cpu_usec, buffer);
-		safe_unpack16(&step_ptr->task_dist, buffer);
+		safe_unpack16(&old_task_dist, buffer);
+		step_ptr->task_dist = task_dist_old2new(old_task_dist);
 		safe_unpack32(&step_ptr->tot_cpu_sec, buffer);
 		safe_unpack32(&step_ptr->tot_cpu_usec, buffer);
 		safe_unpack32(&step_ptr->user_cpu_sec, buffer);
@@ -4183,12 +5606,21 @@ unpack_error:
 	return SLURM_ERROR;
 }
 
+static uint32_t _list_count_null(List l)
+{
+	uint32_t count = NO_VAL;
+
+	if (l)
+		count = list_count(l);
+	return count;
+}
+
 extern void slurmdb_pack_res_cond(void *in, uint16_t rpc_version, Buf buffer)
 {
 	char *tmp_info = NULL;
 	ListIterator itr = NULL;
 	slurmdb_res_cond_t *object = (slurmdb_res_cond_t *)in;
-	uint32_t count = NO_VAL;
+	uint32_t count;
 
 	if (!object) {
 		pack32(NO_VAL, buffer);
@@ -4205,119 +5637,88 @@ extern void slurmdb_pack_res_cond(void *in, uint16_t rpc_version, Buf buffer)
 
 		return;
 	}
-	if (object->cluster_list)
-		count = list_count(object->cluster_list);
 
+	count = _list_count_null(object->cluster_list);
 	pack32(count, buffer);
-
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		itr = list_iterator_create(object->cluster_list);
 		while ((tmp_info = list_next(itr))) {
 			packstr(tmp_info, buffer);
 		}
 		list_iterator_destroy(itr);
 	}
-	count = NO_VAL;
-
-	if (object->description_list)
-		count = list_count(object->description_list);
 
+	count = _list_count_null(object->description_list);
 	pack32(count, buffer);
-
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		itr = list_iterator_create(object->description_list);
 		while ((tmp_info = list_next(itr))) {
 			packstr(tmp_info, buffer);
 		}
 		list_iterator_destroy(itr);
 	}
-	count = NO_VAL;
 
 	pack32(object->flags, buffer);
 
-	if (object->id_list)
-		count = list_count(object->id_list);
-
+	count = _list_count_null(object->id_list);
 	pack32(count, buffer);
-
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		itr = list_iterator_create(object->id_list);
 		while ((tmp_info = list_next(itr))) {
 			packstr(tmp_info, buffer);
 		}
 		list_iterator_destroy(itr);
 	}
-	count = NO_VAL;
-
-	if (object->manager_list)
-		count = list_count(object->manager_list);
 
+	count = _list_count_null(object->manager_list);
 	pack32(count, buffer);
-
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		itr = list_iterator_create(object->manager_list);
 		while ((tmp_info = list_next(itr))) {
 			packstr(tmp_info, buffer);
 		}
 		list_iterator_destroy(itr);
 	}
-	count = NO_VAL;
-
-	if (object->name_list)
-		count = list_count(object->name_list);
 
+	count = _list_count_null(object->name_list);
 	pack32(count, buffer);
-
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		itr = list_iterator_create(object->name_list);
 		while ((tmp_info = list_next(itr))) {
 			packstr(tmp_info, buffer);
 		}
 		list_iterator_destroy(itr);
 	}
-	count = NO_VAL;
-
-	if (object->percent_list)
-		count = list_count(object->percent_list);
 
+	count = _list_count_null(object->percent_list);
 	pack32(count, buffer);
-
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		itr = list_iterator_create(object->percent_list);
 		while ((tmp_info = list_next(itr))) {
 			packstr(tmp_info, buffer);
 		}
 		list_iterator_destroy(itr);
 	}
-	count = NO_VAL;
-
-	if (object->server_list)
-		count = list_count(object->server_list);
 
+	count = _list_count_null(object->server_list);
 	pack32(count, buffer);
-
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		itr = list_iterator_create(object->server_list);
 		while ((tmp_info = list_next(itr))) {
 			packstr(tmp_info, buffer);
 		}
 		list_iterator_destroy(itr);
 	}
-	count = NO_VAL;
-
-	if (object->type_list)
-		count = list_count(object->type_list);
 
+	count = _list_count_null(object->type_list);
 	pack32(count, buffer);
-
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		itr = list_iterator_create(object->type_list);
 		while ((tmp_info = list_next(itr))) {
 			packstr(tmp_info, buffer);
 		}
 		list_iterator_destroy(itr);
 	}
-	count = NO_VAL;
 
 	pack16(object->with_deleted, buffer);
 	pack16(object->with_clusters, buffer);
@@ -4328,7 +5729,7 @@ extern int slurmdb_unpack_res_cond(void **object, uint16_t rpc_version,
 {
 	uint32_t uint32_tmp;
 	int i;
-	uint32_t count;
+	uint32_t count = 0;
 	slurmdb_res_cond_t *object_ptr =
 		xmalloc(sizeof(slurmdb_res_cond_t));
 	char *tmp_info = NULL;
@@ -4338,10 +5739,10 @@ extern int slurmdb_unpack_res_cond(void **object, uint16_t rpc_version,
 	slurmdb_init_res_cond(object_ptr, 0);
 
 	safe_unpack32(&count, buffer);
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		object_ptr->cluster_list =
 			list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
+		for (i = 0; i < count; i++) {
 			safe_unpackstr_xmalloc(&tmp_info,
 					       &uint32_tmp, buffer);
 			list_append(object_ptr->cluster_list, tmp_info);
@@ -4349,10 +5750,10 @@ extern int slurmdb_unpack_res_cond(void **object, uint16_t rpc_version,
 	}
 
 	safe_unpack32(&count, buffer);
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		object_ptr->description_list =
 			list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
+		for (i = 0; i < count; i++) {
 			safe_unpackstr_xmalloc(&tmp_info,
 					       &uint32_tmp, buffer);
 			list_append(object_ptr->description_list, tmp_info);
@@ -4362,10 +5763,10 @@ extern int slurmdb_unpack_res_cond(void **object, uint16_t rpc_version,
 	safe_unpack32(&object_ptr->flags, buffer);
 
 	safe_unpack32(&count, buffer);
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		object_ptr->id_list =
 			list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
+		for (i = 0; i < count; i++) {
 			safe_unpackstr_xmalloc(&tmp_info,
 					       &uint32_tmp, buffer);
 			list_append(object_ptr->id_list, tmp_info);
@@ -4373,10 +5774,10 @@ extern int slurmdb_unpack_res_cond(void **object, uint16_t rpc_version,
 	}
 
 	safe_unpack32(&count, buffer);
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		object_ptr->manager_list =
 			list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
+		for (i = 0; i < count; i++) {
 			safe_unpackstr_xmalloc(&tmp_info,
 					       &uint32_tmp, buffer);
 			list_append(object_ptr->manager_list, tmp_info);
@@ -4384,10 +5785,10 @@ extern int slurmdb_unpack_res_cond(void **object, uint16_t rpc_version,
 	}
 
 	safe_unpack32(&count, buffer);
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		object_ptr->name_list =
 			list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
+		for (i = 0; i < count; i++) {
 			safe_unpackstr_xmalloc(&tmp_info,
 					       &uint32_tmp, buffer);
 			list_append(object_ptr->name_list, tmp_info);
@@ -4395,10 +5796,10 @@ extern int slurmdb_unpack_res_cond(void **object, uint16_t rpc_version,
 	}
 
 	safe_unpack32(&count, buffer);
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		object_ptr->percent_list =
 			list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
+		for (i = 0; i < count; i++) {
 			safe_unpackstr_xmalloc(&tmp_info,
 					       &uint32_tmp, buffer);
 			list_append(object_ptr->percent_list, tmp_info);
@@ -4406,10 +5807,10 @@ extern int slurmdb_unpack_res_cond(void **object, uint16_t rpc_version,
 	}
 
 	safe_unpack32(&count, buffer);
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		object_ptr->server_list =
 			list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
+		for (i = 0; i < count; i++) {
 			safe_unpackstr_xmalloc(&tmp_info,
 					       &uint32_tmp, buffer);
 			list_append(object_ptr->server_list, tmp_info);
@@ -4417,10 +5818,10 @@ extern int slurmdb_unpack_res_cond(void **object, uint16_t rpc_version,
 	}
 
 	safe_unpack32(&count, buffer);
-	if (count && count != NO_VAL) {
+	if (count && (count != NO_VAL)) {
 		object_ptr->type_list =
 			list_create(slurm_destroy_char);
-		for(i=0; i<count; i++) {
+		for (i = 0; i < count; i++) {
 			safe_unpackstr_xmalloc(&tmp_info,
 					       &uint32_tmp, buffer);
 			list_append(object_ptr->type_list, tmp_info);
@@ -4445,7 +5846,7 @@ extern void slurmdb_pack_txn_cond(void *in, uint16_t rpc_version, Buf buffer)
 	ListIterator itr = NULL;
 	slurmdb_txn_cond_t *object = (slurmdb_txn_cond_t *)in;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!object) {
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
@@ -4578,7 +5979,7 @@ extern int slurmdb_unpack_txn_cond(void **object, uint16_t rpc_version,
 	char *tmp_info = NULL;
 
 	*object = object_ptr;
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
 			object_ptr->acct_list =
@@ -4688,7 +6089,7 @@ extern void slurmdb_pack_wckey_cond(void *in, uint16_t rpc_version, Buf buffer)
 	ListIterator itr = NULL;
 	slurmdb_wckey_cond_t *object = (slurmdb_wckey_cond_t *)in;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!object) {
 			pack32(NO_VAL, buffer);
 			pack32(NO_VAL, buffer);
@@ -4778,7 +6179,7 @@ extern int slurmdb_unpack_wckey_cond(void **object, uint16_t rpc_version,
 
 	*object = object_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpack32(&count, buffer);
 		if (count != NO_VAL) {
 			object_ptr->cluster_list =
@@ -4866,7 +6267,7 @@ extern void slurmdb_pack_archive_cond(void *in, uint16_t rpc_version,
 		pack32(object->purge_resv, buffer);
 		pack32(object->purge_step, buffer);
 		pack32(object->purge_suspend, buffer);
-	} else if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (!object) {
 			packnull(buffer);
 			packnull(buffer);
@@ -4910,7 +6311,7 @@ extern int slurmdb_unpack_archive_cond(void **object, uint16_t rpc_version,
 		safe_unpack32(&object_ptr->purge_resv, buffer);
 		safe_unpack32(&object_ptr->purge_step, buffer);
 		safe_unpack32(&object_ptr->purge_suspend, buffer);
-	} else if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&object_ptr->archive_dir,
 				       &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&object_ptr->archive_script,
@@ -4953,7 +6354,7 @@ extern void slurmdb_pack_update_object(slurmdb_update_object_t *object,
 	case SLURMDB_MODIFY_ASSOC:
 	case SLURMDB_REMOVE_ASSOC:
 	case SLURMDB_REMOVE_ASSOC_USAGE:
-		my_function = slurmdb_pack_association_rec;
+		my_function = slurmdb_pack_assoc_rec;
 		break;
 	case SLURMDB_ADD_QOS:
 	case SLURMDB_MODIFY_QOS:
@@ -4975,6 +6376,9 @@ extern void slurmdb_pack_update_object(slurmdb_update_object_t *object,
 	case SLURMDB_REMOVE_RES:
 		my_function = slurmdb_pack_res_rec;
 		break;
+	case SLURMDB_ADD_TRES:
+		my_function = slurmdb_pack_tres_rec;
+		break;
 	case SLURMDB_UPDATE_NOTSET:
 	default:
 		error("pack: unknown type set in update_object: %d",
@@ -5023,8 +6427,8 @@ extern int slurmdb_unpack_update_object(slurmdb_update_object_t **object,
 	case SLURMDB_MODIFY_ASSOC:
 	case SLURMDB_REMOVE_ASSOC:
 	case SLURMDB_REMOVE_ASSOC_USAGE:
-		my_function = slurmdb_unpack_association_rec;
-		my_destroy = slurmdb_destroy_association_rec;
+		my_function = slurmdb_unpack_assoc_rec;
+		my_destroy = slurmdb_destroy_assoc_rec;
 		break;
 	case SLURMDB_ADD_QOS:
 	case SLURMDB_MODIFY_QOS:
@@ -5049,6 +6453,10 @@ extern int slurmdb_unpack_update_object(slurmdb_update_object_t **object,
 		my_function = slurmdb_unpack_res_rec;
 		my_destroy = slurmdb_destroy_res_rec;
 		break;
+	case SLURMDB_ADD_TRES:
+		my_function = slurmdb_unpack_tres_rec;
+		my_destroy = slurmdb_destroy_tres_rec;
+		break;
 	case SLURMDB_UPDATE_NOTSET:
 	default:
 		error("unpack: unknown type set in update_object: %d",
diff --git a/src/common/slurmdb_pack.h b/src/common/slurmdb_pack.h
index 48e3868fc..f733b0266 100644
--- a/src/common/slurmdb_pack.h
+++ b/src/common/slurmdb_pack.h
@@ -42,11 +42,8 @@
 #include "slurmdb_defs.h"
 #include "pack.h"
 #include "xmalloc.h"
+#include "xstring.h"
 
-extern void slurmdb_pack_user_defs(void *in,
-				   uint16_t rpc_version, Buf buffer);
-extern int slurmdb_unpack_user_defs(void **object,
-				    uint16_t rpc_version, Buf buffer);
 extern void slurmdb_pack_user_rec(void *in,
 				  uint16_t rpc_version, Buf buffer);
 extern int slurmdb_unpack_user_rec(void **object,
@@ -76,11 +73,23 @@ extern void slurmdb_pack_accounting_rec(void *in,
 					Buf buffer);
 extern int slurmdb_unpack_accounting_rec(void **object, uint16_t rpc_version,
 					 Buf buffer);
-extern void slurmdb_pack_association_rec(void *in,
-					 uint16_t rpc_version,
-					 Buf buffer);
-extern int slurmdb_unpack_association_rec(void **object, uint16_t rpc_version,
-					  Buf buffer);
+extern void slurmdb_pack_assoc_rec(void *in,
+				   uint16_t rpc_version,
+				   Buf buffer);
+extern int slurmdb_unpack_assoc_rec_members(slurmdb_assoc_rec_t *object_ptr,
+					    uint16_t rpc_version,
+					    Buf buffer);
+extern int slurmdb_unpack_assoc_rec(void **object, uint16_t rpc_version,
+				    Buf buffer);
+extern void slurmdb_pack_assoc_usage(void *in, uint16_t rpc_version,
+				     Buf buffer);
+extern int slurmdb_unpack_assoc_usage(void **object, uint16_t rpc_version,
+				      Buf buffer);
+extern void slurmdb_pack_assoc_rec_with_usage(void *in, uint16_t rpc_version,
+					      Buf buffer);
+extern int slurmdb_unpack_assoc_rec_with_usage(void **object,
+					       uint16_t rpc_version,
+					       Buf buffer);
 extern void slurmdb_pack_event_rec(void *in,
 				   uint16_t rpc_version,
 				   Buf buffer);
@@ -90,6 +99,14 @@ extern void slurmdb_pack_qos_rec(void *in,
 				 uint16_t rpc_version, Buf buffer);
 extern int slurmdb_unpack_qos_rec(void **object,
 				  uint16_t rpc_version, Buf buffer);
+extern void slurmdb_pack_qos_usage(void *in, uint16_t rpc_version, Buf buffer);
+extern int slurmdb_unpack_qos_usage(void **object, uint16_t rpc_version,
+				    Buf buffer);
+extern void slurmdb_pack_qos_rec_with_usage(void *in, uint16_t rpc_version,
+					    Buf buffer);
+extern int slurmdb_unpack_qos_rec_with_usage(void **object,
+					     uint16_t rpc_version,
+					     Buf buffer);
 extern void slurmdb_pack_reservation_rec(void *in,
 					 uint16_t rpc_version, Buf buffer);
 extern int slurmdb_unpack_reservation_rec(void **object, uint16_t rpc_version,
@@ -109,6 +126,14 @@ extern void slurmdb_pack_archive_rec(void *in,
 				     uint16_t rpc_version, Buf buffer);
 extern int slurmdb_unpack_archive_rec(void **object, uint16_t rpc_version,
 				      Buf buffer);
+extern void slurmdb_pack_tres_cond(void *in, uint16_t rpc_version, Buf buffer);
+extern int slurmdb_unpack_tres_cond(void **object, uint16_t rpc_version,
+				     Buf buffer);
+extern void slurmdb_pack_tres_rec(void *in, uint16_t rpc_version, Buf buffer);
+extern int slurmdb_unpack_tres_rec_noalloc(
+	slurmdb_tres_rec_t *object_ptr, uint16_t rpc_version, Buf buffer);
+extern int slurmdb_unpack_tres_rec(void **object, uint16_t rpc_version,
+				    Buf buffer);
 
 extern void slurmdb_pack_user_cond(void *in,
 				   uint16_t rpc_version, Buf buffer);
@@ -122,11 +147,11 @@ extern void slurmdb_pack_cluster_cond(void *in,
 				      uint16_t rpc_version, Buf buffer);
 extern int slurmdb_unpack_cluster_cond(void **object, uint16_t rpc_version,
 				       Buf buffer);
-extern void slurmdb_pack_association_cond(void *in,
-					  uint16_t rpc_version,
-					  Buf buffer);
-extern int slurmdb_unpack_association_cond(void **object, uint16_t rpc_version,
-					   Buf buffer);
+extern void slurmdb_pack_assoc_cond(void *in,
+				    uint16_t rpc_version,
+				    Buf buffer);
+extern int slurmdb_unpack_assoc_cond(void **object, uint16_t rpc_version,
+				     Buf buffer);
 extern void slurmdb_pack_event_cond(void *in,
 				    uint16_t rpc_version, Buf buffer);
 extern int slurmdb_unpack_event_cond(void **object, uint16_t rpc_version,
@@ -178,9 +203,9 @@ extern void slurmdb_pack_update_object(slurmdb_update_object_t *object,
 				       uint16_t rpc_version, Buf buffer);
 extern int slurmdb_unpack_update_object(slurmdb_update_object_t **object,
 					uint16_t rpc_version, Buf buffer);
-extern void slurmdb_pack_used_limits(void *in,
+extern void slurmdb_pack_used_limits(void *in, uint32_t tres_cnt,
 				     uint16_t rpc_version, Buf buffer);
-extern int slurmdb_unpack_used_limits(void **object,
+extern int slurmdb_unpack_used_limits(void **object, uint32_t tres_cnt,
 				      uint16_t rpc_version, Buf buffer);
 
 extern void pack_update_shares_used(void *in,
diff --git a/src/common/slurmdbd_defs.c b/src/common/slurmdbd_defs.c
index ec8b3d5ae..7fabbf86e 100644
--- a/src/common/slurmdbd_defs.c
+++ b/src/common/slurmdbd_defs.c
@@ -139,23 +139,6 @@ static int    _tot_wait (struct timeval *start_time);
  * Socket open/close/read/write functions
  ****************************************************************************/
 
-/* Some functions are called by the DBD as well as regular slurm
- * procedures.  In this case we need to make a way to translate the
- * DBD rpc to that of SLURM.
- * rpc_version IN - DBD rpc version
- * Returns corrisponding SLURM rpc version
- */
-extern uint16_t slurmdbd_translate_rpc(uint16_t rpc_version)
-{
-	if (rpc_version >= SLURM_14_03_PROTOCOL_VERSION)
-		return SLURM_14_03_PROTOCOL_VERSION;
-	else if (rpc_version >= SLURMDBD_2_6_VERSION)
-		return SLURM_2_6_PROTOCOL_VERSION;
-
-	return 0;
-}
-
-
 /* Open a socket connection to SlurmDbd
  * auth_info IN - alternate authentication key
  * callbacks IN - make agent to process RPCs and contains callback pointers
@@ -501,7 +484,7 @@ extern Buf pack_slurmdbd_msg(slurmdbd_msg_t *req, uint16_t rpc_version)
 {
 	Buf buffer;
 
-	if (rpc_version < SLURMDBD_MIN_VERSION) {
+	if (rpc_version < SLURM_MIN_PROTOCOL_VERSION) {
 		error("slurmdbd: Invalid message version=%hu, type:%hu",
 		      rpc_version, req->msg_type);
 		return NULL;
@@ -512,11 +495,13 @@ extern Buf pack_slurmdbd_msg(slurmdbd_msg_t *req, uint16_t rpc_version)
 
 	switch (req->msg_type) {
 	case DBD_ADD_ACCOUNTS:
+	case DBD_ADD_TRES:
 	case DBD_ADD_ASSOCS:
 	case DBD_ADD_CLUSTERS:
 	case DBD_ADD_RES:
 	case DBD_ADD_USERS:
 	case DBD_GOT_ACCOUNTS:
+	case DBD_GOT_TRES:
 	case DBD_GOT_ASSOCS:
 	case DBD_GOT_CLUSTERS:
 	case DBD_GOT_EVENTS:
@@ -549,13 +534,14 @@ extern Buf pack_slurmdbd_msg(slurmdbd_msg_t *req, uint16_t rpc_version)
 	case DBD_ARCHIVE_LOAD:
 		slurmdb_pack_archive_rec(req->data, rpc_version, buffer);
 		break;
-	case DBD_CLUSTER_CPUS:
+	case DBD_CLUSTER_TRES:
 	case DBD_FLUSH_JOBS:
-		slurmdbd_pack_cluster_cpus_msg(
-			(dbd_cluster_cpus_msg_t *)req->data, rpc_version,
+		slurmdbd_pack_cluster_tres_msg(
+			(dbd_cluster_tres_msg_t *)req->data, rpc_version,
 			buffer);
 		break;
 	case DBD_GET_ACCOUNTS:
+	case DBD_GET_TRES:
 	case DBD_GET_ASSOCS:
 	case DBD_GET_CLUSTERS:
 	case DBD_GET_EVENTS:
@@ -685,7 +671,7 @@ extern int unpack_slurmdbd_msg(slurmdbd_msg_t *resp,
 
 	safe_unpack16(&resp->msg_type, buffer);
 
-	if (rpc_version < SLURMDBD_MIN_VERSION) {
+	if (rpc_version < SLURM_MIN_PROTOCOL_VERSION) {
 		error("slurmdbd: Invalid message version=%hu, type:%hu",
 		      rpc_version, resp->msg_type);
 		return SLURM_ERROR;
@@ -693,11 +679,13 @@ extern int unpack_slurmdbd_msg(slurmdbd_msg_t *resp,
 
 	switch (resp->msg_type) {
 	case DBD_ADD_ACCOUNTS:
+	case DBD_ADD_TRES:
 	case DBD_ADD_ASSOCS:
 	case DBD_ADD_CLUSTERS:
 	case DBD_ADD_RES:
 	case DBD_ADD_USERS:
 	case DBD_GOT_ACCOUNTS:
+	case DBD_GOT_TRES:
 	case DBD_GOT_ASSOCS:
 	case DBD_GOT_CLUSTERS:
 	case DBD_GOT_EVENTS:
@@ -731,13 +719,14 @@ extern int unpack_slurmdbd_msg(slurmdbd_msg_t *resp,
 		rc = slurmdb_unpack_archive_rec(
 			&resp->data, rpc_version, buffer);
 		break;
-	case DBD_CLUSTER_CPUS:
+	case DBD_CLUSTER_TRES:
 	case DBD_FLUSH_JOBS:
-		rc = slurmdbd_unpack_cluster_cpus_msg(
-			(dbd_cluster_cpus_msg_t **)&resp->data,
+		rc = slurmdbd_unpack_cluster_tres_msg(
+			(dbd_cluster_tres_msg_t **)&resp->data,
 			rpc_version, buffer);
 		break;
 	case DBD_GET_ACCOUNTS:
+	case DBD_GET_TRES:
 	case DBD_GET_ASSOCS:
 	case DBD_GET_CLUSTERS:
 	case DBD_GET_EVENTS:
@@ -878,6 +867,8 @@ extern slurmdbd_msg_type_t str_2_slurmdbd_msg_type(char *msg_type)
 		return DBD_ADD_ACCOUNTS;
 	} else if (!strcasecmp(msg_type, "Add Account Coord")) {
 		return DBD_ADD_ACCOUNT_COORDS;
+	} else if (!strcasecmp(msg_type, "Add TRES")) {
+		return DBD_ADD_TRES;
 	} else if (!strcasecmp(msg_type, "Add Associations")) {
 		return DBD_ADD_ASSOCS;
 	} else if (!strcasecmp(msg_type, "Add Clusters")) {
@@ -886,12 +877,14 @@ extern slurmdbd_msg_type_t str_2_slurmdbd_msg_type(char *msg_type)
 		return DBD_ADD_RES;
 	} else if (!strcasecmp(msg_type, "Add Users")) {
 		return DBD_ADD_USERS;
-	} else if (!strcasecmp(msg_type, "Cluster Processors")) {
-		return DBD_CLUSTER_CPUS;
+	} else if (!strcasecmp(msg_type, "Cluster TRES")) {
+		return DBD_CLUSTER_TRES;
 	} else if (!strcasecmp(msg_type, "Flush Jobs")) {
 		return DBD_FLUSH_JOBS;
 	} else if (!strcasecmp(msg_type, "Get Accounts")) {
 		return DBD_GET_ACCOUNTS;
+	} else if (!strcasecmp(msg_type, "Get TRES")) {
+		return DBD_GET_TRES;
 	} else if (!strcasecmp(msg_type, "Get Associations")) {
 		return DBD_GET_ASSOCS;
 	} else if (!strcasecmp(msg_type, "Get Association Usage")) {
@@ -912,6 +905,8 @@ extern slurmdbd_msg_type_t str_2_slurmdbd_msg_type(char *msg_type)
 		return DBD_GET_USERS;
 	} else if (!strcasecmp(msg_type, "Got Accounts")) {
 		return DBD_GOT_ACCOUNTS;
+	} else if (!strcasecmp(msg_type, "Got TRES")) {
+		return DBD_GOT_TRES;
 	} else if (!strcasecmp(msg_type, "Got Associations")) {
 		return DBD_GOT_ASSOCS;
 	} else if (!strcasecmp(msg_type, "Got Association Usage")) {
@@ -1064,6 +1059,12 @@ extern char *slurmdbd_msg_type_2_str(slurmdbd_msg_type_t msg_type, int get_enum)
 		} else
 			return "Add Account Coord";
 		break;
+	case DBD_ADD_TRES:
+		if (get_enum) {
+			return "DBD_ADD_TRES";
+		} else
+			return "Add TRES";
+		break;
 	case DBD_ADD_ASSOCS:
 		if (get_enum) {
 			return "DBD_ADD_ASSOCS";
@@ -1088,11 +1089,11 @@ extern char *slurmdbd_msg_type_2_str(slurmdbd_msg_type_t msg_type, int get_enum)
 		} else
 			return "Add Users";
 		break;
-	case DBD_CLUSTER_CPUS:
+	case DBD_CLUSTER_TRES:
 		if (get_enum) {
-			return "DBD_CLUSTER_CPUS";
+			return "DBD_CLUSTER_TRES";
 		} else
-			return "Cluster Processors";
+			return "Cluster TRES";
 		break;
 	case DBD_FLUSH_JOBS:
 		if (get_enum) {
@@ -1106,6 +1107,12 @@ extern char *slurmdbd_msg_type_2_str(slurmdbd_msg_type_t msg_type, int get_enum)
 		} else
 			return "Get Accounts";
 		break;
+	case DBD_GET_TRES:
+		if (get_enum) {
+			return "DBD_GET_TRES";
+		} else
+			return "Get TRES";
+		break;
 	case DBD_GET_ASSOCS:
 		if (get_enum) {
 			return "DBD_GET_ASSOCS";
@@ -1166,6 +1173,12 @@ extern char *slurmdbd_msg_type_2_str(slurmdbd_msg_type_t msg_type, int get_enum)
 		} else
 			return "Got Accounts";
 		break;
+	case DBD_GOT_TRES:
+		if (get_enum) {
+			return "DBD_GOT_TRES";
+		} else
+			return "Got TRES";
+		break;
 	case DBD_GOT_ASSOCS:
 		if (get_enum) {
 			return "DBD_GOT_ASSOCS";
@@ -2195,7 +2208,7 @@ static void *_agent(void *x)
 			*/
 			if (list_msg.my_list) {
 				if (list_msg.my_list != agent_list)
-					list_destroy(list_msg.my_list);
+					FREE_NULL_LIST(list_msg.my_list);
 				list_msg.my_list = NULL;
 			} else
 				buffer = (Buf) list_dequeue(agent_list);
@@ -2208,7 +2221,7 @@ static void *_agent(void *x)
 			*/
 			if (list_msg.my_list) {
 				if (list_msg.my_list != agent_list)
-					list_destroy(list_msg.my_list);
+					FREE_NULL_LIST(list_msg.my_list);
 				list_msg.my_list = NULL;
 				free_buf(buffer);
 			}
@@ -2231,10 +2244,7 @@ static void *_agent(void *x)
 
 	slurm_mutex_lock(&agent_lock);
 	_save_dbd_state();
-	if (agent_list) {
-		list_destroy(agent_list);
-		agent_list = NULL;
-	}
+	FREE_NULL_LIST(agent_list);
 	slurm_mutex_unlock(&agent_lock);
 	return NULL;
 }
@@ -2514,19 +2524,17 @@ static int _purge_job_start_req(void)
 extern void slurmdbd_free_acct_coord_msg(dbd_acct_coord_msg_t *msg)
 {
 	if (msg) {
-		if (msg->acct_list) {
-			list_destroy(msg->acct_list);
-			msg->acct_list = NULL;
-		}
+		FREE_NULL_LIST(msg->acct_list);
 		slurmdb_destroy_user_cond(msg->cond);
 		xfree(msg);
 	}
 }
 
-extern void slurmdbd_free_cluster_cpus_msg(dbd_cluster_cpus_msg_t *msg)
+extern void slurmdbd_free_cluster_tres_msg(dbd_cluster_tres_msg_t *msg)
 {
 	if (msg) {
 		xfree(msg->cluster_nodes);
+		xfree(msg->tres_str);
 		xfree(msg);
 	}
 }
@@ -2564,10 +2572,13 @@ extern void slurmdbd_free_cond_msg(dbd_cond_msg_t *msg,
 		case DBD_REMOVE_ACCOUNTS:
 			my_destroy = slurmdb_destroy_account_cond;
 			break;
+		case DBD_GET_TRES:
+			my_destroy = slurmdb_destroy_tres_cond;
+			break;
 		case DBD_GET_ASSOCS:
 		case DBD_GET_PROBS:
 		case DBD_REMOVE_ASSOCS:
-			my_destroy = slurmdb_destroy_association_cond;
+			my_destroy = slurmdb_destroy_assoc_cond;
 			break;
 		case DBD_GET_CLUSTERS:
 		case DBD_REMOVE_CLUSTERS:
@@ -2650,6 +2661,8 @@ extern void slurmdbd_free_job_start_msg(void *in)
 		xfree(msg->nodes);
 		xfree(msg->node_inx);
 		xfree(msg->partition);
+		xfree(msg->tres_alloc_str);
+		xfree(msg->tres_req_str);
 		xfree(msg->wckey);
 		xfree(msg);
 	}
@@ -2669,8 +2682,7 @@ extern void slurmdbd_free_job_suspend_msg(dbd_job_suspend_msg_t *msg)
 extern void slurmdbd_free_list_msg(dbd_list_msg_t *msg)
 {
 	if (msg) {
-		if (msg->my_list)
-			list_destroy(msg->my_list);
+		FREE_NULL_LIST(msg->my_list);
 		xfree(msg);
 	}
 }
@@ -2688,8 +2700,8 @@ extern void slurmdbd_free_modify_msg(dbd_modify_msg_t *msg,
 			destroy_rec = slurmdb_destroy_account_rec;
 			break;
 		case DBD_MODIFY_ASSOCS:
-			destroy_cond = slurmdb_destroy_association_cond;
-			destroy_rec = slurmdb_destroy_association_rec;
+			destroy_cond = slurmdb_destroy_assoc_cond;
+			destroy_rec = slurmdb_destroy_assoc_rec;
 			break;
 		case DBD_MODIFY_CLUSTERS:
 			destroy_cond = slurmdb_destroy_cluster_cond;
@@ -2729,6 +2741,7 @@ extern void slurmdbd_free_node_state_msg(dbd_node_state_msg_t *msg)
 	if (msg) {
 		xfree(msg->hostlist);
 		xfree(msg->reason);
+		xfree(msg->tres_str);
 		xfree(msg);
 	}
 }
@@ -2765,6 +2778,7 @@ extern void slurmdbd_free_step_start_msg(dbd_step_start_msg_t *msg)
 		xfree(msg->name);
 		xfree(msg->nodes);
 		xfree(msg->node_inx);
+		xfree(msg->tres_alloc_str);
 		xfree(msg);
 	}
 }
@@ -2777,7 +2791,7 @@ extern void slurmdbd_free_usage_msg(dbd_usage_msg_t *msg,
 		switch(type) {
 		case DBD_GET_ASSOC_USAGE:
 		case DBD_GOT_ASSOC_USAGE:
-			destroy_rec = slurmdb_destroy_association_rec;
+			destroy_rec = slurmdb_destroy_assoc_rec;
 			break;
 		case DBD_GET_CLUSTER_USAGE:
 		case DBD_GOT_CLUSTER_USAGE:
@@ -2856,37 +2870,53 @@ unpack_error:
 }
 
 extern void
-slurmdbd_pack_cluster_cpus_msg(dbd_cluster_cpus_msg_t *msg,
+slurmdbd_pack_cluster_tres_msg(dbd_cluster_tres_msg_t *msg,
 			       uint16_t rpc_version, Buf buffer)
 {
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	uint32_t count = NO_VAL;
+
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		packstr(msg->cluster_nodes, buffer);
+		pack_time(msg->event_time, buffer);
+		packstr(msg->tres_str, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		packstr(msg->cluster_nodes, buffer);
-		pack32(msg->cpu_count,    buffer);
+		count = (uint32_t)slurmdb_find_tres_count_in_string(
+			msg->tres_str, TRES_CPU);
+		pack32(count, buffer);
 		pack_time(msg->event_time, buffer);
 	}
 }
 
 extern int
-slurmdbd_unpack_cluster_cpus_msg(dbd_cluster_cpus_msg_t **msg,
+slurmdbd_unpack_cluster_tres_msg(dbd_cluster_tres_msg_t **msg,
 				 uint16_t rpc_version, Buf buffer)
 {
-	dbd_cluster_cpus_msg_t *msg_ptr;
+	dbd_cluster_tres_msg_t *msg_ptr;
 	uint32_t uint32_tmp;
+	uint32_t count = NO_VAL;
 
-	msg_ptr = xmalloc(sizeof(dbd_cluster_cpus_msg_t));
+	msg_ptr = xmalloc(sizeof(dbd_cluster_tres_msg_t));
 	*msg = msg_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&msg_ptr->cluster_nodes,
 				       &uint32_tmp, buffer);
-		safe_unpack32(&msg_ptr->cpu_count, buffer);
+		safe_unpack_time(&msg_ptr->event_time, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->tres_str,
+				       &uint32_tmp, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&msg_ptr->cluster_nodes,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&count, buffer);
+		msg_ptr->tres_str = xstrdup_printf("%d=%u", TRES_CPU, count);
 		safe_unpack_time(&msg_ptr->event_time, buffer);
 	}
 
 	return SLURM_SUCCESS;
 
 unpack_error:
-	slurmdbd_free_cluster_cpus_msg(msg_ptr);
+	slurmdbd_free_cluster_tres_msg(msg_ptr);
 	*msg = NULL;
 	return SLURM_ERROR;
 }
@@ -2954,10 +2984,13 @@ extern void slurmdbd_pack_cond_msg(dbd_cond_msg_t *msg,
 	case DBD_REMOVE_ACCOUNTS:
 		my_function = slurmdb_pack_account_cond;
 		break;
+	case DBD_GET_TRES:
+		my_function = slurmdb_pack_tres_cond;
+		break;
 	case DBD_GET_ASSOCS:
 	case DBD_GET_PROBS:
 	case DBD_REMOVE_ASSOCS:
-		my_function = slurmdb_pack_association_cond;
+		my_function = slurmdb_pack_assoc_cond;
 		break;
 	case DBD_GET_CLUSTERS:
 	case DBD_REMOVE_CLUSTERS:
@@ -3014,10 +3047,13 @@ extern int slurmdbd_unpack_cond_msg(dbd_cond_msg_t **msg,
 	case DBD_REMOVE_ACCOUNTS:
 		my_function = slurmdb_unpack_account_cond;
 		break;
+	case DBD_GET_TRES:
+		my_function = slurmdb_unpack_tres_cond;
+		break;
 	case DBD_GET_ASSOCS:
 	case DBD_GET_PROBS:
 	case DBD_REMOVE_ASSOCS:
-		my_function = slurmdb_unpack_association_cond;
+		my_function = slurmdb_unpack_assoc_cond;
 		break;
 	case DBD_GET_CLUSTERS:
 	case DBD_REMOVE_CLUSTERS:
@@ -3178,7 +3214,20 @@ extern void
 slurmdbd_pack_job_complete_msg(dbd_job_comp_msg_t *msg,
 			       uint16_t rpc_version, Buf buffer)
 {
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		pack32(msg->assoc_id, buffer);
+		packstr(msg->comment, buffer);
+		pack32(msg->db_index, buffer);
+		pack32(msg->derived_ec, buffer);
+		pack_time(msg->end_time, buffer);
+		pack32(msg->exit_code, buffer);
+		pack32(msg->job_id, buffer);
+		pack32(msg->job_state, buffer);
+		packstr(msg->nodes, buffer);
+		pack32(msg->req_uid, buffer);
+		pack_time(msg->start_time, buffer);
+		pack_time(msg->submit_time, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		pack32(msg->assoc_id, buffer);
 		packstr(msg->comment, buffer);
 		pack32(msg->db_index, buffer);
@@ -3199,10 +3248,24 @@ slurmdbd_unpack_job_complete_msg(dbd_job_comp_msg_t **msg,
 				 uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
+	uint16_t uint16_tmp;
 	dbd_job_comp_msg_t *msg_ptr = xmalloc(sizeof(dbd_job_comp_msg_t));
 	*msg = msg_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpack32(&msg_ptr->assoc_id, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->comment, &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->db_index, buffer);
+		safe_unpack32(&msg_ptr->derived_ec, buffer);
+		safe_unpack_time(&msg_ptr->end_time, buffer);
+		safe_unpack32(&msg_ptr->exit_code, buffer);
+		safe_unpack32(&msg_ptr->job_id, buffer);
+		safe_unpack32(&msg_ptr->job_state, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->req_uid, buffer);
+		safe_unpack_time(&msg_ptr->start_time, buffer);
+		safe_unpack_time(&msg_ptr->submit_time, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpack32(&msg_ptr->assoc_id, buffer);
 		safe_unpackstr_xmalloc(&msg_ptr->comment, &uint32_tmp, buffer);
 		safe_unpack32(&msg_ptr->db_index, buffer);
@@ -3210,7 +3273,8 @@ slurmdbd_unpack_job_complete_msg(dbd_job_comp_msg_t **msg,
 		safe_unpack_time(&msg_ptr->end_time, buffer);
 		safe_unpack32(&msg_ptr->exit_code, buffer);
 		safe_unpack32(&msg_ptr->job_id, buffer);
-		safe_unpack16(&msg_ptr->job_state, buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		msg_ptr->job_state = uint16_tmp;
 		safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer);
 		safe_unpack32(&msg_ptr->req_uid, buffer);
 		safe_unpack_time(&msg_ptr->start_time, buffer);
@@ -3228,10 +3292,48 @@ extern void
 slurmdbd_pack_job_start_msg(void *in,
 			    uint16_t rpc_version, Buf buffer)
 {
+	uint32_t count = NO_VAL;
 	dbd_job_start_msg_t *msg = (dbd_job_start_msg_t *)in;
-	if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
+
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		packstr(msg->account, buffer);
-		pack32(msg->alloc_cpus, buffer);
+		pack32(msg->alloc_nodes, buffer);
+		pack32(msg->array_job_id, buffer);
+		pack32(msg->array_max_tasks, buffer);
+		pack32(msg->array_task_id, buffer);
+		packstr(msg->array_task_str, buffer);
+		pack32(msg->array_task_pending, buffer);
+		pack32(msg->assoc_id, buffer);
+		packstr(msg->block_id, buffer);
+		pack32(msg->db_index, buffer);
+		pack_time(msg->eligible_time, buffer);
+		pack32(msg->gid, buffer);
+		packstr(msg->gres_alloc, buffer);
+		packstr(msg->gres_req, buffer);
+		packstr(msg->gres_used, buffer);
+		pack32(msg->job_id, buffer);
+		pack32(msg->job_state, buffer);
+		packstr(msg->name, buffer);
+		packstr(msg->nodes, buffer);
+		packstr(msg->node_inx, buffer);
+		packstr(msg->partition, buffer);
+		pack32(msg->priority, buffer);
+		pack32(msg->qos_id, buffer);
+		pack32(msg->req_cpus, buffer);
+		pack32(msg->req_mem, buffer);
+		pack32(msg->resv_id, buffer);
+		pack_time(msg->start_time, buffer);
+		pack_time(msg->submit_time, buffer);
+		pack32(msg->timelimit, buffer);
+		packstr(msg->tres_alloc_str, buffer);
+		packstr(msg->tres_req_str, buffer);
+		pack32(msg->uid, buffer);
+		packstr(msg->wckey, buffer);
+	} else if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
+		packstr(msg->account, buffer);
+		count = (uint32_t)slurmdb_find_tres_count_in_string(
+			msg->tres_alloc_str, TRES_CPU);
+		pack32(count, buffer);
 		pack32(msg->alloc_nodes, buffer);
 		pack32(msg->array_job_id, buffer);
 		pack32(msg->array_max_tasks, buffer);
@@ -3262,9 +3364,11 @@ slurmdbd_pack_job_start_msg(void *in,
 		pack32(msg->timelimit, buffer);
 		pack32(msg->uid, buffer);
 		packstr(msg->wckey, buffer);
-	} else if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		packstr(msg->account, buffer);
-		pack32(msg->alloc_cpus, buffer);
+		count = (uint32_t)slurmdb_find_tres_count_in_string(
+			msg->tres_alloc_str, TRES_CPU);
+		pack32(count, buffer);
 		pack32(msg->alloc_nodes, buffer);
 		pack32(msg->assoc_id, buffer);
 		packstr(msg->block_id, buffer);
@@ -3298,15 +3402,16 @@ slurmdbd_unpack_job_start_msg(void **msg,
 			      uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
+	uint16_t uint16_tmp;
+	uint32_t count = NO_VAL;
 	dbd_job_start_msg_t *msg_ptr = xmalloc(sizeof(dbd_job_start_msg_t));
 	*msg = msg_ptr;
 
 	msg_ptr->array_job_id = 0;
 	msg_ptr->array_task_id = NO_VAL;
 
-	if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&msg_ptr->account, &uint32_tmp, buffer);
-		safe_unpack32(&msg_ptr->alloc_cpus, buffer);
 		safe_unpack32(&msg_ptr->alloc_nodes, buffer);
 		safe_unpack32(&msg_ptr->array_job_id, buffer);
 		safe_unpack32(&msg_ptr->array_max_tasks, buffer);
@@ -3326,7 +3431,7 @@ slurmdbd_unpack_job_start_msg(void **msg,
 		safe_unpackstr_xmalloc(&msg_ptr->gres_used, &uint32_tmp,
 				       buffer);
 		safe_unpack32(&msg_ptr->job_id, buffer);
-		safe_unpack16(&msg_ptr->job_state, buffer);
+		safe_unpack32(&msg_ptr->job_state, buffer);
 		safe_unpackstr_xmalloc(&msg_ptr->name, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&msg_ptr->node_inx, &uint32_tmp, buffer);
@@ -3340,12 +3445,24 @@ slurmdbd_unpack_job_start_msg(void **msg,
 		safe_unpack_time(&msg_ptr->start_time, buffer);
 		safe_unpack_time(&msg_ptr->submit_time, buffer);
 		safe_unpack32(&msg_ptr->timelimit, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->tres_alloc_str,
+				       &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->tres_req_str,
+				       &uint32_tmp, buffer);
 		safe_unpack32(&msg_ptr->uid, buffer);
 		safe_unpackstr_xmalloc(&msg_ptr->wckey, &uint32_tmp, buffer);
-	} else if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	} else if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&msg_ptr->account, &uint32_tmp, buffer);
-		safe_unpack32(&msg_ptr->alloc_cpus, buffer);
+		safe_unpack32(&count, buffer);
+		msg_ptr->tres_alloc_str = xstrdup_printf(
+			"%d=%u", TRES_CPU, count);
 		safe_unpack32(&msg_ptr->alloc_nodes, buffer);
+		safe_unpack32(&msg_ptr->array_job_id, buffer);
+		safe_unpack32(&msg_ptr->array_max_tasks, buffer);
+		safe_unpack32(&msg_ptr->array_task_id, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->array_task_str,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->array_task_pending, buffer);
 		safe_unpack32(&msg_ptr->assoc_id, buffer);
 		safe_unpackstr_xmalloc(&msg_ptr->block_id, &uint32_tmp, buffer);
 		safe_unpack32(&msg_ptr->db_index, buffer);
@@ -3358,7 +3475,8 @@ slurmdbd_unpack_job_start_msg(void **msg,
 		safe_unpackstr_xmalloc(&msg_ptr->gres_used, &uint32_tmp,
 				       buffer);
 		safe_unpack32(&msg_ptr->job_id, buffer);
-		safe_unpack16(&msg_ptr->job_state, buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		msg_ptr->job_state = uint16_tmp;
 		safe_unpackstr_xmalloc(&msg_ptr->name, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&msg_ptr->node_inx, &uint32_tmp, buffer);
@@ -3368,6 +3486,49 @@ slurmdbd_unpack_job_start_msg(void **msg,
 		safe_unpack32(&msg_ptr->qos_id, buffer);
 		safe_unpack32(&msg_ptr->req_cpus, buffer);
 		safe_unpack32(&msg_ptr->req_mem, buffer);
+
+		xstrfmtcat(msg_ptr->tres_alloc_str,
+			   ",%d=%u", TRES_MEM, msg_ptr->req_mem);
+
+		safe_unpack32(&msg_ptr->resv_id, buffer);
+		safe_unpack_time(&msg_ptr->start_time, buffer);
+		safe_unpack_time(&msg_ptr->submit_time, buffer);
+		safe_unpack32(&msg_ptr->timelimit, buffer);
+		safe_unpack32(&msg_ptr->uid, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->wckey, &uint32_tmp, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&msg_ptr->account, &uint32_tmp, buffer);
+		safe_unpack32(&count, buffer);
+		msg_ptr->tres_alloc_str = xstrdup_printf(
+			"%d=%u", TRES_CPU, count);
+		safe_unpack32(&msg_ptr->alloc_nodes, buffer);
+		safe_unpack32(&msg_ptr->assoc_id, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->block_id, &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->db_index, buffer);
+		safe_unpack_time(&msg_ptr->eligible_time, buffer);
+		safe_unpack32(&msg_ptr->gid, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->gres_alloc, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->gres_req, &uint32_tmp,
+				       buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->gres_used, &uint32_tmp,
+				       buffer);
+		safe_unpack32(&msg_ptr->job_id, buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		msg_ptr->job_state = uint16_tmp;
+		safe_unpackstr_xmalloc(&msg_ptr->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->node_inx, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->partition,
+				       &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->priority, buffer);
+		safe_unpack32(&msg_ptr->qos_id, buffer);
+		safe_unpack32(&msg_ptr->req_cpus, buffer);
+		safe_unpack32(&msg_ptr->req_mem, buffer);
+
+		xstrfmtcat(msg_ptr->tres_alloc_str,
+			   ",%d=%u", TRES_MEM, msg_ptr->req_mem);
+
 		safe_unpack32(&msg_ptr->resv_id, buffer);
 		safe_unpack_time(&msg_ptr->start_time, buffer);
 		safe_unpack_time(&msg_ptr->submit_time, buffer);
@@ -3390,7 +3551,7 @@ slurmdbd_pack_id_rc_msg(void *in,
 {
 	dbd_id_rc_msg_t *msg = (dbd_id_rc_msg_t *)in;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		pack32(msg->job_id, buffer);
 		pack32(msg->id, buffer);
 		pack32(msg->return_code, buffer);
@@ -3404,7 +3565,7 @@ slurmdbd_unpack_id_rc_msg(void **msg,
 	dbd_id_rc_msg_t *msg_ptr = xmalloc(sizeof(dbd_id_rc_msg_t));
 
 	*msg = msg_ptr;
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpack32(&msg_ptr->job_id, buffer);
 		safe_unpack32(&msg_ptr->id, buffer);
 		safe_unpack32(&msg_ptr->return_code, buffer);
@@ -3421,26 +3582,46 @@ extern void
 slurmdbd_pack_job_suspend_msg(dbd_job_suspend_msg_t *msg,
 			      uint16_t rpc_version, Buf buffer)
 {
-	pack32(msg->assoc_id, buffer);
-	pack32(msg->db_index, buffer);
-	pack32(msg->job_id, buffer);
-	pack16(msg->job_state, buffer);
-	pack_time(msg->submit_time, buffer);
-	pack_time(msg->suspend_time, buffer);
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		pack32(msg->assoc_id, buffer);
+		pack32(msg->db_index, buffer);
+		pack32(msg->job_id, buffer);
+		pack32(msg->job_state, buffer);
+		pack_time(msg->submit_time, buffer);
+		pack_time(msg->suspend_time, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		pack32(msg->assoc_id, buffer);
+		pack32(msg->db_index, buffer);
+		pack32(msg->job_id, buffer);
+		pack16(msg->job_state, buffer);
+		pack_time(msg->submit_time, buffer);
+		pack_time(msg->suspend_time, buffer);
+	}
 }
 
 extern int
 slurmdbd_unpack_job_suspend_msg(dbd_job_suspend_msg_t **msg,
 				uint16_t rpc_version, Buf buffer)
 {
+	uint16_t uint16_tmp;
 	dbd_job_suspend_msg_t *msg_ptr = xmalloc(sizeof(dbd_job_suspend_msg_t));
 	*msg = msg_ptr;
-	safe_unpack32(&msg_ptr->assoc_id, buffer);
-	safe_unpack32(&msg_ptr->db_index, buffer);
-	safe_unpack32(&msg_ptr->job_id, buffer);
-	safe_unpack16(&msg_ptr->job_state, buffer);
-	safe_unpack_time(&msg_ptr->submit_time, buffer);
-	safe_unpack_time(&msg_ptr->suspend_time, buffer);
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpack32(&msg_ptr->assoc_id, buffer);
+		safe_unpack32(&msg_ptr->db_index, buffer);
+		safe_unpack32(&msg_ptr->job_id, buffer);
+		safe_unpack32(&msg_ptr->job_state, buffer);
+		safe_unpack_time(&msg_ptr->submit_time, buffer);
+		safe_unpack_time(&msg_ptr->suspend_time, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		safe_unpack32(&msg_ptr->assoc_id, buffer);
+		safe_unpack32(&msg_ptr->db_index, buffer);
+		safe_unpack32(&msg_ptr->job_id, buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		msg_ptr->job_state = uint16_tmp;
+		safe_unpack_time(&msg_ptr->submit_time, buffer);
+		safe_unpack_time(&msg_ptr->suspend_time, buffer);
+	}
 	return SLURM_SUCCESS;
 
 unpack_error:
@@ -3464,10 +3645,14 @@ extern void slurmdbd_pack_list_msg(dbd_list_msg_t *msg,
 	case DBD_GOT_ACCOUNTS:
 		my_function = slurmdb_pack_account_rec;
 		break;
+	case DBD_ADD_TRES:
+	case DBD_GOT_TRES:
+		my_function = slurmdb_pack_tres_rec;
+		break;
 	case DBD_ADD_ASSOCS:
 	case DBD_GOT_ASSOCS:
 	case DBD_GOT_PROBS:
-		my_function = slurmdb_pack_association_rec;
+		my_function = slurmdb_pack_assoc_rec;
 		break;
 	case DBD_ADD_CLUSTERS:
 	case DBD_GOT_CLUSTERS:
@@ -3557,11 +3742,16 @@ extern int slurmdbd_unpack_list_msg(dbd_list_msg_t **msg, uint16_t rpc_version,
 		my_function = slurmdb_unpack_account_rec;
 		my_destroy = slurmdb_destroy_account_rec;
 		break;
+	case DBD_ADD_TRES:
+	case DBD_GOT_TRES:
+		my_function = slurmdb_unpack_tres_rec;
+		my_destroy = slurmdb_destroy_tres_rec;
+		break;
 	case DBD_ADD_ASSOCS:
 	case DBD_GOT_ASSOCS:
 	case DBD_GOT_PROBS:
-		my_function = slurmdb_unpack_association_rec;
-		my_destroy = slurmdb_destroy_association_rec;
+		my_function = slurmdb_unpack_assoc_rec;
+		my_destroy = slurmdb_destroy_assoc_rec;
 		break;
 	case DBD_ADD_CLUSTERS:
 	case DBD_GOT_CLUSTERS:
@@ -3673,8 +3863,8 @@ extern void slurmdbd_pack_modify_msg(dbd_modify_msg_t *msg,
 		my_rec = slurmdb_pack_account_rec;
 		break;
 	case DBD_MODIFY_ASSOCS:
-		my_cond = slurmdb_pack_association_cond;
-		my_rec = slurmdb_pack_association_rec;
+		my_cond = slurmdb_pack_assoc_cond;
+		my_rec = slurmdb_pack_assoc_rec;
 		break;
 	case DBD_MODIFY_CLUSTERS:
 		my_cond = slurmdb_pack_cluster_cond;
@@ -3722,8 +3912,8 @@ extern int slurmdbd_unpack_modify_msg(dbd_modify_msg_t **msg,
 		my_rec = slurmdb_unpack_account_rec;
 		break;
 	case DBD_MODIFY_ASSOCS:
-		my_cond = slurmdb_unpack_association_cond;
-		my_rec = slurmdb_unpack_association_rec;
+		my_cond = slurmdb_unpack_assoc_cond;
+		my_rec = slurmdb_unpack_assoc_rec;
 		break;
 	case DBD_MODIFY_CLUSTERS:
 		my_cond = slurmdb_unpack_cluster_cond;
@@ -3767,16 +3957,30 @@ extern void
 slurmdbd_pack_node_state_msg(dbd_node_state_msg_t *msg,
 			     uint16_t rpc_version, Buf buffer)
 {
-	if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
-		pack32(msg->cpu_count, buffer);
+	uint32_t count = NO_VAL;
+
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		packstr(msg->hostlist, buffer);
+		packstr(msg->reason, buffer);
+		pack32(msg->reason_uid, buffer);
+		pack16(msg->new_state, buffer);
+		pack_time(msg->event_time, buffer);
+		pack32(msg->state, buffer);
+		packstr(msg->tres_str, buffer);
+	} else if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
+		count = (uint32_t)slurmdb_find_tres_count_in_string(
+			msg->tres_str, TRES_CPU);
+		pack32(count, buffer);
 		packstr(msg->hostlist, buffer);
 		packstr(msg->reason, buffer);
 		pack32(msg->reason_uid, buffer);
 		pack16(msg->new_state, buffer);
 		pack_time(msg->event_time, buffer);
 		pack32(msg->state, buffer);
-	} else if (rpc_version >= SLURMDBD_MIN_VERSION) {
-		pack32(msg->cpu_count, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		count = (uint32_t)slurmdb_find_tres_count_in_string(
+			msg->tres_str, TRES_CPU);
+		pack32(count, buffer);
 		packstr(msg->hostlist, buffer);
 		packstr(msg->reason, buffer);
 		pack32(msg->reason_uid, buffer);
@@ -3791,24 +3995,36 @@ slurmdbd_unpack_node_state_msg(dbd_node_state_msg_t **msg,
 			       uint16_t rpc_version, Buf buffer)
 {
 	dbd_node_state_msg_t *msg_ptr;
-	uint32_t uint32_tmp;
 	uint16_t tmp_state;
+	uint32_t uint32_tmp;
+	uint32_t count = NO_VAL;
 
 	msg_ptr = xmalloc(sizeof(dbd_node_state_msg_t));
 	*msg = msg_ptr;
 
 	msg_ptr->reason_uid = NO_VAL;
 
-	if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
-		safe_unpack32(&msg_ptr->cpu_count, buffer);
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpackstr_xmalloc(&msg_ptr->hostlist, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->reason,   &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->reason_uid, buffer);
+		safe_unpack16(&msg_ptr->new_state, buffer);
+		safe_unpack_time(&msg_ptr->event_time, buffer);
+		safe_unpack32(&msg_ptr->state, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->tres_str,
+				       &uint32_tmp, buffer);
+	} else if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
+		safe_unpack32(&count, buffer);
+		msg_ptr->tres_str = xstrdup_printf("%d=%u", TRES_CPU, count);
 		safe_unpackstr_xmalloc(&msg_ptr->hostlist, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&msg_ptr->reason,   &uint32_tmp, buffer);
 		safe_unpack32(&msg_ptr->reason_uid, buffer);
 		safe_unpack16(&msg_ptr->new_state, buffer);
 		safe_unpack_time(&msg_ptr->event_time, buffer);
 		safe_unpack32(&msg_ptr->state, buffer);
-	} else if (rpc_version >= SLURMDBD_MIN_VERSION) {
-		safe_unpack32(&msg_ptr->cpu_count, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		safe_unpack32(&count, buffer);
+		msg_ptr->tres_str = xstrdup_printf("%d=%u", TRES_CPU, count);
 		safe_unpackstr_xmalloc(&msg_ptr->hostlist, &uint32_tmp, buffer);
 		safe_unpackstr_xmalloc(&msg_ptr->reason,   &uint32_tmp, buffer);
 		safe_unpack32(&msg_ptr->reason_uid, buffer);
@@ -3857,7 +4073,7 @@ extern void
 slurmdbd_pack_register_ctld_msg(dbd_register_ctld_msg_t *msg,
 				uint16_t rpc_version, Buf buffer)
 {
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		pack16(msg->dimensions, buffer);
 		pack32(msg->flags, buffer);
 		pack32(msg->plugin_id_select, buffer);
@@ -3872,7 +4088,7 @@ slurmdbd_unpack_register_ctld_msg(dbd_register_ctld_msg_t **msg,
 	dbd_register_ctld_msg_t *msg_ptr = xmalloc(
 		sizeof(dbd_register_ctld_msg_t));
 	*msg = msg_ptr;
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpack16(&msg_ptr->dimensions, buffer);
 		safe_unpack32(&msg_ptr->flags, buffer);
 		safe_unpack32(&msg_ptr->plugin_id_select, buffer);
@@ -3890,7 +4106,7 @@ extern void
 slurmdbd_pack_roll_usage_msg(dbd_roll_usage_msg_t *msg,
 			     uint16_t rpc_version, Buf buffer)
 {
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		pack16(msg->archive_data, buffer);
 		pack_time(msg->end, buffer);
 		pack_time(msg->start, buffer);
@@ -3905,7 +4121,7 @@ slurmdbd_unpack_roll_usage_msg(dbd_roll_usage_msg_t **msg,
 
 	*msg = msg_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		safe_unpack16(&msg_ptr->archive_data, buffer);
 		safe_unpack_time(&msg_ptr->end, buffer);
 		safe_unpack_time(&msg_ptr->start, buffer);
@@ -4003,7 +4219,27 @@ extern void
 slurmdbd_pack_step_start_msg(dbd_step_start_msg_t *msg, uint16_t rpc_version,
 			     Buf buffer)
 {
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	uint32_t count;
+
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		pack32(msg->assoc_id, buffer);
+		pack32(msg->db_index, buffer);
+		pack32(msg->job_id, buffer);
+		packstr(msg->name, buffer);
+		packstr(msg->nodes, buffer);
+		packstr(msg->node_inx, buffer);
+		pack32(msg->node_cnt, buffer);
+		pack_time(msg->start_time, buffer);
+		pack_time(msg->job_submit_time, buffer);
+		pack32(msg->req_cpufreq_min, buffer);
+		pack32(msg->req_cpufreq_max, buffer);
+		pack32(msg->req_cpufreq_gov, buffer);
+		pack32(msg->step_id, buffer);
+		pack32(msg->task_dist, buffer);
+		pack32(msg->total_tasks, buffer);
+		packstr(msg->tres_alloc_str, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		uint16_t old_task_dist;
 		pack32(msg->assoc_id, buffer);
 		pack32(msg->db_index, buffer);
 		pack32(msg->job_id, buffer);
@@ -4013,10 +4249,13 @@ slurmdbd_pack_step_start_msg(dbd_step_start_msg_t *msg, uint16_t rpc_version,
 		pack32(msg->node_cnt, buffer);
 		pack_time(msg->start_time, buffer);
 		pack_time(msg->job_submit_time, buffer);
-		pack32(msg->req_cpufreq, buffer);
+		pack32(msg->req_cpufreq_min, buffer);
 		pack32(msg->step_id, buffer);
-		pack16(msg->task_dist, buffer);
-		pack32(msg->total_cpus, buffer);
+		old_task_dist = task_dist_new2old(msg->task_dist);
+		pack16(old_task_dist, buffer);
+		count = (uint32_t)slurmdb_find_tres_count_in_string(
+			msg->tres_alloc_str, TRES_CPU);
+		pack32(count, buffer);
 		pack32(msg->total_tasks, buffer);
 	}
 }
@@ -4026,10 +4265,30 @@ slurmdbd_unpack_step_start_msg(dbd_step_start_msg_t **msg,
 			       uint16_t rpc_version, Buf buffer)
 {
 	uint32_t uint32_tmp;
+	uint32_t count;
 	dbd_step_start_msg_t *msg_ptr = xmalloc(sizeof(dbd_step_start_msg_t));
 	*msg = msg_ptr;
 
-	if (rpc_version >= SLURMDBD_MIN_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		safe_unpack32(&msg_ptr->assoc_id, buffer);
+		safe_unpack32(&msg_ptr->db_index, buffer);
+		safe_unpack32(&msg_ptr->job_id, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->name, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->nodes, &uint32_tmp, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->node_inx, &uint32_tmp, buffer);
+		safe_unpack32(&msg_ptr->node_cnt, buffer);
+		safe_unpack_time(&msg_ptr->start_time, buffer);
+		safe_unpack_time(&msg_ptr->job_submit_time, buffer);
+		safe_unpack32(&msg_ptr->req_cpufreq_min, buffer);
+		safe_unpack32(&msg_ptr->req_cpufreq_max, buffer);
+		safe_unpack32(&msg_ptr->req_cpufreq_gov, buffer);
+		safe_unpack32(&msg_ptr->step_id, buffer);
+		safe_unpack32(&msg_ptr->task_dist, buffer);
+		safe_unpack32(&msg_ptr->total_tasks, buffer);
+		safe_unpackstr_xmalloc(&msg_ptr->tres_alloc_str,
+				       &uint32_tmp, buffer);
+	} else if (rpc_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		uint16_t old_task_dist = 0;
 		safe_unpack32(&msg_ptr->assoc_id, buffer);
 		safe_unpack32(&msg_ptr->db_index, buffer);
 		safe_unpack32(&msg_ptr->job_id, buffer);
@@ -4039,10 +4298,13 @@ slurmdbd_unpack_step_start_msg(dbd_step_start_msg_t **msg,
 		safe_unpack32(&msg_ptr->node_cnt, buffer);
 		safe_unpack_time(&msg_ptr->start_time, buffer);
 		safe_unpack_time(&msg_ptr->job_submit_time, buffer);
-		safe_unpack32(&msg_ptr->req_cpufreq, buffer);
+		safe_unpack32(&msg_ptr->req_cpufreq_min, buffer);
 		safe_unpack32(&msg_ptr->step_id, buffer);
-		safe_unpack16(&msg_ptr->task_dist, buffer);
-		safe_unpack32(&msg_ptr->total_cpus, buffer);
+		safe_unpack16(&old_task_dist, buffer);
+		msg_ptr->task_dist = task_dist_old2new(old_task_dist);
+		safe_unpack32(&count, buffer);
+		msg_ptr->tres_alloc_str = xstrdup_printf(
+			"%d=%u", TRES_CPU, count);
 		safe_unpack32(&msg_ptr->total_tasks, buffer);
 	}
 
@@ -4067,7 +4329,7 @@ extern void slurmdbd_pack_usage_msg(dbd_usage_msg_t *msg,
 	switch(type) {
 	case DBD_GET_ASSOC_USAGE:
 	case DBD_GOT_ASSOC_USAGE:
-		my_rec = slurmdb_pack_association_rec;
+		my_rec = slurmdb_pack_assoc_rec;
 		break;
 	case DBD_GET_CLUSTER_USAGE:
 	case DBD_GOT_CLUSTER_USAGE:
@@ -4101,7 +4363,7 @@ extern int slurmdbd_unpack_usage_msg(dbd_usage_msg_t **msg,
 	switch(type) {
 	case DBD_GET_ASSOC_USAGE:
 	case DBD_GOT_ASSOC_USAGE:
-		my_rec = slurmdb_unpack_association_rec;
+		my_rec = slurmdb_unpack_assoc_rec;
 		break;
 	case DBD_GET_CLUSTER_USAGE:
 	case DBD_GOT_CLUSTER_USAGE:
diff --git a/src/common/slurmdbd_defs.h b/src/common/slurmdbd_defs.h
index 3532d9e3f..d6b52efc5 100644
--- a/src/common/slurmdbd_defs.h
+++ b/src/common/slurmdbd_defs.h
@@ -58,31 +58,6 @@
 #include "src/common/list.h"
 #include "src/common/slurm_accounting_storage.h"
 
-/*
- * SLURMDBD_VERSION in 14.03 this was changed to be the same as
- * SLURM_PROTOCOL_VERSION in 14.03 we can remove all instances of
- * SLURMDBD_*VERSION* SLURMDBD_VERSION was already replaced.
- *
- * SLURMDBD_VERSION_MIN is the minimum protocol version which slurmdbd
- *	will accept. Messages being sent to the slurmdbd from commands
- *	or daemons using older versions of the protocol will be
- *	rejected. Increment this value and discard the code processing
- *	that msg_type only after all systems have been upgraded. Don't
- *	remove entries from slurmdbd_msg_type_t or the numbering scheme
- *	will break (the enum value of a msg_type would change).
- *
- * The slurmdbd should be at least as current as any Slurm cluster
- *	communicating with it (e.g. it will not accept messages with a
- *	version higher than SLURM_VERSION).
- *
- * NOTE: These values must be Moved to
- * src/plugins/accounting_storage/mysql/as_mysql_archive.c when we are
- * done here with them since we have to support old version of archive
- * files since they don't update once they are created.
- */
-#define SLURMDBD_2_6_VERSION   12	/* slurm version 2.6 */
-#define SLURMDBD_MIN_VERSION   SLURMDBD_2_6_VERSION
-
 /* SLURM DBD message types */
 /* ANY TIME YOU ADD TO THIS LIST UPDATE THE CONVERSION FUNCTIONS! */
 typedef enum {
@@ -93,7 +68,7 @@ typedef enum {
 	DBD_ADD_ASSOCS,         /* Add new association to the mix       */
 	DBD_ADD_CLUSTERS,       /* Add new cluster to the mix           */
 	DBD_ADD_USERS,          /* Add new user to the mix              */
-	DBD_CLUSTER_CPUS,	/* Record total processors on cluster	*/
+	DBD_CLUSTER_TRES,	/* Record total tres on cluster	*/
 	DBD_FLUSH_JOBS, 	/* End jobs that are still running
 				 * when a controller is restarted.	*/
 	DBD_GET_ACCOUNTS,	/* Get account information		*/
@@ -178,6 +153,9 @@ typedef enum {
 	DBD_ADD_CLUS_RES,    	/* Add cluster using a resource    	*/
 	DBD_REMOVE_CLUS_RES,   	/* Remove existing cluster resource    	*/
 	DBD_MODIFY_CLUS_RES,   	/* Modify existing cluster resource   	*/
+	DBD_ADD_TRES,         /* Add tres to the database           */
+	DBD_GET_TRES,         /* Get tres from the database         */
+	DBD_GOT_TRES,         /* Got tres from the database         */
 } slurmdbd_msg_type_t;
 
 /*****************************************************************************\
@@ -194,11 +172,11 @@ typedef struct {
 	slurmdb_user_cond_t *cond;
 } dbd_acct_coord_msg_t;
 
-typedef struct dbd_cluster_cpus_msg {
+typedef struct dbd_cluster_tres_msg {
 	char *cluster_nodes;	/* nodes in cluster */
-	uint32_t cpu_count;	/* total processor count */
 	time_t event_time;	/* time of transition */
-} dbd_cluster_cpus_msg_t;
+	char *tres_str;	        /* Simple comma separated list of TRES */
+} dbd_cluster_tres_msg_t;
 
 typedef struct {
 	void *rec; /* this could be anything based on the type types
@@ -258,7 +236,7 @@ typedef struct dbd_job_comp_msg {
 	time_t   end_time;	/* job termintation time */
 	uint32_t exit_code;	/* job exit code or signal */
 	uint32_t job_id;	/* job ID */
-	uint16_t job_state;	/* job state */
+	uint32_t job_state;	/* job state */
 	char *   nodes;		/* hosts allocated to the job */
 	uint32_t req_uid;	/* requester user ID */
 	time_t   start_time;	/* job start time */
@@ -269,7 +247,6 @@ typedef struct dbd_job_comp_msg {
 typedef struct dbd_job_start_msg {
 	char *   account;       /* Account name for those not running
 				 * with associations */
-	uint32_t alloc_cpus;	/* count of allocated processors */
 	uint32_t alloc_nodes;   /* how many nodes used in job */
 	uint32_t array_job_id;	/* job_id of a job array or 0 if N/A */
 	uint32_t array_max_tasks;/* max number of tasks able to run at once */
@@ -283,7 +260,7 @@ typedef struct dbd_job_start_msg {
 	time_t   eligible_time;	/* time job becomes eligible to run */
 	uint32_t gid;	        /* group ID */
 	uint32_t job_id;	/* job ID */
-	uint16_t job_state;	/* job state */
+	uint32_t job_state;	/* job state */
 	char *   name;		/* job name */
 	char *   nodes;		/* hosts allocated to the job */
 	char *   node_inx;      /* ranged bitmap string of hosts
@@ -304,6 +281,8 @@ typedef struct dbd_job_start_msg {
 				 * type for the entire job on all nodes. */
 	char*    gres_used;     /* String depicting the GRES actually used by
 				 * type for the entire job on all nodes. */
+	char    *tres_alloc_str;/* Simple comma separated list of TRES */
+	char    *tres_req_str;  /* Simple comma separated list of TRES */
 	char *   wckey;		/* wckey name */
 } dbd_job_start_msg_t;
 
@@ -320,7 +299,7 @@ typedef struct dbd_job_suspend_msg {
 	uint32_t db_index;	/* index into the db for this job */
 	uint32_t job_id;	/* job ID needed to find job record
 				 * in db */
-	uint16_t job_state;	/* job state */
+	uint32_t job_state;	/* job state */
 	time_t   submit_time;	/* job submit time needed to find job record
 				 * in db */
 	time_t   suspend_time;	/* job suspend or resume time */
@@ -342,7 +321,6 @@ typedef struct {
 #define DBD_NODE_STATE_DOWN  1
 #define DBD_NODE_STATE_UP    2
 typedef struct dbd_node_state_msg {
-	uint32_t cpu_count;     /* number of cpus on node */
 	time_t event_time;	/* time of transition */
 	char *hostlist;		/* name of hosts */
 	uint16_t new_state;	/* new state of host, see DBD_NODE_STATE_* */
@@ -351,6 +329,7 @@ typedef struct dbd_node_state_msg {
 				 * no reason is set. */
 	uint32_t state;         /* current state of node.  Used to get
 				   flags on the state (i.e. maintenance) */
+	char *tres_str;	        /* Simple comma separated list of TRES */
 } dbd_node_state_msg_t;
 
 typedef struct dbd_rc_msg {
@@ -395,11 +374,13 @@ typedef struct dbd_step_start_msg {
 	time_t   start_time;	/* step start time */
 	time_t   job_submit_time;/* job submit time needed to find job record
 				  * in db */
-	uint32_t req_cpufreq;   /* requested CPU frequency */
+	uint32_t req_cpufreq_min; /* requested minimum CPU frequency  */
+	uint32_t req_cpufreq_max; /* requested maximum CPU frequency  */
+	uint32_t req_cpufreq_gov; /* requested CPU frequency governor */
 	uint32_t step_id;	/* step ID */
-	uint16_t task_dist;     /* layout method of step */
-	uint32_t total_cpus;	/* count of allocated processors */
+	uint32_t task_dist;     /* layout method of step */
 	uint32_t total_tasks;	/* count of tasks for step */
+	char *tres_alloc_str;   /* Simple comma separated list of TRES */
 } dbd_step_start_msg_t;
 
 /* flag to let us know if we are running on cache or from the actual
@@ -414,14 +395,6 @@ extern pthread_cond_t assoc_cache_cond; /* assoc cache condition */
  * Slurm DBD message processing functions
 \*****************************************************************************/
 
-/* Some functions are called by the DBD as well as regular slurm
- * procedures.  In this case we need to make a way to translate the
- * DBD rpc to that of SLURM.
- * rpc_version IN - DBD rpc version
- * Returns corrisponding SLURM rpc version
- */
-extern uint16_t slurmdbd_translate_rpc(uint16_t rpc_version);
-
 /* Open a socket connection to SlurmDbd
  * auth_info IN - alternate authentication key
  * make_agent IN - make agent to process RPCs if set
@@ -432,7 +405,7 @@ extern int slurm_open_slurmdbd_conn(char *auth_info,
                                     bool rollback);
 
 /* Close the SlurmDBD socket connection */
-extern int slurm_close_slurmdbd_conn();
+extern int slurm_close_slurmdbd_conn(void);
 
 /* Send an RPC to the SlurmDBD. Do not wait for the reply. The RPC
  * will be queued and processed later if the SlurmDBD is not responding.
@@ -471,7 +444,7 @@ extern void slurmdbd_free_buffer(void *x);
  * Free various SlurmDBD message structures
 \*****************************************************************************/
 extern void slurmdbd_free_acct_coord_msg(dbd_acct_coord_msg_t *msg);
-extern void slurmdbd_free_cluster_cpus_msg(dbd_cluster_cpus_msg_t *msg);
+extern void slurmdbd_free_cluster_tres_msg(dbd_cluster_tres_msg_t *msg);
 extern void slurmdbd_free_rec_msg(dbd_rec_msg_t *msg, slurmdbd_msg_type_t type);
 extern void slurmdbd_free_cond_msg(dbd_cond_msg_t *msg,
 				   slurmdbd_msg_type_t type);
@@ -499,9 +472,9 @@ extern void slurmdbd_free_usage_msg(dbd_usage_msg_t *msg,
 extern void slurmdbd_pack_acct_coord_msg(dbd_acct_coord_msg_t *msg,
 					 uint16_t rpc_version,
 					 Buf buffer);
-extern void slurmdbd_pack_cluster_cpus_msg(dbd_cluster_cpus_msg_t *msg,
-					   uint16_t rpc_version,
-					   Buf buffer);
+extern void slurmdbd_pack_cluster_tres_msg(dbd_cluster_tres_msg_t *msg,
+					     uint16_t rpc_version,
+					     Buf buffer);
 extern void slurmdbd_pack_rec_msg(dbd_rec_msg_t *msg,
 				  uint16_t rpc_version,
 				  slurmdbd_msg_type_t type, Buf buffer);
@@ -562,9 +535,9 @@ extern void slurmdbd_pack_buffer(void *in,
 extern int slurmdbd_unpack_acct_coord_msg(dbd_acct_coord_msg_t **msg,
 					  uint16_t rpc_version,
 					  Buf buffer);
-extern int slurmdbd_unpack_cluster_cpus_msg(dbd_cluster_cpus_msg_t **msg,
-					    uint16_t rpc_version,
-					    Buf buffer);
+extern int slurmdbd_unpack_cluster_tres_msg(dbd_cluster_tres_msg_t **msg,
+					      uint16_t rpc_version,
+					      Buf buffer);
 extern int slurmdbd_unpack_rec_msg(dbd_rec_msg_t **msg,
 				   uint16_t rpc_version,
 				   slurmdbd_msg_type_t type,
diff --git a/src/common/stepd_api.c b/src/common/stepd_api.c
index 4b431aaf0..fbab63098 100644
--- a/src/common/stepd_api.c
+++ b/src/common/stepd_api.c
@@ -70,6 +70,10 @@
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
 
+strong_alias(stepd_available, slurm_stepd_available);
+strong_alias(stepd_connect, slurm_stepd_connect);
+strong_alias(stepd_get_uid, slurm_stepd_get_uid);
+
 static bool
 _slurm_authorized_user()
 {
@@ -150,7 +154,7 @@ _step_connect(const char *directory, const char *nodename,
 	char *name = NULL;
 
 	if ((fd = socket(AF_UNIX, SOCK_STREAM, 0)) < 0) {
-		error("%s: socket() failed dir %s node %s job %u step %d %m",
+		error("%s: socket() failed dir %s node %s job %u step %u %m",
 		      __func__, directory, nodename, jobid, stepid);
 		return -1;
 	}
@@ -164,7 +168,7 @@ _step_connect(const char *directory, const char *nodename,
 	len = strlen(addr.sun_path)+1 + sizeof(addr.sun_family);
 
 	if (connect(fd, (struct sockaddr *) &addr, len) < 0) {
-		error("%s: connect() failed dir %s node %s job %u step %d %m",
+		error("%s: connect() failed dir %s node %s job %u step %u %m",
 		      __func__, directory, nodename, jobid, stepid);
 		if (errno == ECONNREFUSED) {
 			_handle_stray_socket(name);
@@ -210,7 +214,7 @@ _guess_nodename()
  * Returns a socket descriptor for the opened socket on success,
  * and -1 on error.
  */
-int
+extern int
 stepd_connect(const char *directory, const char *nodename,
 	      uint32_t jobid, uint32_t stepid, uint16_t *protocol_version)
 {
@@ -238,7 +242,7 @@ stepd_connect(const char *directory, const char *nodename,
 
 	buffer = init_buf(0);
 	/* Create an auth credential */
-	auth_cred = g_slurm_auth_create(NULL, 2, NULL);
+	auth_cred = g_slurm_auth_create(NULL, 2, slurm_get_auth_info());
 	if (auth_cred == NULL) {
 		error("Creating authentication credential: %s",
 		      g_slurm_auth_errstr(g_slurm_auth_errno(NULL)));
@@ -458,12 +462,16 @@ stepd_attach(int fd, uint16_t protocol_version,
 {
 	int req = REQUEST_ATTACH;
 	int rc = SLURM_SUCCESS;
+	int proto = protocol_version;
 
 	safe_write(fd, &req, sizeof(int));
 	safe_write(fd, ioaddr, sizeof(slurm_addr_t));
 	safe_write(fd, respaddr, sizeof(slurm_addr_t));
 	safe_write(fd, job_cred_sig, SLURM_IO_KEY_SIZE);
 
+	if (SLURM_PROTOCOL_VERSION >= SLURM_15_08_PROTOCOL_VERSION)
+		safe_write(fd, &proto, sizeof(int));
+
 	/* Receive the return code */
 	safe_read(fd, &rc, sizeof(int));
 
@@ -562,7 +570,7 @@ _sockname_regex(regex_t *re, const char *filename,
  *
  * Returns a List of pointers to step_loc_t structures.
  */
-List
+extern List
 stepd_available(const char *directory, const char *nodename)
 {
 	List l;
@@ -1084,6 +1092,8 @@ rwfail:
 /*
  * Get the uid of the step
  * Returns uid of the running step if successful.  On error returns -1.
+ *
+ * FIXME: BUG: On Linux, uid_t is uint32_t but this can return -1.
  */
 extern uid_t stepd_get_uid(int fd, uint16_t protocol_version)
 {
diff --git a/src/common/stepd_api.h b/src/common/stepd_api.h
index fde67cf76..f3a1108ee 100644
--- a/src/common/stepd_api.h
+++ b/src/common/stepd_api.h
@@ -132,7 +132,7 @@ int stepd_terminate(int fd, uint16_t protocol_version);
  * and -1 on error.  Also fills in protocol_version with the version
  * of the running stepd.
  */
-int stepd_connect(const char *directory, const char *nodename,
+extern int stepd_connect(const char *directory, const char *nodename,
 		  uint32_t jobid, uint32_t stepid, uint16_t *protocol_version);
 
 /*
@@ -200,7 +200,7 @@ int stepd_attach(int fd, uint16_t protocol_version,
  *
  * Returns a List of pointers to step_loc_t structures.
  */
-List stepd_available(const char *directory, const char *nodename);
+extern List stepd_available(const char *directory, const char *nodename);
 
 /*
  * Return true if the process with process ID "pid" is found in
@@ -277,6 +277,8 @@ extern int stepd_get_mem_limits(int fd, uint16_t protocol_version,
 /*
  * Get the uid of the step
  * Returns uid of the running step if successful.  On error returns -1.
+ *
+ * FIXME: BUG: On Linux, uid_t is uint32_t but this can return -1.
  */
 extern uid_t stepd_get_uid(int fd, uint16_t protocol_version);
 
diff --git a/src/common/timers.c b/src/common/timers.c
index d5e0cf351..8f727413e 100644
--- a/src/common/timers.c
+++ b/src/common/timers.c
@@ -40,6 +40,7 @@
 #include <unistd.h>
 #include <sys/time.h>
 #include "src/common/log.h"
+#include "src/common/slurm_time.h"
 
 /*
  * slurm_diff_tv_str - build a string showing the time difference between two
@@ -51,7 +52,7 @@
  * IN from - where the function was called form
  */
 extern void slurm_diff_tv_str(struct timeval *tv1, struct timeval *tv2,
-			      char *tv_str, int len_tv_str, char *from,
+			      char *tv_str, int len_tv_str, const char *from,
 			      long limit, long *delta_t)
 {
 	char p[64] = "";
@@ -71,7 +72,7 @@ extern void slurm_diff_tv_str(struct timeval *tv1, struct timeval *tv2,
 			debug_limit = 1000000;
 		}
 		if ((*delta_t > debug_limit) || (*delta_t > limit)) {
-			if (!localtime_r(&tv1->tv_sec, &tm))
+			if (!slurm_localtime_r(&tv1->tv_sec, &tm))
 				error("localtime_r(): %m");
 			if (strftime(p, sizeof(p), "%T", &tm) == 0)
 				error("strftime(): %m");
diff --git a/src/common/timers.h b/src/common/timers.h
index dd1051049..1380481f1 100644
--- a/src/common/timers.h
+++ b/src/common/timers.h
@@ -64,7 +64,7 @@
  * OUT delta_t - raw time difference in usec
  */
 extern void slurm_diff_tv_str(struct timeval *tv1,struct timeval *tv2,
-			      char *tv_str, int len_tv_str, char *from,
+			      char *tv_str, int len_tv_str, const char *from,
 			      long limit, long *delta_t);
 
 /* Block daemon indefinitely.
diff --git a/src/common/working_cluster.c b/src/common/working_cluster.c
index 8af72ca29..77350950c 100644
--- a/src/common/working_cluster.c
+++ b/src/common/working_cluster.c
@@ -122,9 +122,6 @@ extern uint32_t slurmdb_setup_cluster_flags(void)
 #ifdef HAVE_SUN_CONST
 	cluster_flags |= CLUSTER_FLAG_SC;
 #endif
-#ifdef HAVE_XCPU
-	cluster_flags |= CLUSTER_FLAG_XCPU;
-#endif
 #ifdef HAVE_AIX
 	cluster_flags |= CLUSTER_FLAG_AIX;
 #endif
@@ -173,9 +170,6 @@ static uint32_t _str_2_cluster_flags(char *flags_in)
 	if (slurm_strcasestr(flags_in, "SunConstellation"))
 		return CLUSTER_FLAG_SC;
 
-	if (slurm_strcasestr(flags_in, "XCPU"))
-		return CLUSTER_FLAG_XCPU;
-
 	if (slurm_strcasestr(flags_in, "Cray"))
 		return CLUSTER_FLAG_CRAY_N;
 
@@ -258,12 +252,6 @@ extern char *slurmdb_cluster_flags_2_str(uint32_t flags_in)
 		xstrcat(cluster_flags, "SunConstellation");
 	}
 
-	if (flags_in & CLUSTER_FLAG_XCPU) {
-		if (cluster_flags)
-			xstrcat(cluster_flags, ",");
-		xstrcat(cluster_flags, "XCPU");
-	}
-
 	if (flags_in & CLUSTER_FLAG_CRAY_N) {
 		if (cluster_flags)
 			xstrcat(cluster_flags, ",");
@@ -275,4 +263,3 @@ extern char *slurmdb_cluster_flags_2_str(uint32_t flags_in)
 
 	return cluster_flags;
 }
-
diff --git a/src/common/xstring.c b/src/common/xstring.c
index 2e367d92b..9fe336251 100644
--- a/src/common/xstring.c
+++ b/src/common/xstring.c
@@ -63,6 +63,7 @@
 #include "slurm/slurm_errno.h"
 
 #include "src/common/macros.h"
+#include "src/common/slurm_time.h"
 #include "src/common/strlcpy.h"
 #include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
@@ -212,7 +213,7 @@ void _xstrftimecat(char **buf, const char *fmt)
 	if (time(&t) == (time_t) -1)
 		fprintf(stderr, "time() failed\n");
 
-	if (!localtime_r(&t, &tm))
+	if (!slurm_localtime_r(&t, &tm))
 		fprintf(stderr, "localtime_r() failed\n");
 
 	strftime(p, sizeof(p), fmt, &tm);
@@ -233,7 +234,7 @@ void _xiso8601timecat(char **buf, bool msec)
 	if (gettimeofday(&tv, NULL) == -1)
 		fprintf(stderr, "gettimeofday() failed\n");
 
-	if (!localtime_r(&tv.tv_sec, &tm))
+	if (!slurm_localtime_r(&tv.tv_sec, &tm))
 		fprintf(stderr, "localtime_r() failed\n");
 
 	if (strftime(p, sizeof(p), "%Y-%m-%dT%T", &tm) == 0)
@@ -259,7 +260,7 @@ void _xrfc5424timecat(char **buf, bool msec)
 	if (gettimeofday(&tv, NULL) == -1)
 		fprintf(stderr, "gettimeofday() failed\n");
 
-	if (!localtime_r(&tv.tv_sec, &tm))
+	if (!slurm_localtime_r(&tv.tv_sec, &tm))
 		fprintf(stderr, "localtime_r() failed\n");
 
 	if (strftime(p, sizeof(p), "%Y-%m-%dT%T", &tm) == 0)
@@ -431,17 +432,17 @@ long int xstrntol(const char *str, char **endptr, size_t n, int base)
  *   pattern (IN)	substring to look for in str
  *   replacement (IN)   string with which to replace the "pattern" string
  */
-void _xstrsubstitute(char **str, const char *pattern, const char *replacement)
+bool _xstrsubstitute(char **str, const char *pattern, const char *replacement)
 {
 	int pat_len, rep_len;
 	char *ptr, *end_copy;
 	int pat_offset;
 
 	if (*str == NULL || pattern == NULL || pattern[0] == '\0')
-		return;
+		return 0;
 
 	if ((ptr = strstr(*str, pattern)) == NULL)
-		return;
+		return 0;
 	pat_offset = ptr - (*str);
 	pat_len = strlen(pattern);
 	if (replacement == NULL)
@@ -456,6 +457,8 @@ void _xstrsubstitute(char **str, const char *pattern, const char *replacement)
 	}
 	strcpy((*str)+pat_offset+rep_len, end_copy);
 	xfree(end_copy);
+
+	return 1;
 }
 
 /*
diff --git a/src/common/xstring.h b/src/common/xstring.h
index 502a84916..3223f7014 100644
--- a/src/common/xstring.h
+++ b/src/common/xstring.h
@@ -52,6 +52,7 @@
 #define xstrfmtcat(__p, __fmt, args...)	_xstrfmtcat(&(__p), __fmt, ## args)
 #define xmemcat(__p, __s, __e)          _xmemcat(&(__p), __s, __e)
 #define xstrsubstitute(__p, __pat, __rep) _xstrsubstitute(&(__p), __pat, __rep)
+#define xstrsubstituteall(__p, __pat, __rep) while (_xstrsubstitute(&(__p), __pat, __rep));
 
 /*
 ** The following functions take a ptr to a string and expand the
@@ -142,8 +143,9 @@ char *xbasename(char *path);
 /*
 ** Find the first instance of a sub-string "pattern" in the string "str",
 ** and replace it with the string "replacement".
+** If it wasn't found returns 0, otherwise 1
 */
-void _xstrsubstitute(char **str, const char *pattern, const char *replacement);
+bool _xstrsubstitute(char **str, const char *pattern, const char *replacement);
 
 /*
  * Remove all quotes that surround a string in the string "str",
diff --git a/src/database/Makefile.in b/src/database/Makefile.in
index e1cb1819f..54ee8e349 100644
--- a/src/database/Makefile.in
+++ b/src/database/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -254,6 +257,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -303,8 +308,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -323,6 +332,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -366,6 +378,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -389,6 +402,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/database/mysql_common.c b/src/database/mysql_common.c
index 4f55b5af9..fc1e9770e 100644
--- a/src/database/mysql_common.c
+++ b/src/database/mysql_common.c
@@ -264,8 +264,8 @@ static int _mysql_make_table_current(mysql_conn_t *mysql_conn, char *table_name,
 
 
 	itr = list_iterator_create(columns);
-	query = xstrdup_printf("alter table %s", table_name);
-	correct_query = xstrdup_printf("alter table %s", table_name);
+	query = xstrdup_printf("alter ignore table %s", table_name);
+	correct_query = xstrdup_printf("alter ignore table %s", table_name);
 	START_TIMER;
 	while (fields[i].name) {
 		int found = 0;
@@ -323,7 +323,7 @@ static int _mysql_make_table_current(mysql_conn_t *mysql_conn, char *table_name,
 	}
 
 	list_iterator_destroy(itr);
-	list_destroy(columns);
+	FREE_NULL_LIST(columns);
 
 	if ((temp = strstr(ending, "primary key ("))) {
 		int open = 0, close =0;
@@ -439,7 +439,7 @@ static int _mysql_make_table_current(mysql_conn_t *mysql_conn, char *table_name,
 	}
 	list_iterator_destroy(itr);
 
-	list_destroy(keys_list);
+	FREE_NULL_LIST(keys_list);
 
 	query[strlen(query)-1] = ';';
 	correct_query[strlen(correct_query)-1] = ';';
@@ -597,7 +597,7 @@ extern int destroy_mysql_conn(mysql_conn_t *mysql_conn)
 		xfree(mysql_conn->pre_commit_query);
 		xfree(mysql_conn->cluster_name);
 		slurm_mutex_destroy(&mysql_conn->lock);
-		list_destroy(mysql_conn->update_list);
+		FREE_NULL_LIST(mysql_conn->update_list);
 		xfree(mysql_conn);
 	}
 
diff --git a/src/db_api/Makefile.am b/src/db_api/Makefile.am
index 84f142512..71a050174 100644
--- a/src/db_api/Makefile.am
+++ b/src/db_api/Makefile.am
@@ -67,6 +67,7 @@ libslurmdb_la_SOURCES =	\
 	qos_functions.c \
 	resource_functions.c \
 	resv_report_functions.c \
+	tres_functions.c \
 	usage_functions.c \
 	user_functions.c \
 	user_report_functions.c \
diff --git a/src/db_api/Makefile.in b/src/db_api/Makefile.in
index 126ce407f..407168c40 100644
--- a/src/db_api/Makefile.in
+++ b/src/db_api/Makefile.in
@@ -103,6 +103,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -111,10 +112,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -127,7 +130,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -170,8 +173,8 @@ am_libslurmdb_la_OBJECTS = account_functions.lo archive_functions.lo \
 	cluster_report_functions.lo coord_functions.lo \
 	connection_functions.lo extra_get_functions.lo \
 	job_report_functions.lo qos_functions.lo resource_functions.lo \
-	resv_report_functions.lo usage_functions.lo user_functions.lo \
-	user_report_functions.lo wckey_functions.lo
+	resv_report_functions.lo tres_functions.lo usage_functions.lo \
+	user_functions.lo user_report_functions.lo wckey_functions.lo
 libslurmdb_la_OBJECTS = $(am_libslurmdb_la_OBJECTS)
 AM_V_lt = $(am__v_lt_@AM_V@)
 am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
@@ -286,6 +289,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -336,8 +341,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -356,6 +365,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -399,6 +411,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -422,6 +435,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -537,6 +551,7 @@ libslurmdb_la_SOURCES = \
 	qos_functions.c \
 	resource_functions.c \
 	resv_report_functions.c \
+	tres_functions.c \
 	usage_functions.c \
 	user_functions.c \
 	user_report_functions.c \
@@ -660,6 +675,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/qos_functions.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/resource_functions.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/resv_report_functions.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tres_functions.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/usage_functions.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/user_functions.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/user_report_functions.Plo@am__quote@
diff --git a/src/db_api/assoc_functions.c b/src/db_api/assoc_functions.c
index 87d19ac44..d0e61d656 100644
--- a/src/db_api/assoc_functions.c
+++ b/src/db_api/assoc_functions.c
@@ -49,49 +49,49 @@
 
 /*
  * add associations to accounting system
- * IN:  association_list List of slurmdb_association_rec_t *
+ * IN:  assoc_list List of slurmdb_assoc_rec_t *
  * RET: SLURM_SUCCESS on success SLURM_ERROR else
  */
 extern int slurmdb_associations_add(void *db_conn, List assoc_list)
 {
-	return acct_storage_g_add_associations(db_conn, getuid(), assoc_list);
+	return acct_storage_g_add_assocs(db_conn, getuid(), assoc_list);
 }
 
 /*
  * get info from the storage
- * IN:  slurmdb_association_cond_t *
- * RET: List of slurmdb_association_rec_t *
+ * IN:  slurmdb_assoc_cond_t *
+ * RET: List of slurmdb_assoc_rec_t *
  * note List needs to be freed when called
  */
 extern List slurmdb_associations_get(void *db_conn,
-				     slurmdb_association_cond_t *assoc_cond)
+				     slurmdb_assoc_cond_t *assoc_cond)
 {
-	return acct_storage_g_get_associations(db_conn, getuid(), assoc_cond);
+	return acct_storage_g_get_assocs(db_conn, getuid(), assoc_cond);
 }
 
 
 /*
  * modify existing associations in the accounting system
- * IN:  slurmdb_association_cond_t *assoc_cond
- * IN:  slurmdb_association_rec_t *assoc
+ * IN:  slurmdb_assoc_cond_t *assoc_cond
+ * IN:  slurmdb_assoc_rec_t *assoc
  * RET: List containing (char *'s) else NULL on error
  */
 extern List slurmdb_associations_modify(void *db_conn,
-					slurmdb_association_cond_t *assoc_cond,
-					slurmdb_association_rec_t *assoc)
+					slurmdb_assoc_cond_t *assoc_cond,
+					slurmdb_assoc_rec_t *assoc)
 {
-	return acct_storage_g_modify_associations(db_conn, getuid(),
-						  assoc_cond, assoc);
+	return acct_storage_g_modify_assocs(db_conn, getuid(),
+					    assoc_cond, assoc);
 }
 
 /*
  * remove associations from accounting system
- * IN:  slurmdb_association_cond_t *assoc_cond
+ * IN:  slurmdb_assoc_cond_t *assoc_cond
  * RET: List containing (char *'s) else NULL on error
  */
 extern List slurmdb_associations_remove(
-	void *db_conn, slurmdb_association_cond_t *assoc_cond)
+	void *db_conn, slurmdb_assoc_cond_t *assoc_cond)
 {
-	return acct_storage_g_remove_associations(db_conn, getuid(), assoc_cond);
+	return acct_storage_g_remove_assocs(db_conn, getuid(), assoc_cond);
 }
 
diff --git a/src/db_api/cluster_report_functions.c b/src/db_api/cluster_report_functions.c
index 2ad5b4c50..cdba1a579 100644
--- a/src/db_api/cluster_report_functions.c
+++ b/src/db_api/cluster_report_functions.c
@@ -55,11 +55,10 @@ typedef enum {
 	CLUSTER_REPORT_WU
 } cluster_report_t;
 
-static void _process_ua(List user_list, slurmdb_association_rec_t *assoc)
+static void _process_ua(List user_list, slurmdb_assoc_rec_t *assoc)
 {
 	ListIterator itr = NULL;
 	slurmdb_report_user_rec_t *slurmdb_report_user = NULL;
-	slurmdb_accounting_rec_t *accting = NULL;
 
 	/* make sure we add all associations to this
 	   user rec because we could have some in
@@ -93,24 +92,17 @@ static void _process_ua(List user_list, slurmdb_association_rec_t *assoc)
 
 		list_append(user_list, slurmdb_report_user);
 	}
+
 	/* get the amount of time this assoc used
 	   during the time we are looking at */
-	itr = list_iterator_create(assoc->accounting_list);
-	while((accting = list_next(itr))) {
-		slurmdb_report_user->cpu_secs +=
-			(uint64_t)accting->alloc_secs;
-		slurmdb_report_user->consumed_energy +=
-			(uint64_t)accting->consumed_energy;
-	}
-	list_iterator_destroy(itr);
+	slurmdb_transfer_acct_list_2_tres(assoc->accounting_list,
+					  &slurmdb_report_user->tres_list);
 }
 
-static void _process_au(List assoc_list, slurmdb_association_rec_t *assoc)
+static void _process_au(List assoc_list, slurmdb_assoc_rec_t *assoc)
 {
 	slurmdb_report_assoc_rec_t *slurmdb_report_assoc =
 		xmalloc(sizeof(slurmdb_report_assoc_rec_t));
-	ListIterator itr = NULL;
-	slurmdb_accounting_rec_t *accting = NULL;
 
 	list_append(assoc_list, slurmdb_report_assoc);
 
@@ -121,22 +113,13 @@ static void _process_au(List assoc_list, slurmdb_association_rec_t *assoc)
 
 	/* get the amount of time this assoc used
 	   during the time we are looking at */
-	itr = list_iterator_create(assoc->accounting_list);
-	while((accting = list_next(itr))) {
-		slurmdb_report_assoc->cpu_secs +=
-			(uint64_t)accting->alloc_secs;
-		slurmdb_report_assoc->consumed_energy +=
-			(uint64_t)accting->consumed_energy;
-	}
-	list_iterator_destroy(itr);
-
+	slurmdb_transfer_acct_list_2_tres(assoc->accounting_list,
+					  &slurmdb_report_assoc->tres_list);
 }
 
 static void _process_uw(List user_list, slurmdb_wckey_rec_t *wckey)
 {
-	ListIterator itr = NULL;
 	slurmdb_report_user_rec_t *slurmdb_report_user = NULL;
-	slurmdb_accounting_rec_t *accting = NULL;
 	struct passwd *passwd_ptr = NULL;
 	uid_t uid = NO_VAL;
 
@@ -158,14 +141,8 @@ static void _process_uw(List user_list, slurmdb_wckey_rec_t *wckey)
 
 	/* get the amount of time this wckey used
 	   during the time we are looking at */
-	itr = list_iterator_create(wckey->accounting_list);
-	while((accting = list_next(itr))) {
-		slurmdb_report_user->cpu_secs +=
-			(uint64_t)accting->alloc_secs;
-		slurmdb_report_user->consumed_energy +=
-			(uint64_t)accting->consumed_energy;
-	}
-	list_iterator_destroy(itr);
+	slurmdb_transfer_acct_list_2_tres(wckey->accounting_list,
+					  &slurmdb_report_user->tres_list);
 }
 
 static void _process_wu(List assoc_list, slurmdb_wckey_rec_t *wckey)
@@ -173,7 +150,6 @@ static void _process_wu(List assoc_list, slurmdb_wckey_rec_t *wckey)
 	slurmdb_report_assoc_rec_t *slurmdb_report_assoc = NULL,
 		*parent_assoc = NULL;
 	ListIterator itr = NULL;
-	slurmdb_accounting_rec_t *accting = NULL;
 
 	/* find the parent */
 	itr = list_iterator_create(assoc_list);
@@ -200,18 +176,10 @@ static void _process_wu(List assoc_list, slurmdb_wckey_rec_t *wckey)
 
 	/* get the amount of time this wckey used
 	   during the time we are looking at */
-	itr = list_iterator_create(wckey->accounting_list);
-	while((accting = list_next(itr))) {
-		slurmdb_report_assoc->cpu_secs +=
-			(uint64_t)accting->alloc_secs;
-		parent_assoc->cpu_secs +=
-			(uint64_t)accting->alloc_secs;
-		slurmdb_report_assoc->consumed_energy +=
-			(uint64_t)accting->consumed_energy;
-		parent_assoc->consumed_energy +=
-			(uint64_t)accting->consumed_energy;
-	}
-	list_iterator_destroy(itr);
+	slurmdb_transfer_acct_list_2_tres(wckey->accounting_list,
+					  &slurmdb_report_assoc->tres_list);
+	slurmdb_transfer_acct_list_2_tres(wckey->accounting_list,
+					  &parent_assoc->tres_list);
 }
 
 static void _process_assoc_type(
@@ -220,7 +188,7 @@ static void _process_assoc_type(
 	char *cluster_name,
 	cluster_report_t type)
 {
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 
 	/* now add the associations of interest here by user */
 	while((assoc = list_next(itr))) {
@@ -298,11 +266,11 @@ static List _process_util_by_report(void *db_conn, char *calling_name,
 	cluster_cond.with_deleted = 1;
 	cluster_cond.with_usage = 1;
 	if ((type == CLUSTER_REPORT_UA) || (type == CLUSTER_REPORT_AU)) {
-		start_time = ((slurmdb_association_cond_t *)cond)->usage_start;
-		end_time = ((slurmdb_association_cond_t *)cond)->usage_end;
+		start_time = ((slurmdb_assoc_cond_t *)cond)->usage_start;
+		end_time = ((slurmdb_assoc_cond_t *)cond)->usage_end;
 
 		cluster_cond.cluster_list =
-			((slurmdb_association_cond_t *)cond)->cluster_list;
+			((slurmdb_assoc_cond_t *)cond)->cluster_list;
 	} else if ((type == CLUSTER_REPORT_UW) || (type == CLUSTER_REPORT_WU)) {
 		start_time = ((slurmdb_wckey_cond_t *)cond)->usage_start;
 		end_time = ((slurmdb_wckey_cond_t *)cond)->usage_end;
@@ -334,9 +302,9 @@ static List _process_util_by_report(void *db_conn, char *calling_name,
 	}
 
 	if ((type == CLUSTER_REPORT_UA) || (type == CLUSTER_REPORT_AU)) {
-		((slurmdb_association_cond_t *)cond)->usage_start = start_time;
-		((slurmdb_association_cond_t *)cond)->usage_end = end_time;
-		type_list = acct_storage_g_get_associations(
+		((slurmdb_assoc_cond_t *)cond)->usage_start = start_time;
+		((slurmdb_assoc_cond_t *)cond)->usage_end = end_time;
+		type_list = acct_storage_g_get_assocs(
 			db_conn, my_uid, cond);
 	} else if ((type == CLUSTER_REPORT_UW) || (type == CLUSTER_REPORT_WU)) {
 		((slurmdb_wckey_cond_t *)cond)->usage_start = start_time;
@@ -392,35 +360,17 @@ static List _process_util_by_report(void *db_conn, char *calling_name,
 	list_iterator_destroy(itr);
 
 end_it:
-
-	if (type_list) {
-		list_destroy(type_list);
-		type_list = NULL;
-	}
-
-	if (first_list) {
-		list_destroy(first_list);
-		first_list = NULL;
-	}
-
-	if (cluster_list) {
-		list_destroy(cluster_list);
-		cluster_list = NULL;
-	}
-
-	if (exit_code) {
-		if (ret_list) {
-			list_destroy(ret_list);
-			ret_list = NULL;
-		}
-	}
-
+	FREE_NULL_LIST(type_list);
+	FREE_NULL_LIST(first_list);
+	FREE_NULL_LIST(cluster_list);
+	if (exit_code)
+		FREE_NULL_LIST(ret_list);
 	return ret_list;
 }
 
 
 extern List slurmdb_report_cluster_account_by_user(void *db_conn,
-	slurmdb_association_cond_t *assoc_cond)
+	slurmdb_assoc_cond_t *assoc_cond)
 {
 	return _process_util_by_report(db_conn,
 				       "slurmdb_report_cluster_account_by_user",
@@ -428,7 +378,7 @@ extern List slurmdb_report_cluster_account_by_user(void *db_conn,
 }
 
 extern List slurmdb_report_cluster_user_by_account(void *db_conn,
-	slurmdb_association_cond_t *assoc_cond)
+	slurmdb_assoc_cond_t *assoc_cond)
 {
 	return _process_util_by_report(db_conn,
 				       "slurmdb_report_cluster_user_by_account",
diff --git a/src/db_api/extra_get_functions.c b/src/db_api/extra_get_functions.c
index 03317aa85..22ebd585d 100644
--- a/src/db_api/extra_get_functions.c
+++ b/src/db_api/extra_get_functions.c
@@ -92,12 +92,12 @@ extern List slurmdb_jobs_get(void *db_conn, slurmdb_job_cond_t *job_cond)
 
 /*
  * get info from the storage
- * IN:  slurmdb_association_cond_t *
- * RET: List of slurmdb_association_rec_t *
+ * IN:  slurmdb_assoc_cond_t *
+ * RET: List of slurmdb_assoc_rec_t *
  * note List needs to be freed when called
  */
 extern List slurmdb_problems_get(void *db_conn,
-				 slurmdb_association_cond_t *assoc_cond)
+				 slurmdb_assoc_cond_t *assoc_cond)
 {
 	return acct_storage_g_get_problems(db_conn, getuid(), assoc_cond);
 }
diff --git a/src/db_api/job_report_functions.c b/src/db_api/job_report_functions.c
index 6bdc9cdd6..f12523968 100644
--- a/src/db_api/job_report_functions.c
+++ b/src/db_api/job_report_functions.c
@@ -70,7 +70,7 @@ static void _check_create_grouping(
 {
 	ListIterator itr;
 	slurmdb_wckey_rec_t *wckey = (slurmdb_wckey_rec_t *)object;
-	slurmdb_association_rec_t *assoc = (slurmdb_association_rec_t *)object;
+	slurmdb_assoc_rec_t *assoc = (slurmdb_assoc_rec_t *)object;
 	slurmdb_report_cluster_grouping_t *cluster_group = NULL;
 	slurmdb_report_acct_grouping_t *acct_group = NULL;
 	slurmdb_report_job_grouping_t *job_group = NULL;
@@ -92,7 +92,7 @@ static void _check_create_grouping(
 	}
 
 	itr = list_iterator_create(cluster_group->acct_list);
-	while((acct_group = list_next(itr))) {
+	while ((acct_group = list_next(itr))) {
 		if (!strcmp(name, acct_group->acct))
 			break;
 	}
@@ -143,6 +143,7 @@ static void _check_create_grouping(
 	}
 }
 
+/* FIXME: This only works for CPUS now */
 static List _process_grouped_report(
 	void *db_conn, slurmdb_job_cond_t *job_cond, List grouping_list,
 	bool flat_view, bool wckey_type, bool both)
@@ -169,6 +170,7 @@ static List _process_grouped_report(
 	bool destroy_job_cond = 0;
 	bool destroy_grouping_list = 0;
 	bool individual = 0;
+	uint32_t tres_id = TRES_CPU;
 
 	uid_t my_uid = getuid();
 
@@ -203,14 +205,23 @@ static List _process_grouped_report(
 	/* make a group for each job size we find. */
 	if (!list_count(grouping_list)) {
 		char *group = NULL;
-		char *tmp = NULL;
+
 		individual = 1;
 		itr = list_iterator_create(job_list);
-		while((job = list_next(itr))) {
-			if (!job->elapsed || !job->alloc_cpus)
+		while ((job = list_next(itr))) {
+			char *tmp = NULL;
+			uint64_t count;
+
+			if (!job->elapsed)
 				continue;
-			tmp = xstrdup_printf("%u", job->alloc_cpus);
-			while((group = list_next(group_itr))) {
+
+			if (!(count = slurmdb_find_tres_count_in_string(
+				      job->tres_alloc_str, tres_id)))
+				continue;
+
+			tmp = xstrdup_printf("%"PRIu64, count);
+
+			while ((group = list_next(group_itr))) {
 				if (!strcmp(group, tmp)) {
 					break;
 				}
@@ -233,20 +244,19 @@ static List _process_grouped_report(
 		goto no_objects;
 
 	if (!wckey_type || both) {
-		slurmdb_association_cond_t assoc_cond;
-		memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+		slurmdb_assoc_cond_t assoc_cond;
+		memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 		assoc_cond.id_list = job_cond->associd_list;
 		assoc_cond.cluster_list = job_cond->cluster_list;
 		/* don't limit associations to having the partition_list */
 		//assoc_cond.partition_list = job_cond->partition_list;
 		if (!job_cond->acct_list || !list_count(job_cond->acct_list)) {
-			if (job_cond->acct_list)
-				list_destroy(job_cond->acct_list);
+			FREE_NULL_LIST(job_cond->acct_list);
 			job_cond->acct_list = list_create(NULL);
 			list_append(job_cond->acct_list, "root");
 		}
 		assoc_cond.parent_acct_list = job_cond->acct_list;
-		object_list = acct_storage_g_get_associations(db_conn, my_uid,
+		object_list = acct_storage_g_get_assocs(db_conn, my_uid,
 							      &assoc_cond);
 	}
 
@@ -272,11 +282,11 @@ static List _process_grouped_report(
 	itr = list_iterator_create(object_list);
 	if (object2_list)
 		itr2 = list_iterator_create(object2_list);
-	while((object = list_next(itr))) {
+	while ((object = list_next(itr))) {
 		char *cluster = NULL;
 		slurmdb_wckey_rec_t *wckey = (slurmdb_wckey_rec_t *)object;
-		slurmdb_association_rec_t *assoc =
-			(slurmdb_association_rec_t *)object;
+		slurmdb_assoc_rec_t *assoc =
+			(slurmdb_assoc_rec_t *)object;
 		if (!itr2) {
 			char *name = NULL;
 			if (wckey_type) {
@@ -292,11 +302,11 @@ static List _process_grouped_report(
 			continue;
 		}
 
-		while((object2 = list_next(itr2))) {
+		while ((object2 = list_next(itr2))) {
 			slurmdb_wckey_rec_t *wckey2 =
 				(slurmdb_wckey_rec_t *)object2;
-			slurmdb_association_rec_t *assoc2 =
-				(slurmdb_association_rec_t *)object2;
+			slurmdb_assoc_rec_t *assoc2 =
+				(slurmdb_assoc_rec_t *)object2;
 			char name[200];
 			if (!wckey_type) {
 				if (strcmp(assoc->cluster, wckey2->cluster))
@@ -456,20 +466,29 @@ no_objects:
 		}
 
 		local_itr = list_iterator_create(acct_group->groups);
-		while((job_group = list_next(local_itr))) {
-			uint64_t total_secs = 0;
-			if ((job->alloc_cpus < job_group->min_size)
-			   || (job->alloc_cpus > job_group->max_size))
+		while ((job_group = list_next(local_itr))) {
+			uint64_t count;
+
+			if (!(count = slurmdb_find_tres_count_in_string(
+				      job->tres_alloc_str, tres_id)) ||
+			    (count < job_group->min_size) ||
+			    (count > job_group->max_size))
 				continue;
+
 			list_append(job_group->jobs, job);
 			job_group->count++;
 			acct_group->count++;
 			cluster_group->count++;
-			total_secs = (uint64_t)job->elapsed
-				* (uint64_t)job->alloc_cpus;
-			job_group->cpu_secs += total_secs;
-			acct_group->cpu_secs += total_secs;
-			cluster_group->cpu_secs += total_secs;
+
+			slurmdb_transfer_tres_time(
+				&job_group->tres_list, job->tres_alloc_str,
+				job->elapsed);
+			slurmdb_transfer_tres_time(
+				&acct_group->tres_list, job->tres_alloc_str,
+				job->elapsed);
+			slurmdb_transfer_tres_time(
+				&cluster_group->tres_list, job->tres_alloc_str,
+				job->elapsed);
 		}
 		list_iterator_destroy(local_itr);
 	}
@@ -494,23 +513,18 @@ no_objects:
 	list_iterator_destroy(cluster_itr);
 
 end_it:
-	if (object_list)
-		list_destroy(object_list);
+	FREE_NULL_LIST(object_list);
 
-	if (object2_list)
-		list_destroy(object2_list);
+	FREE_NULL_LIST(object2_list);
 
 	if (destroy_job_cond)
 		slurmdb_destroy_job_cond(job_cond);
 
-	if (destroy_grouping_list && grouping_list)
-		list_destroy(grouping_list);
+	if (destroy_grouping_list)
+		FREE_NULL_LIST(grouping_list);
 
 	if (exit_code) {
-		if (cluster_list) {
-			list_destroy(cluster_list);
-			cluster_list = NULL;
-		}
+		FREE_NULL_LIST(cluster_list);
 	}
 
 	return cluster_list;
diff --git a/src/plugins/slurmctld/dynalloc/deallocate.h b/src/db_api/tres_functions.c
similarity index 63%
rename from src/plugins/slurmctld/dynalloc/deallocate.h
rename to src/db_api/tres_functions.c
index d71a18eed..f9cbcefc5 100644
--- a/src/plugins/slurmctld/dynalloc/deallocate.h
+++ b/src/db_api/tres_functions.c
@@ -1,9 +1,11 @@
 /*****************************************************************************\
- *  deallocate.h  - complete job resource allocation
- *****************************************************************************
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Written by Jimmy Cao <Jimmy.Cao@emc.com>, Ralph Castain <rhc@open-mpi.org>
- *  All rights reserved.
+ *  tres_functions.c - Interface to functions dealing with tres
+ *                        in the database.
+ ******************************************************************************
+ *  Copyright (C) 2010 Lawrence Livermore National Security.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Danny Auble da@llnl.gov, et. al.
+ *  CODE-OCEC-09-009. All rights reserved.
  *
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://slurm.schedmd.com/>.
@@ -35,49 +37,33 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#ifndef DYNALLOC_DEALLOCATE_H_
-#define DYNALLOC_DEALLOCATE_H_
-
-#if HAVE_CONFIG_H
+#ifdef HAVE_CONFIG_H
 #  include "config.h"
-#  if HAVE_INTTYPES_H
-#    include <inttypes.h>
-#  else
-#    if HAVE_STDINT_H
-#      include <stdint.h>
-#    endif
-#  endif  /* HAVE_INTTYPES_H */
-#else   /* !HAVE_CONFIG_H */
-#  include <inttypes.h>
-#endif  /*  HAVE_CONFIG_H */
-
-#ifdef HAVE_SYS_TYPES_H
-#include <sys/types.h>
 #endif
 
-#include "msg.h"
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+#include "slurm/slurmdb.h"
 
-/**
- * deallocate the resources for slurm jobs.
- *
- * the deallocate msg can be like "deallocate slurm_jobid=123
- * job_return_code=0:slurm_jobid=124 job_return_code=0"
- *
- * IN:
- *	msg: the deallocate msg
- *
- */
-extern void deallocate(const char *msg);
+#include "src/common/slurm_accounting_storage.h"
 
-/**
- * deallocate the ports for a slurm job.
- *
- * deallocate the ports and remove the entry from List.
- *
- * IN:
- *	slurm_jobid: slurm jobid
- *
+/*
+ * add tres's to accounting system
+ * IN:  tres_list List of char *
+ * RET: SLURM_SUCCESS on success SLURM_ERROR else
  */
-extern void deallocate_port(uint32_t slurm_jobid);
+extern int slurmdb_tres_add(void *db_conn, uint32_t uid, List tres_list)
+{
+	return acct_storage_g_add_tres(db_conn, getuid(), tres_list);
+}
 
-#endif /* DYNALLOC_DEALLOCATE_H_ */
+/*
+ * get info from the storage
+ * IN:  slurmdb_tres_cond_t *
+ * RET: List of slurmdb_tres_rec_t *
+ * note List needs to be freed with slurm_list_destroy() when called
+ */
+extern List slurmdb_tres_get(void *db_conn, slurmdb_tres_cond_t *tres_cond)
+{
+	return acct_storage_g_get_tres(db_conn, getuid(), tres_cond);
+}
diff --git a/src/db_api/usage_functions.c b/src/db_api/usage_functions.c
index 5ae9a9eee..56e740823 100644
--- a/src/db_api/usage_functions.c
+++ b/src/db_api/usage_functions.c
@@ -49,7 +49,7 @@
 
 /*
  * get info from the storage
- * IN/OUT:  in void * (slurmdb_association_rec_t *) or
+ * IN/OUT:  in void * (slurmdb_assoc_rec_t *) or
  *          (slurmdb_wckey_rec_t *) of (slurmdb_cluster_rec_t *) with
  *          the id, and cluster set.
  * IN:  type what type is 'in'
diff --git a/src/db_api/user_report_functions.c b/src/db_api/user_report_functions.c
index fa855ff83..63e453c24 100644
--- a/src/db_api/user_report_functions.c
+++ b/src/db_api/user_report_functions.c
@@ -64,8 +64,7 @@ extern List slurmdb_report_user_top_usage(void *db_conn,
 	int exit_code = 0;
 	slurmdb_user_rec_t *user = NULL;
 	slurmdb_cluster_rec_t *cluster = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
-	slurmdb_accounting_rec_t *assoc_acct = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	slurmdb_report_user_rec_t *slurmdb_report_user = NULL;
 	slurmdb_report_cluster_rec_t *slurmdb_report_cluster = NULL;
 	uid_t my_uid = getuid();
@@ -81,7 +80,7 @@ extern List slurmdb_report_user_top_usage(void *db_conn,
 	if (!user_cond->assoc_cond) {
 		delete_assoc_cond = 1;
 		user_cond->assoc_cond =
-			xmalloc(sizeof(slurmdb_association_cond_t));
+			xmalloc(sizeof(slurmdb_assoc_cond_t));
 	}
 
 	if (!user_cond->assoc_cond->cluster_list) {
@@ -149,7 +148,7 @@ extern List slurmdb_report_user_top_usage(void *db_conn,
 			list_create(slurmdb_destroy_report_user_rec);
 	}
 	list_iterator_destroy(itr);
-	list_destroy(usage_cluster_list);
+	FREE_NULL_LIST(usage_cluster_list);
 
 	itr = list_iterator_create(user_list);
 	cluster_itr = list_iterator_create(cluster_list);
@@ -252,16 +251,9 @@ extern List slurmdb_report_user_top_usage(void *db_conn,
 			if (!object)
 				list_append(slurmdb_report_user->acct_list,
 					    xstrdup(assoc->acct));
-			itr3 = list_iterator_create(assoc->accounting_list);
-			while((assoc_acct = list_next(itr3))) {
-				slurmdb_report_user->cpu_secs +=
-					(uint64_t)assoc_acct->alloc_secs;
-				slurmdb_report_user->consumed_energy +=
-					(uint64_t)assoc_acct->consumed_energy;
-/* 				slurmdb_report_cluster->cpu_secs +=  */
-/* 					(uint64_t)assoc_acct->alloc_secs; */
-			}
-			list_iterator_destroy(itr3);
+			slurmdb_transfer_acct_list_2_tres(
+				assoc->accounting_list,
+				&slurmdb_report_user->tres_list);
 		}
 		list_iterator_destroy(itr2);
 	}
@@ -270,12 +262,12 @@ extern List slurmdb_report_user_top_usage(void *db_conn,
 
 end_it:
 	if (delete_cluster_list) {
-		list_destroy(user_cond->assoc_cond->cluster_list);
+		FREE_NULL_LIST(user_cond->assoc_cond->cluster_list);
 		user_cond->assoc_cond->cluster_list = NULL;
 	}
 
 	if (delete_assoc_cond) {
-		slurmdb_destroy_association_cond(user_cond->assoc_cond);
+		slurmdb_destroy_assoc_cond(user_cond->assoc_cond);
 		user_cond->assoc_cond = NULL;
 	}
 
@@ -284,16 +276,10 @@ end_it:
 		user_cond = NULL;
 	}
 
-	if (user_list) {
-		list_destroy(user_list);
-		user_list = NULL;
-	}
+	FREE_NULL_LIST(user_list);
 
 	if (exit_code) {
-		if (cluster_list) {
-			list_destroy(cluster_list);
-			cluster_list = NULL;
-		}
+		FREE_NULL_LIST(cluster_list);
 	}
 
 	return cluster_list;
diff --git a/src/layouts/Makefile.am b/src/layouts/Makefile.am
new file mode 100644
index 000000000..d9d8a83ba
--- /dev/null
+++ b/src/layouts/Makefile.am
@@ -0,0 +1,4 @@
+# Makefile for layouts plugins
+
+SUBDIRS = unit power
+#basic racking energy power topology
diff --git a/src/layouts/Makefile.in b/src/layouts/Makefile.in
new file mode 100644
index 000000000..561495098
--- /dev/null
+++ b/src/layouts/Makefile.in
@@ -0,0 +1,779 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for layouts plugins
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/layouts
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+	ctags-recursive dvi-recursive html-recursive info-recursive \
+	install-data-recursive install-dvi-recursive \
+	install-exec-recursive install-html-recursive \
+	install-info-recursive install-pdf-recursive \
+	install-ps-recursive install-recursive installcheck-recursive \
+	installdirs-recursive pdf-recursive ps-recursive \
+	tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive	\
+  distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+  $(RECURSIVE_TARGETS) \
+  $(RECURSIVE_CLEAN_TARGETS) \
+  $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+	distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+  dir0=`pwd`; \
+  sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+  sed_rest='s,^[^/]*/*,,'; \
+  sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+  sed_butlast='s,/*[^/]*$$,,'; \
+  while test -n "$$dir1"; do \
+    first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+    if test "$$first" != "."; then \
+      if test "$$first" = ".."; then \
+        dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+        dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+      else \
+        first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+        if test "$$first2" = "$$first"; then \
+          dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+        else \
+          dir2="../$$dir2"; \
+        fi; \
+        dir0="$$dir0"/"$$first"; \
+      fi; \
+    fi; \
+    dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+  done; \
+  reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+SUBDIRS = unit power
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/layouts/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu src/layouts/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+#     (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+	@fail=; \
+	if $(am__make_keepgoing); then \
+	  failcom='fail=yes'; \
+	else \
+	  failcom='exit 1'; \
+	fi; \
+	dot_seen=no; \
+	target=`echo $@ | sed s/-recursive//`; \
+	case "$@" in \
+	  distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+	  *) list='$(SUBDIRS)' ;; \
+	esac; \
+	for subdir in $$list; do \
+	  echo "Making $$target in $$subdir"; \
+	  if test "$$subdir" = "."; then \
+	    dot_seen=yes; \
+	    local_target="$$target-am"; \
+	  else \
+	    local_target="$$target"; \
+	  fi; \
+	  ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+	  || eval $$failcom; \
+	done; \
+	if test "$$dot_seen" = "no"; then \
+	  $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+	fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+	  include_option=--etags-include; \
+	  empty_fix=.; \
+	else \
+	  include_option=--include; \
+	  empty_fix=; \
+	fi; \
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    test ! -f $$subdir/TAGS || \
+	      set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+	  fi; \
+	done; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+	@list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    $(am__make_dryrun) \
+	      || test -d "$(distdir)/$$subdir" \
+	      || $(MKDIR_P) "$(distdir)/$$subdir" \
+	      || exit 1; \
+	    dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+	    $(am__relativize); \
+	    new_distdir=$$reldir; \
+	    dir1=$$subdir; dir2="$(top_distdir)"; \
+	    $(am__relativize); \
+	    new_top_distdir=$$reldir; \
+	    echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+	    echo "     am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+	    ($(am__cd) $$subdir && \
+	      $(MAKE) $(AM_MAKEFLAGS) \
+	        top_distdir="$$new_top_distdir" \
+	        distdir="$$new_distdir" \
+		am__remove_distdir=: \
+		am__skip_length_check=: \
+		am__skip_mode_fix=: \
+	        distdir) \
+	      || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-recursive
+all-am: Makefile
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-recursive
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-recursive
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+	check-am clean clean-generic clean-libtool cscopelist-am ctags \
+	ctags-am distclean distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	installdirs-am maintainer-clean maintainer-clean-generic \
+	mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \
+	ps ps-am tags tags-am uninstall uninstall-am
+
+#basic racking energy power topology
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/layouts/power/Makefile.am b/src/layouts/power/Makefile.am
new file mode 100644
index 000000000..05e0f676c
--- /dev/null
+++ b/src/layouts/power/Makefile.am
@@ -0,0 +1,13 @@
+AUTOMAKE_OPTIONS = foreign
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
+
+pkglib_LTLIBRARIES = layouts_power_default.la layouts_power_cpufreq.la
+
+layouts_power_default_la_SOURCES = default.c
+layouts_power_default_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) 
+
+layouts_power_cpufreq_la_SOURCES = cpufreq.c
+layouts_power_cpufreq_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
diff --git a/src/layouts/power/Makefile.in b/src/layouts/power/Makefile.in
new file mode 100644
index 000000000..8d8cb3815
--- /dev/null
+++ b/src/layouts/power/Makefile.in
@@ -0,0 +1,825 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/layouts/power
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+	$(top_srcdir)/auxdir/depcomp
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+  test -z "$$files" \
+    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+         $(am__cd) "$$dir" && rm -f $$files; }; \
+  }
+am__installdirs = "$(DESTDIR)$(pkglibdir)"
+LTLIBRARIES = $(pkglib_LTLIBRARIES)
+layouts_power_cpufreq_la_LIBADD =
+am_layouts_power_cpufreq_la_OBJECTS = cpufreq.lo
+layouts_power_cpufreq_la_OBJECTS =  \
+	$(am_layouts_power_cpufreq_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 = 
+layouts_power_cpufreq_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \
+	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
+	$(AM_CFLAGS) $(CFLAGS) $(layouts_power_cpufreq_la_LDFLAGS) \
+	$(LDFLAGS) -o $@
+layouts_power_default_la_LIBADD =
+am_layouts_power_default_la_OBJECTS = default.lo
+layouts_power_default_la_OBJECTS =  \
+	$(am_layouts_power_default_la_OBJECTS)
+layouts_power_default_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \
+	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
+	$(AM_CFLAGS) $(CFLAGS) $(layouts_power_default_la_LDFLAGS) \
+	$(LDFLAGS) -o $@
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo "  CC      " $@;
+am__v_CC_1 = 
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo "  CCLD    " $@;
+am__v_CCLD_1 = 
+SOURCES = $(layouts_power_cpufreq_la_SOURCES) \
+	$(layouts_power_default_la_SOURCES)
+DIST_SOURCES = $(layouts_power_cpufreq_la_SOURCES) \
+	$(layouts_power_default_la_SOURCES)
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
+pkglib_LTLIBRARIES = layouts_power_default.la layouts_power_cpufreq.la
+layouts_power_default_la_SOURCES = default.c
+layouts_power_default_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) 
+layouts_power_cpufreq_la_SOURCES = cpufreq.c
+layouts_power_cpufreq_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/layouts/power/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/layouts/power/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	list2=; for p in $$list; do \
+	  if test -f $$p; then \
+	    list2="$$list2 $$p"; \
+	  else :; fi; \
+	done; \
+	test -z "$$list2" || { \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(pkglibdir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" || exit 1; \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \
+	}
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	for p in $$list; do \
+	  $(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; \
+	locs=`for p in $$list; do echo $$p; done | \
+	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+	      sort -u`; \
+	test -z "$$locs" || { \
+	  echo rm -f $${locs}; \
+	  rm -f $${locs}; \
+	}
+
+layouts_power_cpufreq.la: $(layouts_power_cpufreq_la_OBJECTS) $(layouts_power_cpufreq_la_DEPENDENCIES) $(EXTRA_layouts_power_cpufreq_la_DEPENDENCIES) 
+	$(AM_V_CCLD)$(layouts_power_cpufreq_la_LINK) -rpath $(pkglibdir) $(layouts_power_cpufreq_la_OBJECTS) $(layouts_power_cpufreq_la_LIBADD) $(LIBS)
+
+layouts_power_default.la: $(layouts_power_default_la_OBJECTS) $(layouts_power_default_la_DEPENDENCIES) $(EXTRA_layouts_power_default_la_DEPENDENCIES) 
+	$(AM_V_CCLD)$(layouts_power_default_la_LINK) -rpath $(pkglibdir) $(layouts_power_default_la_OBJECTS) $(layouts_power_default_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/cpufreq.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/default.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+	for dir in "$(DESTDIR)$(pkglibdir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-pkglibLTLIBRARIES
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pkglibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-pkglibLTLIBRARIES cscopelist-am ctags \
+	ctags-am distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-pkglibLTLIBRARIES install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \
+	uninstall-pkglibLTLIBRARIES
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/layouts/power/cpufreq.c b/src/layouts/power/cpufreq.c
new file mode 100644
index 000000000..c07ecfc70
--- /dev/null
+++ b/src/layouts/power/cpufreq.c
@@ -0,0 +1,167 @@
+/** TODO: copyright notice */
+
+#include "slurm/slurm.h"
+
+#include "src/common/layouts_mgr.h"
+#include "src/common/entity.h"
+#include "src/common/log.h"
+
+const char plugin_name[] = "power_cpufreq layouts plugin";
+const char plugin_type[] = "layouts/power";
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
+
+/* specific options for power tests layout */
+s_p_options_t entity_options[] = {
+	/* base keys */
+	{"CurrentCorePower", S_P_UINT32},
+	{"IdleCoreWatts", S_P_UINT32},
+	{"MaxCoreWatts", S_P_UINT32},
+	{"CurrentCoreFreq", S_P_UINT32},
+	{"Cpufreq1", S_P_UINT32},
+	{"Cpufreq2", S_P_UINT32},
+	{"Cpufreq3", S_P_UINT32},
+	{"Cpufreq4", S_P_UINT32},
+	{"Cpufreq5", S_P_UINT32},
+	{"Cpufreq6", S_P_UINT32},
+	{"Cpufreq7", S_P_UINT32},
+	{"Cpufreq8", S_P_UINT32},
+	{"Cpufreq1Watts", S_P_UINT32},
+	{"Cpufreq2Watts", S_P_UINT32},
+	{"Cpufreq3Watts", S_P_UINT32},
+	{"Cpufreq4Watts", S_P_UINT32},
+	{"Cpufreq5Watts", S_P_UINT32},
+	{"Cpufreq6Watts", S_P_UINT32},
+	{"Cpufreq7Watts", S_P_UINT32},
+	{"Cpufreq8Watts", S_P_UINT32},
+	{"NumFreqChoices", S_P_UINT16},
+	{"DownWatts",S_P_UINT32},
+	{"PowerSaveWatts",S_P_UINT32},
+	{"LastCore",S_P_UINT32},
+	/* children aggregated keys */
+	{"CurrentSumPower", S_P_UINT32},
+	{"IdleSumWatts", S_P_UINT32},
+	{"MaxSumWatts", S_P_UINT32},
+	{"CurrentPower", S_P_UINT32},
+	{"IdleWatts", S_P_UINT32},
+	{"MaxWatts", S_P_UINT32},
+	{"CoresCount", S_P_UINT32},
+	{NULL}
+};
+s_p_options_t options[] = {
+	{"Entity", S_P_EXPLINE, NULL, NULL, entity_options},
+	{NULL}
+};
+
+const layouts_keyspec_t keyspec[] = {
+	/* base keys */
+	{"CurrentCorePower", L_T_UINT32},
+	{"IdleCoreWatts", L_T_UINT32},
+	{"MaxCoreWatts", L_T_UINT32},
+	{"CurrentCoreFreq", L_T_UINT32},
+	{"Cpufreq1", L_T_UINT32},
+	{"Cpufreq2", L_T_UINT32},
+	{"Cpufreq3", L_T_UINT32},
+	{"Cpufreq4", L_T_UINT32},
+	{"Cpufreq5", L_T_UINT32},
+	{"Cpufreq6", L_T_UINT32},
+	{"Cpufreq7", L_T_UINT32},
+	{"Cpufreq8", L_T_UINT32},
+	{"Cpufreq1Watts", L_T_UINT32},
+	{"Cpufreq2Watts", L_T_UINT32},
+	{"Cpufreq3Watts", L_T_UINT32},
+	{"Cpufreq4Watts", L_T_UINT32},
+	{"Cpufreq5Watts", L_T_UINT32},
+	{"Cpufreq6Watts", L_T_UINT32},
+	{"Cpufreq7Watts", L_T_UINT32},
+	{"Cpufreq8Watts", L_T_UINT32},
+	{"DownWatts",L_T_UINT32},
+	{"PowerSaveWatts",L_T_UINT32},
+	{"NumFreqChoices",L_T_UINT16},
+	{"LastCore",L_T_UINT32},
+	/* parents aggregated keys */
+	{"CurrentSumPower", L_T_UINT32,
+	KEYSPEC_UPDATE_CHILDREN_SUM, "CurrentPower"},
+	{"IdleSumWatts", L_T_UINT32,
+	KEYSPEC_UPDATE_CHILDREN_SUM, "IdleWatts"},
+	{"MaxSumWatts", L_T_UINT32,
+	KEYSPEC_UPDATE_CHILDREN_SUM, "MaxWatts"},
+	{"CurrentPower", L_T_UINT32,
+	KEYSPEC_UPDATE_CHILDREN_SUM, "CurrentCorePower"},
+	{"IdleWatts", L_T_UINT32,
+	KEYSPEC_UPDATE_CHILDREN_SUM, "IdleCoreWatts"},
+	{"MaxWatts", L_T_UINT32,
+	KEYSPEC_UPDATE_CHILDREN_SUM, "MaxCoreWatts"},
+	{"CoresCount", L_T_UINT32, KEYSPEC_UPDATE_CHILDREN_COUNT},
+	{NULL}
+
+};
+
+/* types allowed in the entity's "type" field */
+const char* etypes[] = {
+	"Center",
+	"Node",
+	"Core",
+	NULL
+};
+
+const layouts_plugin_spec_t plugin_spec = {
+	options,
+	keyspec,
+	LAYOUT_STRUCT_TREE,
+	etypes,
+	true, /* when true, keys present in both the plugin keyspec and the
+	       * plugin parsing options (with matching types) are handled
+	       * automatically by the layouts manager.
+	       */
+	true  /* when true, key updates automatically trigger updates of the
+	       * neighborhood of the owning entities, based on the
+	       * KEYSPEC_UPDATE_* flags set in the keyspec.
+	       */
+};
+
+/* The layouts manager lock is held when this function is called. */
+/* Disable this callback by setting it to NULL; note that not every
+ * callback can be deactivated this way. */
+int layouts_p_conf_done(
+		xhash_t* entities, layout_t* layout, s_p_hashtbl_t* tbl)
+{
+	return 1;
+}
+
+
+/* Disable this callback by setting it to NULL; note that not every
+ * callback can be deactivated this way. */
+void layouts_p_entity_parsing(
+		entity_t* e, s_p_hashtbl_t* etbl, layout_t* layout)
+{
+}
+
+/* The layouts manager lock is held when this function is called. */
+/* Disable this callback by setting it to NULL; note that not every
+ * callback can be deactivated this way. */
+int layouts_p_update_done(layout_t* layout, entity_t** e_array, int e_cnt)
+{
+	int i;
+	debug3("layouts/power_cpufreq: receiving update callback for %d entities",
+	       e_cnt);
+	for (i = 0; i < e_cnt; i++) {
+		if (e_array[i] == NULL) {
+			/* space added: adjacent literals concatenate */
+			debug3("layouts/power_cpufreq: skipping update of nullified "
+			       "entity[%d]", i);
+		} else {
+			debug3("layouts/power_cpufreq: updating entity[%d]=%s",
+			       i, e_array[i]->name);
+		}
+	}
+	return 1;
+}
+
+int init(void)
+{
+	return SLURM_SUCCESS;
+}
+
+int fini(void)
+{
+	return SLURM_SUCCESS;
+}
diff --git a/src/layouts/power/default.c b/src/layouts/power/default.c
new file mode 100644
index 000000000..0a80eb4cc
--- /dev/null
+++ b/src/layouts/power/default.c
@@ -0,0 +1,119 @@
+/** TODO: copyright notice */
+
+#include "slurm/slurm.h"
+
+#include "src/common/layouts_mgr.h"
+#include "src/common/entity.h"
+#include "src/common/log.h"
+
+const char plugin_name[] = "Power layouts plugin";
+const char plugin_type[] = "layouts/power";
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
+
+/* specific options for power tests layout */
+s_p_options_t entity_options[] = {
+	/* base keys */
+	{"CurrentPower", S_P_UINT32},
+	{"IdleWatts", S_P_UINT32},
+	{"MaxWatts", S_P_UINT32},
+	{"DownWatts",S_P_UINT32},
+	{"PowerSaveWatts",S_P_UINT32},
+	/* parents aggregated keys */
+	{"CurrentSumPower", S_P_UINT32},
+	{"IdleSumWatts", S_P_UINT32},
+	{"MaxSumWatts", S_P_UINT32},
+	{NULL}
+};
+s_p_options_t options[] = {
+	{"Entity", S_P_EXPLINE, NULL, NULL, entity_options},
+	{NULL}
+};
+
+const layouts_keyspec_t keyspec[] = {
+	/* base keys */
+	{"CurrentPower", L_T_UINT32},
+	{"IdleWatts", L_T_UINT32},
+	{"MaxWatts", L_T_UINT32},
+	{"DownWatts",L_T_UINT32},
+	{"PowerSaveWatts",L_T_UINT32},
+	{"NumFreqChoices",L_T_UINT32},
+	/* parents aggregated keys */
+	{"CurrentSumPower", L_T_UINT32,
+	KEYSPEC_UPDATE_CHILDREN_SUM, "CurrentPower"},
+	{"IdleSumWatts", L_T_UINT32,
+	KEYSPEC_UPDATE_CHILDREN_SUM, "IdleWatts"},
+	{"MaxSumWatts", L_T_UINT32,
+	KEYSPEC_UPDATE_CHILDREN_SUM, "MaxWatts"},
+	{NULL}
+
+};
+
+/* types allowed in the entity's "type" field */
+const char* etypes[] = {
+	"Center",
+	"Node",
+	NULL
+};
+
+const layouts_plugin_spec_t plugin_spec = {
+	options,
+	keyspec,
+	LAYOUT_STRUCT_TREE,
+	etypes,
+	true, /* if this evaluates to true, keys inside plugin_keyspec present in
+	       * plugin_options having corresponding types are automatically
+	       * handled by the layouts manager.
+	       */
+	true  /* if this evaluates to true, key updates trigger an automatic
+	       * update of their entities' neighborhoods based on their
+	       * KEYSPEC_UPDATE_* set flags
+	       */
+};
+
+/* manager is locked when this function is called */
+/* disable this callback by setting it to NULL; warning: not every callback
+ * can be deactivated this way */
+int layouts_p_conf_done(
+		xhash_t* entities, layout_t* layout, s_p_hashtbl_t* tbl)
+{
+	return 1;
+}
+
+
+/* disable this callback by setting it to NULL; warning: not every callback
+ * can be deactivated this way */
+void layouts_p_entity_parsing(
+		entity_t* e, s_p_hashtbl_t* etbl, layout_t* layout)
+{
+}
+
+/* manager is locked when this function is called */
+/* disable this callback by setting it to NULL; warning: not every callback
+ * can be deactivated this way */
+int layouts_p_update_done(layout_t* layout, entity_t** e_array, int e_cnt)
+{
+	int i;
+	debug3("layouts/power: receiving update callback for %d entities",
+	       e_cnt);
+	for (i = 0; i < e_cnt; i++) {
+		if (e_array[i] == NULL) {
+			debug3("layouts/power: skipping update of nullified"
+			       " entity[%d]", i);
+		} else {
+			debug3("layouts/power: updating entity[%d]=%s",
+			       i, e_array[i]->name);
+		}
+	}
+	return 1;
+}
+
+int init(void)
+{
+	return SLURM_SUCCESS;
+}
+
+int fini(void)
+{
+	return SLURM_SUCCESS;
+}
+
diff --git a/src/layouts/unit/Makefile.am b/src/layouts/unit/Makefile.am
new file mode 100644
index 000000000..ac5f50cc9
--- /dev/null
+++ b/src/layouts/unit/Makefile.am
@@ -0,0 +1,10 @@
+AUTOMAKE_OPTIONS = foreign
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
+
+pkglib_LTLIBRARIES = layouts_unit_default.la
+
+layouts_unit_default_la_SOURCES = default.c
+layouts_unit_default_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
diff --git a/src/layouts/unit/Makefile.in b/src/layouts/unit/Makefile.in
new file mode 100644
index 000000000..c3e73f8da
--- /dev/null
+++ b/src/layouts/unit/Makefile.in
@@ -0,0 +1,809 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/layouts/unit
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+	$(top_srcdir)/auxdir/depcomp
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+  test -z "$$files" \
+    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+         $(am__cd) "$$dir" && rm -f $$files; }; \
+  }
+am__installdirs = "$(DESTDIR)$(pkglibdir)"
+LTLIBRARIES = $(pkglib_LTLIBRARIES)
+layouts_unit_default_la_LIBADD =
+am_layouts_unit_default_la_OBJECTS = default.lo
+layouts_unit_default_la_OBJECTS =  \
+	$(am_layouts_unit_default_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 = 
+layouts_unit_default_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \
+	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
+	$(AM_CFLAGS) $(CFLAGS) $(layouts_unit_default_la_LDFLAGS) \
+	$(LDFLAGS) -o $@
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo "  CC      " $@;
+am__v_CC_1 = 
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo "  CCLD    " $@;
+am__v_CCLD_1 = 
+SOURCES = $(layouts_unit_default_la_SOURCES)
+DIST_SOURCES = $(layouts_unit_default_la_SOURCES)
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
+pkglib_LTLIBRARIES = layouts_unit_default.la
+layouts_unit_default_la_SOURCES = default.c
+layouts_unit_default_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/layouts/unit/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/layouts/unit/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	list2=; for p in $$list; do \
+	  if test -f $$p; then \
+	    list2="$$list2 $$p"; \
+	  else :; fi; \
+	done; \
+	test -z "$$list2" || { \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(pkglibdir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" || exit 1; \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \
+	}
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	for p in $$list; do \
+	  $(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; \
+	locs=`for p in $$list; do echo $$p; done | \
+	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+	      sort -u`; \
+	test -z "$$locs" || { \
+	  echo rm -f $${locs}; \
+	  rm -f $${locs}; \
+	}
+
+layouts_unit_default.la: $(layouts_unit_default_la_OBJECTS) $(layouts_unit_default_la_DEPENDENCIES) $(EXTRA_layouts_unit_default_la_DEPENDENCIES) 
+	$(AM_V_CCLD)$(layouts_unit_default_la_LINK) -rpath $(pkglibdir) $(layouts_unit_default_la_OBJECTS) $(layouts_unit_default_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/default.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+	for dir in "$(DESTDIR)$(pkglibdir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-pkglibLTLIBRARIES
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pkglibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-pkglibLTLIBRARIES cscopelist-am ctags \
+	ctags-am distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-pkglibLTLIBRARIES install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \
+	uninstall-pkglibLTLIBRARIES
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/layouts/unit/default.c b/src/layouts/unit/default.c
new file mode 100644
index 000000000..e88c733e5
--- /dev/null
+++ b/src/layouts/unit/default.c
@@ -0,0 +1,225 @@
+/** TODO: copyright notice */
+
+#include "slurm/slurm.h"
+
+#include "src/common/layouts_mgr.h"
+#include "src/common/entity.h"
+#include "src/common/log.h"
+
+const char plugin_name[] = "Unit Tests layouts plugin";
+const char plugin_type[] = "layouts/unit";
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
+
+/* specific options for unit tests layout */
+s_p_options_t entity_options[] = {
+	/* base keys */
+	{"string", S_P_STRING},
+	{"long", S_P_LONG},
+	{"uint16", S_P_UINT16},
+	{"uint32", S_P_UINT32},
+	{"float", S_P_FLOAT},
+	{"double", S_P_DOUBLE},
+	{"ldouble", S_P_LONG_DOUBLE},
+	{"readonly", S_P_BOOLEAN},
+	/* parents aggregated keys */
+	{"parents_sum_long", S_P_LONG},
+	{"parents_fshare_long", S_P_LONG},
+	{"parents_sum_uint16", S_P_UINT16},
+	{"parents_fshare_uint16", S_P_UINT16},
+	{"parents_sum_uint32", S_P_UINT32},
+	{"parents_fshare_uint32", S_P_UINT32},
+	{"parents_sum_float", S_P_FLOAT},
+	{"parents_fshare_float", S_P_FLOAT},
+	{"parents_sum_double", S_P_DOUBLE},
+	{"parents_fshare_double", S_P_DOUBLE},
+	{"parents_sum_ldouble", S_P_LONG_DOUBLE},
+	{"parents_fshare_ldouble", S_P_LONG_DOUBLE},
+	/* children aggregated keys */
+	{"children_count", S_P_UINT32},
+	{"children_sum_long", S_P_LONG},
+	{"children_avg_long", S_P_LONG},
+	{"children_min_long", S_P_LONG},
+	{"children_max_long", S_P_LONG},
+	{"children_sum_uint16", S_P_UINT16},
+	{"children_avg_uint16", S_P_UINT16},
+	{"children_min_uint16", S_P_UINT16},
+	{"children_max_uint16", S_P_UINT16},
+	{"children_sum_uint32", S_P_UINT32},
+	{"children_avg_uint32", S_P_UINT32},
+	{"children_min_uint32", S_P_UINT32},
+	{"children_max_uint32", S_P_UINT32},
+	{"children_sum_float", S_P_FLOAT},
+	{"children_avg_float", S_P_FLOAT},
+	{"children_min_float", S_P_FLOAT},
+	{"children_max_float", S_P_FLOAT},
+	{"children_sum_double", S_P_DOUBLE},
+	{"children_avg_double", S_P_DOUBLE},
+	{"children_min_double", S_P_DOUBLE},
+	{"children_max_double", S_P_DOUBLE},
+	{"children_sum_ldouble", S_P_LONG_DOUBLE},
+	{"children_avg_ldouble", S_P_LONG_DOUBLE},
+	{"children_min_ldouble", S_P_LONG_DOUBLE},
+	{"children_max_ldouble", S_P_LONG_DOUBLE},
+	{NULL}
+};
+s_p_options_t options[] = {
+	{"Entity", S_P_EXPLINE, NULL, NULL, entity_options},
+	{NULL}
+};
+
+const layouts_keyspec_t keyspec[] = {
+	/* base keys */
+	{"string", L_T_STRING},
+	{"long", L_T_LONG},
+	{"uint16", L_T_UINT16},
+	{"uint32", L_T_UINT32},
+	{"float", L_T_FLOAT},
+	{"double", L_T_DOUBLE},
+	{"ldouble", L_T_LONG_DOUBLE},
+	{"readonly", L_T_BOOLEAN, KEYSPEC_RDONLY},
+	/* parents aggregated keys */
+	{"parents_sum_long", L_T_LONG,
+	 KEYSPEC_UPDATE_PARENTS_SUM, "long"},
+	{"parents_fshare_long", L_T_LONG,
+	 KEYSPEC_UPDATE_PARENTS_FSHARE, "long"},
+	{"parents_sum_uint16", L_T_UINT16,
+	 KEYSPEC_UPDATE_PARENTS_SUM, "uint16"},
+	{"parents_fshare_uint16", L_T_UINT16,
+	 KEYSPEC_UPDATE_PARENTS_FSHARE, "uint16"},
+	{"parents_sum_uint32", L_T_UINT32,
+	 KEYSPEC_UPDATE_PARENTS_SUM, "uint32"},
+	{"parents_fshare_uint32", L_T_UINT32,
+	 KEYSPEC_UPDATE_PARENTS_FSHARE, "uint32"},
+	{"parents_sum_float", L_T_FLOAT,
+	 KEYSPEC_UPDATE_PARENTS_SUM, "float"},
+	{"parents_fshare_float", L_T_FLOAT,
+	 KEYSPEC_UPDATE_PARENTS_FSHARE, "float"},
+	{"parents_sum_double", L_T_DOUBLE,
+	 KEYSPEC_UPDATE_PARENTS_SUM, "double"},
+	{"parents_fshare_double", L_T_DOUBLE,
+	 KEYSPEC_UPDATE_PARENTS_FSHARE, "double"},
+	{"parents_sum_ldouble", L_T_LONG_DOUBLE,
+	 KEYSPEC_UPDATE_PARENTS_SUM, "ldouble"},
+	{"parents_fshare_ldouble", L_T_LONG_DOUBLE,
+	 KEYSPEC_UPDATE_PARENTS_FSHARE, "ldouble"},
+	/* children aggregated keys */
+	{"children_count", L_T_UINT32, KEYSPEC_UPDATE_CHILDREN_COUNT},
+	{"children_sum_long", L_T_LONG,
+	 KEYSPEC_UPDATE_CHILDREN_SUM, "long"},
+	{"children_avg_long", L_T_LONG,
+	 KEYSPEC_UPDATE_CHILDREN_AVG, "long"},
+	{"children_min_long", L_T_LONG,
+	 KEYSPEC_UPDATE_CHILDREN_MIN, "long"},
+	{"children_max_long", L_T_LONG,
+	 KEYSPEC_UPDATE_CHILDREN_MAX, "long"},
+	{"children_sum_uint16", L_T_UINT16,
+	 KEYSPEC_UPDATE_CHILDREN_SUM, "uint16"},
+	{"children_avg_uint16", L_T_UINT16,
+	 KEYSPEC_UPDATE_CHILDREN_AVG, "uint16"},
+	{"children_min_uint16", L_T_UINT16,
+	 KEYSPEC_UPDATE_CHILDREN_MIN, "uint16"},
+	{"children_max_uint16", L_T_UINT16,
+	 KEYSPEC_UPDATE_CHILDREN_MAX, "uint16"},
+	{"children_sum_uint32", L_T_UINT32,
+	 KEYSPEC_UPDATE_CHILDREN_SUM, "uint32"},
+	{"children_avg_uint32", L_T_UINT32,
+	 KEYSPEC_UPDATE_CHILDREN_AVG, "uint32"},
+	{"children_min_uint32", L_T_UINT32,
+	 KEYSPEC_UPDATE_CHILDREN_MIN, "uint32"},
+	{"children_max_uint32", L_T_UINT32,
+	 KEYSPEC_UPDATE_CHILDREN_MAX, "uint32"},
+	{"children_sum_float", L_T_FLOAT,
+	 KEYSPEC_UPDATE_CHILDREN_SUM, "float"},
+	{"children_avg_float", L_T_FLOAT,
+	 KEYSPEC_UPDATE_CHILDREN_AVG, "float"},
+	{"children_min_float", L_T_FLOAT,
+	 KEYSPEC_UPDATE_CHILDREN_MIN, "float"},
+	{"children_max_float", L_T_FLOAT,
+	 KEYSPEC_UPDATE_CHILDREN_MAX, "float"},
+	{"children_sum_double", L_T_DOUBLE,
+	 KEYSPEC_UPDATE_CHILDREN_SUM, "double"},
+	{"children_avg_double", L_T_DOUBLE,
+	 KEYSPEC_UPDATE_CHILDREN_AVG, "double"},
+	{"children_min_double", L_T_DOUBLE,
+	 KEYSPEC_UPDATE_CHILDREN_MIN, "double"},
+	{"children_max_double", L_T_DOUBLE,
+	 KEYSPEC_UPDATE_CHILDREN_MAX, "double"},
+	{"children_sum_ldouble", L_T_LONG_DOUBLE,
+	 KEYSPEC_UPDATE_CHILDREN_SUM, "ldouble"},
+	{"children_avg_ldouble", L_T_LONG_DOUBLE,
+	 KEYSPEC_UPDATE_CHILDREN_AVG, "ldouble"},
+	{"children_min_ldouble", L_T_LONG_DOUBLE,
+	 KEYSPEC_UPDATE_CHILDREN_MIN, "ldouble"},
+	{"children_max_ldouble", L_T_LONG_DOUBLE,
+	 KEYSPEC_UPDATE_CHILDREN_MAX, "ldouble"},
+	{NULL}
+};
+
+/* types allowed in the entity's "type" field */
+const char* etypes[] = {
+	"UnitTestPass",
+	"UnitTest",
+	NULL
+};
+
+const layouts_plugin_spec_t plugin_spec = {
+	options,
+	keyspec,
+	LAYOUT_STRUCT_TREE,
+	etypes,
+	true, /* if this evaluates to true, keys inside plugin_keyspec present in
+	       * plugin_options having corresponding types, are automatically
+	       * handled by the layouts manager.
+	       */
+	true  /* if this evaluates to true, key updates trigger an automatic
+	       * update of their entities neighborhoods based on their
+	       * KEYSPEC_UPDATE_* set flags
+	       */
+};
+
+/* manager is locked when this function is called */
+/* disable this callback by setting it to NULL, warn: not every callback can
+ * be deactivated this way */
+int layouts_p_conf_done(
+		xhash_t* entities, layout_t* layout, s_p_hashtbl_t* tbl)
+{
+	return 1;
+}
+
+
+/* disable this callback by setting it to NULL, warn: not every callback can
+ * be deactivated this way */
+void layouts_p_entity_parsing(
+		entity_t* e, s_p_hashtbl_t* etbl, layout_t* layout)
+{
+}
+
+/* manager is locked when this function is called */
+/* disable this callback by setting it to NULL, warn: not every callback can
+ * be deactivated this way */
+int layouts_p_update_done(layout_t* layout, entity_t** e_array, int e_cnt)
+{
+	int i;
+	debug3("layouts/unit: receiving update callback for %d entities",
+	       e_cnt);
+	for (i = 0; i < e_cnt; i++) {
+		if (e_array[i] == NULL) {
+			debug3("layouts/unit: skipping update of nullified"
+			       "entity[%d]", i);
+		} else {
+			debug3("layouts/unit: updating entity[%d]=%s",
+			       i, e_array[i]->name);
+		}
+	}
+	return 1;
+}
+
+int init(void)
+{
+	return SLURM_SUCCESS;
+}
+
+int fini(void)
+{
+	return SLURM_SUCCESS;
+}
diff --git a/src/plugins/Makefile.am b/src/plugins/Makefile.am
index fc553cfd1..49204713c 100644
--- a/src/plugins/Makefile.am
+++ b/src/plugins/Makefile.am
@@ -5,6 +5,7 @@ SUBDIRS = \
 	acct_gather_infiniband  \
 	acct_gather_filesystem  \
 	auth			\
+	burst_buffer		\
 	checkpoint		\
 	core_spec		\
 	crypto			\
@@ -16,6 +17,7 @@ SUBDIRS = \
 	job_submit		\
 	launch			\
 	mpi			\
+	power			\
 	preempt			\
 	priority		\
 	proctrack		\
@@ -23,7 +25,7 @@ SUBDIRS = \
 	sched			\
 	select			\
 	slurmctld		\
-	slurmd		\
+	slurmd			\
 	switch			\
 	task			\
 	topology
diff --git a/src/plugins/Makefile.in b/src/plugins/Makefile.in
index 8ab5c793b..484689187 100644
--- a/src/plugins/Makefile.in
+++ b/src/plugins/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -249,6 +252,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -298,8 +303,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -318,6 +327,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -361,6 +373,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -384,6 +397,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -452,6 +466,7 @@ SUBDIRS = \
 	acct_gather_infiniband  \
 	acct_gather_filesystem  \
 	auth			\
+	burst_buffer		\
 	checkpoint		\
 	core_spec		\
 	crypto			\
@@ -463,6 +478,7 @@ SUBDIRS = \
 	job_submit		\
 	launch			\
 	mpi			\
+	power			\
 	preempt			\
 	priority		\
 	proctrack		\
@@ -470,7 +486,7 @@ SUBDIRS = \
 	sched			\
 	select			\
 	slurmctld		\
-	slurmd		\
+	slurmd			\
 	switch			\
 	task			\
 	topology
diff --git a/src/plugins/accounting_storage/Makefile.in b/src/plugins/accounting_storage/Makefile.in
index 7356670c8..7779aabe4 100644
--- a/src/plugins/accounting_storage/Makefile.in
+++ b/src/plugins/accounting_storage/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/accounting_storage/common/Makefile.in b/src/plugins/accounting_storage/common/Makefile.in
index 64cfb014c..118f90962 100644
--- a/src/plugins/accounting_storage/common/Makefile.in
+++ b/src/plugins/accounting_storage/common/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -242,6 +245,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -291,8 +296,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -311,6 +320,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -354,6 +366,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -377,6 +390,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/accounting_storage/common/common_as.c b/src/plugins/accounting_storage/common/common_as.c
index dbfc86e28..bdd7bb2d7 100644
--- a/src/plugins/accounting_storage/common/common_as.c
+++ b/src/plugins/accounting_storage/common/common_as.c
@@ -43,23 +43,24 @@
 #include <sys/stat.h>
 #include <unistd.h>
 #include <fcntl.h>
+#include "src/common/env.h"
 #include "src/common/slurmdbd_defs.h"
 #include "src/common/slurm_auth.h"
+#include "src/common/slurm_time.h"
 #include "src/common/xstring.h"
-#include "src/common/env.h"
 #include "src/slurmdbd/read_config.h"
 #include "common_as.h"
 
-extern char *assoc_hour_table;
 extern char *assoc_day_table;
+extern char *assoc_hour_table;
 extern char *assoc_month_table;
 
-extern char *cluster_hour_table;
 extern char *cluster_day_table;
+extern char *cluster_hour_table;
 extern char *cluster_month_table;
 
-extern char *wckey_hour_table;
 extern char *wckey_day_table;
+extern char *wckey_hour_table;
 extern char *wckey_month_table;
 
 /*
@@ -81,7 +82,7 @@ static int _sort_update_object_dec(void *a, void *b)
 
 static void _dump_slurmdb_assoc_records(List assoc_list)
 {
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	ListIterator itr = NULL;
 
 	itr = list_iterator_create(assoc_list);
@@ -142,7 +143,7 @@ extern int addto_update_list(List update_list, slurmdb_update_type_t type,
 			     void *object)
 {
 	slurmdb_update_object_t *update_object = NULL;
-	slurmdb_association_rec_t *assoc = object;
+	slurmdb_assoc_rec_t *assoc = object;
 	slurmdb_qos_rec_t *qos = object;
 	ListIterator itr = NULL;
 	if (!update_list) {
@@ -181,82 +182,46 @@ extern int addto_update_list(List update_list, slurmdb_update_type_t type,
 	case SLURMDB_REMOVE_COORD:
 		update_object->objects = list_create(slurmdb_destroy_user_rec);
 		break;
+	case SLURMDB_ADD_TRES:
+		xassert(((slurmdb_tres_rec_t *)object)->id);
+		update_object->objects = list_create(slurmdb_destroy_tres_rec);
+		break;
 	case SLURMDB_ADD_ASSOC:
 		/* We are going to send these to the slurmctld's so
-		   lets set up the correct limits to INIFINITE instead
+		   lets set up the correct limits to INFINITE instead
 		   of NO_VAL */
-		if (assoc->grp_cpu_mins == (uint64_t)NO_VAL)
-			assoc->grp_cpu_mins = (uint64_t)INFINITE;
-		if (assoc->grp_cpu_run_mins == (uint64_t)NO_VAL)
-			assoc->grp_cpu_run_mins = (uint64_t)INFINITE;
-		if (assoc->grp_cpus == NO_VAL)
-			assoc->grp_cpus = INFINITE;
 		if (assoc->grp_jobs == NO_VAL)
 			assoc->grp_jobs = INFINITE;
-		if (assoc->grp_mem == NO_VAL)
-			assoc->grp_mem = INFINITE;
-		if (assoc->grp_nodes == NO_VAL)
-			assoc->grp_nodes = INFINITE;
 		if (assoc->grp_submit_jobs == NO_VAL)
 			assoc->grp_submit_jobs = INFINITE;
 		if (assoc->grp_wall == NO_VAL)
 			assoc->grp_wall = INFINITE;
 
-		if (assoc->max_cpu_mins_pj == (uint64_t)NO_VAL)
-			assoc->max_cpu_mins_pj = (uint64_t)INFINITE;
-		if (assoc->max_cpu_run_mins == (uint64_t)NO_VAL)
-			assoc->max_cpu_run_mins = (uint64_t)INFINITE;
-		if (assoc->max_cpus_pj == NO_VAL)
-			assoc->max_cpus_pj = INFINITE;
 		if (assoc->max_jobs == NO_VAL)
 			assoc->max_jobs = INFINITE;
-		if (assoc->max_nodes_pj == NO_VAL)
-			assoc->max_nodes_pj = INFINITE;
 		if (assoc->max_submit_jobs == NO_VAL)
 			assoc->max_submit_jobs = INFINITE;
 		if (assoc->max_wall_pj == NO_VAL)
 			assoc->max_wall_pj = INFINITE;
 	case SLURMDB_MODIFY_ASSOC:
 	case SLURMDB_REMOVE_ASSOC:
-		xassert(((slurmdb_association_rec_t *)object)->cluster);
+		xassert(((slurmdb_assoc_rec_t *)object)->cluster);
 		update_object->objects = list_create(
-			slurmdb_destroy_association_rec);
+			slurmdb_destroy_assoc_rec);
 		break;
 	case SLURMDB_ADD_QOS:
 		/* We are going to send these to the slurmctld's so
-		   lets set up the correct limits to INIFINITE instead
+		   lets set up the correct limits to INFINITE instead
 		   of NO_VAL */
-		if (qos->grp_cpu_mins == (uint64_t)NO_VAL)
-			qos->grp_cpu_mins = (uint64_t)INFINITE;
-		if (qos->grp_cpu_run_mins == (uint64_t)NO_VAL)
-			qos->grp_cpu_run_mins = (uint64_t)INFINITE;
-		if (qos->grp_cpus == NO_VAL)
-			qos->grp_cpus = INFINITE;
 		if (qos->grp_jobs == NO_VAL)
 			qos->grp_jobs = INFINITE;
-		if (qos->grp_mem == NO_VAL)
-			qos->grp_mem = INFINITE;
-		if (qos->grp_nodes == NO_VAL)
-			qos->grp_nodes = INFINITE;
 		if (qos->grp_submit_jobs == NO_VAL)
 			qos->grp_submit_jobs = INFINITE;
 		if (qos->grp_wall == NO_VAL)
 			qos->grp_wall = INFINITE;
 
-		if (qos->max_cpu_mins_pj == (uint64_t)NO_VAL)
-			qos->max_cpu_mins_pj = (uint64_t)INFINITE;
-		if (qos->max_cpu_run_mins_pu == (uint64_t)NO_VAL)
-			qos->max_cpu_run_mins_pu = (uint64_t)INFINITE;
-		if (qos->max_cpus_pj == NO_VAL)
-			qos->max_cpus_pj = INFINITE;
-		if (qos->max_cpus_pu == NO_VAL)
-			qos->max_cpus_pu = INFINITE;
 		if (qos->max_jobs_pu == NO_VAL)
 			qos->max_jobs_pu = INFINITE;
-		if (qos->max_nodes_pj == NO_VAL)
-			qos->max_nodes_pj = INFINITE;
-		if (qos->max_nodes_pu == NO_VAL)
-			qos->max_nodes_pu = INFINITE;
 		if (qos->max_submit_jobs_pu == NO_VAL)
 			qos->max_submit_jobs_pu = INFINITE;
 		if (qos->max_wall_pj == NO_VAL)
@@ -324,6 +289,9 @@ extern void dump_update_list(List update_list)
 		case SLURMDB_REMOVE_COORD:
 			debug3("\tUSER RECORDS");
 			break;
+		case SLURMDB_ADD_TRES:
+			debug3("\tTRES RECORDS");
+			break;
 		case SLURMDB_ADD_ASSOC:
 		case SLURMDB_MODIFY_ASSOC:
 		case SLURMDB_REMOVE_ASSOC:
@@ -400,7 +368,7 @@ extern int cluster_first_reg(char *host, uint16_t port, uint16_t rpc_version)
 		 * for an arbitray fd or should these be fire
 		 * and forget?  For this, that we can probably
 		 * forget about it */
-		slurm_close_stream(fd);
+		slurm_close(fd);
 	}
 	return rc;
 }
@@ -414,7 +382,8 @@ extern int cluster_first_reg(char *host, uint16_t port, uint16_t rpc_version)
  * IN/OUT usage_end: end time
  * RET: error code
  */
-extern int set_usage_information(char **usage_table, slurmdbd_msg_type_t type,
+extern int set_usage_information(char **usage_table,
+				 slurmdbd_msg_type_t type,
 				 time_t *usage_start, time_t *usage_end)
 {
 	time_t start = (*usage_start), end = (*usage_end);
@@ -425,14 +394,14 @@ extern int set_usage_information(char **usage_table, slurmdbd_msg_type_t type,
 
 	/* Default is going to be the last day */
 	if (!end) {
-		if (!localtime_r(&my_time, &end_tm)) {
+		if (!slurm_localtime_r(&my_time, &end_tm)) {
 			error("Couldn't get localtime from end %ld",
 			      my_time);
 			return SLURM_ERROR;
 		}
 		end_tm.tm_hour = 0;
 	} else {
-		if (!localtime_r(&end, &end_tm)) {
+		if (!slurm_localtime_r(&end, &end_tm)) {
 			error("Couldn't get localtime from user end %ld",
 			      end);
 			return SLURM_ERROR;
@@ -441,10 +410,10 @@ extern int set_usage_information(char **usage_table, slurmdbd_msg_type_t type,
 	end_tm.tm_sec = 0;
 	end_tm.tm_min = 0;
 	end_tm.tm_isdst = -1;
-	end = mktime(&end_tm);
+	end = slurm_mktime(&end_tm);
 
 	if (!start) {
-		if (!localtime_r(&my_time, &start_tm)) {
+		if (!slurm_localtime_r(&my_time, &start_tm)) {
 			error("Couldn't get localtime from start %ld",
 			      my_time);
 			return SLURM_ERROR;
@@ -452,7 +421,7 @@ extern int set_usage_information(char **usage_table, slurmdbd_msg_type_t type,
 		start_tm.tm_hour = 0;
 		start_tm.tm_mday--;
 	} else {
-		if (!localtime_r(&start, &start_tm)) {
+		if (!slurm_localtime_r(&start, &start_tm)) {
 			error("Couldn't get localtime from user start %ld",
 			      start);
 			return SLURM_ERROR;
@@ -461,11 +430,11 @@ extern int set_usage_information(char **usage_table, slurmdbd_msg_type_t type,
 	start_tm.tm_sec = 0;
 	start_tm.tm_min = 0;
 	start_tm.tm_isdst = -1;
-	start = mktime(&start_tm);
+	start = slurm_mktime(&start_tm);
 
 	if (end-start < 3600) {
 		end = start + 3600;
-		if (!localtime_r(&end, &end_tm)) {
+		if (!slurm_localtime_r(&end, &end_tm)) {
 			error("2 Couldn't get localtime from user end %ld",
 			      end);
 			return SLURM_ERROR;
@@ -514,6 +483,7 @@ extern int set_usage_information(char **usage_table, slurmdbd_msg_type_t type,
 	(*usage_start) = start;
 	(*usage_end) = end;
 	(*usage_table) = my_usage_table;
+
 	return SLURM_SUCCESS;
 }
 
@@ -655,8 +625,8 @@ extern time_t archive_setup_end_time(time_t last_submit, uint32_t purge)
 		return 0;
 	}
 
-	/* use localtime to avoid any daylight savings issues */
-	if (!localtime_r(&last_submit, &time_tm)) {
+	/* use slurm_localtime to avoid any daylight savings issues */
+	if (!slurm_localtime_r(&last_submit, &time_tm)) {
 		error("Couldn't get localtime from first "
 		      "suspend start %ld", (long)last_submit);
 		return 0;
@@ -682,7 +652,7 @@ extern time_t archive_setup_end_time(time_t last_submit, uint32_t purge)
 	}
 
 	time_tm.tm_isdst = -1;
-	return (mktime(&time_tm) - 1);
+	return (slurm_mktime(&time_tm) - 1);
 }
 
 
@@ -810,7 +780,7 @@ static char *_make_archive_name(time_t period_start, time_t period_end,
 	char start_char[32];
 	char end_char[32];
 
-	localtime_r((time_t *)&period_start, &time_tm);
+	slurm_localtime_r((time_t *)&period_start, &time_tm);
 	time_tm.tm_sec = 0;
 	time_tm.tm_min = 0;
 
@@ -833,7 +803,7 @@ static char *_make_archive_name(time_t period_start, time_t period_end,
 		 time_tm.tm_min,
 		 time_tm.tm_sec);
 
-	localtime_r((time_t *)&period_end, &time_tm);
+	slurm_localtime_r((time_t *)&period_end, &time_tm);
 	snprintf(end_char, sizeof(end_char),
 		 "%4.4u-%2.2u-%2.2u"
 		 "T%2.2u:%2.2u:%2.2u",
diff --git a/src/plugins/accounting_storage/common/common_as.h b/src/plugins/accounting_storage/common/common_as.h
index cedaf34e9..4930630ae 100644
--- a/src/plugins/accounting_storage/common/common_as.h
+++ b/src/plugins/accounting_storage/common/common_as.h
@@ -50,7 +50,8 @@ extern void dump_update_list(List update_list);
 
 extern int cluster_first_reg(char *host, uint16_t port, uint16_t rpc_version);
 
-extern int set_usage_information(char **usage_table, slurmdbd_msg_type_t type,
+extern int set_usage_information(char **usage_table,
+				 slurmdbd_msg_type_t type,
 				 time_t *usage_start, time_t *usage_end);
 
 extern void merge_delta_qos_list(List qos_list, List delta_qos_list);
diff --git a/src/plugins/accounting_storage/filetxt/Makefile.in b/src/plugins/accounting_storage/filetxt/Makefile.in
index ec796a1c8..213281c40 100644
--- a/src/plugins/accounting_storage/filetxt/Makefile.in
+++ b/src/plugins/accounting_storage/filetxt/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -275,6 +278,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -324,8 +329,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -344,6 +353,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -387,6 +399,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -410,6 +423,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c b/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
index cb61a0f89..83ed28233 100644
--- a/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
+++ b/src/plugins/accounting_storage/filetxt/accounting_storage_filetxt.c
@@ -65,16 +65,13 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "Accounting storage FileTxt plugin";
 const char plugin_type[] = "accounting_storage/filetxt";
-const uint32_t plugin_version = 100;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
+
 static FILE *		LOGFILE;
 static int		LOGFILE_FD;
 static pthread_mutex_t  logfile_lock = PTHREAD_MUTEX_INITIALIZER;
@@ -304,8 +301,14 @@ extern int acct_storage_p_add_clusters(void *db_conn, uint32_t uid,
 	return SLURM_SUCCESS;
 }
 
-extern int acct_storage_p_add_associations(void *db_conn, uint32_t uid,
-					   List association_list)
+extern int acct_storage_p_add_tres(void *db_conn,
+				     uint32_t uid, List tres_list)
+{
+	return SLURM_SUCCESS;
+}
+
+extern int acct_storage_p_add_assocs(void *db_conn, uint32_t uid,
+				     List assoc_list)
 {
 	return SLURM_SUCCESS;
 }
@@ -355,9 +358,9 @@ extern List acct_storage_p_modify_clusters(void *db_conn, uint32_t uid,
 	return SLURM_SUCCESS;
 }
 
-extern List acct_storage_p_modify_associations(void *db_conn, uint32_t uid,
-					      slurmdb_association_cond_t *assoc_q,
-					      slurmdb_association_rec_t *assoc)
+extern List acct_storage_p_modify_assocs(void *db_conn, uint32_t uid,
+					      slurmdb_assoc_cond_t *assoc_q,
+					      slurmdb_assoc_rec_t *assoc)
 {
 	return SLURM_SUCCESS;
 }
@@ -421,8 +424,8 @@ extern List acct_storage_p_remove_clusters(void *db_conn, uint32_t uid,
 	return SLURM_SUCCESS;
 }
 
-extern List acct_storage_p_remove_associations(void *db_conn, uint32_t uid,
-					      slurmdb_association_cond_t *assoc_q)
+extern List acct_storage_p_remove_assocs(void *db_conn, uint32_t uid,
+					      slurmdb_assoc_cond_t *assoc_q)
 {
 	return SLURM_SUCCESS;
 }
@@ -474,8 +477,14 @@ extern List acct_storage_p_get_config(void *db_conn, char *config_name)
 	return NULL;
 }
 
-extern List acct_storage_p_get_associations(void *db_conn, uid_t uid,
-					    slurmdb_association_cond_t *assoc_q)
+extern List acct_storage_p_get_tres(void *db_conn, uid_t uid,
+				      slurmdb_tres_cond_t *tres_cond)
+{
+	return NULL;
+}
+
+extern List acct_storage_p_get_assocs(void *db_conn, uid_t uid,
+				      slurmdb_assoc_cond_t *assoc_q)
 {
 	return NULL;
 }
@@ -487,7 +496,7 @@ extern List acct_storage_p_get_events(void *db_conn, uint32_t uid,
 }
 
 extern List acct_storage_p_get_problems(void *db_conn, uid_t uid,
-					slurmdb_association_cond_t *assoc_q)
+					slurmdb_assoc_cond_t *assoc_q)
 {
 	return NULL;
 }
@@ -510,7 +519,7 @@ extern List acct_storage_p_get_wckeys(void *db_conn, uid_t uid,
 	return NULL;
 }
 
-extern List acct_storage_p_get_reservations(void *mysql_conn, uid_t uid,
+extern List acct_storage_p_get_reservations(void *db_conn, uid_t uid,
 					    slurmdb_reservation_cond_t *resv_cond)
 {
 	return NULL;
@@ -572,9 +581,9 @@ extern int clusteracct_storage_p_fini_ctld(void *db_conn,
 	return SLURM_SUCCESS;
 }
 
-extern int clusteracct_storage_p_cluster_cpus(void *db_conn,
+extern int clusteracct_storage_p_cluster_tres(void *db_conn,
 					      char *cluster_nodes,
-					      uint32_t cpus,
+					      char *tres_str_in,
 					      time_t event_time)
 {
 	return SLURM_SUCCESS;
@@ -648,7 +657,7 @@ extern int jobacct_storage_p_job_complete(void *db_conn,
 					  struct job_record *job_ptr)
 {
 	char buf[BUFFER_SIZE];
-	uint16_t job_state;
+	uint32_t job_state;
 	int duration;
 	uint32_t exit_code;
 
diff --git a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
index 4802d1896..ff5e7968d 100644
--- a/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
+++ b/src/plugins/accounting_storage/filetxt/filetxt_jobacct_process.c
@@ -87,7 +87,7 @@ typedef struct {
 	int32_t priority;
 	uint32_t ncpus;
 	uint32_t ntasks;
-	enum job_states	status;
+	uint32_t status; 		/* job state */
 	int32_t	exitcode;
 	uint32_t elapsed;
 	time_t end;
@@ -105,7 +105,7 @@ typedef struct {
 	uint32_t	stepnum;	/* job's step number */
 	char	        *nodes;
 	char	        *stepname;
-	enum job_states	status;
+	uint32_t 	status; 	/* job state */
 	int32_t	        exitcode;
 	uint32_t	ntasks;
 	uint32_t        ncpus;
@@ -224,8 +224,7 @@ static void _destroy_filetxt_job_rec(void *object)
 {
 	filetxt_job_rec_t *job = (filetxt_job_rec_t *)object;
 	if (job) {
-		if (job->steps)
-			list_destroy(job->steps);
+		FREE_NULL_LIST(job->steps);
 		_free_filetxt_header(&job->header);
 		xfree(job->jobname);
 		xfree(job->account);
@@ -254,7 +253,9 @@ static slurmdb_step_rec_t *_slurmdb_create_step_rec(
 	slurmdb_step->elapsed = filetxt_step->elapsed;
 	slurmdb_step->end = filetxt_step->end;
 	slurmdb_step->exitcode = filetxt_step->exitcode;
-	slurmdb_step->ncpus = filetxt_step->ncpus;
+	slurmdb_step->tres_alloc_str = xstrdup_printf(
+		"cpu=%u", filetxt_step->ncpus);
+
 	if (filetxt_step->nodes) {
 		hostlist_t hl = hostlist_create(filetxt_step->nodes);
 		slurmdb_step->nnodes = hostlist_count(hl);
@@ -319,7 +320,9 @@ no_cond:
 	slurmdb_job->jobname = xstrdup(filetxt_job->jobname);
 	slurmdb_job->partition = xstrdup(filetxt_job->header.partition);
 	slurmdb_job->req_cpus = filetxt_job->ncpus;
-	slurmdb_job->alloc_cpus = filetxt_job->ncpus;
+	slurmdb_job->tres_alloc_str = xstrdup_printf(
+		"cpu=%u", filetxt_job->ncpus);
+
 	if (filetxt_job->nodes) {
 		hostlist_t hl = hostlist_create(filetxt_job->nodes);
 		slurmdb_job->alloc_nodes = hostlist_count(hl);
@@ -1124,7 +1127,7 @@ extern List filetxt_jobacct_process_get_jobs(slurmdb_job_cond_t *job_cond)
 		list_iterator_destroy(itr2);
 
 	list_iterator_destroy(itr);
-	list_destroy(job_list);
+	FREE_NULL_LIST(job_list);
 
 	xfree(filein);
 
@@ -1447,9 +1450,9 @@ finished:
 	xfree(filein);
 
 	fclose(fd);
-	list_destroy(exp_list);
-	list_destroy(keep_list);
-	list_destroy(other_list);
+	FREE_NULL_LIST(exp_list);
+	FREE_NULL_LIST(keep_list);
+	FREE_NULL_LIST(other_list);
 	xfree(old_logfile_name);
 	xfree(logfile_name);
 
diff --git a/src/plugins/accounting_storage/mysql/Makefile.am b/src/plugins/accounting_storage/mysql/Makefile.am
index cf318c21b..bcb364c1b 100644
--- a/src/plugins/accounting_storage/mysql/Makefile.am
+++ b/src/plugins/accounting_storage/mysql/Makefile.am
@@ -8,9 +8,11 @@ AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
 
 AS_MYSQL_SOURCES = accounting_storage_mysql.c accounting_storage_mysql.h \
 		as_mysql_acct.c as_mysql_acct.h \
+		as_mysql_tres.c as_mysql_tres.h \
 		as_mysql_archive.c as_mysql_archive.h \
 		as_mysql_assoc.c as_mysql_assoc.h \
 		as_mysql_cluster.c as_mysql_cluster.h \
+		as_mysql_convert.c as_mysql_convert.h \
 		as_mysql_job.c as_mysql_job.h \
 		as_mysql_jobacct_process.c as_mysql_jobacct_process.h \
 		as_mysql_problems.c as_mysql_problems.h \
diff --git a/src/plugins/accounting_storage/mysql/Makefile.in b/src/plugins/accounting_storage/mysql/Makefile.in
index 885c68870..4c7167039 100644
--- a/src/plugins/accounting_storage/mysql/Makefile.in
+++ b/src/plugins/accounting_storage/mysql/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -166,10 +169,11 @@ am__DEPENDENCIES_1 =
 @WITH_MYSQL_TRUE@	../common/libaccounting_storage_common.la
 am__accounting_storage_mysql_la_SOURCES_DIST =  \
 	accounting_storage_mysql.c accounting_storage_mysql.h \
-	as_mysql_acct.c as_mysql_acct.h as_mysql_archive.c \
-	as_mysql_archive.h as_mysql_assoc.c as_mysql_assoc.h \
-	as_mysql_cluster.c as_mysql_cluster.h as_mysql_job.c \
-	as_mysql_job.h as_mysql_jobacct_process.c \
+	as_mysql_acct.c as_mysql_acct.h as_mysql_tres.c \
+	as_mysql_tres.h as_mysql_archive.c as_mysql_archive.h \
+	as_mysql_assoc.c as_mysql_assoc.h as_mysql_cluster.c \
+	as_mysql_cluster.h as_mysql_convert.c as_mysql_convert.h \
+	as_mysql_job.c as_mysql_job.h as_mysql_jobacct_process.c \
 	as_mysql_jobacct_process.h as_mysql_problems.c \
 	as_mysql_problems.h as_mysql_qos.c as_mysql_qos.h \
 	as_mysql_resource.c as_mysql_resource.h as_mysql_resv.c \
@@ -180,9 +184,11 @@ am__accounting_storage_mysql_la_SOURCES_DIST =  \
 am__objects_1 =  \
 	accounting_storage_mysql_la-accounting_storage_mysql.lo \
 	accounting_storage_mysql_la-as_mysql_acct.lo \
+	accounting_storage_mysql_la-as_mysql_tres.lo \
 	accounting_storage_mysql_la-as_mysql_archive.lo \
 	accounting_storage_mysql_la-as_mysql_assoc.lo \
 	accounting_storage_mysql_la-as_mysql_cluster.lo \
+	accounting_storage_mysql_la-as_mysql_convert.lo \
 	accounting_storage_mysql_la-as_mysql_job.lo \
 	accounting_storage_mysql_la-as_mysql_jobacct_process.lo \
 	accounting_storage_mysql_la-as_mysql_problems.lo \
@@ -198,10 +204,11 @@ am__objects_1 =  \
 @WITH_MYSQL_TRUE@	$(am__objects_1)
 am__EXTRA_accounting_storage_mysql_la_SOURCES_DIST =  \
 	accounting_storage_mysql.c accounting_storage_mysql.h \
-	as_mysql_acct.c as_mysql_acct.h as_mysql_archive.c \
-	as_mysql_archive.h as_mysql_assoc.c as_mysql_assoc.h \
-	as_mysql_cluster.c as_mysql_cluster.h as_mysql_job.c \
-	as_mysql_job.h as_mysql_jobacct_process.c \
+	as_mysql_acct.c as_mysql_acct.h as_mysql_tres.c \
+	as_mysql_tres.h as_mysql_archive.c as_mysql_archive.h \
+	as_mysql_assoc.c as_mysql_assoc.h as_mysql_cluster.c \
+	as_mysql_cluster.h as_mysql_convert.c as_mysql_convert.h \
+	as_mysql_job.c as_mysql_job.h as_mysql_jobacct_process.c \
 	as_mysql_jobacct_process.h as_mysql_problems.c \
 	as_mysql_problems.h as_mysql_qos.c as_mysql_qos.h \
 	as_mysql_resource.c as_mysql_resource.h as_mysql_resv.c \
@@ -325,6 +332,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -374,8 +383,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -394,6 +407,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -437,6 +453,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -460,6 +477,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -526,9 +544,11 @@ PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
 AS_MYSQL_SOURCES = accounting_storage_mysql.c accounting_storage_mysql.h \
 		as_mysql_acct.c as_mysql_acct.h \
+		as_mysql_tres.c as_mysql_tres.h \
 		as_mysql_archive.c as_mysql_archive.h \
 		as_mysql_assoc.c as_mysql_assoc.h \
 		as_mysql_cluster.c as_mysql_cluster.h \
+		as_mysql_convert.c as_mysql_convert.h \
 		as_mysql_job.c as_mysql_job.h \
 		as_mysql_jobacct_process.c as_mysql_jobacct_process.h \
 		as_mysql_problems.c as_mysql_problems.h \
@@ -636,6 +656,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-as_mysql_archive.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-as_mysql_assoc.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-as_mysql_cluster.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-as_mysql_convert.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-as_mysql_job.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-as_mysql_jobacct_process.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-as_mysql_problems.Plo@am__quote@
@@ -643,6 +664,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-as_mysql_resource.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-as_mysql_resv.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-as_mysql_rollup.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-as_mysql_tres.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-as_mysql_txn.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-as_mysql_usage.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/accounting_storage_mysql_la-as_mysql_user.Plo@am__quote@
@@ -683,6 +705,13 @@ accounting_storage_mysql_la-as_mysql_acct.lo: as_mysql_acct.c
 @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) -c -o accounting_storage_mysql_la-as_mysql_acct.lo `test -f 'as_mysql_acct.c' || echo '$(srcdir)/'`as_mysql_acct.c
 
+accounting_storage_mysql_la-as_mysql_tres.lo: as_mysql_tres.c
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) -MT accounting_storage_mysql_la-as_mysql_tres.lo -MD -MP -MF $(DEPDIR)/accounting_storage_mysql_la-as_mysql_tres.Tpo -c -o accounting_storage_mysql_la-as_mysql_tres.lo `test -f 'as_mysql_tres.c' || echo '$(srcdir)/'`as_mysql_tres.c
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/accounting_storage_mysql_la-as_mysql_tres.Tpo $(DEPDIR)/accounting_storage_mysql_la-as_mysql_tres.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='as_mysql_tres.c' object='accounting_storage_mysql_la-as_mysql_tres.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) -c -o accounting_storage_mysql_la-as_mysql_tres.lo `test -f 'as_mysql_tres.c' || echo '$(srcdir)/'`as_mysql_tres.c
+
 accounting_storage_mysql_la-as_mysql_archive.lo: as_mysql_archive.c
 @am__fastdepCC_TRUE@	$(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) -MT accounting_storage_mysql_la-as_mysql_archive.lo -MD -MP -MF $(DEPDIR)/accounting_storage_mysql_la-as_mysql_archive.Tpo -c -o accounting_storage_mysql_la-as_mysql_archive.lo `test -f 'as_mysql_archive.c' || echo '$(srcdir)/'`as_mysql_archive.c
 @am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/accounting_storage_mysql_la-as_mysql_archive.Tpo $(DEPDIR)/accounting_storage_mysql_la-as_mysql_archive.Plo
@@ -704,6 +733,13 @@ accounting_storage_mysql_la-as_mysql_cluster.lo: as_mysql_cluster.c
 @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) -c -o accounting_storage_mysql_la-as_mysql_cluster.lo `test -f 'as_mysql_cluster.c' || echo '$(srcdir)/'`as_mysql_cluster.c
 
+accounting_storage_mysql_la-as_mysql_convert.lo: as_mysql_convert.c
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) -MT accounting_storage_mysql_la-as_mysql_convert.lo -MD -MP -MF $(DEPDIR)/accounting_storage_mysql_la-as_mysql_convert.Tpo -c -o accounting_storage_mysql_la-as_mysql_convert.lo `test -f 'as_mysql_convert.c' || echo '$(srcdir)/'`as_mysql_convert.c
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/accounting_storage_mysql_la-as_mysql_convert.Tpo $(DEPDIR)/accounting_storage_mysql_la-as_mysql_convert.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='as_mysql_convert.c' object='accounting_storage_mysql_la-as_mysql_convert.lo' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) -c -o accounting_storage_mysql_la-as_mysql_convert.lo `test -f 'as_mysql_convert.c' || echo '$(srcdir)/'`as_mysql_convert.c
+
 accounting_storage_mysql_la-as_mysql_job.lo: as_mysql_job.c
 @am__fastdepCC_TRUE@	$(AM_V_CC)$(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(accounting_storage_mysql_la_CFLAGS) $(CFLAGS) -MT accounting_storage_mysql_la-as_mysql_job.lo -MD -MP -MF $(DEPDIR)/accounting_storage_mysql_la-as_mysql_job.Tpo -c -o accounting_storage_mysql_la-as_mysql_job.lo `test -f 'as_mysql_job.c' || echo '$(srcdir)/'`as_mysql_job.c
 @am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/accounting_storage_mysql_la-as_mysql_job.Tpo $(DEPDIR)/accounting_storage_mysql_la-as_mysql_job.Plo
diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
index 35958f299..670923b4a 100644
--- a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
+++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.c
@@ -48,9 +48,11 @@
 
 #include "accounting_storage_mysql.h"
 #include "as_mysql_acct.h"
+#include "as_mysql_tres.h"
 #include "as_mysql_archive.h"
 #include "as_mysql_assoc.h"
 #include "as_mysql_cluster.h"
+#include "as_mysql_convert.h"
 #include "as_mysql_job.h"
 #include "as_mysql_jobacct_process.h"
 #include "as_mysql_problems.h"
@@ -104,16 +106,12 @@ pthread_mutex_t as_mysql_cluster_list_lock = PTHREAD_MUTEX_INITIALIZER;
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "accounting_storage/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "Accounting storage MYSQL plugin";
 const char plugin_type[] = "accounting_storage/as_mysql";
-const uint32_t plugin_version = 100;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 static mysql_db_info_t *mysql_db_info = NULL;
 static char *mysql_db_name = NULL;
@@ -122,6 +120,7 @@ static char *mysql_db_name = NULL;
 
 char *acct_coord_table = "acct_coord_table";
 char *acct_table = "acct_table";
+char *tres_table = "tres_table";
 char *assoc_day_table = "assoc_usage_day_table";
 char *assoc_hour_table = "assoc_usage_hour_table";
 char *assoc_month_table = "assoc_usage_month_table";
@@ -146,6 +145,15 @@ char *wckey_hour_table = "wckey_usage_hour_table";
 char *wckey_month_table = "wckey_usage_month_table";
 char *wckey_table = "wckey_table";
 
+char *event_view = "event_view";
+char *event_ext_view = "event_ext_view";
+char *job_view = "job_view";
+char *job_ext_view = "job_ext_view";
+char *resv_view = "resv_view";
+char *resv_ext_view = "resv_ext_view";
+char *step_view = "step_view";
+char *step_ext_view = "step_ext_view";
+
 uint64_t debug_flags = 0;
 
 static char *default_qos_str = NULL;
@@ -445,6 +453,36 @@ static bool _check_jobs_before_remove_without_assoctable(
 	return rc;
 }
 
+/* static int _add_remove_tres_limit(char *tres_limit_str, char *name, */
+/* 				  char **cols, char **vals, char **extra) */
+/* { */
+/* 	int rc = SLURM_SUCCESS; */
+/* 	char *tmp_str = tres_limit_str; */
+/* 	uint64_t value; */
+/* 	bool first = true; */
+
+/* 	if (!tmp_str || !tmp_str[0]) */
+/* 		return SLURM_SUCCESS; */
+
+/* 	while (tmp_str) { */
+/* 		if (id == atoi(tmp_str)) { */
+/* 			if (!(tmp_str = strchr(tmp_str, '='))) { */
+/* 				error("_add_remove_tres_limit: no value found"); */
+/* 				rc = SLURM_ERROR; */
+/* 				break; */
+/* 			} */
+/* 			if (first) */
+/* 				slurm_atoull(++tmp_str); */
+/* 		} */
+
+/* 		if (!(tmp_str = strchr(tmp_str, ','))) */
+/* 			break; */
+/* 		tmp_str++; */
+/* 	} */
+
+/* 	return SLURM_SUCCESS; */
+/* } */
+
 /* Any time a new table is added set it up here */
 static int _as_mysql_acct_check_tables(mysql_conn_t *mysql_conn)
 {
@@ -467,6 +505,15 @@ static int _as_mysql_acct_check_tables(mysql_conn_t *mysql_conn)
 		{ NULL, NULL}
 	};
 
+	storage_field_t tres_table_fields[] = {
+		{ "creation_time", "int unsigned not null" },
+		{ "deleted", "tinyint default 0 not null" },
+		{ "id", "int not null auto_increment" },
+		{ "type", "tinytext not null" },
+		{ "name", "tinytext not null default ''" },
+		{ NULL, NULL}
+	};
+
 	storage_field_t cluster_table_fields[] = {
 		{ "creation_time", "int unsigned not null" },
 		{ "mod_time", "int unsigned default 0 not null" },
@@ -504,27 +551,24 @@ static int _as_mysql_acct_check_tables(mysql_conn_t *mysql_conn)
 		{ "grace_time", "int unsigned default NULL" },
 		{ "max_jobs_per_user", "int default NULL" },
 		{ "max_submit_jobs_per_user", "int default NULL" },
-		{ "max_cpus_per_job", "int default NULL" },
-		{ "max_cpus_per_user", "int default NULL" },
-		{ "max_nodes_per_job", "int default NULL" },
-		{ "max_nodes_per_user", "int default NULL" },
+		{ "max_tres_pj", "text not null default ''" },
+		{ "max_tres_pn", "text not null default ''" },
+		{ "max_tres_pu", "text not null default ''" },
+		{ "max_tres_mins_pj", "text not null default ''" },
+		{ "max_tres_run_mins_pu", "text not null default ''" },
+		{ "min_tres_pj", "text not null default ''" },
 		{ "max_wall_duration_per_job", "int default NULL" },
-		{ "max_cpu_mins_per_job", "bigint default NULL" },
-		{ "max_cpu_run_mins_per_user", "bigint default NULL" },
 		{ "grp_jobs", "int default NULL" },
 		{ "grp_submit_jobs", "int default NULL" },
-		{ "grp_cpus", "int default NULL" },
-		{ "grp_mem", "int default NULL" },
-		{ "grp_nodes", "int default NULL" },
+		{ "grp_tres", "text not null default ''" },
+		{ "grp_tres_mins", "text not null default ''" },
+		{ "grp_tres_run_mins", "text not null default ''" },
 		{ "grp_wall", "int default NULL" },
-		{ "grp_cpu_mins", "bigint default NULL" },
-		{ "grp_cpu_run_mins", "bigint default NULL" },
 		{ "preempt", "text not null default ''" },
 		{ "preempt_mode", "int default 0" },
 		{ "priority", "int unsigned default 0" },
 		{ "usage_factor", "double default 1.0 not null" },
 		{ "usage_thres", "double default NULL" },
-		{ "min_cpus_per_job", "int unsigned default 1 not null" },
 		{ NULL, NULL}
 	};
 
@@ -571,11 +615,11 @@ static int _as_mysql_acct_check_tables(mysql_conn_t *mysql_conn)
 		"set @par_id = NULL; "
 		"set @mj = NULL; "
 		"set @msj = NULL; "
-		"set @mcpj = NULL; "
-		"set @mnpj = NULL; "
 		"set @mwpj = NULL; "
-		"set @mcmpj = NULL; "
-		"set @mcrm = NULL; "
+		"set @mtpj = ''; "
+		"set @mtpn = ''; "
+		"set @mtmpj = ''; "
+		"set @mtrm = ''; "
 		"set @def_qos_id = NULL; "
 		"set @qos = ''; "
 		"set @delta_qos = ''; "
@@ -583,14 +627,9 @@ static int _as_mysql_acct_check_tables(mysql_conn_t *mysql_conn)
 		"if without_limits then "
 		"set @mj = 0; "
 		"set @msj = 0; "
-		"set @mcpj = 0; "
-		"set @mnpj = 0; "
 		"set @mwpj = 0; "
-		"set @mcmpj = 0; "
-		"set @mcrm = 0; "
 		"set @def_qos_id = 0; "
-		"set @qos = 0; "
-		"set @delta_qos = 0; "
+		"set @qos = 1; "
 		"end if; "
 		"REPEAT "
 		"set @s = 'select '; "
@@ -603,21 +642,9 @@ static int _as_mysql_acct_check_tables(mysql_conn_t *mysql_conn)
 		"if @msj is NULL then set @s = CONCAT("
 		"@s, '@msj := max_submit_jobs, '); "
 		"end if; "
-		"if @mcpj is NULL then set @s = CONCAT("
-		"@s, '@mcpj := max_cpus_pj, ') ;"
-		"end if; "
-		"if @mnpj is NULL then set @s = CONCAT("
-		"@s, '@mnpj := max_nodes_pj, ') ;"
-		"end if; "
 		"if @mwpj is NULL then set @s = CONCAT("
 		"@s, '@mwpj := max_wall_pj, '); "
 		"end if; "
-		"if @mcmpj is NULL then set @s = CONCAT("
-		"@s, '@mcmpj := max_cpu_mins_pj, '); "
-		"end if; "
-		"if @mcrm is NULL then set @s = CONCAT("
-		"@s, '@mcrm := max_cpu_run_mins, '); "
-		"end if; "
 		"if @def_qos_id is NULL then set @s = CONCAT("
 		"@s, '@def_qos_id := def_qos_id, '); "
 		"end if; "
@@ -626,21 +653,140 @@ static int _as_mysql_acct_check_tables(mysql_conn_t *mysql_conn)
 		"@delta_qos := REPLACE(CONCAT(delta_qos, @delta_qos), "
 		"\\\',,\\\', \\\',\\\'), '); "
 		"end if; "
-		"set @s = concat(@s, '@my_acct := parent_acct from \"', "
+		/* "set @s = CONCAT(@s, @mtpj := REPLACE(CONCAT(@mtpj, max_tres_pj), " */
+		/* "\\\',,\\\', \\\',\\\'), '); " */
+		/* "@mtmpj := REPLACE(CONCAT(@mtmpj, max_tres_mins_pj), " */
+		/* "\\\',,\\\', \\\',\\\'), '); " */
+		/* "@mtrm := REPLACE(CONCAT(@mtrm, max_tres_run_mins), " */
+		/* "\\\',,\\\', \\\',\\\'), '); " */
+		"set @s = concat(@s, "
+		"'@mtpj := REPLACE(CONCAT(@mtpj, max_tres_pj), "
+		"\\\',,\\\', \\\',\\\'), "
+		"@mtpn := REPLACE(CONCAT(@mtpn, max_tres_pn), "
+		"\\\',,\\\', \\\',\\\'), "
+		"@mtmpj := REPLACE(CONCAT(@mtmpj, max_tres_mins_pj), "
+		"\\\',,\\\', \\\',\\\'), "
+		"@mtrm := REPLACE(CONCAT(@mtrm, max_tres_run_mins), "
+		"\\\',,\\\', \\\',\\\'), "
+		"@my_acct_new := parent_acct from \"', "
 		"cluster, '_', my_table, '\" where "
 		"acct = \\\'', @my_acct, '\\\' && user=\\\'\\\''); "
 		"prepare query from @s; "
 		"execute query; "
 		"deallocate prepare query; "
-		"UNTIL (@mj != -1 && @msj != -1 && @mcpj != -1 "
-		"&& @mnpj != -1 && @mwpj != -1 && @mcmpj != -1 "
-		"&& @mcrm != -1 && @def_qos_id != -1 && @qos != '') "
-		"|| @my_acct = '' END REPEAT; "
+		"set @my_acct = @my_acct_new; "
+		"UNTIL without_limits || @my_acct = '' END REPEAT; "
+		"END;";
+	/* char *get_parent_proc = */
+	/* 	"drop procedure if exists get_parent_limits; " */
+	/* 	"create procedure get_parent_limits(" */
+	/* 	"my_table text, acct text, cluster text, without_limits int) " */
+	/* 	"begin " */
+	/* 	"set @par_id = NULL; " */
+	/* 	"set @mj = NULL; " */
+	/* 	"set @msj = NULL; " */
+	/* 	"set @mcpj = NULL; " */
+	/* 	"set @mnpj = NULL; " */
+	/* 	"set @mwpj = NULL; " */
+	/* 	"set @mcmpj = NULL; " */
+	/* 	"set @mcrm = NULL; " */
+	/* 	"set @def_qos_id = NULL; " */
+	/* 	"set @qos = ''; " */
+	/* 	"set @delta_qos = ''; " */
+	/* 	"set @my_acct = acct; " */
+	/* 	"if without_limits then " */
+	/* 	"set @mj = 0; " */
+	/* 	"set @msj = 0; " */
+	/* 	"set @mcpj = 0; " */
+	/* 	"set @mnpj = 0; " */
+	/* 	"set @mwpj = 0; " */
+	/* 	"set @mcmpj = 0; " */
+	/* 	"set @mcrm = 0; " */
+	/* 	"set @def_qos_id = 0; " */
+	/* 	"set @qos = 1; " */
+	/* 	"end if; " */
+	/* 	"REPEAT " */
+	/* 	"set @s = 'select '; " */
+	/* 	"if @par_id is NULL then set @s = CONCAT(" */
+	/* 	"@s, '@par_id := id_assoc, '); " */
+	/* 	"end if; " */
+	/* 	"if @mj is NULL then set @s = CONCAT(" */
+	/* 	"@s, '@mj := max_jobs, '); " */
+	/* 	"end if; " */
+	/* 	"if @msj is NULL then set @s = CONCAT(" */
+	/* 	"@s, '@msj := max_submit_jobs, '); " */
+	/* 	"end if; " */
+	/* 	"if @mcpj is NULL then set @s = CONCAT(" */
+	/* 	"@s, '@mcpj := max_cpus_pj, ') ;" */
+	/* 	"end if; " */
+	/* 	"if @mnpj is NULL then set @s = CONCAT(" */
+	/* 	"@s, '@mnpj := max_nodes_pj, ') ;" */
+	/* 	"end if; " */
+	/* 	"if @mwpj is NULL then set @s = CONCAT(" */
+	/* 	"@s, '@mwpj := max_wall_pj, '); " */
+	/* 	"end if; " */
+	/* 	"if @mcmpj is NULL then set @s = CONCAT(" */
+	/* 	"@s, '@mcmpj := max_cpu_mins_pj, '); " */
+	/* 	"end if; " */
+	/* 	"if @mcrm is NULL then set @s = CONCAT(" */
+	/* 	"@s, '@mcrm := max_cpu_run_mins, '); " */
+	/* 	"end if; " */
+	/* 	"if @def_qos_id is NULL then set @s = CONCAT(" */
+	/* 	"@s, '@def_qos_id := def_qos_id, '); " */
+	/* 	"end if; " */
+	/* 	"if @qos = '' then set @s = CONCAT(" */
+	/* 	"@s, '@qos := qos, " */
+	/* 	"@delta_qos := REPLACE(CONCAT(delta_qos, @delta_qos), " */
+	/* 	"\\\',,\\\', \\\',\\\'), '); " */
+	/* 	"end if; " */
+	/* 	"set @s = concat(@s, '@my_acct_new := parent_acct from \"', " */
+	/* 	"cluster, '_', my_table, '\" where " */
+	/* 	"acct = \\\'', @my_acct, '\\\' && user=\\\'\\\''); " */
+	/* 	"prepare query from @s; " */
+	/* 	"execute query; " */
+	/* 	"deallocate prepare query; " */
+	/* 	"set @my_acct = @my_acct_new; " */
+	/* 	"UNTIL (@mj != -1 && @msj != -1 && @mcpj != -1 " */
+	/* 	"&& @mnpj != -1 && @mwpj != -1 && @mcmpj != -1 " */
+	/* 	"&& @mcrm != -1 && @def_qos_id != -1 && @qos != '') " */
+	/* 	"|| @my_acct = '' END REPEAT; " */
+	/* 	"END;"; */
+	char *get_coord_qos =
+		"drop procedure if exists get_coord_qos; "
+		"create procedure get_coord_qos(my_table text, acct text, "
+		"cluster text, coord text) "
+		"begin "
+		"set @qos = ''; "
+		"set @delta_qos = ''; "
+		"set @found_coord = NULL; "
+		"set @my_acct = acct; "
+		"REPEAT "
+		"set @s = 'select @qos := t1.qos, "
+		"@delta_qos := REPLACE(CONCAT(t1.delta_qos, @delta_qos), "
+		"\\\',,\\\', \\\',\\\'), @my_acct_new := parent_acct, "
+		"@found_coord_curr := t2.user '; "
+		"set @s = concat(@s, 'from \"', cluster, '_', my_table, '\" "
+		"as t1 left outer join acct_coord_table as t2 on "
+		"t1.acct=t2.acct where t1.acct = @my_acct && t1.user=\\\'\\\' "
+		"&& (t2.user=\\\'', coord, '\\\' || t2.user is null)'); "
+		"prepare query from @s; "
+		"execute query; "
+		"deallocate prepare query; "
+		"if @found_coord_curr is not NULL then "
+		"set @found_coord = @found_coord_curr; "
+		"end if; "
+		"if @found_coord is NULL then "
+		"set @qos = ''; "
+		"set @delta_qos = ''; "
+		"end if; "
+		"set @my_acct = @my_acct_new; "
+		"UNTIL @qos != '' || @my_acct = '' END REPEAT; "
+		"select REPLACE(CONCAT(@qos, @delta_qos), ',,', ','); "
 		"END;";
 	char *query = NULL;
 	time_t now = time(NULL);
 	char *cluster_name = NULL;
-	int rc = SLURM_SUCCESS;
+	int rc = SLURM_SUCCESS, rc2;
 	ListIterator itr = NULL;
 
 	/* Make the cluster table first since we build other tables
@@ -657,6 +803,39 @@ static int _as_mysql_acct_check_tables(mysql_conn_t *mysql_conn)
 				  ", primary key (id))") == SLURM_ERROR)
 		return SLURM_ERROR;
 
+	if (mysql_db_create_table(mysql_conn, tres_table,
+				  tres_table_fields,
+				  ", primary key (id), "
+				  "unique index (type(20), name(20))) "
+				  "auto_increment=1001")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+	else {
+		/* We always want CPU to be the first one, so create
+		   it now.  We also add MEM here; the other TRES
+		   are site specific and could vary.  Ordering only
+		   matters for CPU though.  CPU always has to be 1.
+		*/
+		query = xstrdup_printf(
+			"insert into %s (creation_time, id, type) values "
+			"(%ld, %d, 'cpu'), "
+			"(%ld, %d, 'mem'), "
+			"(%ld, %d, 'energy'), "
+			"(%ld, %d, 'node') "
+			"on duplicate key update deleted=0, type=VALUES(type);",
+			tres_table,
+			now, TRES_CPU,
+			now, TRES_MEM,
+			now, TRES_ENERGY,
+			now, TRES_NODE);
+		if (debug_flags & DEBUG_FLAG_DB_QOS)
+			DB_DEBUG(mysql_conn->conn, "%s", query);
+		rc = mysql_db_query(mysql_conn, query);
+		xfree(query);
+		if (rc != SLURM_SUCCESS)
+			fatal("problem adding tres 'cpu'");
+	}
+
 	slurm_mutex_lock(&as_mysql_cluster_list_lock);
 	if (!(as_mysql_cluster_list = _get_cluster_names(mysql_conn, 0))) {
 		error("issue getting contents of %s", cluster_table);
@@ -675,6 +854,12 @@ static int _as_mysql_acct_check_tables(mysql_conn_t *mysql_conn)
 		return SLURM_ERROR;
 	}
 
+	if (as_mysql_convert_tables(mysql_conn) != SLURM_SUCCESS) {
+		error("issue converting tables");
+		slurm_mutex_unlock(&as_mysql_cluster_list_lock);
+		return SLURM_ERROR;
+	}
+
 	/* might as well do all the cluster centric tables inside this
 	 * lock.  We need to do this on all the clusters deleted or
 	 * other wise just to make sure everything is kept up to
@@ -758,7 +943,7 @@ static int _as_mysql_acct_check_tables(mysql_conn_t *mysql_conn)
 				xfree(query);
 			}
 			list_iterator_destroy(itr);
-			list_destroy(char_list);
+			FREE_NULL_LIST(char_list);
 		} else {
 			query = xstrdup_printf(
 				"insert into %s "
@@ -778,21 +963,6 @@ static int _as_mysql_acct_check_tables(mysql_conn_t *mysql_conn)
 			xfree(query);
 		}
 
-		/* This was introduced in 2.6.7, once 2 versions of Slurm go
-		 * by we can remove this.
-		 * We need to have the last character in a preempt to
-		 * be ','.  In older versions of Slurm this was not the case. */
-		query = xstrdup_printf(
-			"update %s set "
-			"preempt=if(preempt='', '', concat(preempt, ',')) "
-			"where preempt not like '%%,';",
-			qos_table);
-		debug4("%d(%s:%d) query\n%s",
-		       mysql_conn->conn, THIS_FILE, __LINE__, query);
-		if (mysql_db_query(mysql_conn, query) != SLURM_SUCCESS)
-			error("Couldn't update qos_table!");
-		xfree(query);
-
 		if (_set_qos_cnt(mysql_conn) != SLURM_SUCCESS)
 			return SLURM_ERROR;
 	}
@@ -802,7 +972,12 @@ static int _as_mysql_acct_check_tables(mysql_conn_t *mysql_conn)
 				  ", primary key (name(20)))") == SLURM_ERROR)
 		return SLURM_ERROR;
 
-	rc = mysql_db_query(mysql_conn, get_parent_proc);
+	rc2 = mysql_db_query(mysql_conn, get_parent_proc);
+	if (rc2 != SLURM_SUCCESS)
+		rc = rc2;
+	rc2 = mysql_db_query(mysql_conn, get_coord_qos);
+	if (rc2 != SLURM_SUCCESS)
+		rc = rc2;
 
 	/* Add user root to be a user by default and have this default
 	 * account be root.  If already there just update
@@ -839,6 +1014,8 @@ extern int check_connection(mysql_conn_t *mysql_conn)
 		errno = ESLURM_DB_CONNECTION;
 		return ESLURM_DB_CONNECTION;
 	} else if (mysql_db_ping(mysql_conn) != 0) {
+		/* avoid memory leak and end thread */
+		mysql_db_close_db_connection(mysql_conn);
 		if (mysql_db_get_db_connection(
 			    mysql_conn, mysql_db_name, mysql_db_info)
 		    != SLURM_SUCCESS) {
@@ -900,7 +1077,7 @@ extern int create_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name)
 		{ "mod_time", "int unsigned default 0 not null" },
 		{ "deleted", "tinyint default 0 not null" },
 		{ "is_def", "tinyint default 0 not null" },
-		{ "id_assoc", "int not null auto_increment" },
+		{ "id_assoc", "int unsigned not null auto_increment" },
 		{ "user", "tinytext not null default ''" },
 		{ "acct", "tinytext not null" },
 		{ "partition", "tinytext not null default ''" },
@@ -910,49 +1087,36 @@ extern int create_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name)
 		{ "shares", "int default 1 not null" },
 		{ "max_jobs", "int default NULL" },
 		{ "max_submit_jobs", "int default NULL" },
-		{ "max_cpus_pj", "int default NULL" },
-		{ "max_nodes_pj", "int default NULL" },
+		{ "max_tres_pj", "text not null default ''" },
+		{ "max_tres_pn", "text not null default ''" },
+		{ "max_tres_mins_pj", "text not null default ''" },
+		{ "max_tres_run_mins", "text not null default ''" },
 		{ "max_wall_pj", "int default NULL" },
-		{ "max_cpu_mins_pj", "bigint default NULL" },
-		{ "max_cpu_run_mins", "bigint default NULL" },
 		{ "grp_jobs", "int default NULL" },
 		{ "grp_submit_jobs", "int default NULL" },
-		{ "grp_cpus", "int default NULL" },
-		{ "grp_mem", "int default NULL" },
-		{ "grp_nodes", "int default NULL" },
+		{ "grp_tres", "text not null default ''" },
+		{ "grp_tres_mins", "text not null default ''" },
+		{ "grp_tres_run_mins", "text not null default ''" },
 		{ "grp_wall", "int default NULL" },
-		{ "grp_cpu_mins", "bigint default NULL" },
-		{ "grp_cpu_run_mins", "bigint default NULL" },
 		{ "def_qos_id", "int default NULL" },
 		{ "qos", "blob not null default ''" },
 		{ "delta_qos", "blob not null default ''" },
 		{ NULL, NULL}
 	};
 
-	storage_field_t assoc_usage_table_fields[] = {
-		{ "creation_time", "int unsigned not null" },
-		{ "mod_time", "int unsigned default 0 not null" },
-		{ "deleted", "tinyint default 0 not null" },
-		{ "id_assoc", "int not null" },
-		{ "time_start", "int unsigned not null" },
-		{ "alloc_cpu_secs", "bigint default 0 not null" },
-		{ "consumed_energy", "bigint unsigned default 0 not null" },
-		{ NULL, NULL}
-	};
-
 	storage_field_t cluster_usage_table_fields[] = {
 		{ "creation_time", "int unsigned not null" },
 		{ "mod_time", "int unsigned default 0 not null" },
 		{ "deleted", "tinyint default 0 not null" },
+		{ "id_tres", "int not null" },
 		{ "time_start", "int unsigned not null" },
-		{ "cpu_count", "int default 0 not null" },
-		{ "alloc_cpu_secs", "bigint default 0 not null" },
-		{ "down_cpu_secs", "bigint default 0 not null" },
-		{ "pdown_cpu_secs", "bigint default 0 not null" },
-		{ "idle_cpu_secs", "bigint default 0 not null" },
-		{ "resv_cpu_secs", "bigint default 0 not null" },
-		{ "over_cpu_secs", "bigint default 0 not null" },
-		{ "consumed_energy", "bigint unsigned default 0 not null" },
+		{ "count", "bigint unsigned default 0 not null" },
+		{ "alloc_secs", "bigint unsigned default 0 not null" },
+		{ "down_secs", "bigint unsigned default 0 not null" },
+		{ "pdown_secs", "bigint unsigned default 0 not null" },
+		{ "idle_secs", "bigint unsigned default 0 not null" },
+		{ "resv_secs", "bigint unsigned default 0 not null" },
+		{ "over_secs", "bigint unsigned default 0 not null" },
 		{ NULL, NULL}
 	};
 
@@ -961,15 +1125,26 @@ extern int create_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name)
 		{ "time_end", "int unsigned default 0 not null" },
 		{ "node_name", "tinytext default '' not null" },
 		{ "cluster_nodes", "text not null default ''" },
-		{ "cpu_count", "int not null" },
 		{ "reason", "tinytext not null" },
 		{ "reason_uid", "int unsigned default 0xfffffffe not null" },
 		{ "state", "smallint unsigned default 0 not null" },
+		{ "tres", "text not null default ''" },
+		{ NULL, NULL}
+	};
+
+	storage_field_t id_usage_table_fields[] = {
+		{ "creation_time", "int unsigned not null" },
+		{ "mod_time", "int unsigned default 0 not null" },
+		{ "deleted", "tinyint default 0 not null" },
+		{ "id", "int unsigned not null" },
+		{ "id_tres", "int default 1 not null" },
+		{ "time_start", "int unsigned not null" },
+		{ "alloc_secs", "bigint unsigned default 0 not null" },
 		{ NULL, NULL}
 	};
 
 	storage_field_t job_table_fields[] = {
-		{ "job_db_inx", "int not null auto_increment" },
+		{ "job_db_inx", "int unsigned not null auto_increment" },
 		{ "mod_time", "int unsigned default 0 not null" },
 		{ "deleted", "tinyint default 0 not null" },
 		{ "account", "tinytext" },
@@ -977,7 +1152,6 @@ extern int create_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name)
 		{ "array_max_tasks", "int unsigned default 0 not null" },
 		{ "array_task_pending", "int unsigned default 0 not null" },
 		{ "cpus_req", "int unsigned not null" },
-		{ "cpus_alloc", "int unsigned not null" },
 		{ "derived_ec", "int unsigned default 0 not null" },
 		{ "derived_es", "text" },
 		{ "exit_code", "int unsigned default 0 not null" },
@@ -1011,6 +1185,8 @@ extern int create_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name)
 		{ "gres_used", "text not null default ''" },
 		{ "wckey", "tinytext not null default ''" },
 		{ "track_steps", "tinyint not null" },
+		{ "tres_alloc", "text not null default ''" },
+		{ "tres_req", "text not null default ''" },
 		{ NULL, NULL}
 	};
 
@@ -1025,20 +1201,19 @@ extern int create_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name)
 		{ "id_resv", "int unsigned default 0 not null" },
 		{ "deleted", "tinyint default 0 not null" },
 		{ "assoclist", "text not null default ''" },
-		{ "cpus", "int unsigned not null" },
 		{ "flags", "smallint unsigned default 0 not null" },
 		{ "nodelist", "text not null default ''" },
 		{ "node_inx", "text not null default ''" },
 		{ "resv_name", "text not null" },
 		{ "time_start", "int unsigned default 0 not null"},
 		{ "time_end", "int unsigned default 0 not null" },
+		{ "tres", "text not null default ''" },
 		{ NULL, NULL}
 	};
 
 	storage_field_t step_table_fields[] = {
-		{ "job_db_inx", "int not null" },
+		{ "job_db_inx", "int unsigned not null" },
 		{ "deleted", "tinyint default 0 not null" },
-		{ "cpus_alloc", "int unsigned not null" },
 		{ "exit_code", "int default 0 not null" },
 		{ "id_step", "int not null" },
 		{ "kill_requid", "int default -1 not null" },
@@ -1074,7 +1249,9 @@ extern int create_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name)
 		{ "ave_cpu", "double unsigned default 0.0 not null" },
 		{ "act_cpufreq", "double unsigned default 0.0 not null" },
 		{ "consumed_energy", "double unsigned default 0.0 not null" },
-		{ "req_cpufreq", "int unsigned default 0 not null" },
+		{ "req_cpufreq_min", "int unsigned default 0 not null" },
+		{ "req_cpufreq", "int unsigned default 0 not null" }, /* max */
+		{ "req_cpufreq_gov", "int unsigned default 0 not null" },
 		{ "max_disk_read", "double unsigned default 0.0 not null" },
 		{ "max_disk_read_task", "int unsigned default 0 not null" },
 		{ "max_disk_read_node", "int unsigned default 0 not null" },
@@ -1083,11 +1260,12 @@ extern int create_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name)
 		{ "max_disk_write_task", "int unsigned default 0 not null" },
 		{ "max_disk_write_node", "int unsigned default 0 not null" },
 		{ "ave_disk_write", "double unsigned default 0.0 not null" },
+		{ "tres_alloc", "text not null default ''" },
 		{ NULL, NULL}
 	};
 
 	storage_field_t suspend_table_fields[] = {
-		{ "job_db_inx", "int not null" },
+		{ "job_db_inx", "int unsigned not null" },
 		{ "id_assoc", "int not null" },
 		{ "time_start", "int unsigned default 0 not null" },
 		{ "time_end", "int unsigned default 0 not null" },
@@ -1099,27 +1277,13 @@ extern int create_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name)
 		{ "mod_time", "int unsigned default 0 not null" },
 		{ "deleted", "tinyint default 0 not null" },
 		{ "is_def", "tinyint default 0 not null" },
-		{ "id_wckey", "int not null auto_increment" },
+		{ "id_wckey", "int unsigned not null auto_increment" },
 		{ "wckey_name", "tinytext not null default ''" },
 		{ "user", "tinytext not null" },
 		{ NULL, NULL}
 	};
 
-	storage_field_t wckey_usage_table_fields[] = {
-		{ "creation_time", "int unsigned not null" },
-		{ "mod_time", "int unsigned default 0 not null" },
-		{ "deleted", "tinyint default 0 not null" },
-		{ "id_wckey", "int not null" },
-		{ "time_start", "int unsigned not null" },
-		{ "alloc_cpu_secs", "bigint default 0" },
-		{ "resv_cpu_secs", "bigint default 0" },
-		{ "over_cpu_secs", "bigint default 0" },
-		{ "consumed_energy", "bigint unsigned default 0 not null" },
-		{ NULL, NULL}
-	};
-
 	char table_name[200];
-	char *query = NULL;
 
 	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
 		 cluster_name, assoc_table);
@@ -1128,109 +1292,93 @@ extern int create_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name)
 				  ", primary key (id_assoc), "
 				  "unique index (user(20), acct(20), "
 				  "`partition`(20)), "
-				  "key lft (lft))")
+				  "key lft (lft), key account (acct(20)))")
 	    == SLURM_ERROR)
 		return SLURM_ERROR;
 
-	/* This was introduced in 2.6.7, once 2 versions of Slurm go
-	 * by we can remove this.
-	 * We need to have the last character in a preempt to
-	 * be ','.  In older versions of Slurm this was not the case. */
-	query = xstrdup_printf(
-		"update %s set qos=if(qos='', '', concat(qos, ',')) "
-		"where qos not like '%%,';",
-		table_name);
-	debug4("%d(%s:%d) query\n%s",
-	       mysql_conn->conn, THIS_FILE, __LINE__, query);
-	if (mysql_db_query(mysql_conn, query) != SLURM_SUCCESS)
-		error("Couldn't update %s!", table_name);
-	xfree(query);
-
-	query = xstrdup_printf(
-		"update %s set delta_qos=if(delta_qos='', '', "
-		"concat(delta_qos, ',')) "
-		"where delta_qos not like '%%,';",
-		table_name);
-	if (debug_flags & DEBUG_FLAG_DB_QOS)
-		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
-	if (mysql_db_query(mysql_conn, query) != SLURM_SUCCESS)
-		error("Couldn't update %s!", table_name);
-	xfree(query);
-
 	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
 		 cluster_name, assoc_day_table);
+
 	if (mysql_db_create_table(mysql_conn, table_name,
-				  assoc_usage_table_fields,
-				  ", primary key (id_assoc, "
-				  "time_start))")
+				  id_usage_table_fields,
+				  ", primary key (id, id_tres, time_start))")
 	    == SLURM_ERROR)
 		return SLURM_ERROR;
 
 	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
 		 cluster_name, assoc_hour_table);
+
 	if (mysql_db_create_table(mysql_conn, table_name,
-				  assoc_usage_table_fields,
-				  ", primary key (id_assoc, "
-				  "time_start))")
+				  id_usage_table_fields,
+				  ", primary key (id, id_tres, time_start))")
 	    == SLURM_ERROR)
 		return SLURM_ERROR;
 
 	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
 		 cluster_name, assoc_month_table);
+
 	if (mysql_db_create_table(mysql_conn, table_name,
-				  assoc_usage_table_fields,
-				  ", primary key (id_assoc, "
-				  "time_start))")
+				  id_usage_table_fields,
+				  ", primary key (id, id_tres, time_start))")
 	    == SLURM_ERROR)
 		return SLURM_ERROR;
 
 	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
 		 cluster_name, cluster_day_table);
+
 	if (mysql_db_create_table(mysql_conn, table_name,
 				  cluster_usage_table_fields,
-				  ", primary key (time_start))")
+				  ", primary key (id_tres, time_start))")
 	    == SLURM_ERROR)
 		return SLURM_ERROR;
 
 	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
 		 cluster_name, cluster_hour_table);
+
 	if (mysql_db_create_table(mysql_conn, table_name,
 				  cluster_usage_table_fields,
-				  ", primary key (time_start))")
+				  ", primary key (id_tres, time_start))")
 	    == SLURM_ERROR)
 		return SLURM_ERROR;
 
 	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
 		 cluster_name, cluster_month_table);
+
 	if (mysql_db_create_table(mysql_conn, table_name,
 				  cluster_usage_table_fields,
-				  ", primary key (time_start))")
+				  ", primary key (id_tres, time_start))")
 	    == SLURM_ERROR)
 		return SLURM_ERROR;
 
 	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
 		 cluster_name, event_table);
+
 	if (mysql_db_create_table(mysql_conn, table_name,
 				  event_table_fields,
-				  ", primary key (node_name(20), "
-				  "time_start))")
+				  ", primary key (node_name(20), time_start))")
 	    == SLURM_ERROR)
 		return SLURM_ERROR;
 
 	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
 		 cluster_name, job_table);
+	/* sacct_def is the index for queries with state, as time_start is
+	 * used in these queries. sacct_def2 is for plain sacct queries. */
 	if (mysql_db_create_table(mysql_conn, table_name, job_table_fields,
 				  ", primary key (job_db_inx), "
 				  "unique index (id_job, "
 				  "id_assoc, time_submit), "
 				  "key rollup (time_eligible, time_end), "
+				  "key rollup2 (time_end, time_eligible), "
+				  "key nodes_alloc (nodes_alloc), "
 				  "key wckey (id_wckey), "
 				  "key qos (id_qos), "
 				  "key association (id_assoc), "
 				  "key array_job (id_array_job), "
 				  "key reserv (id_resv), "
 				  "key sacct_def (id_user, time_start, "
-				  "time_end))")
+				  "time_end), "
+				  "key sacct_def2 (id_user, time_end, "
+				  "time_eligible))")
 	    == SLURM_ERROR)
 		return SLURM_ERROR;
 
@@ -1238,7 +1386,8 @@ extern int create_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name)
 		 cluster_name, last_ran_table);
 	if (mysql_db_create_table(mysql_conn, table_name,
 				  last_ran_table_fields,
-				  ")")
+				  ", primary key (hourly_rollup, "
+				  "daily_rollup, monthly_rollup))")
 	    == SLURM_ERROR)
 		return SLURM_ERROR;
 
@@ -1262,7 +1411,8 @@ extern int create_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name)
 		 cluster_name, suspend_table);
 	if (mysql_db_create_table(mysql_conn, table_name,
 				  suspend_table_fields,
-				  ", key job_db_inx_times (job_db_inx, "
+				  ", primary key (job_db_inx, time_start), "
+				  "key job_db_inx_times (job_db_inx, "
 				  "time_start, time_end))") == SLURM_ERROR)
 		return SLURM_ERROR;
 
@@ -1278,28 +1428,28 @@ extern int create_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name)
 
 	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
 		 cluster_name, wckey_day_table);
+
 	if (mysql_db_create_table(mysql_conn, table_name,
-				  wckey_usage_table_fields,
-				  ", primary key (id_wckey, "
-				  "time_start))")
+				  id_usage_table_fields,
+				  ", primary key (id, id_tres, time_start))")
 	    == SLURM_ERROR)
 		return SLURM_ERROR;
 
 	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
 		 cluster_name, wckey_hour_table);
+
 	if (mysql_db_create_table(mysql_conn, table_name,
-				  wckey_usage_table_fields,
-				  ", primary key (id_wckey, "
-				  "time_start))")
+				  id_usage_table_fields,
+				  ", primary key (id, id_tres, time_start))")
 	    == SLURM_ERROR)
 		return SLURM_ERROR;
 
 	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
 		 cluster_name, wckey_month_table);
+
 	if (mysql_db_create_table(mysql_conn, table_name,
-				  wckey_usage_table_fields,
-				  ", primary key (id_wckey, "
-				  "time_start))")
+				  id_usage_table_fields,
+				  ", primary key (id, id_tres, time_start))")
 	    == SLURM_ERROR)
 		return SLURM_ERROR;
 
@@ -1336,11 +1486,11 @@ extern int remove_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name)
 	}
 	mysql_free_result(result);
 	xstrfmtcat(mysql_conn->pre_commit_query,
-		   "drop table \"%s_%s\", \"%s_%s\", "
+		   "drop table \"%s_%s\", \"%s_%s\", \"%s_%s\", "
 		   "\"%s_%s\", \"%s_%s\", \"%s_%s\", \"%s_%s\", "
 		   "\"%s_%s\", \"%s_%s\", \"%s_%s\", \"%s_%s\", "
 		   "\"%s_%s\", \"%s_%s\", \"%s_%s\", \"%s_%s\", "
-		   "\"%s_%s\", \"%s_%s\", \"%s_%s\";",
+		   "\"%s_%s\", \"%s_%s\";",
 		   cluster_name, assoc_table,
 		   cluster_name, assoc_day_table,
 		   cluster_name, assoc_hour_table,
@@ -1366,11 +1516,15 @@ extern int remove_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name)
 	return rc;
 }
 
-extern int setup_association_limits(slurmdb_association_rec_t *assoc,
-				    char **cols, char **vals,
-				    char **extra, qos_level_t qos_level,
-				    bool for_add)
+extern int setup_assoc_limits(slurmdb_assoc_rec_t *assoc,
+			      char **cols, char **vals,
+			      char **extra, qos_level_t qos_level,
+			      bool for_add)
 {
+	uint32_t tres_str_flags = TRES_STR_FLAG_REMOVE |
+		TRES_STR_FLAG_SORT_ID | TRES_STR_FLAG_SIMPLE |
+		TRES_STR_FLAG_NO_NULL;
+
 	if (!assoc)
 		return SLURM_ERROR;
 
@@ -1380,32 +1534,14 @@ extern int setup_association_limits(slurmdb_association_rec_t *assoc,
 		*/
 		if (assoc->shares_raw == NO_VAL)
 			assoc->shares_raw = INFINITE;
-		if (assoc->grp_cpu_mins == (uint64_t)NO_VAL)
-			assoc->grp_cpu_mins = (uint64_t)INFINITE;
-		if (assoc->grp_cpu_run_mins == (uint64_t)NO_VAL)
-			assoc->grp_cpu_run_mins = (uint64_t)INFINITE;
-		if (assoc->grp_cpus == NO_VAL)
-			assoc->grp_cpus = INFINITE;
 		if (assoc->grp_jobs == NO_VAL)
 			assoc->grp_jobs = INFINITE;
-		if (assoc->grp_mem == NO_VAL)
-			assoc->grp_mem = INFINITE;
-		if (assoc->grp_nodes == NO_VAL)
-			assoc->grp_nodes = INFINITE;
 		if (assoc->grp_submit_jobs == NO_VAL)
 			assoc->grp_submit_jobs = INFINITE;
 		if (assoc->grp_wall == NO_VAL)
 			assoc->grp_wall = INFINITE;
-		if (assoc->max_cpu_mins_pj == (uint64_t)NO_VAL)
-			assoc->max_cpu_mins_pj = (uint64_t)INFINITE;
-		if (assoc->max_cpu_run_mins == (uint64_t)NO_VAL)
-			assoc->max_cpu_run_mins = (uint64_t)INFINITE;
-		if (assoc->max_cpus_pj == NO_VAL)
-			assoc->max_cpus_pj = INFINITE;
 		if (assoc->max_jobs == NO_VAL)
 			assoc->max_jobs = INFINITE;
-		if (assoc->max_nodes_pj == NO_VAL)
-			assoc->max_nodes_pj = INFINITE;
 		if (assoc->max_submit_jobs == NO_VAL)
 			assoc->max_submit_jobs = INFINITE;
 		if (assoc->max_wall_pj == NO_VAL)
@@ -1426,43 +1562,6 @@ extern int setup_association_limits(slurmdb_association_rec_t *assoc,
 		xstrfmtcat(*extra, ", shares=%u", assoc->shares_raw);
 	}
 
-	if (assoc->grp_cpu_mins == (uint64_t)INFINITE) {
-		xstrcat(*cols, ", grp_cpu_mins");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", grp_cpu_mins=NULL");
-	} else if ((assoc->grp_cpu_mins != (uint64_t)NO_VAL)
-		   && ((int64_t)assoc->grp_cpu_mins >= 0)) {
-		xstrcat(*cols, ", grp_cpu_mins");
-		xstrfmtcat(*vals, ", %"PRIu64"",
-			   assoc->grp_cpu_mins);
-		xstrfmtcat(*extra, ", grp_cpu_mins=%"PRIu64"",
-			   assoc->grp_cpu_mins);
-	}
-
-	if (assoc->grp_cpu_run_mins == (uint64_t)INFINITE) {
-		xstrcat(*cols, ", grp_cpu_run_mins");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", grp_cpu_run_mins=NULL");
-	} else if ((assoc->grp_cpu_run_mins != (uint64_t)NO_VAL)
-		   && ((int64_t)assoc->grp_cpu_run_mins >= 0)) {
-		xstrcat(*cols, ", grp_cpu_run_mins");
-		xstrfmtcat(*vals, ", %"PRIu64"",
-			   assoc->grp_cpu_run_mins);
-		xstrfmtcat(*extra, ", grp_cpu_run_mins=%"PRIu64"",
-			   assoc->grp_cpu_run_mins);
-	}
-
-	if (assoc->grp_cpus == INFINITE) {
-		xstrcat(*cols, ", grp_cpus");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", grp_cpus=NULL");
-	} else if ((assoc->grp_cpus != NO_VAL)
-		   && ((int32_t)assoc->grp_cpus >= 0)) {
-		xstrcat(*cols, ", grp_cpus");
-		xstrfmtcat(*vals, ", %u", assoc->grp_cpus);
-		xstrfmtcat(*extra, ", grp_cpus=%u", assoc->grp_cpus);
-	}
-
 	if (assoc->grp_jobs == INFINITE) {
 		xstrcat(*cols, ", grp_jobs");
 		xstrcat(*vals, ", NULL");
@@ -1474,28 +1573,6 @@ extern int setup_association_limits(slurmdb_association_rec_t *assoc,
 		xstrfmtcat(*extra, ", grp_jobs=%u", assoc->grp_jobs);
 	}
 
-	if (assoc->grp_mem == INFINITE) {
-		xstrcat(*cols, ", grp_mem");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", grp_mem=NULL");
-	} else if ((assoc->grp_mem != NO_VAL)
-		   && ((int32_t)assoc->grp_mem >= 0)) {
-		xstrcat(*cols, ", grp_mem");
-		xstrfmtcat(*vals, ", %u", assoc->grp_mem);
-		xstrfmtcat(*extra, ", grp_mem=%u", assoc->grp_mem);
-	}
-
-	if (assoc->grp_nodes == INFINITE) {
-		xstrcat(*cols, ", grp_nodes");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", grp_nodes=NULL");
-	} else if ((assoc->grp_nodes != NO_VAL)
-		   && ((int32_t)assoc->grp_nodes >= 0)) {
-		xstrcat(*cols, ", grp_nodes");
-		xstrfmtcat(*vals, ", %u", assoc->grp_nodes);
-		xstrfmtcat(*extra, ", grp_nodes=%u", assoc->grp_nodes);
-	}
-
 	if (assoc->grp_submit_jobs == INFINITE) {
 		xstrcat(*cols, ", grp_submit_jobs");
 		xstrcat(*vals, ", NULL");
@@ -1529,43 +1606,6 @@ extern int setup_association_limits(slurmdb_association_rec_t *assoc,
 		xstrcat(*extra, ", is_def=1");
 	}
 
-	if (assoc->max_cpu_mins_pj == (uint64_t)INFINITE) {
-		xstrcat(*cols, ", max_cpu_mins_pj");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", max_cpu_mins_pj=NULL");
-	} else if ((assoc->max_cpu_mins_pj != (uint64_t)NO_VAL)
-		   && ((int64_t)assoc->max_cpu_mins_pj >= 0)) {
-		xstrcat(*cols, ", max_cpu_mins_pj");
-		xstrfmtcat(*vals, ", %"PRIu64"",
-			   assoc->max_cpu_mins_pj);
-		xstrfmtcat(*extra, ", max_cpu_mins_pj=%"PRIu64"",
-			   assoc->max_cpu_mins_pj);
-	}
-
-	if (assoc->max_cpu_run_mins == (uint64_t)INFINITE) {
-		xstrcat(*cols, ", max_cpu_run_mins");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", max_cpu_run_mins=NULL");
-	} else if ((assoc->max_cpu_run_mins != (uint64_t)NO_VAL)
-		   && ((int64_t)assoc->max_cpu_run_mins >= 0)) {
-		xstrcat(*cols, ", max_cpu_run_mins");
-		xstrfmtcat(*vals, ", %"PRIu64"",
-			   assoc->max_cpu_run_mins);
-		xstrfmtcat(*extra, ", max_cpu_run_mins=%"PRIu64"",
-			   assoc->max_cpu_run_mins);
-	}
-
-	if (assoc->max_cpus_pj == INFINITE) {
-		xstrcat(*cols, ", max_cpus_pj");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", max_cpus_pj=NULL");
-	} else if ((assoc->max_cpus_pj != NO_VAL)
-		   && ((int32_t)assoc->max_cpus_pj >= 0)) {
-		xstrcat(*cols, ", max_cpus_pj");
-		xstrfmtcat(*vals, ", %u", assoc->max_cpus_pj);
-		xstrfmtcat(*extra, ", max_cpus_pj=%u", assoc->max_cpus_pj);
-	}
-
 	if (assoc->max_jobs == INFINITE) {
 		xstrcat(*cols, ", max_jobs");
 		xstrcat(*vals, ", NULL");
@@ -1577,17 +1617,6 @@ extern int setup_association_limits(slurmdb_association_rec_t *assoc,
 		xstrfmtcat(*extra, ", max_jobs=%u", assoc->max_jobs);
 	}
 
-	if (assoc->max_nodes_pj == INFINITE) {
-		xstrcat(*cols, ", max_nodes_pj");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", max_nodes_pj=NULL");
-	} else if ((assoc->max_nodes_pj != NO_VAL)
-		   && ((int32_t)assoc->max_nodes_pj >= 0)) {
-		xstrcat(*cols, ", max_nodes_pj");
-		xstrfmtcat(*vals, ", %u", assoc->max_nodes_pj);
-		xstrfmtcat(*extra, ", max_nodes_pj=%u", assoc->max_nodes_pj);
-	}
-
 	if (assoc->max_submit_jobs == INFINITE) {
 		xstrcat(*cols, ", max_submit_jobs");
 		xstrcat(*vals, ", NULL");
@@ -1622,19 +1651,115 @@ extern int setup_association_limits(slurmdb_association_rec_t *assoc,
 		xstrfmtcat(*extra, ", def_qos_id=%u", assoc->def_qos_id);
 	}
 
-	/* when modifying the qos it happens in the actual function
-	   since we have to wait until we hear about the parent first. */
-	if (qos_level == QOS_LEVEL_MODIFY)
-		goto end_qos;
+	/* When modifying anything below this comment it happens in
+	 * the actual function since we have to wait until we hear
+	 * about the parent first.
+	 * To signal that something needs to be changed, we cat ""
+	 * onto extra, which informs the caller that a change is
+	 * needed.
+	 */
+
+	if (assoc->grp_tres) {
+		if (qos_level == QOS_LEVEL_MODIFY) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", grp_tres");
+		slurmdb_combine_tres_strings(
+			&assoc->grp_tres, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", assoc->grp_tres);
+		xstrfmtcat(*extra, ", grp_tres='%s'", assoc->grp_tres);
+	}
+
+	if (assoc->grp_tres_mins) {
+		if (qos_level == QOS_LEVEL_MODIFY) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", grp_tres_mins");
+		slurmdb_combine_tres_strings(
+			&assoc->grp_tres_mins, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", assoc->grp_tres_mins);
+		xstrfmtcat(*extra, ", grp_tres_mins='%s'",
+			   assoc->grp_tres_mins);
+	}
+
+	if (assoc->grp_tres_run_mins) {
+		if (qos_level == QOS_LEVEL_MODIFY) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", grp_tres_run_mins");
+		slurmdb_combine_tres_strings(
+			&assoc->grp_tres_run_mins, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", assoc->grp_tres_run_mins);
+		xstrfmtcat(*extra, ", grp_tres_run_mins='%s'",
+			   assoc->grp_tres_run_mins);
+	}
+
+	if (assoc->max_tres_pj) {
+		if (qos_level == QOS_LEVEL_MODIFY) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", max_tres_pj");
+		slurmdb_combine_tres_strings(
+			&assoc->max_tres_pj, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", assoc->max_tres_pj);
+		xstrfmtcat(*extra, ", max_tres_pj='%s'", assoc->max_tres_pj);
+	}
+
+	if (assoc->max_tres_pn) {
+		if (qos_level == QOS_LEVEL_MODIFY) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", max_tres_pn");
+		slurmdb_combine_tres_strings(
+			&assoc->max_tres_pn, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", assoc->max_tres_pn);
+		xstrfmtcat(*extra, ", max_tres_pn='%s'", assoc->max_tres_pn);
+	}
+
+	if (assoc->max_tres_mins_pj) {
+		if (qos_level == QOS_LEVEL_MODIFY) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", max_tres_mins_pj");
+		slurmdb_combine_tres_strings(
+			&assoc->max_tres_mins_pj, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", assoc->max_tres_mins_pj);
+		xstrfmtcat(*extra, ", max_tres_mins_pj='%s'",
+			   assoc->max_tres_mins_pj);
+	}
+
+	if (assoc->max_tres_run_mins) {
+		if (qos_level == QOS_LEVEL_MODIFY) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", max_tres_run_mins");
+		slurmdb_combine_tres_strings(
+			&assoc->max_tres_run_mins, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", assoc->max_tres_run_mins);
+		xstrfmtcat(*extra, ", max_tres_run_mins='%s'",
+			   assoc->max_tres_run_mins);
+	}
 
 	if (assoc->qos_list && list_count(assoc->qos_list)) {
 		char *qos_type = "qos";
 		char *qos_val = NULL;
 		char *tmp_char = NULL;
 		int set = 0;
-		ListIterator qos_itr =
-			list_iterator_create(assoc->qos_list);
+		ListIterator qos_itr;
 
+		if (qos_level == QOS_LEVEL_MODIFY) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+
+		qos_itr = list_iterator_create(assoc->qos_list);
 		while ((tmp_char = list_next(qos_itr))) {
 			/* we don't want to include blank names */
 			if (!tmp_char[0])
@@ -1662,13 +1787,13 @@ extern int setup_association_limits(slurmdb_association_rec_t *assoc,
 		if (!assoc->qos_list)
 			assoc->qos_list = list_create(slurm_destroy_char);
 		slurm_addto_char_list(assoc->qos_list, default_qos_str);
-	} else {
+	} else if (qos_level != QOS_LEVEL_MODIFY) {
 		/* clear the qos */
 		xstrcat(*cols, ", qos, delta_qos");
 		xstrcat(*vals, ", '', ''");
 		xstrcat(*extra, ", qos='', delta_qos=''");
 	}
-end_qos:
+end_modify:
 
 	return SLURM_SUCCESS;
 
@@ -1699,7 +1824,7 @@ extern int modify_common(mysql_conn_t *mysql_conn,
 	    || (table == res_table) || (table == clus_res_table))
 		cluster_centric = false;
 
-	if (vals[1])
+	if (vals && vals[1])
 		tmp_vals = slurm_add_slash_to_quotes(vals+2);
 
 	if (cluster_centric) {
@@ -1756,7 +1881,7 @@ extern int remove_common(mysql_conn_t *mysql_conn,
 {
 	int rc = SLURM_SUCCESS;
 	char *query = NULL;
-	char *loc_assoc_char = NULL;
+	char *loc_assoc_char = NULL, *loc_usage_id_char = NULL;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 	time_t day_old = now - DELETE_SEC_BACK;
@@ -1875,8 +2000,38 @@ extern int remove_common(mysql_conn_t *mysql_conn,
 	if (rc != SLURM_SUCCESS) {
 		reset_mysql_conn(mysql_conn);
 		return SLURM_ERROR;
+	} else if (table == qos_table) {
+		query = xstrdup_printf(
+			"update %s set "
+			"mod_time=%ld, deleted=1, "
+			"grace_time=DEFAULT, "
+			"max_jobs_per_user=DEFAULT, "
+			"max_submit_jobs_per_user=DEFAULT, "
+			"max_tres_pj=DEFAULT, "
+			"max_tres_pn=DEFAULT, "
+			"max_tres_pu=DEFAULT, "
+			"max_tres_mins_pj=DEFAULT, "
+			"max_tres_run_mins_pu=DEFAULT, "
+			"min_tres_pj=DEFAULT, "
+			"max_wall_duration_per_job=DEFAULT, "
+			"grp_jobs=DEFAULT, grp_submit_jobs=DEFAULT, "
+			"grp_tres=DEFAULT, "
+			"grp_tres_mins=DEFAULT, "
+			"grp_tres_run_mins=DEFAULT, "
+			"grp_wall=DEFAULT, "
+			"preempt=DEFAULT, "
+			"priority=DEFAULT, "
+			"usage_factor=DEFAULT, "
+			"usage_thres=DEFAULT "
+			"where (%s);",
+			qos_table, now,
+			name_char);
+		if (debug_flags & DEBUG_FLAG_DB_QOS)
+			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
+		rc = mysql_db_query(mysql_conn, query);
+		xfree(query);
+		return rc;
 	} else if ((table == acct_coord_table)
-		   || (table == qos_table)
 		   || (table == wckey_table)
 		   || (table == clus_res_table)
 		   || (table == res_table))
@@ -1918,14 +2073,14 @@ extern int remove_common(mysql_conn_t *mysql_conn,
 		xfree(query);
 
 		rc = 0;
-		loc_assoc_char = NULL;
+		xfree(loc_assoc_char);
 		while ((row = mysql_fetch_row(result))) {
-			slurmdb_association_rec_t *rem_assoc = NULL;
+			slurmdb_assoc_rec_t *rem_assoc = NULL;
 			if (loc_assoc_char)
 				xstrcat(loc_assoc_char, " || ");
 			xstrfmtcat(loc_assoc_char, "id_assoc=%s", row[0]);
 
-			rem_assoc = xmalloc(sizeof(slurmdb_association_rec_t));
+			rem_assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
 			rem_assoc->id = slurm_atoul(row[0]);
 			rem_assoc->cluster = xstrdup(cluster_name);
 			if (addto_update_list(mysql_conn->update_list,
@@ -1942,6 +2097,9 @@ extern int remove_common(mysql_conn_t *mysql_conn,
 		return rc;
 	}
 
+	loc_usage_id_char = xstrdup(loc_assoc_char);
+	xstrsubstituteall(loc_usage_id_char, "id_assoc", "id");
+
 	/* We should not have to delete from usage table, only flag since we
 	 * only delete things that are typos.
 	 */
@@ -1949,9 +2107,10 @@ extern int remove_common(mysql_conn_t *mysql_conn,
 		   "update \"%s_%s\" set mod_time=%ld, deleted=1 where (%s);"
 		   "update \"%s_%s\" set mod_time=%ld, deleted=1 where (%s);"
 		   "update \"%s_%s\" set mod_time=%ld, deleted=1 where (%s);",
-		   cluster_name, assoc_day_table, now, loc_assoc_char,
-		   cluster_name, assoc_hour_table, now, loc_assoc_char,
-		   cluster_name, assoc_month_table, now, loc_assoc_char);
+		   cluster_name, assoc_day_table, now, loc_usage_id_char,
+		   cluster_name, assoc_hour_table, now, loc_usage_id_char,
+		   cluster_name, assoc_month_table, now, loc_usage_id_char);
+	xfree(loc_usage_id_char);
 
 	if (debug_flags & DEBUG_FLAG_DB_ASSOC)
 		DB_DEBUG(mysql_conn->conn, "query\n%s %zd",
@@ -2064,11 +2223,20 @@ just_update:
 	 * around.
 	 */
 	query = xstrdup_printf("update \"%s_%s\" as t1 set "
-			       "mod_time=%ld, deleted=1, def_qos_id=NULL, "
-			       "shares=1, max_jobs=NULL, "
-			       "max_nodes_pj=NULL, "
-			       "max_wall_pj=NULL, "
-			       "max_cpu_mins_pj=NULL "
+			       "mod_time=%ld, deleted=1, def_qos_id=DEFAULT, "
+			       "shares=DEFAULT, max_jobs=DEFAULT, "
+			       "max_submit_jobs=DEFAULT, "
+			       "max_wall_pj=DEFAULT, "
+			       "max_tres_pj=DEFAULT, "
+			       "max_tres_pn=DEFAULT, "
+			       "max_tres_mins_pj=DEFAULT, "
+			       "max_tres_run_mins=DEFAULT, "
+			       "grp_jobs=DEFAULT, grp_submit_jobs=DEFAULT, "
+			       "grp_wall=DEFAULT, "
+			       "grp_tres=DEFAULT, "
+			       "grp_tres_mins=DEFAULT, "
+			       "grp_tres_run_mins=DEFAULT, "
+			       "qos=DEFAULT, delta_qos=DEFAULT "
 			       "where (%s);",
 			       cluster_name, assoc_table, now,
 			       loc_assoc_char);
@@ -2096,6 +2264,57 @@ just_update:
 	return rc;
 }
 
+extern void mod_tres_str(char **out, char *mod, char *cur,
+			 char *cur_par, char *name, char **vals,
+			 uint32_t id, bool assoc)
+{
+	uint32_t tres_str_flags = TRES_STR_FLAG_REMOVE |
+		TRES_STR_FLAG_SORT_ID | TRES_STR_FLAG_SIMPLE |
+		TRES_STR_FLAG_NO_NULL;
+
+	xassert(out);
+	xassert(name);
+
+	if (!mod)
+		return;
+
+	/* We have to add strings in waves or we will not be able to
+	 * get removes to work correctly.  We want the string returned
+	 * after the first slurmdb_combine_tres_strings to be put in
+	 * the database.
+	 */
+	xfree(*out); /* just to make sure */
+	*out = xstrdup(mod);
+	slurmdb_combine_tres_strings(out, cur, tres_str_flags);
+
+	if (xstrcmp(*out, cur)) {
+		if (vals) {
+			/* This logic is here because while the change
+			 * we are doing on the limit is the same for
+			 * each limit the other limits on the
+			 * associations might not be.  What this does
+			 * is only change the limit on the association
+			 * given the id.  I'm hoping someone in the
+			 * future comes up with a better way to do
+			 * this since this seems like a hack, but it
+			 * does do the job.
+			 */
+			xstrfmtcat(*vals, ", %s = "
+				   "if (%s=%u, '%s', %s)",
+				   name, assoc ? "id_assoc" : "id", id,
+				   *out, name);
+			/* xstrfmtcat(*vals, ", %s='%s%s')", */
+			/* 	   name, */
+			/* 	   *out[0] ? "," : "", */
+			/* 	   *out); */
+		}
+		if (cur_par)
+			slurmdb_combine_tres_strings(
+				out, cur_par, tres_str_flags);
+	} else
+		xfree(*out);
+}
+
 /*
  * init() is called when the plugin is loaded, before any other functions
  * are called.  Put global initialization here.
@@ -2158,19 +2377,14 @@ extern int init ( void )
 extern int fini ( void )
 {
 	slurm_mutex_lock(&as_mysql_cluster_list_lock);
-	if (as_mysql_cluster_list) {
-		list_destroy(as_mysql_cluster_list);
-		as_mysql_cluster_list = NULL;
-	}
-	if (as_mysql_total_cluster_list) {
-		list_destroy(as_mysql_total_cluster_list);
-		as_mysql_total_cluster_list = NULL;
-	}
+	FREE_NULL_LIST(as_mysql_cluster_list);
+	FREE_NULL_LIST(as_mysql_total_cluster_list);
 	slurm_mutex_unlock(&as_mysql_cluster_list_lock);
 	slurm_mutex_destroy(&as_mysql_cluster_list_lock);
 	destroy_mysql_db_info(mysql_db_info);
 	xfree(mysql_db_name);
 	xfree(default_qos_str);
+
 	mysql_db_cleanup();
 	return SLURM_SUCCESS;
 }
@@ -2288,7 +2502,7 @@ extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit)
 		}
 		mysql_free_result(result);
 	skip:
-		(void) assoc_mgr_update(mysql_conn->update_list);
+		(void) assoc_mgr_update(mysql_conn->update_list, 0);
 
 		slurm_mutex_lock(&as_mysql_cluster_list_lock);
 		itr2 = list_iterator_create(as_mysql_cluster_list);
@@ -2355,11 +2569,17 @@ extern int acct_storage_p_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 	return as_mysql_add_clusters(mysql_conn, uid, cluster_list);
 }
 
-extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
-					   uint32_t uid,
-					   List association_list)
+extern int acct_storage_p_add_tres(mysql_conn_t *mysql_conn,
+				   uint32_t uid, List tres_list_in)
 {
-	return as_mysql_add_assocs(mysql_conn, uid, association_list);
+	return as_mysql_add_tres(mysql_conn, uid, tres_list_in);
+}
+
+extern int acct_storage_p_add_assocs(mysql_conn_t *mysql_conn,
+				     uint32_t uid,
+				     List assoc_list)
+{
+	return as_mysql_add_assocs(mysql_conn, uid, assoc_list);
 }
 
 extern int acct_storage_p_add_qos(mysql_conn_t *mysql_conn, uint32_t uid,
@@ -2408,10 +2628,10 @@ extern List acct_storage_p_modify_clusters(mysql_conn_t *mysql_conn,
 	return as_mysql_modify_clusters(mysql_conn, uid, cluster_cond, cluster);
 }
 
-extern List acct_storage_p_modify_associations(
+extern List acct_storage_p_modify_assocs(
 	mysql_conn_t *mysql_conn, uint32_t uid,
-	slurmdb_association_cond_t *assoc_cond,
-	slurmdb_association_rec_t *assoc)
+	slurmdb_assoc_cond_t *assoc_cond,
+	slurmdb_assoc_rec_t *assoc)
 {
 	return as_mysql_modify_assocs(mysql_conn, uid, assoc_cond, assoc);
 }
@@ -2478,9 +2698,9 @@ extern List acct_storage_p_remove_clusters(mysql_conn_t *mysql_conn,
 	return as_mysql_remove_clusters(mysql_conn, uid, cluster_cond);
 }
 
-extern List acct_storage_p_remove_associations(
+extern List acct_storage_p_remove_assocs(
 	mysql_conn_t *mysql_conn, uint32_t uid,
-	slurmdb_association_cond_t *assoc_cond)
+	slurmdb_assoc_cond_t *assoc_cond)
 {
 	return as_mysql_remove_assocs(mysql_conn, uid, assoc_cond);
 }
@@ -2529,9 +2749,16 @@ extern List acct_storage_p_get_clusters(mysql_conn_t *mysql_conn, uid_t uid,
 	return as_mysql_get_clusters(mysql_conn, uid, cluster_cond);
 }
 
-extern List acct_storage_p_get_associations(
+extern List acct_storage_p_get_tres(
+	mysql_conn_t *mysql_conn, uid_t uid,
+	slurmdb_tres_cond_t *tres_cond)
+{
+	return as_mysql_get_tres(mysql_conn, uid, tres_cond);
+}
+
+extern List acct_storage_p_get_assocs(
 	mysql_conn_t *mysql_conn, uid_t uid,
-	slurmdb_association_cond_t *assoc_cond)
+	slurmdb_assoc_cond_t *assoc_cond)
 {
 	return as_mysql_get_assocs(mysql_conn, uid, assoc_cond);
 }
@@ -2543,14 +2770,19 @@ extern List acct_storage_p_get_events(mysql_conn_t *mysql_conn, uint32_t uid,
 }
 
 extern List acct_storage_p_get_problems(mysql_conn_t *mysql_conn, uint32_t uid,
-					slurmdb_association_cond_t *assoc_cond)
+					slurmdb_assoc_cond_t *assoc_cond)
 {
 	List ret_list = NULL;
 
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
 
-	ret_list = list_create(slurmdb_destroy_association_rec);
+	if (!is_user_min_admin_level(mysql_conn, uid, SLURMDB_ADMIN_OPERATOR)) {
+		errno = ESLURM_ACCESS_DENIED;
+		return NULL;
+	}
+
+	ret_list = list_create(slurmdb_destroy_assoc_rec);
 
 	if (as_mysql_acct_no_assocs(mysql_conn, assoc_cond, ret_list)
 	    != SLURM_SUCCESS)
@@ -2718,13 +2950,13 @@ extern int clusteracct_storage_p_fini_ctld(mysql_conn_t *mysql_conn,
 	return as_mysql_fini_ctld(mysql_conn, cluster_rec);
 }
 
-extern int clusteracct_storage_p_cluster_cpus(mysql_conn_t *mysql_conn,
+extern int clusteracct_storage_p_cluster_tres(mysql_conn_t *mysql_conn,
 					      char *cluster_nodes,
-					      uint32_t cpus,
+					      char *tres_str_in,
 					      time_t event_time)
 {
-	return as_mysql_cluster_cpus(mysql_conn,
-				     cluster_nodes, cpus, event_time);
+	return as_mysql_cluster_tres(mysql_conn,
+				     cluster_nodes, &tres_str_in, event_time);
 }
 
 /*
@@ -2797,10 +3029,17 @@ extern List jobacct_storage_p_get_jobs_cond(mysql_conn_t *mysql_conn,
 extern int jobacct_storage_p_archive(mysql_conn_t *mysql_conn,
 				     slurmdb_archive_cond_t *arch_cond)
 {
+	int rc;
+
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return ESLURM_DB_CONNECTION;
 
-	return as_mysql_jobacct_process_archive(mysql_conn, arch_cond);
+	/* Make sure only 1 archive is happening at a time. */
+	slurm_mutex_lock(&usage_rollup_lock);
+	rc = as_mysql_jobacct_process_archive(mysql_conn, arch_cond);
+	slurm_mutex_unlock(&usage_rollup_lock);
+
+	return rc;
 }
 
 /*
diff --git a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.h b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.h
index 9eedacbe2..27c6481a1 100644
--- a/src/plugins/accounting_storage/mysql/accounting_storage_mysql.h
+++ b/src/plugins/accounting_storage/mysql/accounting_storage_mysql.h
@@ -73,11 +73,12 @@
 
 extern char *acct_coord_table;
 extern char *acct_table;
+extern char *tres_table;
 extern char *assoc_day_table;
 extern char *assoc_hour_table;
 extern char *assoc_month_table;
 extern char *assoc_table;
-extern char * clus_res_table;
+extern char *clus_res_table;
 extern char *cluster_day_table;
 extern char *cluster_hour_table;
 extern char *cluster_month_table;
@@ -122,7 +123,7 @@ extern int last_affected_rows(mysql_conn_t *mysql_conn);
 extern void reset_mysql_conn(mysql_conn_t *mysql_conn);
 extern int create_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name);
 extern int remove_cluster_tables(mysql_conn_t *mysql_conn, char *cluster_name);
-extern int setup_association_limits(slurmdb_association_rec_t *assoc,
+extern int setup_assoc_limits(slurmdb_assoc_rec_t *assoc,
 				    char **cols, char **vals,
 				    char **extra, qos_level_t qos_level,
 				    bool for_add);
@@ -145,19 +146,24 @@ extern int remove_common(mysql_conn_t *mysql_conn,
 			 List ret_list,
 			 bool *jobs_running);
 
+extern void mod_tres_str(char **out, char *mod, char *cur,
+			 char *cur_par, char *name, char **vals,
+			 uint32_t id, bool assoc);
+
+
 /*local api functions */
 extern int acct_storage_p_commit(mysql_conn_t *mysql_conn, bool commit);
 
-extern int acct_storage_p_add_associations(mysql_conn_t *mysql_conn,
+extern int acct_storage_p_add_assocs(mysql_conn_t *mysql_conn,
 					   uint32_t uid,
-					   List association_list);
+					   List assoc_list);
 
 extern int acct_storage_p_add_wckeys(mysql_conn_t *mysql_conn, uint32_t uid,
 				     List wckey_list);
 
-extern List acct_storage_p_get_associations(
+extern List acct_storage_p_get_assocs(
 	mysql_conn_t *mysql_conn, uid_t uid,
-	slurmdb_association_cond_t *assoc_cond);
+	slurmdb_assoc_cond_t *assoc_cond);
 
 extern List acct_storage_p_get_wckeys(mysql_conn_t *mysql_conn, uid_t uid,
 				      slurmdb_wckey_cond_t *wckey_cond);
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_acct.c b/src/plugins/accounting_storage/mysql/as_mysql_acct.c
index 56a304bf2..5034f5cf3 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_acct.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_acct.c
@@ -131,11 +131,29 @@ extern int as_mysql_add_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 	char *extra = NULL, *tmp_extra = NULL;
 
 	int affect_rows = 0;
-	List assoc_list = list_create(slurmdb_destroy_association_rec);
+	List assoc_list = list_create(slurmdb_destroy_assoc_rec);
 
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return ESLURM_DB_CONNECTION;
 
+	if (!is_user_min_admin_level(mysql_conn, uid, SLURMDB_ADMIN_OPERATOR)) {
+		slurmdb_user_rec_t user;
+
+		memset(&user, 0, sizeof(slurmdb_user_rec_t));
+		user.uid = uid;
+
+		if (!is_user_any_coord(mysql_conn, &user)) {
+			error("Only admins/operators/coordinators "
+			      "can add accounts");
+			return ESLURM_ACCESS_DENIED;
+		}
+		/* If the user is a coord of any acct they can add
+		 * accounts they are only able to make associations to
+		 * these accounts if they are coordinators of the
+		 * parent they are trying to add to
+		 */
+	}
+
 	user_name = uid_to_string((uid_t) uid);
 	itr = list_iterator_create(acct_list);
 	while ((object = list_next(itr))) {
@@ -208,7 +226,7 @@ extern int as_mysql_add_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 
 		if (!assoc_list)
 			assoc_list =
-				list_create(slurmdb_destroy_association_rec);
+				list_create(slurmdb_destroy_assoc_rec);
 		list_transfer(assoc_list, object->assoc_list);
 	}
 	list_iterator_destroy(itr);
@@ -261,6 +279,11 @@ extern List as_mysql_modify_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
 
+	if (!is_user_min_admin_level(mysql_conn, uid, SLURMDB_ADMIN_OPERATOR)) {
+		errno = ESLURM_ACCESS_DENIED;
+		return NULL;
+	}
+
 	xstrcat(extra, "where deleted=0");
 	if (acct_cond->assoc_cond
 	    && acct_cond->assoc_cond->acct_list
@@ -363,7 +386,7 @@ extern List as_mysql_modify_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 	xfree(user_name);
 	if (rc == SLURM_ERROR) {
 		error("Couldn't modify accounts");
-		list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 		errno = SLURM_ERROR;
 		ret_list = NULL;
 	}
@@ -399,6 +422,11 @@ extern List as_mysql_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
 
+	if (!is_user_min_admin_level(mysql_conn, uid, SLURMDB_ADMIN_OPERATOR)) {
+		errno = ESLURM_ACCESS_DENIED;
+		return NULL;
+	}
+
 	xstrcat(extra, "where deleted=0");
 	if (acct_cond->assoc_cond
 	    && acct_cond->assoc_cond->acct_list
@@ -490,8 +518,7 @@ extern List as_mysql_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 	/* We need to remove these accounts from the coord's that have it */
 	coord_list = as_mysql_remove_coord(
 		mysql_conn, uid, ret_list, NULL);
-	if (coord_list)
-		list_destroy(coord_list);
+	FREE_NULL_LIST(coord_list);
 
 	user_name = uid_to_string((uid_t) uid);
 
@@ -512,7 +539,7 @@ extern List as_mysql_remove_accts(mysql_conn_t *mysql_conn, uint32_t uid,
 	xfree(name_char);
 	xfree(assoc_char);
 	if (rc == SLURM_ERROR) {
-		list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 		return NULL;
 	}
 
@@ -679,8 +706,7 @@ empty:
 		   this list in the acct->name so we don't
 		   free it here
 		*/
-		if (acct_cond->assoc_cond->acct_list)
-			list_destroy(acct_cond->assoc_cond->acct_list);
+		FREE_NULL_LIST(acct_cond->assoc_cond->acct_list);
 		acct_cond->assoc_cond->acct_list = list_create(NULL);
 		acct_cond->assoc_cond->with_deleted = acct_cond->with_deleted;
 	}
@@ -701,7 +727,7 @@ empty:
 		if (acct_cond && acct_cond->with_assocs) {
 			if (!acct_cond->assoc_cond) {
 				acct_cond->assoc_cond = xmalloc(
-					sizeof(slurmdb_association_cond_t));
+					sizeof(slurmdb_assoc_cond_t));
 			}
 
 			list_append(acct_cond->assoc_cond->acct_list,
@@ -714,7 +740,7 @@ empty:
 	    && list_count(acct_cond->assoc_cond->acct_list)) {
 		ListIterator assoc_itr = NULL;
 		slurmdb_account_rec_t *acct = NULL;
-		slurmdb_association_rec_t *assoc = NULL;
+		slurmdb_assoc_rec_t *assoc = NULL;
 		List assoc_list = as_mysql_get_assocs(
 			mysql_conn, uid, acct_cond->assoc_cond);
 
@@ -732,7 +758,7 @@ empty:
 
 				if (!acct->assoc_list)
 					acct->assoc_list = list_create(
-						slurmdb_destroy_association_rec);
+						slurmdb_destroy_assoc_rec);
 				list_append(acct->assoc_list, assoc);
 				list_remove(assoc_itr);
 			}
@@ -743,7 +769,7 @@ empty:
 		list_iterator_destroy(itr);
 		list_iterator_destroy(assoc_itr);
 
-		list_destroy(assoc_list);
+		FREE_NULL_LIST(assoc_list);
 	}
 
 	return acct_list;
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_archive.c b/src/plugins/accounting_storage/mysql/as_mysql_archive.c
index 304422dc0..3e7f724dd 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_archive.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_archive.c
@@ -43,28 +43,32 @@
 #include <unistd.h>
 
 #include "as_mysql_archive.h"
-#include "src/common/slurmdbd_defs.h"
 #include "src/common/env.h"
+#include "src/common/slurm_time.h"
+#include "src/common/slurmdbd_defs.h"
 
+#define SLURMDBD_2_6_VERSION   12	/* slurm version 2.6 */
 #define SLURMDBD_2_5_VERSION   11	/* slurm version 2.5 */
 
 #define MAX_PURGE_LIMIT 50000 /* Number of records that are purged at a time
 				 so that locks can be periodically released. */
+#define MAX_ARCHIVE_AGE (60 * 60 * 24 * 60) /* If archive data is older than
+					       this then archive by month to
+					       handle large datasets. */
 
 typedef struct {
 	char *cluster_nodes;
-	char *cpu_count;
 	char *node_name;
 	char *period_end;
 	char *period_start;
 	char *reason;
 	char *reason_uid;
 	char *state;
+	char *tres_str;
 } local_event_t;
 
 typedef struct {
 	char *account;
-	char *alloc_cpus;
 	char *alloc_nodes;
 	char *associd;
 	char *array_jobid;
@@ -77,7 +81,7 @@ typedef struct {
 	char *eligible;
 	char *end;
 	char *gid;
-	char *id;
+	char *job_db_inx;
 	char *jobid;
 	char *kill_requid;
 	char *name;
@@ -95,6 +99,8 @@ typedef struct {
 	char *suspended;
 	char *timelimit;
 	char *track_steps;
+	char *tres_alloc_str;
+	char *tres_req_str;
 	char *uid;
 	char *wckey;
 	char *wckey_id;
@@ -102,7 +108,6 @@ typedef struct {
 
 typedef struct {
 	char *assocs;
-	char *cpus;
 	char *flags;
 	char *id;
 	char *name;
@@ -110,6 +115,7 @@ typedef struct {
 	char *node_inx;
 	char *time_end;
 	char *time_start;
+	char *tres_str;
 } local_resv_t;
 
 typedef struct {
@@ -122,8 +128,7 @@ typedef struct {
 	char *ave_vsize;
 	char *exit_code;
 	char *consumed_energy;
-	char *cpus;
-	char *id;
+	char *job_db_inx;
 	char *kill_requid;
 	char *max_disk_read;
 	char *max_disk_read_node;
@@ -150,20 +155,23 @@ typedef struct {
 	char *period_end;
 	char *period_start;
 	char *period_suspended;
-	char *req_cpufreq;
+	char *req_cpufreq_min;
+	char *req_cpufreq_max;
+	char *req_cpufreq_gov;
 	char *state;
 	char *stepid;
 	char *sys_sec;
 	char *sys_usec;
 	char *tasks;
 	char *task_dist;
+	char *tres_alloc_str;
 	char *user_sec;
 	char *user_usec;
 } local_step_t;
 
 typedef struct {
 	char *associd;
-	char *id;
+	char *job_db_inx;
 	char *period_end;
 	char *period_start;
 } local_suspend_t;
@@ -175,10 +183,10 @@ char *event_req_inx[] = {
 	"time_end",
 	"node_name",
 	"cluster_nodes",
-	"cpu_count",
 	"reason",
 	"reason_uid",
 	"state",
+	"tres",
 };
 
 enum {
@@ -186,10 +194,10 @@ enum {
 	EVENT_REQ_END,
 	EVENT_REQ_NODE,
 	EVENT_REQ_CNODES,
-	EVENT_REQ_CPU,
 	EVENT_REQ_REASON,
 	EVENT_REQ_REASON_UID,
 	EVENT_REQ_STATE,
+	EVENT_REQ_TRES,
 	EVENT_REQ_COUNT
 };
 
@@ -198,7 +206,6 @@ enum {
 static char *job_req_inx[] = {
 	"account",
 	"array_max_tasks",
-	"cpus_alloc",
 	"nodes_alloc",
 	"id_assoc",
 	"id_array_job",
@@ -230,13 +237,14 @@ static char *job_req_inx[] = {
 	"track_steps",
 	"id_user",
 	"wckey",
-	"id_wckey"
+	"id_wckey",
+	"tres_alloc",
+	"tres_req",
 };
 
 enum {
 	JOB_REQ_ACCOUNT,
 	JOB_REQ_ARRAY_MAX,
-	JOB_REQ_ALLOC_CPUS,
 	JOB_REQ_ALLOC_NODES,
 	JOB_REQ_ASSOCID,
 	JOB_REQ_ARRAYJOBID,
@@ -249,18 +257,18 @@ enum {
 	JOB_REQ_ELIGIBLE,
 	JOB_REQ_END,
 	JOB_REQ_GID,
-	JOB_REQ_ID,
+	JOB_REQ_DB_INX,
 	JOB_REQ_JOBID,
 	JOB_REQ_KILL_REQUID,
 	JOB_REQ_NAME,
 	JOB_REQ_NODELIST,
 	JOB_REQ_NODE_INX,
-	JOB_REQ_RESVID,
 	JOB_REQ_PARTITION,
 	JOB_REQ_PRIORITY,
 	JOB_REQ_QOS,
 	JOB_REQ_REQ_CPUS,
 	JOB_REQ_REQ_MEM,
+	JOB_REQ_RESVID,
 	JOB_REQ_START,
 	JOB_REQ_STATE,
 	JOB_REQ_SUBMIT,
@@ -269,6 +277,8 @@ enum {
 	JOB_REQ_UID,
 	JOB_REQ_WCKEY,
 	JOB_REQ_WCKEYID,
+	JOB_REQ_TRESA,
+	JOB_REQ_TRESR,
 	JOB_REQ_COUNT
 };
 
@@ -276,8 +286,8 @@ enum {
 char *resv_req_inx[] = {
 	"id_resv",
 	"assoclist",
-	"cpus",
 	"flags",
+	"tres",
 	"nodelist",
 	"node_inx",
 	"resv_name",
@@ -288,8 +298,8 @@ char *resv_req_inx[] = {
 enum {
 	RESV_REQ_ID,
 	RESV_REQ_ASSOCS,
-	RESV_REQ_CPUS,
 	RESV_REQ_FLAGS,
+	RESV_REQ_TRES,
 	RESV_REQ_NODES,
 	RESV_REQ_NODE_INX,
 	RESV_REQ_NAME,
@@ -313,7 +323,6 @@ static char *step_req_inx[] = {
 	"kill_requid",
 	"exit_code",
 	"nodes_alloc",
-	"cpus_alloc",
 	"task_cnt",
 	"task_dist",
 	"user_sec",
@@ -338,7 +347,9 @@ static char *step_req_inx[] = {
 	"ave_cpu",
 	"act_cpufreq",
 	"consumed_energy",
+	"req_cpufreq_min",
 	"req_cpufreq",
+	"req_cpufreq_gov",
 	"max_disk_read",
 	"max_disk_read_task",
 	"max_disk_read_node",
@@ -346,12 +357,13 @@ static char *step_req_inx[] = {
 	"max_disk_write",
 	"max_disk_write_task",
 	"max_disk_write_node",
-	"ave_disk_write"
+	"ave_disk_write",
+	"tres_alloc",
 };
 
 
 enum {
-	STEP_REQ_ID,
+	STEP_REQ_DB_INX,
 	STEP_REQ_STEPID,
 	STEP_REQ_START,
 	STEP_REQ_END,
@@ -363,7 +375,6 @@ enum {
 	STEP_REQ_KILL_REQUID,
 	STEP_REQ_EXIT_CODE,
 	STEP_REQ_NODES,
-	STEP_REQ_CPUS,
 	STEP_REQ_TASKS,
 	STEP_REQ_TASKDIST,
 	STEP_REQ_USER_SEC,
@@ -388,7 +399,9 @@ enum {
 	STEP_REQ_AVE_CPU,
 	STEP_REQ_ACT_CPUFREQ,
 	STEP_REQ_CONSUMED_ENERGY,
-	STEP_REQ_REQ_CPUFREQ,
+	STEP_REQ_REQ_CPUFREQ_MIN,
+	STEP_REQ_REQ_CPUFREQ_MAX,
+	STEP_REQ_REQ_CPUFREQ_GOV,
 	STEP_REQ_MAX_DISK_READ,
 	STEP_REQ_MAX_DISK_READ_TASK,
 	STEP_REQ_MAX_DISK_READ_NODE,
@@ -397,6 +410,7 @@ enum {
 	STEP_REQ_MAX_DISK_WRITE_TASK,
 	STEP_REQ_MAX_DISK_WRITE_NODE,
 	STEP_REQ_AVE_DISK_WRITE,
+	STEP_REQ_TRES,
 	STEP_REQ_COUNT,
 };
 
@@ -412,26 +426,171 @@ static char *suspend_req_inx[] = {
 };
 
 enum {
-	SUSPEND_REQ_ID,
+	SUSPEND_REQ_DB_INX,
 	SUSPEND_REQ_ASSOCID,
 	SUSPEND_REQ_START,
 	SUSPEND_REQ_END,
 	SUSPEND_REQ_COUNT
 };
 
+typedef enum {
+	PURGE_EVENT,
+	PURGE_SUSPEND,
+	PURGE_RESV,
+	PURGE_JOB,
+	PURGE_STEP
+} purge_type_t;
+
+char *purge_type_str[] = {
+	"event",
+	"suspend",
+	"resv",
+	"job",
+	"step"
+};
+
+static uint32_t _archive_table(purge_type_t type, mysql_conn_t *mysql_conn,
+			       char *cluster_name, time_t period_end,
+			       char *arch_dir, uint32_t archive_period);
+
 static int high_buffer_size = (1024 * 1024);
 
+/* Free functions just incase they are ever needed. */
+/* static void _free_local_event(local_event_t *object) */
+/* { */
+/* 	xfree(object->cluster_nodes); */
+/* 	xfree(object->node_name); */
+/* 	xfree(object->period_end); */
+/* 	xfree(object->period_start); */
+/* 	xfree(object->reason); */
+/* 	xfree(object->reason_uid); */
+/* 	xfree(object->state); */
+/* 	xfree(object->tres_str); */
+/* } */
+
+/* static void _free_local_job(local_job_t *object) */
+/* { */
+/* 	xfree(object->account); */
+/* 	xfree(object->alloc_nodes); */
+/* 	xfree(object->associd); */
+/* 	xfree(object->array_jobid); */
+/* 	xfree(object->array_max_tasks); */
+/* 	xfree(object->array_taskid); */
+/* 	xfree(object->blockid); */
+/* 	xfree(object->derived_ec); */
+/* 	xfree(object->derived_es); */
+/* 	xfree(object->exit_code); */
+/* 	xfree(object->eligible); */
+/* 	xfree(object->end); */
+/* 	xfree(object->gid); */
+/* 	xfree(object->job_db_inx); */
+/* 	xfree(object->jobid); */
+/* 	xfree(object->kill_requid); */
+/* 	xfree(object->name); */
+/* 	xfree(object->nodelist); */
+/* 	xfree(object->node_inx); */
+/* 	xfree(object->partition); */
+/* 	xfree(object->priority); */
+/* 	xfree(object->qos); */
+/* 	xfree(object->req_cpus); */
+/* 	xfree(object->req_mem); */
+/* 	xfree(object->resvid); */
+/* 	xfree(object->start); */
+/* 	xfree(object->state); */
+/* 	xfree(object->submit); */
+/* 	xfree(object->suspended); */
+/* 	xfree(object->timelimit); */
+/* 	xfree(object->track_steps); */
+/* 	xfree(object->tres_alloc_str); */
+/* 	xfree(object->uid); */
+/* 	xfree(object->wckey); */
+/* 	xfree(object->wckey_id); */
+/* } */
+
+/* static void _free_local_resv(local_resv_t *object) */
+/* { */
+/* 	xfree(object->assocs); */
+/* 	xfree(object->flags); */
+/* 	xfree(object->id); */
+/* 	xfree(object->name); */
+/* 	xfree(object->nodes); */
+/* 	xfree(object->node_inx); */
+/* 	xfree(object->time_end); */
+/* 	xfree(object->time_start); */
+/* 	xfree(object->tres_str); */
+/* } */
+
+/* static void _free_local_step(local_step_t *object) */
+/* { */
+/* 	xfree(object->act_cpufreq); */
+/* 	xfree(object->ave_cpu); */
+/* 	xfree(object->ave_disk_read); */
+/* 	xfree(object->ave_disk_write); */
+/* 	xfree(object->ave_pages); */
+/* 	xfree(object->ave_rss); */
+/* 	xfree(object->ave_vsize); */
+/* 	xfree(object->exit_code); */
+/* 	xfree(object->consumed_energy); */
+/* 	xfree(object->job_db_inx); */
+/* 	xfree(object->kill_requid); */
+/* 	xfree(object->max_disk_read); */
+/* 	xfree(object->max_disk_read_node); */
+/* 	xfree(object->max_disk_read_task); */
+/* 	xfree(object->max_disk_write); */
+/* 	xfree(object->max_disk_write_node); */
+/* 	xfree(object->max_disk_write_task); */
+/* 	xfree(object->max_pages); */
+/* 	xfree(object->max_pages_node); */
+/* 	xfree(object->max_pages_task); */
+/* 	xfree(object->max_rss); */
+/* 	xfree(object->max_rss_node); */
+/* 	xfree(object->max_rss_task); */
+/* 	xfree(object->max_vsize); */
+/* 	xfree(object->max_vsize_node); */
+/* 	xfree(object->max_vsize_task); */
+/* 	xfree(object->min_cpu); */
+/* 	xfree(object->min_cpu_node); */
+/* 	xfree(object->min_cpu_task); */
+/* 	xfree(object->name); */
+/* 	xfree(object->nodelist); */
+/* 	xfree(object->nodes); */
+/* 	xfree(object->node_inx); */
+/* 	xfree(object->period_end); */
+/* 	xfree(object->period_start); */
+/* 	xfree(object->period_suspended); */
+/* 	xfree(object->req_cpufreq_min); */
+/* 	xfree(object->req_cpufreq_max); */
+/* 	xfree(object->req_cpufreq_gov); */
+/* 	xfree(object->state); */
+/* 	xfree(object->stepid); */
+/* 	xfree(object->sys_sec); */
+/* 	xfree(object->sys_usec); */
+/* 	xfree(object->tasks); */
+/* 	xfree(object->task_dist); */
+/* 	xfree(object->tres_alloc_str); */
+/* 	xfree(object->user_sec); */
+/* 	xfree(object->user_usec); */
+/* } */
+
+/* static void _free_local_suspend(local_suspend_t *object) */
+/* { */
+/* 	xfree(object->associd); */
+/* 	xfree(object->job_db_inx); */
+/* 	xfree(object->period_end); */
+/* 	xfree(object->period_start); */
+/* } */
+
 static void _pack_local_event(local_event_t *object,
 			      uint16_t rpc_version, Buf buffer)
 {
 	packstr(object->cluster_nodes, buffer);
-	packstr(object->cpu_count, buffer);
 	packstr(object->node_name, buffer);
 	packstr(object->period_end, buffer);
 	packstr(object->period_start, buffer);
 	packstr(object->reason, buffer);
 	packstr(object->reason_uid, buffer);
 	packstr(object->state, buffer);
+	packstr(object->tres_str, buffer);
 }
 
 /* this needs to be allocated before calling, and since we aren't
@@ -440,15 +599,28 @@ static int _unpack_local_event(local_event_t *object,
 			       uint16_t rpc_version, Buf buffer)
 {
 	uint32_t tmp32;
+	char *tmp_char;
 
-	unpackstr_ptr(&object->cluster_nodes, &tmp32, buffer);
-	unpackstr_ptr(&object->cpu_count, &tmp32, buffer);
-	unpackstr_ptr(&object->node_name, &tmp32, buffer);
-	unpackstr_ptr(&object->period_end, &tmp32, buffer);
-	unpackstr_ptr(&object->period_start, &tmp32, buffer);
-	unpackstr_ptr(&object->reason, &tmp32, buffer);
-	unpackstr_ptr(&object->reason_uid, &tmp32, buffer);
-	unpackstr_ptr(&object->state, &tmp32, buffer);
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		unpackstr_ptr(&object->cluster_nodes, &tmp32, buffer);
+		unpackstr_ptr(&object->node_name, &tmp32, buffer);
+		unpackstr_ptr(&object->period_end, &tmp32, buffer);
+		unpackstr_ptr(&object->period_start, &tmp32, buffer);
+		unpackstr_ptr(&object->reason, &tmp32, buffer);
+		unpackstr_ptr(&object->reason_uid, &tmp32, buffer);
+		unpackstr_ptr(&object->state, &tmp32, buffer);
+		unpackstr_ptr(&object->tres_str, &tmp32, buffer);
+	} else {
+		unpackstr_ptr(&object->cluster_nodes, &tmp32, buffer);
+		unpackstr_ptr(&tmp_char, &tmp32, buffer);
+		object->tres_str = xstrdup_printf("%d=%s", TRES_CPU, tmp_char);
+		unpackstr_ptr(&object->node_name, &tmp32, buffer);
+		unpackstr_ptr(&object->period_end, &tmp32, buffer);
+		unpackstr_ptr(&object->period_start, &tmp32, buffer);
+		unpackstr_ptr(&object->reason, &tmp32, buffer);
+		unpackstr_ptr(&object->reason_uid, &tmp32, buffer);
+		unpackstr_ptr(&object->state, &tmp32, buffer);
+	}
 
 	return SLURM_SUCCESS;
 }
@@ -457,7 +629,6 @@ static void _pack_local_job(local_job_t *object,
 			    uint16_t rpc_version, Buf buffer)
 {
 	packstr(object->account, buffer);
-	packstr(object->alloc_cpus, buffer);
 	packstr(object->alloc_nodes, buffer);
 	packstr(object->associd, buffer);
 	packstr(object->array_jobid, buffer);
@@ -471,23 +642,25 @@ static void _pack_local_job(local_job_t *object,
 	packstr(object->eligible, buffer);
 	packstr(object->end, buffer);
 	packstr(object->gid, buffer);
-	packstr(object->id, buffer);
+	packstr(object->job_db_inx, buffer);
 	packstr(object->jobid, buffer);
 	packstr(object->kill_requid, buffer);
 	packstr(object->name, buffer);
 	packstr(object->nodelist, buffer);
 	packstr(object->node_inx, buffer);
-	packstr(object->partition, buffer); /* priority */
-	packstr(object->priority, buffer);  /* qos */
-	packstr(object->qos, buffer);       /* req_cpus */
-	packstr(object->req_cpus, buffer);  /* req_mem */
-	packstr(object->req_mem, buffer);   /* resvid */
-	packstr(object->resvid, buffer);    /* partition */
+	packstr(object->partition, buffer);
+	packstr(object->priority, buffer);
+	packstr(object->qos, buffer);
+	packstr(object->req_cpus, buffer);
+	packstr(object->req_mem, buffer);
+	packstr(object->resvid, buffer);
 	packstr(object->start, buffer);
 	packstr(object->state, buffer);
 	packstr(object->submit, buffer);
 	packstr(object->suspended, buffer);
 	packstr(object->track_steps, buffer);
+	packstr(object->tres_alloc_str, buffer);
+	packstr(object->tres_req_str, buffer);
 	packstr(object->uid, buffer);
 	packstr(object->wckey, buffer);
 	packstr(object->wckey_id, buffer);
@@ -499,6 +672,7 @@ static int _unpack_local_job(local_job_t *object,
 			     uint16_t rpc_version, Buf buffer)
 {
 	uint32_t tmp32;
+	char *tmp_char;
 
 	/* For protocols <= 14_11, job_req_inx and it's corresponding enum,
 	 * were out of sync. This caused the following variables to have the
@@ -513,11 +687,14 @@ static int _unpack_local_job(local_job_t *object,
 	 * The values were packed in the above order. To unpack the values
 	 * into the correct variables, the unpacking order is changed to
 	 * accomodate the shift in values. job->partition is unpacked before
-	 * job->start instead of after job->node_inx. */
+	 * job->start instead of after job->node_inx.
+	 *
+	 * 15.08: job_req_inx and the it's corresponding enum were synced up
+	 * and it unpacks in the expected order.
+	 */
 
-	if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		unpackstr_ptr(&object->account, &tmp32, buffer);
-		unpackstr_ptr(&object->alloc_cpus, &tmp32, buffer);
 		unpackstr_ptr(&object->alloc_nodes, &tmp32, buffer);
 		unpackstr_ptr(&object->associd, &tmp32, buffer);
 		unpackstr_ptr(&object->array_jobid, &tmp32, buffer);
@@ -531,7 +708,47 @@ static int _unpack_local_job(local_job_t *object,
 		unpackstr_ptr(&object->eligible, &tmp32, buffer);
 		unpackstr_ptr(&object->end, &tmp32, buffer);
 		unpackstr_ptr(&object->gid, &tmp32, buffer);
-		unpackstr_ptr(&object->id, &tmp32, buffer);
+		unpackstr_ptr(&object->job_db_inx, &tmp32, buffer);
+		unpackstr_ptr(&object->jobid, &tmp32, buffer);
+		unpackstr_ptr(&object->kill_requid, &tmp32, buffer);
+		unpackstr_ptr(&object->name, &tmp32, buffer);
+		unpackstr_ptr(&object->nodelist, &tmp32, buffer);
+		unpackstr_ptr(&object->node_inx, &tmp32, buffer);
+		unpackstr_ptr(&object->partition, &tmp32, buffer);
+		unpackstr_ptr(&object->priority, &tmp32, buffer);
+		unpackstr_ptr(&object->qos, &tmp32, buffer);
+		unpackstr_ptr(&object->req_cpus, &tmp32, buffer);
+		unpackstr_ptr(&object->req_mem, &tmp32, buffer);
+		unpackstr_ptr(&object->resvid, &tmp32, buffer);
+		unpackstr_ptr(&object->start, &tmp32, buffer);
+		unpackstr_ptr(&object->state, &tmp32, buffer);
+		unpackstr_ptr(&object->submit, &tmp32, buffer);
+		unpackstr_ptr(&object->suspended, &tmp32, buffer);
+		unpackstr_ptr(&object->track_steps, &tmp32, buffer);
+		unpackstr_ptr(&object->tres_alloc_str, &tmp32, buffer);
+		unpackstr_ptr(&object->tres_req_str, &tmp32, buffer);
+		unpackstr_ptr(&object->uid, &tmp32, buffer);
+		unpackstr_ptr(&object->wckey, &tmp32, buffer);
+		unpackstr_ptr(&object->wckey_id, &tmp32, buffer);
+	} else if (rpc_version >= SLURM_14_11_PROTOCOL_VERSION) {
+		unpackstr_ptr(&object->account, &tmp32, buffer);
+		unpackstr_ptr(&tmp_char, &tmp32, buffer);
+		object->tres_alloc_str = xstrdup_printf(
+			"%d=%s", TRES_CPU, tmp_char);
+		unpackstr_ptr(&object->alloc_nodes, &tmp32, buffer);
+		unpackstr_ptr(&object->associd, &tmp32, buffer);
+		unpackstr_ptr(&object->array_jobid, &tmp32, buffer);
+		unpackstr_ptr(&object->array_max_tasks, &tmp32, buffer);
+		unpackstr_ptr(&object->array_taskid, &tmp32, buffer);
+		unpackstr_ptr(&object->blockid, &tmp32, buffer);
+		unpackstr_ptr(&object->derived_ec, &tmp32, buffer);
+		unpackstr_ptr(&object->derived_es, &tmp32, buffer);
+		unpackstr_ptr(&object->exit_code, &tmp32, buffer);
+		unpackstr_ptr(&object->timelimit, &tmp32, buffer);
+		unpackstr_ptr(&object->eligible, &tmp32, buffer);
+		unpackstr_ptr(&object->end, &tmp32, buffer);
+		unpackstr_ptr(&object->gid, &tmp32, buffer);
+		unpackstr_ptr(&object->job_db_inx, &tmp32, buffer);
 		unpackstr_ptr(&object->jobid, &tmp32, buffer);
 		unpackstr_ptr(&object->kill_requid, &tmp32, buffer);
 		unpackstr_ptr(&object->name, &tmp32, buffer);
@@ -553,7 +770,9 @@ static int _unpack_local_job(local_job_t *object,
 		unpackstr_ptr(&object->wckey_id, &tmp32, buffer);
 	} else if (rpc_version >= SLURMDBD_2_6_VERSION) {
 		unpackstr_ptr(&object->account, &tmp32, buffer);
-		unpackstr_ptr(&object->alloc_cpus, &tmp32, buffer);
+		unpackstr_ptr(&tmp_char, &tmp32, buffer);
+		object->tres_alloc_str = xstrdup_printf(
+			"%d=%s", TRES_CPU, tmp_char);
 		unpackstr_ptr(&object->alloc_nodes, &tmp32, buffer);
 		unpackstr_ptr(&object->associd, &tmp32, buffer);
 		unpackstr_ptr(&object->blockid, &tmp32, buffer);
@@ -564,7 +783,7 @@ static int _unpack_local_job(local_job_t *object,
 		unpackstr_ptr(&object->eligible, &tmp32, buffer);
 		unpackstr_ptr(&object->end, &tmp32, buffer);
 		unpackstr_ptr(&object->gid, &tmp32, buffer);
-		unpackstr_ptr(&object->id, &tmp32, buffer);
+		unpackstr_ptr(&object->job_db_inx, &tmp32, buffer);
 		unpackstr_ptr(&object->jobid, &tmp32, buffer);
 		unpackstr_ptr(&object->kill_requid, &tmp32, buffer);
 		unpackstr_ptr(&object->name, &tmp32, buffer);
@@ -586,7 +805,9 @@ static int _unpack_local_job(local_job_t *object,
 		unpackstr_ptr(&object->wckey_id, &tmp32, buffer);
 	} else {
 		unpackstr_ptr(&object->account, &tmp32, buffer);
-		unpackstr_ptr(&object->alloc_cpus, &tmp32, buffer);
+		unpackstr_ptr(&tmp_char, &tmp32, buffer);
+		object->tres_alloc_str = xstrdup_printf(
+			"%d=%s", TRES_CPU, tmp_char);
 		unpackstr_ptr(&object->alloc_nodes, &tmp32, buffer);
 		unpackstr_ptr(&object->associd, &tmp32, buffer);
 		unpackstr_ptr(&object->blockid, &tmp32, buffer);
@@ -597,7 +818,7 @@ static int _unpack_local_job(local_job_t *object,
 		unpackstr_ptr(&object->eligible, &tmp32, buffer);
 		unpackstr_ptr(&object->end, &tmp32, buffer);
 		unpackstr_ptr(&object->gid, &tmp32, buffer);
-		unpackstr_ptr(&object->id, &tmp32, buffer);
+		unpackstr_ptr(&object->job_db_inx, &tmp32, buffer);
 		unpackstr_ptr(&object->jobid, &tmp32, buffer);
 		unpackstr_ptr(&object->kill_requid, &tmp32, buffer);
 		unpackstr_ptr(&object->name, &tmp32, buffer);
@@ -624,7 +845,6 @@ static void _pack_local_resv(local_resv_t *object,
 			     uint16_t rpc_version, Buf buffer)
 {
 	packstr(object->assocs, buffer);
-	packstr(object->cpus, buffer);
 	packstr(object->flags, buffer);
 	packstr(object->id, buffer);
 	packstr(object->name, buffer);
@@ -632,6 +852,7 @@ static void _pack_local_resv(local_resv_t *object,
 	packstr(object->node_inx, buffer);
 	packstr(object->time_end, buffer);
 	packstr(object->time_start, buffer);
+	packstr(object->tres_str, buffer);
 }
 
 /* this needs to be allocated before calling, and since we aren't
@@ -640,16 +861,30 @@ static int _unpack_local_resv(local_resv_t *object,
 			      uint16_t rpc_version, Buf buffer)
 {
 	uint32_t tmp32;
+	char *tmp_char;
 
-	unpackstr_ptr(&object->assocs, &tmp32, buffer);
-	unpackstr_ptr(&object->cpus, &tmp32, buffer);
-	unpackstr_ptr(&object->flags, &tmp32, buffer);
-	unpackstr_ptr(&object->id, &tmp32, buffer);
-	unpackstr_ptr(&object->name, &tmp32, buffer);
-	unpackstr_ptr(&object->nodes, &tmp32, buffer);
-	unpackstr_ptr(&object->node_inx, &tmp32, buffer);
-	unpackstr_ptr(&object->time_end, &tmp32, buffer);
-	unpackstr_ptr(&object->time_start, &tmp32, buffer);
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		unpackstr_ptr(&object->assocs, &tmp32, buffer);
+		unpackstr_ptr(&object->flags, &tmp32, buffer);
+		unpackstr_ptr(&object->id, &tmp32, buffer);
+		unpackstr_ptr(&object->name, &tmp32, buffer);
+		unpackstr_ptr(&object->nodes, &tmp32, buffer);
+		unpackstr_ptr(&object->node_inx, &tmp32, buffer);
+		unpackstr_ptr(&object->time_end, &tmp32, buffer);
+		unpackstr_ptr(&object->time_start, &tmp32, buffer);
+		unpackstr_ptr(&object->tres_str, &tmp32, buffer);
+	} else {
+		unpackstr_ptr(&object->assocs, &tmp32, buffer);
+		unpackstr_ptr(&tmp_char, &tmp32, buffer);
+		object->tres_str = xstrdup_printf("%d=%s", TRES_CPU, tmp_char);
+		unpackstr_ptr(&object->flags, &tmp32, buffer);
+		unpackstr_ptr(&object->id, &tmp32, buffer);
+		unpackstr_ptr(&object->name, &tmp32, buffer);
+		unpackstr_ptr(&object->nodes, &tmp32, buffer);
+		unpackstr_ptr(&object->node_inx, &tmp32, buffer);
+		unpackstr_ptr(&object->time_end, &tmp32, buffer);
+		unpackstr_ptr(&object->time_start, &tmp32, buffer);
+	}
 
 	return SLURM_SUCCESS;
 }
@@ -657,128 +892,54 @@ static int _unpack_local_resv(local_resv_t *object,
 static void _pack_local_step(local_step_t *object,
 			     uint16_t rpc_version, Buf buffer)
 {
-	if (rpc_version >= SLURMDBD_2_6_VERSION) {
-		packstr(object->act_cpufreq, buffer);
-		packstr(object->ave_cpu, buffer);
-		packstr(object->ave_disk_read, buffer);
-		packstr(object->ave_disk_write, buffer);
-		packstr(object->ave_pages, buffer);
-		packstr(object->ave_rss, buffer);
-		packstr(object->ave_vsize, buffer);
-		packstr(object->exit_code, buffer);
-		packstr(object->consumed_energy, buffer);
-		packstr(object->cpus, buffer);
-		packstr(object->id, buffer);
-		packstr(object->kill_requid, buffer);
-		packstr(object->max_disk_read, buffer);
-		packstr(object->max_disk_read_node, buffer);
-		packstr(object->max_disk_read_task, buffer);
-		packstr(object->max_disk_write, buffer);
-		packstr(object->max_disk_write_node, buffer);
-		packstr(object->max_disk_write_task, buffer);
-		packstr(object->max_pages, buffer);
-		packstr(object->max_pages_node, buffer);
-		packstr(object->max_pages_task, buffer);
-		packstr(object->max_rss, buffer);
-		packstr(object->max_rss_node, buffer);
-		packstr(object->max_rss_task, buffer);
-		packstr(object->max_vsize, buffer);
-		packstr(object->max_vsize_node, buffer);
-		packstr(object->max_vsize_task, buffer);
-		packstr(object->min_cpu, buffer);
-		packstr(object->min_cpu_node, buffer);
-		packstr(object->min_cpu_task, buffer);
-		packstr(object->name, buffer);
-		packstr(object->nodelist, buffer);
-		packstr(object->nodes, buffer);
-		packstr(object->node_inx, buffer);
-		packstr(object->period_end, buffer);
-		packstr(object->period_start, buffer);
-		packstr(object->period_suspended, buffer);
-		packstr(object->req_cpufreq, buffer);
-		packstr(object->state, buffer);
-		packstr(object->stepid, buffer);
-		packstr(object->sys_sec, buffer);
-		packstr(object->sys_usec, buffer);
-		packstr(object->tasks, buffer);
-		packstr(object->task_dist, buffer);
-		packstr(object->user_sec, buffer);
-		packstr(object->user_usec, buffer);
-	} else if (rpc_version >= SLURMDBD_2_5_VERSION) {
-		packstr(object->act_cpufreq, buffer);
-		packstr(object->ave_cpu, buffer);
-		packstr(object->ave_pages, buffer);
-		packstr(object->ave_rss, buffer);
-		packstr(object->ave_vsize, buffer);
-		packstr(object->exit_code, buffer);
-		packstr(object->consumed_energy, buffer);
-		packstr(object->cpus, buffer);
-		packstr(object->id, buffer);
-		packstr(object->kill_requid, buffer);
-		packstr(object->max_pages, buffer);
-		packstr(object->max_pages_node, buffer);
-		packstr(object->max_pages_task, buffer);
-		packstr(object->max_rss, buffer);
-		packstr(object->max_rss_node, buffer);
-		packstr(object->max_rss_task, buffer);
-		packstr(object->max_vsize, buffer);
-		packstr(object->max_vsize_node, buffer);
-		packstr(object->max_vsize_task, buffer);
-		packstr(object->min_cpu, buffer);
-		packstr(object->min_cpu_node, buffer);
-		packstr(object->min_cpu_task, buffer);
-		packstr(object->name, buffer);
-		packstr(object->nodelist, buffer);
-		packstr(object->nodes, buffer);
-		packstr(object->node_inx, buffer);
-		packstr(object->period_end, buffer);
-		packstr(object->period_start, buffer);
-		packstr(object->period_suspended, buffer);
-		packstr(object->state, buffer);
-		packstr(object->stepid, buffer);
-		packstr(object->sys_sec, buffer);
-		packstr(object->sys_usec, buffer);
-		packstr(object->tasks, buffer);
-		packstr(object->task_dist, buffer);
-		packstr(object->user_sec, buffer);
-		packstr(object->user_usec, buffer);
-	} else {
-		packstr(object->ave_cpu, buffer);
-		packstr(object->ave_pages, buffer);
-		packstr(object->ave_rss, buffer);
-		packstr(object->ave_vsize, buffer);
-		packstr(object->exit_code, buffer);
-		packstr(object->cpus, buffer);
-		packstr(object->id, buffer);
-		packstr(object->kill_requid, buffer);
-		packstr(object->max_pages, buffer);
-		packstr(object->max_pages_node, buffer);
-		packstr(object->max_pages_task, buffer);
-		packstr(object->max_rss, buffer);
-		packstr(object->max_rss_node, buffer);
-		packstr(object->max_rss_task, buffer);
-		packstr(object->max_vsize, buffer);
-		packstr(object->max_vsize_node, buffer);
-		packstr(object->max_vsize_task, buffer);
-		packstr(object->min_cpu, buffer);
-		packstr(object->min_cpu_node, buffer);
-		packstr(object->min_cpu_task, buffer);
-		packstr(object->name, buffer);
-		packstr(object->nodelist, buffer);
-		packstr(object->nodes, buffer);
-		packstr(object->node_inx, buffer);
-		packstr(object->period_end, buffer);
-		packstr(object->period_start, buffer);
-		packstr(object->period_suspended, buffer);
-		packstr(object->state, buffer);
-		packstr(object->stepid, buffer);
-		packstr(object->sys_sec, buffer);
-		packstr(object->sys_usec, buffer);
-		packstr(object->tasks, buffer);
-		packstr(object->task_dist, buffer);
-		packstr(object->user_sec, buffer);
-		packstr(object->user_usec, buffer);
-	}
+	packstr(object->act_cpufreq, buffer);
+	packstr(object->ave_cpu, buffer);
+	packstr(object->ave_disk_read, buffer);
+	packstr(object->ave_disk_write, buffer);
+	packstr(object->ave_pages, buffer);
+	packstr(object->ave_rss, buffer);
+	packstr(object->ave_vsize, buffer);
+	packstr(object->exit_code, buffer);
+	packstr(object->consumed_energy, buffer);
+	packstr(object->job_db_inx, buffer);
+	packstr(object->kill_requid, buffer);
+	packstr(object->max_disk_read, buffer);
+	packstr(object->max_disk_read_node, buffer);
+	packstr(object->max_disk_read_task, buffer);
+	packstr(object->max_disk_write, buffer);
+	packstr(object->max_disk_write_node, buffer);
+	packstr(object->max_disk_write_task, buffer);
+	packstr(object->max_pages, buffer);
+	packstr(object->max_pages_node, buffer);
+	packstr(object->max_pages_task, buffer);
+	packstr(object->max_rss, buffer);
+	packstr(object->max_rss_node, buffer);
+	packstr(object->max_rss_task, buffer);
+	packstr(object->max_vsize, buffer);
+	packstr(object->max_vsize_node, buffer);
+	packstr(object->max_vsize_task, buffer);
+	packstr(object->min_cpu, buffer);
+	packstr(object->min_cpu_node, buffer);
+	packstr(object->min_cpu_task, buffer);
+	packstr(object->name, buffer);
+	packstr(object->nodelist, buffer);
+	packstr(object->nodes, buffer);
+	packstr(object->node_inx, buffer);
+	packstr(object->period_end, buffer);
+	packstr(object->period_start, buffer);
+	packstr(object->period_suspended, buffer);
+	packstr(object->req_cpufreq_min, buffer);
+	packstr(object->req_cpufreq_max, buffer);
+	packstr(object->req_cpufreq_gov, buffer);
+	packstr(object->state, buffer);
+	packstr(object->stepid, buffer);
+	packstr(object->sys_sec, buffer);
+	packstr(object->sys_usec, buffer);
+	packstr(object->tasks, buffer);
+	packstr(object->task_dist, buffer);
+	packstr(object->tres_alloc_str, buffer);
+	packstr(object->user_sec, buffer);
+	packstr(object->user_usec, buffer);
 }
 
 /* this needs to be allocated before calling, and since we aren't
@@ -787,8 +948,9 @@ static int _unpack_local_step(local_step_t *object,
 			      uint16_t rpc_version, Buf buffer)
 {
 	uint32_t tmp32;
+	char *tmp_char;
 
-	if (rpc_version >= SLURMDBD_2_6_VERSION) {
+	if (rpc_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		unpackstr_ptr(&object->act_cpufreq, &tmp32, buffer);
 		unpackstr_ptr(&object->ave_cpu, &tmp32, buffer);
 		unpackstr_ptr(&object->ave_disk_read, &tmp32, buffer);
@@ -798,8 +960,7 @@ static int _unpack_local_step(local_step_t *object,
 		unpackstr_ptr(&object->ave_vsize, &tmp32, buffer);
 		unpackstr_ptr(&object->exit_code, &tmp32, buffer);
 		unpackstr_ptr(&object->consumed_energy, &tmp32, buffer);
-		unpackstr_ptr(&object->cpus, &tmp32, buffer);
-		unpackstr_ptr(&object->id, &tmp32, buffer);
+		unpackstr_ptr(&object->job_db_inx, &tmp32, buffer);
 		unpackstr_ptr(&object->kill_requid, &tmp32, buffer);
 		unpackstr_ptr(&object->max_disk_read, &tmp32, buffer);
 		unpackstr_ptr(&object->max_disk_read_node, &tmp32, buffer);
@@ -826,26 +987,39 @@ static int _unpack_local_step(local_step_t *object,
 		unpackstr_ptr(&object->period_end, &tmp32, buffer);
 		unpackstr_ptr(&object->period_start, &tmp32, buffer);
 		unpackstr_ptr(&object->period_suspended, &tmp32, buffer);
-		unpackstr_ptr(&object->req_cpufreq, &tmp32, buffer);
+		unpackstr_ptr(&object->req_cpufreq_min, &tmp32, buffer);
+		unpackstr_ptr(&object->req_cpufreq_max, &tmp32, buffer);
+		unpackstr_ptr(&object->req_cpufreq_gov, &tmp32, buffer);
 		unpackstr_ptr(&object->state, &tmp32, buffer);
 		unpackstr_ptr(&object->stepid, &tmp32, buffer);
 		unpackstr_ptr(&object->sys_sec, &tmp32, buffer);
 		unpackstr_ptr(&object->sys_usec, &tmp32, buffer);
 		unpackstr_ptr(&object->tasks, &tmp32, buffer);
 		unpackstr_ptr(&object->task_dist, &tmp32, buffer);
+		unpackstr_ptr(&object->tres_alloc_str, &tmp32, buffer);
 		unpackstr_ptr(&object->user_sec, &tmp32, buffer);
 		unpackstr_ptr(&object->user_usec, &tmp32, buffer);
-	} else if (rpc_version >= SLURMDBD_2_5_VERSION) {
+	} else if (rpc_version >= SLURMDBD_2_6_VERSION) {
 		unpackstr_ptr(&object->act_cpufreq, &tmp32, buffer);
 		unpackstr_ptr(&object->ave_cpu, &tmp32, buffer);
+		unpackstr_ptr(&object->ave_disk_read, &tmp32, buffer);
+		unpackstr_ptr(&object->ave_disk_write, &tmp32, buffer);
 		unpackstr_ptr(&object->ave_pages, &tmp32, buffer);
 		unpackstr_ptr(&object->ave_rss, &tmp32, buffer);
 		unpackstr_ptr(&object->ave_vsize, &tmp32, buffer);
 		unpackstr_ptr(&object->exit_code, &tmp32, buffer);
 		unpackstr_ptr(&object->consumed_energy, &tmp32, buffer);
-		unpackstr_ptr(&object->cpus, &tmp32, buffer);
-		unpackstr_ptr(&object->id, &tmp32, buffer);
+		unpackstr_ptr(&tmp_char, &tmp32, buffer);
+		object->tres_alloc_str = xstrdup_printf(
+			"%d=%s", TRES_CPU, tmp_char);
+		unpackstr_ptr(&object->job_db_inx, &tmp32, buffer);
 		unpackstr_ptr(&object->kill_requid, &tmp32, buffer);
+		unpackstr_ptr(&object->max_disk_read, &tmp32, buffer);
+		unpackstr_ptr(&object->max_disk_read_node, &tmp32, buffer);
+		unpackstr_ptr(&object->max_disk_read_task, &tmp32, buffer);
+		unpackstr_ptr(&object->max_disk_write, &tmp32, buffer);
+		unpackstr_ptr(&object->max_disk_write_node, &tmp32, buffer);
+		unpackstr_ptr(&object->max_disk_write_task, &tmp32, buffer);
 		unpackstr_ptr(&object->max_pages, &tmp32, buffer);
 		unpackstr_ptr(&object->max_pages_node, &tmp32, buffer);
 		unpackstr_ptr(&object->max_pages_task, &tmp32, buffer);
@@ -865,6 +1039,7 @@ static int _unpack_local_step(local_step_t *object,
 		unpackstr_ptr(&object->period_end, &tmp32, buffer);
 		unpackstr_ptr(&object->period_start, &tmp32, buffer);
 		unpackstr_ptr(&object->period_suspended, &tmp32, buffer);
+		unpackstr_ptr(&object->req_cpufreq_max, &tmp32, buffer);
 		unpackstr_ptr(&object->state, &tmp32, buffer);
 		unpackstr_ptr(&object->stepid, &tmp32, buffer);
 		unpackstr_ptr(&object->sys_sec, &tmp32, buffer);
@@ -874,51 +1049,20 @@ static int _unpack_local_step(local_step_t *object,
 		unpackstr_ptr(&object->user_sec, &tmp32, buffer);
 		unpackstr_ptr(&object->user_usec, &tmp32, buffer);
 	} else {
-		unpackstr_ptr(&object->ave_cpu, &tmp32, buffer);
-		unpackstr_ptr(&object->ave_pages, &tmp32, buffer);
-		unpackstr_ptr(&object->ave_rss, &tmp32, buffer);
-		unpackstr_ptr(&object->ave_vsize, &tmp32, buffer);
-		unpackstr_ptr(&object->exit_code, &tmp32, buffer);
-		unpackstr_ptr(&object->cpus, &tmp32, buffer);
-		unpackstr_ptr(&object->id, &tmp32, buffer);
-		unpackstr_ptr(&object->kill_requid, &tmp32, buffer);
-		unpackstr_ptr(&object->max_pages, &tmp32, buffer);
-		unpackstr_ptr(&object->max_pages_node, &tmp32, buffer);
-		unpackstr_ptr(&object->max_pages_task, &tmp32, buffer);
-		unpackstr_ptr(&object->max_rss, &tmp32, buffer);
-		unpackstr_ptr(&object->max_rss_node, &tmp32, buffer);
-		unpackstr_ptr(&object->max_rss_task, &tmp32, buffer);
-		unpackstr_ptr(&object->max_vsize, &tmp32, buffer);
-		unpackstr_ptr(&object->max_vsize_node, &tmp32, buffer);
-		unpackstr_ptr(&object->max_vsize_task, &tmp32, buffer);
-		unpackstr_ptr(&object->min_cpu, &tmp32, buffer);
-		unpackstr_ptr(&object->min_cpu_node, &tmp32, buffer);
-		unpackstr_ptr(&object->min_cpu_task, &tmp32, buffer);
-		unpackstr_ptr(&object->name, &tmp32, buffer);
-		unpackstr_ptr(&object->nodelist, &tmp32, buffer);
-		unpackstr_ptr(&object->nodes, &tmp32, buffer);
-		unpackstr_ptr(&object->node_inx, &tmp32, buffer);
-		unpackstr_ptr(&object->period_end, &tmp32, buffer);
-		unpackstr_ptr(&object->period_start, &tmp32, buffer);
-		unpackstr_ptr(&object->period_suspended, &tmp32, buffer);
-		unpackstr_ptr(&object->state, &tmp32, buffer);
-		unpackstr_ptr(&object->stepid, &tmp32, buffer);
-		unpackstr_ptr(&object->sys_sec, &tmp32, buffer);
-		unpackstr_ptr(&object->sys_usec, &tmp32, buffer);
-		unpackstr_ptr(&object->tasks, &tmp32, buffer);
-		unpackstr_ptr(&object->task_dist, &tmp32, buffer);
-		unpackstr_ptr(&object->user_sec, &tmp32, buffer);
-		unpackstr_ptr(&object->user_usec, &tmp32, buffer);
+		goto unpack_error;
 	}
 
 	return SLURM_SUCCESS;
+
+unpack_error:
+	return SLURM_ERROR;
 }
 
 static void _pack_local_suspend(local_suspend_t *object,
 				uint16_t rpc_version, Buf buffer)
 {
 	packstr(object->associd, buffer);
-	packstr(object->id, buffer);
+	packstr(object->job_db_inx, buffer);
 	packstr(object->period_end, buffer);
 	packstr(object->period_start, buffer);
 }
@@ -931,14 +1075,15 @@ static int _unpack_local_suspend(local_suspend_t *object,
 	uint32_t tmp32;
 
 	unpackstr_ptr(&object->associd, &tmp32, buffer);
-	unpackstr_ptr(&object->id, &tmp32, buffer);
+	unpackstr_ptr(&object->job_db_inx, &tmp32, buffer);
 	unpackstr_ptr(&object->period_end, &tmp32, buffer);
 	unpackstr_ptr(&object->period_start, &tmp32, buffer);
 
 	return SLURM_SUCCESS;
 }
 
-static int _process_old_sql_line(const char *data_in, char **data_full_out)
+static int _process_old_sql_line(const char *data_in,
+				 char **cluster_name, char **data_full_out)
 {
 	int start = 0, i = 0;
 	char *beginning = NULL;
@@ -949,7 +1094,6 @@ static int _process_old_sql_line(const char *data_in, char **data_full_out)
 	char *new_vals = NULL;
 	char *vals = NULL;
 	char *new_cluster_name = NULL;
-	char *cluster_name = NULL;
 	int rc = SLURM_SUCCESS;
 	int cnt = 0, cluster_inx = -1, ending_start = 0, ending_end = 0;
 	bool delete = 0;
@@ -1041,6 +1185,9 @@ static int _process_old_sql_line(const char *data_in, char **data_full_out)
 		} else if (!strncmp("period_end", data_in+i, 10)) {
 			xstrcat(fields, "time_end");
 			i+=10;
+		} else if (!strncmp("cpu_count", data_in+i, 9)) {
+			xstrcat(fields, "count");
+			i+=9;
 		} else if (!strncmp("jobid", data_in+i, 5)) {
 			xstrcat(fields, "id_job");
 			i+=5;
@@ -1098,6 +1245,18 @@ static int _process_old_sql_line(const char *data_in, char **data_full_out)
 			else if (table == step_table)
 				xstrcat(fields, "step_name");
 			i+=4;
+		} else if (!strncmp("id_tres", data_in+i, 7)) {
+			start = i;
+			while (data_in[i]
+			       && data_in[i] != ',' && data_in[i] != ')') {
+				i++;
+			}
+			if (!data_in[i]) {
+				error("returning here end");
+				rc = SLURM_ERROR;
+				goto end_it;
+			}
+			xstrncat(fields, data_in+start, (i-start));
 		} else if (!strncmp("id", data_in+i, 2)) {
 			i+=2;
 			if ((table == assoc_day_table)
@@ -1155,9 +1314,9 @@ static int _process_old_sql_line(const char *data_in, char **data_full_out)
 					rc = SLURM_ERROR;
 					goto end_it;
 				}
-
-				cluster_name = xstrndup(data_in+start,
-							(i-start));
+				xfree(*cluster_name);
+				*cluster_name = xstrndup(data_in+start,
+							 (i-start));
 				i++;
 			}
 		} else {
@@ -1314,9 +1473,11 @@ static int _process_old_sql_line(const char *data_in, char **data_full_out)
 			cnt = 0;
 			while ((i < ending_start) && data_in[i] != ')') {
 				start = i;
-				while ((i < ending_start)
-				       && data_in[i] != ','
-				       && data_in[i] != ')') {
+				while (i < ending_start) {
+					if (data_in[i] == ',' ||
+					    (data_in[i] == ')' &&
+					     data_in[i-1] != '('))
+						break;
 					i++;
 				}
 				if (!data_in[i]) {
@@ -1328,19 +1489,22 @@ static int _process_old_sql_line(const char *data_in, char **data_full_out)
 					   ticks */
 					xstrncat(new_cluster_name,
 						 data_in+start+1, (i-start-2));
-					if (cluster_name) {
-						if (strcmp(cluster_name,
+					if (*cluster_name) {
+						if (strcmp(*cluster_name,
 							   new_cluster_name))
 							new_cluster = 1;
 						else
 							xfree(new_cluster_name);
 					} else {
-						cluster_name = new_cluster_name;
+						xfree(*cluster_name);
+						*cluster_name =
+							new_cluster_name;
 						new_cluster_name = NULL;
 					}
 				} else {
 					xstrncat(new_vals, data_in+start,
 						 (i-start));
+
 					if (data_in[i]) {
 						if (data_in[i] == ',')
 							xstrcat(new_vals, ", ");
@@ -1369,12 +1533,12 @@ static int _process_old_sql_line(const char *data_in, char **data_full_out)
 				/*      fields, vals, ending); */
 				xstrfmtcat(data_out,
 					   "%s \"%s_%s\" (%s) values %s %s",
-					   beginning, cluster_name,
+					   beginning, *cluster_name,
 					   table, fields, vals, ending);
 				new_cluster = 0;
 				xfree(vals);
-				xfree(cluster_name);
-				cluster_name = new_cluster_name;
+				xfree(*cluster_name);
+				*cluster_name = new_cluster_name;
 				new_cluster_name = NULL;
 			}
 
@@ -1389,7 +1553,7 @@ static int _process_old_sql_line(const char *data_in, char **data_full_out)
 		i = ending_end;
 	}
 
-	if (!cluster_name) {
+	if (!*cluster_name) {
 		error("No cluster given for %s", table);
 		goto end_it;
 	}
@@ -1398,24 +1562,23 @@ static int _process_old_sql_line(const char *data_in, char **data_full_out)
 		/* info("adding insert\n%s \"%s_%s\" (%s) values %s %s",
 		   beginning, cluster_name, table, fields, vals, ending); */
 		xstrfmtcat(data_out, "%s \"%s_%s\" (%s) values %s %s",
-			   beginning, cluster_name, table, fields,
+			   beginning, *cluster_name, table, fields,
 			   vals, ending);
 	} else {
 		if (fields) {
 			/* info("adding delete\n%s \"%s_%s\" %s", */
 			/*      beginning, cluster_name, table, fields); */
 			xstrfmtcat(data_out, "%s \"%s_%s\" %s",
-				   beginning, cluster_name, table, fields);
+				   beginning, *cluster_name, table, fields);
 		} else {
 			/* info("adding drop\ndrop table \"%s_%s\";", */
 			/*      cluster_name, table); */
 			xstrfmtcat(data_out, "drop table \"%s_%s\";",
-				   cluster_name, table);
+				   *cluster_name, table);
 		}
 	}
 
 end_it:
-	xfree(cluster_name);
 	xfree(beginning);
 	xfree(ending);
 	xfree(fields);
@@ -1433,14 +1596,16 @@ static int _process_old_sql(char **data)
 	char *data_in = *data;
 	char *data_out = NULL;
 	int rc = SLURM_SUCCESS;
+	char *cluster_name = NULL;
 
 	while (data_in[i]) {
-		if ((rc = _process_old_sql_line(data_in+i, &data_out)) == -1)
+		if ((rc = _process_old_sql_line(
+			     data_in+i, &cluster_name, &data_out)) == -1)
 			break;
 		i += rc;
 	}
 	//rc = -1;
-
+	xfree(cluster_name);
 	xfree(data_in);
 	if (rc == -1)
 		xfree(data_out);
@@ -1449,47 +1614,56 @@ static int _process_old_sql(char **data)
 	return rc;
 }
 
-/* returns count of events archived or SLURM_ERROR on error */
-static uint32_t _archive_events(mysql_conn_t *mysql_conn, char *cluster_name,
-				time_t period_end, char *arch_dir,
-				uint32_t archive_period)
+static char *_get_archive_columns(purge_type_t type)
 {
-	MYSQL_RES *result = NULL;
-	MYSQL_ROW row;
-	char *tmp = NULL, *query = NULL;
-	time_t period_start = 0;
-	uint32_t cnt = 0;
-	local_event_t event;
-	Buf buffer;
-	int error_code = 0, i = 0;
-
-	xfree(tmp);
-	xstrfmtcat(tmp, "%s", event_req_inx[0]);
-	for(i=1; i<EVENT_REQ_COUNT; i++) {
-		xstrfmtcat(tmp, ", %s", event_req_inx[i]);
-	}
+	char **cols = NULL;
+	char *tmp = NULL;
+	int col_count = 0, i = 0;
 
-	/* get all the events started before this time listed */
-	query = xstrdup_printf("select %s from \"%s_%s\" where "
-			       "time_start <= %ld "
-			       "&& time_end != 0 order by time_start asc",
-			       tmp, cluster_name, event_table, period_end);
-	xfree(tmp);
+	xfree(cols);
 
-//	START_TIMER;
-	if (debug_flags & DEBUG_FLAG_DB_USAGE)
-		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
-	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
-		xfree(query);
-		return SLURM_ERROR;
+	switch (type) {
+	case PURGE_EVENT:
+		cols      = event_req_inx;
+		col_count = EVENT_REQ_COUNT;
+		break;
+	case PURGE_SUSPEND:
+		cols      = suspend_req_inx;
+		col_count = SUSPEND_REQ_COUNT;
+		break;
+	case PURGE_RESV:
+		cols      = resv_req_inx;
+		col_count = RESV_REQ_COUNT;
+		break;
+	case PURGE_JOB:
+		cols      = job_req_inx;
+		col_count = JOB_REQ_COUNT;
+		break;
+	case PURGE_STEP:
+		cols      = step_req_inx;
+		col_count = STEP_REQ_COUNT;
+		break;
+	default:
+		xassert(0);
+		return NULL;
 	}
-	xfree(query);
 
-	if (!(cnt = mysql_num_rows(result))) {
-		mysql_free_result(result);
-		return 0;
+	xstrfmtcat(tmp, "%s", cols[0]);
+	for (i=1; i<col_count; i++) {
+		xstrfmtcat(tmp, ", %s", cols[i]);
 	}
 
+	return tmp;
+}
+
+
+static Buf _pack_archive_events(MYSQL_RES *result, char *cluster_name,
+				uint32_t cnt, time_t *period_start)
+{
+	MYSQL_ROW row;
+	Buf buffer;
+	local_event_t event;
+
 	buffer = init_buf(high_buffer_size);
 	pack16(SLURM_PROTOCOL_VERSION, buffer);
 	pack_time(time(NULL), buffer);
@@ -1498,36 +1672,24 @@ static uint32_t _archive_events(mysql_conn_t *mysql_conn, char *cluster_name,
 	pack32(cnt, buffer);
 
 	while ((row = mysql_fetch_row(result))) {
-		if (!period_start)
-			period_start = slurm_atoul(row[EVENT_REQ_START]);
+		if (period_start && !*period_start)
+			*period_start = slurm_atoul(row[EVENT_REQ_START]);
 
 		memset(&event, 0, sizeof(local_event_t));
 
 		event.cluster_nodes = row[EVENT_REQ_CNODES];
-		event.cpu_count = row[EVENT_REQ_CPU];
 		event.node_name = row[EVENT_REQ_NODE];
 		event.period_end = row[EVENT_REQ_END];
 		event.period_start = row[EVENT_REQ_START];
 		event.reason = row[EVENT_REQ_REASON];
 		event.reason_uid = row[EVENT_REQ_REASON_UID];
 		event.state = row[EVENT_REQ_STATE];
+		event.tres_str = row[EVENT_REQ_TRES];
 
 		_pack_local_event(&event, SLURM_PROTOCOL_VERSION, buffer);
 	}
-	mysql_free_result(result);
-
-//	END_TIMER2("step query");
-//	info("event query took %s", TIME_STR);
 
-	error_code = archive_write_file(buffer, cluster_name,
-					period_start, period_end,
-					arch_dir, "event", archive_period);
-	free_buf(buffer);
-
-	if (error_code != SLURM_SUCCESS)
-		return error_code;
-
-	return cnt;
+	return buffer;
 }
 
 /* returns sql statement from archived data or NULL on error */
@@ -1547,8 +1709,9 @@ _load_events(uint16_t rpc_version, Buf buffer, char *cluster_name,
 		xstrcat(format, ", '%s'");
 	}
 	xstrcat(insert, ") values ");
-	xstrcat(format, ")");
-	for(i=0; i<rec_cnt; i++) {
+	xstrcat(format, ");");
+
+	for (i=0; i<rec_cnt; i++) {
 		memset(&object, 0, sizeof(local_event_t));
 		if (_unpack_local_event(&object, rpc_version, buffer)
 		    != SLURM_SUCCESS) {
@@ -1557,6 +1720,7 @@ _load_events(uint16_t rpc_version, Buf buffer, char *cluster_name,
 			xfree(insert);
 			break;
 		}
+
 		if (i)
 			xstrcat(insert, ", ");
 
@@ -1565,11 +1729,13 @@ _load_events(uint16_t rpc_version, Buf buffer, char *cluster_name,
 			   object.period_end,
 			   object.node_name,
 			   object.cluster_nodes,
-			   object.cpu_count,
 			   object.reason,
 			   object.reason_uid,
-			   object.state);
+			   object.state,
+			   object.tres_str);
 
+		if (rpc_version < SLURM_15_08_PROTOCOL_VERSION)
+			xfree(object.tres_str);
 	}
 //	END_TIMER2("step query");
 //	info("event query took %s", TIME_STR);
@@ -1578,46 +1744,12 @@ _load_events(uint16_t rpc_version, Buf buffer, char *cluster_name,
 	return insert;
 }
 
-/* returns count of jobs archived or SLURM_ERROR on error */
-static uint32_t _archive_jobs(mysql_conn_t *mysql_conn, char *cluster_name,
-			      time_t period_end, char *arch_dir,
-			      uint32_t archive_period)
+static Buf _pack_archive_jobs(MYSQL_RES *result, char *cluster_name,
+			      uint32_t cnt, time_t *period_start)
 {
-	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
-	char *tmp = NULL, *query = NULL;
-	time_t period_start = 0;
-	uint32_t cnt = 0;
-	local_job_t job;
 	Buf buffer;
-	int error_code = 0, i = 0;
-
-	xfree(tmp);
-	xstrfmtcat(tmp, "%s", job_req_inx[0]);
-	for(i=1; i<JOB_REQ_COUNT; i++) {
-		xstrfmtcat(tmp, ", %s", job_req_inx[i]);
-	}
-
-	/* get all the events started before this time listed */
-	query = xstrdup_printf("select %s from \"%s_%s\" where "
-			       "time_submit < %ld && time_end != 0 && !deleted "
-			       "order by time_submit asc",
-			       tmp, cluster_name, job_table, period_end);
-	xfree(tmp);
-
-//	START_TIMER;
-	if (debug_flags & DEBUG_FLAG_DB_USAGE)
-		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
-	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
-		xfree(query);
-		return SLURM_ERROR;
-	}
-	xfree(query);
-
-	if (!(cnt = mysql_num_rows(result))) {
-		mysql_free_result(result);
-		return 0;
-	}
+	local_job_t job;
 
 	buffer = init_buf(high_buffer_size);
 	pack16(SLURM_PROTOCOL_VERSION, buffer);
@@ -1627,13 +1759,12 @@ static uint32_t _archive_jobs(mysql_conn_t *mysql_conn, char *cluster_name,
 	pack32(cnt, buffer);
 
 	while ((row = mysql_fetch_row(result))) {
-		if (!period_start)
-			period_start = slurm_atoul(row[JOB_REQ_SUBMIT]);
+		if (period_start && !*period_start)
+			*period_start = slurm_atoul(row[JOB_REQ_SUBMIT]);
 
 		memset(&job, 0, sizeof(local_job_t));
 
 		job.account = row[JOB_REQ_ACCOUNT];
-		job.alloc_cpus = row[JOB_REQ_ALLOC_CPUS];
 		job.alloc_nodes = row[JOB_REQ_ALLOC_NODES];
 		job.associd = row[JOB_REQ_ASSOCID];
 		job.array_jobid = row[JOB_REQ_ARRAYJOBID];
@@ -1647,43 +1778,33 @@ static uint32_t _archive_jobs(mysql_conn_t *mysql_conn, char *cluster_name,
 		job.eligible = row[JOB_REQ_ELIGIBLE];
 		job.end = row[JOB_REQ_END];
 		job.gid = row[JOB_REQ_GID];
-		job.id = row[JOB_REQ_ID];
+		job.job_db_inx = row[JOB_REQ_DB_INX];
 		job.jobid = row[JOB_REQ_JOBID];
 		job.kill_requid = row[JOB_REQ_KILL_REQUID];
 		job.name = row[JOB_REQ_NAME];
 		job.nodelist = row[JOB_REQ_NODELIST];
 		job.node_inx = row[JOB_REQ_NODE_INX];
-		job.partition = row[JOB_REQ_PARTITION]; /* priority */
-		job.priority = row[JOB_REQ_PRIORITY];   /* qos */
-		job.qos = row[JOB_REQ_QOS];             /* cpus_req */
-		job.req_cpus = row[JOB_REQ_REQ_CPUS];   /* mem_req */
-		job.req_mem = row[JOB_REQ_REQ_MEM];     /* id_resv */
-		job.resvid = row[JOB_REQ_RESVID];       /* partition */
+		job.partition = row[JOB_REQ_PARTITION];
+		job.priority = row[JOB_REQ_PRIORITY];
+		job.qos = row[JOB_REQ_QOS];
+		job.req_cpus = row[JOB_REQ_REQ_CPUS];
+		job.req_mem = row[JOB_REQ_REQ_MEM];
+		job.resvid = row[JOB_REQ_RESVID];
 		job.start = row[JOB_REQ_START];
 		job.state = row[JOB_REQ_STATE];
 		job.submit = row[JOB_REQ_SUBMIT];
 		job.suspended = row[JOB_REQ_SUSPENDED];
 		job.track_steps = row[JOB_REQ_TRACKSTEPS];
+		job.tres_alloc_str = row[JOB_REQ_TRESA];
+		job.tres_req_str = row[JOB_REQ_TRESR];
 		job.uid = row[JOB_REQ_UID];
 		job.wckey = row[JOB_REQ_WCKEY];
 		job.wckey_id = row[JOB_REQ_WCKEYID];
 
 		_pack_local_job(&job, SLURM_PROTOCOL_VERSION, buffer);
 	}
-	mysql_free_result(result);
-
-//	END_TIMER2("step query");
-//	info("event query took %s", TIME_STR);
-
-	error_code = archive_write_file(buffer, cluster_name,
-					period_start, period_end,
-					arch_dir, "job", archive_period);
-	free_buf(buffer);
-
-	if (error_code != SLURM_SUCCESS)
-		return error_code;
 
-	return cnt;
+	return buffer;
 }
 
 /* returns sql statement from archived data or NULL on error */
@@ -1720,7 +1841,6 @@ static char *_load_jobs(uint16_t rpc_version, Buf buffer,
 		xstrfmtcat(insert, format,
 			   object.account,
 			   object.array_max_tasks,
-			   object.alloc_cpus,
 			   object.alloc_nodes,
 			   object.associd,
 			   object.array_jobid,
@@ -1733,7 +1853,7 @@ static char *_load_jobs(uint16_t rpc_version, Buf buffer,
 			   object.eligible,
 			   object.end,
 			   object.gid,
-			   object.id,
+			   object.job_db_inx,
 			   object.jobid,
 			   object.kill_requid,
 			   object.name,
@@ -1752,8 +1872,14 @@ static char *_load_jobs(uint16_t rpc_version, Buf buffer,
 			   object.track_steps,
 			   object.uid,
 			   object.wckey,
-			   object.wckey_id);
+			   object.wckey_id,
+			   object.tres_alloc_str,
+			   object.tres_req_str);
 
+		if (rpc_version < SLURM_15_08_PROTOCOL_VERSION) {
+			xfree(object.tres_alloc_str);
+			xfree(object.tres_req_str);
+		}
 	}
 //	END_TIMER2("step query");
 //	info("job query took %s", TIME_STR);
@@ -1773,46 +1899,12 @@ _init_local_job(local_job_t *job)
 	xstrcat(job->array_taskid, "4294967294");
 }
 
-/* returns count of resvations archived or SLURM_ERROR on error */
-static uint32_t _archive_resvs(mysql_conn_t *mysql_conn, char *cluster_name,
-			       time_t period_end, char *arch_dir,
-			       uint32_t archive_period)
+static Buf _pack_archive_resvs(MYSQL_RES *result, char *cluster_name,
+			       uint32_t cnt, time_t *period_start)
 {
-	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
-	char *tmp = NULL, *query = NULL;
-	time_t period_start = 0;
-	uint32_t cnt = 0;
-	local_resv_t resv;
 	Buf buffer;
-	int error_code = 0, i = 0;
-
-	xfree(tmp);
-	xstrfmtcat(tmp, "%s", resv_req_inx[0]);
-	for(i=1; i<RESV_REQ_COUNT; i++) {
-		xstrfmtcat(tmp, ", %s", resv_req_inx[i]);
-	}
-
-	/* get all the events started before this time listed */
-	query = xstrdup_printf("select %s from \"%s_%s\" where "
-			       "time_start <= %ld "
-			       "&& time_end != 0 order by time_start asc",
-			       tmp, cluster_name, resv_table, period_end);
-	xfree(tmp);
-
-//	START_TIMER;
-	if (debug_flags & DEBUG_FLAG_DB_USAGE)
-		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
-	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
-		xfree(query);
-		return SLURM_ERROR;
-	}
-	xfree(query);
-
-	if (!(cnt = mysql_num_rows(result))) {
-		mysql_free_result(result);
-		return 0;
-	}
+	local_resv_t resv;
 
 	buffer = init_buf(high_buffer_size);
 	pack16(SLURM_PROTOCOL_VERSION, buffer);
@@ -1822,13 +1914,12 @@ static uint32_t _archive_resvs(mysql_conn_t *mysql_conn, char *cluster_name,
 	pack32(cnt, buffer);
 
 	while ((row = mysql_fetch_row(result))) {
-		if (!period_start)
-			period_start = slurm_atoul(row[RESV_REQ_START]);
+		if (period_start && !*period_start)
+			*period_start = slurm_atoul(row[RESV_REQ_START]);
 
 		memset(&resv, 0, sizeof(local_resv_t));
 
 		resv.assocs = row[RESV_REQ_ASSOCS];
-		resv.cpus = row[RESV_REQ_CPUS];
 		resv.flags = row[RESV_REQ_FLAGS];
 		resv.id = row[RESV_REQ_ID];
 		resv.name = row[RESV_REQ_NAME];
@@ -1836,23 +1927,12 @@ static uint32_t _archive_resvs(mysql_conn_t *mysql_conn, char *cluster_name,
 		resv.node_inx = row[RESV_REQ_NODE_INX];
 		resv.time_end = row[RESV_REQ_END];
 		resv.time_start = row[RESV_REQ_START];
+		resv.tres_str = row[RESV_REQ_TRES];
 
 		_pack_local_resv(&resv, SLURM_PROTOCOL_VERSION, buffer);
 	}
-	mysql_free_result(result);
 
-//	END_TIMER2("step query");
-//	info("event query took %s", TIME_STR);
-
-	error_code = archive_write_file(buffer, cluster_name,
-					period_start, period_end,
-					arch_dir, "resv", archive_period);
-	free_buf(buffer);
-
-	if (error_code != SLURM_SUCCESS)
-		return error_code;
-
-	return cnt;
+	return buffer;
 }
 
 /* returns sql statement from archived data or NULL on error */
@@ -1881,19 +1961,23 @@ static char *_load_resvs(uint16_t rpc_version, Buf buffer,
 			xfree(insert);
 			break;
 		}
+
 		if (i)
 			xstrcat(insert, ", ");
 
 		xstrfmtcat(insert, format,
 			   object.id,
 			   object.assocs,
-			   object.cpus,
 			   object.flags,
+			   object.tres_str,
 			   object.nodes,
 			   object.node_inx,
 			   object.name,
 			   object.time_start,
 			   object.time_end);
+
+		if (rpc_version < SLURM_15_08_PROTOCOL_VERSION)
+			xfree(object.tres_str);
 	}
 //	END_TIMER2("step query");
 //	info("resv query took %s", TIME_STR);
@@ -1902,46 +1986,12 @@ static char *_load_resvs(uint16_t rpc_version, Buf buffer,
 	return insert;
 }
 
-/* returns count of steps archived or SLURM_ERROR on error */
-static uint32_t _archive_steps(mysql_conn_t *mysql_conn, char *cluster_name,
-			       time_t period_end, char *arch_dir,
-			       uint32_t archive_period)
+static Buf _pack_archive_steps(MYSQL_RES *result, char *cluster_name,
+			       uint32_t cnt, time_t *period_start)
 {
-	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
-	char *tmp = NULL, *query = NULL;
-	time_t period_start = 0;
-	uint32_t cnt = 0;
-	local_step_t step;
 	Buf buffer;
-	int error_code = 0, i = 0;
-
-	xfree(tmp);
-	xstrfmtcat(tmp, "%s", step_req_inx[0]);
-	for(i=1; i<STEP_REQ_COUNT; i++) {
-		xstrfmtcat(tmp, ", %s", step_req_inx[i]);
-	}
-
-	/* get all the events started before this time listed */
-	query = xstrdup_printf("select %s from \"%s_%s\" where "
-			       "time_start <= %ld && time_end != 0 "
-			       "&& !deleted order by time_start asc",
-			       tmp, cluster_name, step_table, period_end);
-	xfree(tmp);
-
-//	START_TIMER;
-	if (debug_flags & DEBUG_FLAG_DB_USAGE)
-		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
-	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
-		xfree(query);
-		return SLURM_ERROR;
-	}
-	xfree(query);
-
-	if (!(cnt = mysql_num_rows(result))) {
-		mysql_free_result(result);
-		return 0;
-	}
+	local_step_t step;
 
 	buffer = init_buf(high_buffer_size);
 	pack16(SLURM_PROTOCOL_VERSION, buffer);
@@ -1951,8 +2001,8 @@ static uint32_t _archive_steps(mysql_conn_t *mysql_conn, char *cluster_name,
 	pack32(cnt, buffer);
 
 	while ((row = mysql_fetch_row(result))) {
-		if (!period_start)
-			period_start = slurm_atoul(row[STEP_REQ_START]);
+		if (period_start && !*period_start)
+			*period_start = slurm_atoul(row[STEP_REQ_START]);
 
 		memset(&step, 0, sizeof(local_step_t));
 
@@ -1965,8 +2015,7 @@ static uint32_t _archive_steps(mysql_conn_t *mysql_conn, char *cluster_name,
 		step.ave_rss = row[STEP_REQ_AVE_RSS];
 		step.ave_vsize = row[STEP_REQ_AVE_VSIZE];
 		step.exit_code = row[STEP_REQ_EXIT_CODE];
-		step.cpus = row[STEP_REQ_CPUS];
-		step.id = row[STEP_REQ_ID];
+		step.job_db_inx = row[STEP_REQ_DB_INX];
 		step.kill_requid = row[STEP_REQ_KILL_REQUID];
 		step.max_disk_read = row[STEP_REQ_MAX_DISK_READ];
 		step.max_disk_read_node = row[STEP_REQ_MAX_DISK_READ_NODE];
@@ -1993,32 +2042,23 @@ static uint32_t _archive_steps(mysql_conn_t *mysql_conn, char *cluster_name,
 		step.period_end = row[STEP_REQ_END];
 		step.period_start = row[STEP_REQ_START];
 		step.period_suspended = row[STEP_REQ_SUSPENDED];
-		step.req_cpufreq = row[STEP_REQ_REQ_CPUFREQ];
+		step.req_cpufreq_min = row[STEP_REQ_REQ_CPUFREQ_MIN];
+		step.req_cpufreq_max = row[STEP_REQ_REQ_CPUFREQ_MAX];
+		step.req_cpufreq_gov = row[STEP_REQ_REQ_CPUFREQ_GOV];
 		step.state = row[STEP_REQ_STATE];
 		step.stepid = row[STEP_REQ_STEPID];
 		step.sys_sec = row[STEP_REQ_SYS_SEC];
 		step.sys_usec = row[STEP_REQ_SYS_USEC];
 		step.tasks = row[STEP_REQ_TASKS];
 		step.task_dist = row[STEP_REQ_TASKDIST];
+		step.tres_alloc_str = row[STEP_REQ_TRES];
 		step.user_sec = row[STEP_REQ_USER_SEC];
 		step.user_usec = row[STEP_REQ_USER_USEC];
 
 		_pack_local_step(&step, SLURM_PROTOCOL_VERSION, buffer);
 	}
-	mysql_free_result(result);
 
-//	END_TIMER2("step query");
-//	info("event query took %s", TIME_STR);
-
-	error_code = archive_write_file(buffer, cluster_name,
-					period_start, period_end,
-					arch_dir, "step", archive_period);
-	free_buf(buffer);
-
-	if (error_code != SLURM_SUCCESS)
-		return error_code;
-
-	return cnt;
+	return buffer;
 }
 
 /* returns sql statement from archived data or NULL on error */
@@ -2027,18 +2067,18 @@ static char *_load_steps(uint16_t rpc_version, Buf buffer,
 {
 	char *insert = NULL, *format = NULL;
 	local_step_t object;
-	int i = 0;
+	int i;
 
 	xstrfmtcat(insert, "insert into \"%s_%s\" (%s",
 		   cluster_name, step_table, step_req_inx[0]);
 	xstrcat(format, "('%s'");
-	for(i=1; i<STEP_REQ_COUNT; i++) {
+	for (i=1; i<STEP_REQ_COUNT; i++) {
 		xstrfmtcat(insert, ", %s", step_req_inx[i]);
 		xstrcat(format, ", '%s'");
 	}
 	xstrcat(insert, ") values ");
 	xstrcat(format, ")");
-	for(i=0; i<rec_cnt; i++) {
+	for (i=0; i<rec_cnt; i++) {
 		memset(&object, 0, sizeof(local_step_t));
 		if (_unpack_local_step(&object, rpc_version, buffer)
 		    != SLURM_SUCCESS) {
@@ -2047,11 +2087,12 @@ static char *_load_steps(uint16_t rpc_version, Buf buffer,
 			xfree(insert);
 			break;
 		}
+
 		if (i)
 			xstrcat(insert, ", ");
 
 		xstrfmtcat(insert, format,
-			   object.id,
+			   object.job_db_inx,
 			   object.stepid,
 			   object.period_start,
 			   object.period_end,
@@ -2063,7 +2104,6 @@ static char *_load_steps(uint16_t rpc_version, Buf buffer,
 			   object.kill_requid,
 			   object.exit_code,
 			   object.nodes,
-			   object.cpus,
 			   object.tasks,
 			   object.task_dist,
 			   object.user_sec,
@@ -2088,7 +2128,7 @@ static char *_load_steps(uint16_t rpc_version, Buf buffer,
 			   object.ave_cpu,
 			   object.act_cpufreq,
 			   object.consumed_energy,
-			   object.req_cpufreq,
+			   object.req_cpufreq_max,
 			   object.max_disk_read,
 			   object.max_disk_read_task,
 			   object.max_disk_read_node,
@@ -2096,8 +2136,13 @@ static char *_load_steps(uint16_t rpc_version, Buf buffer,
 			   object.max_disk_write,
 			   object.max_disk_write_task,
 			   object.max_disk_write_node,
-			   object.ave_disk_write);
+			   object.ave_disk_write,
+			   object.req_cpufreq_min,
+			   object.req_cpufreq_gov,
+			   object.tres_alloc_str);
 
+		if (rpc_version < SLURM_15_08_PROTOCOL_VERSION)
+			xfree(object.tres_alloc_str);
 	}
 //	END_TIMER2("step query");
 //	info("step query took %s", TIME_STR);
@@ -2106,46 +2151,12 @@ static char *_load_steps(uint16_t rpc_version, Buf buffer,
 	return insert;
 }
 
-/* returns count of events archived or SLURM_ERROR on error */
-static uint32_t _archive_suspend(mysql_conn_t *mysql_conn, char *cluster_name,
-				 time_t period_end, char *arch_dir,
-				 uint32_t archive_period)
+static Buf _pack_archive_suspends(MYSQL_RES *result, char *cluster_name,
+				  uint32_t cnt, time_t *period_start)
 {
-	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
-	char *tmp = NULL, *query = NULL;
-	time_t period_start = 0;
-	uint32_t cnt = 0;
-	local_suspend_t suspend;
 	Buf buffer;
-	int error_code = 0, i = 0;
-
-	xfree(tmp);
-	xstrfmtcat(tmp, "%s", suspend_req_inx[0]);
-	for(i=1; i<SUSPEND_REQ_COUNT; i++) {
-		xstrfmtcat(tmp, ", %s", suspend_req_inx[i]);
-	}
-
-	/* get all the events started before this time listed */
-	query = xstrdup_printf("select %s from \"%s_%s\" where "
-			       "time_start <= %ld && time_end != 0 "
-			       "order by time_start asc",
-			       tmp, cluster_name, suspend_table, period_end);
-	xfree(tmp);
-
-//	START_TIMER;
-	if (debug_flags & DEBUG_FLAG_DB_USAGE)
-		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
-	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
-		xfree(query);
-		return SLURM_ERROR;
-	}
-	xfree(query);
-
-	if (!(cnt = mysql_num_rows(result))) {
-		mysql_free_result(result);
-		return 0;
-	}
+	local_suspend_t suspend;
 
 	buffer = init_buf(high_buffer_size);
 	pack16(SLURM_PROTOCOL_VERSION, buffer);
@@ -2155,34 +2166,23 @@ static uint32_t _archive_suspend(mysql_conn_t *mysql_conn, char *cluster_name,
 	pack32(cnt, buffer);
 
 	while ((row = mysql_fetch_row(result))) {
-		if (!period_start)
-			period_start = slurm_atoul(row[SUSPEND_REQ_START]);
+		if (period_start && !*period_start)
+			*period_start = slurm_atoul(row[SUSPEND_REQ_START]);
 
 		memset(&suspend, 0, sizeof(local_suspend_t));
 
-		suspend.id = row[SUSPEND_REQ_ID];
+		suspend.job_db_inx = row[SUSPEND_REQ_DB_INX];
 		suspend.associd = row[SUSPEND_REQ_ASSOCID];
 		suspend.period_start = row[SUSPEND_REQ_START];
 		suspend.period_end = row[SUSPEND_REQ_END];
 
 		_pack_local_suspend(&suspend, SLURM_PROTOCOL_VERSION, buffer);
 	}
-	mysql_free_result(result);
 
-//	END_TIMER2("step query");
-//	info("event query took %s", TIME_STR);
-
-	error_code = archive_write_file(buffer, cluster_name,
-					period_start, period_end,
-					arch_dir, "suspend", archive_period);
-	free_buf(buffer);
-
-	if (error_code != SLURM_SUCCESS)
-		return error_code;
-
-	return cnt;
+	return buffer;
 }
 
+
 /* returns sql statement from archived data or NULL on error */
 static char *_load_suspend(uint16_t rpc_version, Buf buffer,
 			   char *cluster_name, uint32_t rec_cnt)
@@ -2209,15 +2209,15 @@ static char *_load_suspend(uint16_t rpc_version, Buf buffer,
 			xfree(insert);
 			break;
 		}
+
 		if (i)
 			xstrcat(insert, ", ");
 
 		xstrfmtcat(insert, format,
-			   object.id,
+			   object.job_db_inx,
 			   object.associd,
 			   object.period_start,
 			   object.period_end);
-
 	}
 //	END_TIMER2("suspend query");
 //	info("suspend query took %s", TIME_STR);
@@ -2226,230 +2226,313 @@ static char *_load_suspend(uint16_t rpc_version, Buf buffer,
 	return insert;
 }
 
-static int _execute_archive(mysql_conn_t *mysql_conn,
-			    char *cluster_name,
-			    slurmdb_archive_cond_t *arch_cond)
+/* returns count of events archived or SLURM_ERROR on error */
+static uint32_t _archive_table(purge_type_t type, mysql_conn_t *mysql_conn,
+			       char *cluster_name, time_t period_end,
+			       char *arch_dir, uint32_t archive_period)
 {
-	int rc = SLURM_SUCCESS;
-	char *query = NULL;
-	time_t curr_end;
-	time_t last_submit = time(NULL);
-
-	if (arch_cond->archive_script)
-		return archive_run_script(arch_cond, cluster_name, last_submit);
-	else if (!arch_cond->archive_dir) {
-		error("No archive dir given, can't process");
-		return SLURM_ERROR;
-	}
-
-	if (arch_cond->purge_event != NO_VAL) {
-		/* remove all data from event table that was older than
-		 * period_start * arch_cond->purge_event.
-		 */
-		if (!(curr_end = archive_setup_end_time(
-			      last_submit, arch_cond->purge_event))) {
-			error("Parsing purge event");
-			return SLURM_ERROR;
-		}
+	MYSQL_RES *result = NULL;
+	char *cols = NULL, *query = NULL;
+	time_t period_start = 0;
+	uint32_t cnt = 0;
+	Buf buffer;
+	int error_code = 0;
+	static Buf (*pack_func)(MYSQL_RES *result, char *cluster_name,
+				uint32_t cnt, time_t *period_start);
 
-		debug4("Purging event entries before %ld for %s",
-		       curr_end, cluster_name);
+	cols = _get_archive_columns(type);
 
-		if (SLURMDB_PURGE_ARCHIVE_SET(arch_cond->purge_event)) {
-			rc = _archive_events(mysql_conn, cluster_name,
-					     curr_end, arch_cond->archive_dir,
-					     arch_cond->purge_event);
-			if (!rc)
-				goto exit_events;
-			else if (rc == SLURM_ERROR)
-				return rc;
-		}
-		query = xstrdup_printf("delete from \"%s_%s\" where "
+	switch (type) {
+	case PURGE_EVENT:
+		pack_func = &_pack_archive_events;
+		query = xstrdup_printf("select %s from \"%s_%s\" where "
 				       "time_start <= %ld && time_end != 0 "
-				       "LIMIT %d",
-				       cluster_name, event_table, curr_end,
-				       MAX_PURGE_LIMIT);
-		if (debug_flags & DEBUG_FLAG_DB_USAGE)
-			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
+				       "order by time_start asc",
+				       cols, cluster_name, event_table,
+				       period_end);
+		break;
+	case PURGE_SUSPEND:
+		pack_func = &_pack_archive_suspends;
+		query = xstrdup_printf("select %s from \"%s_%s\" where "
+				       "time_start <= %ld && time_end != 0 "
+				       "order by time_start asc",
+				       cols, cluster_name, suspend_table,
+				       period_end);
+		break;
+	case PURGE_RESV:
+		pack_func = &_pack_archive_resvs;
+		query = xstrdup_printf("select %s from \"%s_%s\" where "
+				       "time_start <= %ld && time_end != 0 "
+				       "order by time_start asc",
+				       cols, cluster_name, resv_table,
+				       period_end);
+		break;
+	case PURGE_JOB:
+		pack_func = &_pack_archive_jobs;
+		query = xstrdup_printf("select %s from \"%s_%s\" where "
+				       "time_submit < %ld && time_end != 0 "
+				       "&& !deleted order by time_submit asc",
+				       cols, cluster_name, job_table,
+				       period_end);
+		break;
+	case PURGE_STEP:
+		pack_func = &_pack_archive_steps;
+		query = xstrdup_printf("select %s from \"%s_%s\" where "
+				       "time_start <= %ld && time_end != 0 "
+				       "&& !deleted order by time_start asc",
+				       cols, cluster_name, step_table,
+				       period_end);
+		break;
+	default:
+		fatal("Unknown purge type: %d", type);
+		return SLURM_ERROR;
+	}
 
-		while ((rc = mysql_db_delete_affected_rows(
-						mysql_conn, query)) > 0);
+	xfree(cols);
 
+	if (debug_flags & DEBUG_FLAG_DB_ARCHIVE)
+		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
+	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
 		xfree(query);
-		if (rc != SLURM_SUCCESS) {
-			error("Couldn't remove old event data");
-			return SLURM_ERROR;
-		}
+		return SLURM_ERROR;
+	}
+	xfree(query);
+
+	if (!(cnt = mysql_num_rows(result))) {
+		mysql_free_result(result);
+		return 0;
 	}
 
-exit_events:
+	buffer = (*pack_func)(result, cluster_name, cnt, &period_start);
+	mysql_free_result(result);
 
-	if (arch_cond->purge_suspend != NO_VAL) {
-		/* remove all data from suspend table that was older than
-		 * period_start * arch_cond->purge_suspend.
-		 */
-		if (!(curr_end = archive_setup_end_time(
-			      last_submit, arch_cond->purge_suspend))) {
-			error("Parsing purge suspend");
-			return SLURM_ERROR;
-		}
+	error_code = archive_write_file(buffer, cluster_name,
+					period_start, period_end,
+					arch_dir, purge_type_str[type],
+					archive_period);
+	free_buf(buffer);
 
-		debug4("Purging suspend entries before %ld for %s",
-		       curr_end, cluster_name);
+	if (error_code != SLURM_SUCCESS)
+		return error_code;
 
-		if (SLURMDB_PURGE_ARCHIVE_SET(arch_cond->purge_suspend)) {
-			rc = _archive_suspend(mysql_conn, cluster_name,
-					      curr_end, arch_cond->archive_dir,
-					      arch_cond->purge_suspend);
-			if (!rc)
-				goto exit_suspend;
-			else if (rc == SLURM_ERROR)
-				return rc;
-		}
-		query = xstrdup_printf("delete from \"%s_%s\" where "
-				       "time_start <= %ld && time_end != 0 "
-				       "LIMIT %d",
-				       cluster_name, suspend_table, curr_end,
-				       MAX_PURGE_LIMIT);
-		if (debug_flags & DEBUG_FLAG_DB_USAGE)
-			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
+	return cnt;
+}
 
-		while ((rc = mysql_db_delete_affected_rows(
-						mysql_conn, query)) > 0);
-		xfree(query);
-		if (rc != SLURM_SUCCESS) {
-			error("Couldn't remove old suspend data");
-			return SLURM_ERROR;
-		}
-	}
+uint32_t _get_begin_next_month(time_t start)
+{
+	struct tm parts;
 
-exit_suspend:
+	slurm_localtime_r(&start, &parts);
 
-	if (arch_cond->purge_step != NO_VAL) {
-		/* remove all data from step table that was older than
-		 * start * arch_cond->purge_step.
-		 */
-		if (!(curr_end = archive_setup_end_time(
-			      last_submit, arch_cond->purge_step))) {
-			error("Parsing purge step");
-			return SLURM_ERROR;
-		}
+	parts.tm_mon++;
+	parts.tm_mday  = 1;
+	parts.tm_hour  = 0;
+	parts.tm_min   = 0;
+	parts.tm_sec   = 0;
+	parts.tm_isdst = -1;
 
-		debug4("Purging step entries before %ld for %s",
-		       curr_end, cluster_name);
+	if (parts.tm_mon > 11) {
+		parts.tm_year++;
+		parts.tm_mon = 0;
+	}
 
-		if (SLURMDB_PURGE_ARCHIVE_SET(arch_cond->purge_step)) {
-			rc = _archive_steps(mysql_conn, cluster_name,
-					    curr_end, arch_cond->archive_dir,
-					    arch_cond->purge_step);
-			if (!rc)
-				goto exit_steps;
-			else if (rc == SLURM_ERROR)
-				return rc;
-		}
+	return slurm_mktime(&parts);
+}
 
-		query = xstrdup_printf("delete from \"%s_%s\" where "
-				       "time_start <= %ld && time_end != 0 "
-				       "LIMIT %d",
-				       cluster_name, step_table, curr_end,
-				       MAX_PURGE_LIMIT);
-		if (debug_flags & DEBUG_FLAG_DB_USAGE)
-			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
+/* Get the oldest purgeable record.
+ * Returns SLURM_ERROR on a mysql error, 0 if no purgeable records are found,
+ * or 1 if a purgeable record is found.
+ */
+static int _get_oldest_record(mysql_conn_t *mysql_conn, char *cluster,
+				 char *table, char *col_name,
+				 time_t period_end, time_t *record_start)
+{
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	char *query = NULL;
 
-		while ((rc = mysql_db_delete_affected_rows(
-						mysql_conn, query)) > 0);
+	if (record_start == NULL)
+		return SLURM_ERROR;
 
+	/* get oldest record */
+	query = xstrdup_printf("select %s from \"%s_%s\" where %s <= %ld "
+			       "&& time_end != 0 order by %s asc LIMIT 1",
+			       col_name, cluster, table, col_name, period_end,
+			       col_name);
+
+	if (debug_flags & DEBUG_FLAG_DB_ARCHIVE)
+		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
+	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
 		xfree(query);
-		if (rc != SLURM_SUCCESS) {
-			error("Couldn't remove old step data");
-			return SLURM_ERROR;
-		}
+		return SLURM_ERROR;
 	}
-exit_steps:
+	xfree(query);
 
-	if (arch_cond->purge_job != NO_VAL) {
-		/* remove all data from job table that was older than
-		 * last_submit * arch_cond->purge_job.
-		 */
-		if (!(curr_end = archive_setup_end_time(
-			      last_submit, arch_cond->purge_job))) {
-			error("Parsing purge job");
-			return SLURM_ERROR;
-		}
+	if (!(mysql_num_rows(result))) {
+		mysql_free_result(result);
+		return 0;
+	}
+	row = mysql_fetch_row(result);
+	*record_start = slurm_atoul(row[0]);
+	mysql_free_result(result);
 
-		debug4("Purging job entries before %ld for %s",
-		       curr_end, cluster_name);
+	return 1; /* found one record */
+}
 
-		if (SLURMDB_PURGE_ARCHIVE_SET(arch_cond->purge_job)) {
-			rc = _archive_jobs(mysql_conn, cluster_name,
-					   curr_end, arch_cond->archive_dir,
-					   arch_cond->purge_job);
-			if (!rc)
-				goto exit_jobs;
+/* Archive and purge a table.
+ *
+ * Returns SLURM_ERROR on error and SLURM_SUCCESS on success.
+ */
+static int _archive_purge_table(purge_type_t purge_type,
+				mysql_conn_t *mysql_conn, char *cluster_name,
+				slurmdb_archive_cond_t *arch_cond)
+{
+	int      rc          = SLURM_SUCCESS;
+	uint32_t purge_attr  = 0;
+	time_t   last_submit = time(NULL);
+	time_t   curr_end    = 0, tmp_end = 0, record_start = 0;
+	char    *query = NULL, *sql_table = NULL,
+		*col_name = NULL;
+	uint32_t tmp_archive_period;
+
+	/* FIXME: the cluster usage tables need to get
+	   purged here as well; they don't need to be
+	   archived since they can be recreated from the archive.
+	*/
+	switch (purge_type) {
+	case PURGE_EVENT:
+		purge_attr = arch_cond->purge_event;
+		sql_table  = event_table;
+		col_name   = event_req_inx[EVENT_REQ_START];
+		break;
+	case PURGE_SUSPEND:
+		purge_attr = arch_cond->purge_suspend;
+		sql_table  = suspend_table;
+		col_name   = suspend_req_inx[SUSPEND_REQ_START];
+		break;
+	case PURGE_RESV:
+		purge_attr = arch_cond->purge_resv;
+		sql_table  = resv_table;
+		col_name   = step_req_inx[STEP_REQ_START];
+		break;
+	case PURGE_JOB:
+		purge_attr = arch_cond->purge_job;
+		sql_table  = job_table;
+		col_name   = job_req_inx[JOB_REQ_SUBMIT];
+		break;
+	case PURGE_STEP:
+		purge_attr = arch_cond->purge_step;
+		sql_table  = step_table;
+		col_name   = step_req_inx[STEP_REQ_START];
+		break;
+	default:
+		fatal("Unknown purge type: %d", purge_type);
+		return SLURM_ERROR;
+	}
+
+	if (!(curr_end = archive_setup_end_time(last_submit, purge_attr))) {
+		error("Parsing purge %s", purge_type_str[purge_type]);
+		return SLURM_ERROR;
+	}
+
+	do {
+		rc = _get_oldest_record(mysql_conn, cluster_name, sql_table,
+					col_name, curr_end, &record_start);
+		if (!rc) /* no purgeable records found */
+			break;
+		else if (rc == SLURM_ERROR)
+			return rc;
+
+		tmp_archive_period = purge_attr;
+
+		if (curr_end - record_start > MAX_ARCHIVE_AGE) {
+			/* old stuff, catch up by archiving by month */
+			tmp_archive_period = SLURMDB_PURGE_MONTHS;
+			tmp_end = MIN(curr_end,
+				      _get_begin_next_month(record_start));
+		} else
+			tmp_end = curr_end;
+
+		if (debug_flags & DEBUG_FLAG_DB_ARCHIVE)
+			debug("Purging %s entries before %ld for %s",
+			      purge_type_str[purge_type],
+			      tmp_end, cluster_name);
+
+		if (SLURMDB_PURGE_ARCHIVE_SET(purge_attr)) {
+			rc = _archive_table(purge_type, mysql_conn,
+					    cluster_name, tmp_end,
+					    arch_cond->archive_dir,
+					    tmp_archive_period);
+			if (!rc) /* no records archived */
+				continue;
 			else if (rc == SLURM_ERROR)
 				return rc;
 		}
 
-		query = xstrdup_printf("delete from \"%s_%s\" "
-				       "where time_submit <= %ld "
-				       "&& time_end != 0 LIMIT %d",
-				       cluster_name, job_table, curr_end,
-				       MAX_PURGE_LIMIT);
-		if (debug_flags & DEBUG_FLAG_DB_USAGE)
+		query = xstrdup_printf("delete from \"%s_%s\" where "
+				       "%s <= %ld && time_end != 0 LIMIT %d",
+				       cluster_name, sql_table, col_name,
+				       tmp_end, MAX_PURGE_LIMIT);
+		if (debug_flags & DEBUG_FLAG_DB_ARCHIVE)
 			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
 
 		while ((rc = mysql_db_delete_affected_rows(
-						mysql_conn, query)) > 0);
+						mysql_conn,query)) > 0);
 
 		xfree(query);
 		if (rc != SLURM_SUCCESS) {
-			error("Couldn't remove old job data");
+			error("Couldn't remove old event data");
 			return SLURM_ERROR;
 		}
-	}
-exit_jobs:
+	} while (tmp_end < curr_end);
 
-	if (arch_cond->purge_resv != NO_VAL) {
-		/* remove all data from resv table that was older than
-		 * last_submit * arch_cond->purge_resv.
-		 */
-		if (!(curr_end = archive_setup_end_time(
-			      last_submit, arch_cond->purge_resv))) {
-			error("Parsing purge resv");
-			return SLURM_ERROR;
-		}
+	return SLURM_SUCCESS;
+}
 
-		debug4("Purging resv entries before %ld for %s",
-		       curr_end, cluster_name);
+static int _execute_archive(mysql_conn_t *mysql_conn,
+			    char *cluster_name,
+			    slurmdb_archive_cond_t *arch_cond)
+{
+	int rc = SLURM_SUCCESS;
+	time_t last_submit = time(NULL);
 
-		if (SLURMDB_PURGE_ARCHIVE_SET(arch_cond->purge_resv)) {
-			rc = _archive_resvs(mysql_conn, cluster_name,
-					    curr_end, arch_cond->archive_dir,
-					    arch_cond->purge_resv);
-			if (!rc)
-				goto exit_resvs;
-			else if (rc == SLURM_ERROR)
-				return rc;
-		}
+	if (arch_cond->archive_script)
+		return archive_run_script(arch_cond, cluster_name, last_submit);
+	else if (!arch_cond->archive_dir) {
+		error("No archive dir given, can't process");
+		return SLURM_ERROR;
+	}
 
-		query = xstrdup_printf("delete from \"%s_%s\" "
-				       "where time_start <= %ld "
-				       "&& time_end != 0 LIMIT %d",
-				       cluster_name, resv_table, curr_end,
-				       MAX_PURGE_LIMIT);
-		if (debug_flags & DEBUG_FLAG_DB_USAGE)
-			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
+	if (arch_cond->purge_event != NO_VAL) {
+		if ((rc = _archive_purge_table(PURGE_EVENT, mysql_conn,
+					       cluster_name, arch_cond)))
+			return rc;
+	}
 
-		while ((rc = mysql_db_delete_affected_rows(
-						mysql_conn, query)) > 0);
+	if (arch_cond->purge_suspend != NO_VAL) {
+		if ((rc = _archive_purge_table(PURGE_SUSPEND, mysql_conn,
+					       cluster_name, arch_cond)))
+			return rc;
+	}
 
-		xfree(query);
-		if (rc != SLURM_SUCCESS) {
-			error("Couldn't remove old resv data");
-			return SLURM_ERROR;
-		}
+	if (arch_cond->purge_step != NO_VAL) {
+		if ((rc = _archive_purge_table(PURGE_STEP, mysql_conn,
+					       cluster_name, arch_cond)))
+			return rc;
+	}
+
+	if (arch_cond->purge_job != NO_VAL) {
+		if ((rc = _archive_purge_table(PURGE_JOB, mysql_conn,
+					       cluster_name, arch_cond)))
+			return rc;
 	}
-exit_resvs:
+
+	if (arch_cond->purge_resv != NO_VAL) {
+		if ((rc = _archive_purge_table(PURGE_RESV, mysql_conn,
+					       cluster_name, arch_cond)))
+			return rc;
+	}
+
 	return SLURM_SUCCESS;
 }
 
@@ -2458,21 +2541,33 @@ extern int as_mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 {
 	int rc = SLURM_SUCCESS;
 	char *cluster_name = NULL;
-	List use_cluster_list = as_mysql_cluster_list;
+	List use_cluster_list;
+	bool new_cluster_list = false;
 	ListIterator itr = NULL;
 
-//	DEF_TIMERS;
-
 	if (!arch_cond) {
 		error("No arch_cond was given to archive from.  returning");
 		return SLURM_ERROR;
 	}
 
 	if (arch_cond->job_cond && arch_cond->job_cond->cluster_list
-	    && list_count(arch_cond->job_cond->cluster_list))
+	    && list_count(arch_cond->job_cond->cluster_list)) {
 		use_cluster_list = arch_cond->job_cond->cluster_list;
-	else
+	} else {
+		/* execute_archive may take a long time to run, so
+		 * don't keep the as_mysql_cluster_list_lock locked
+		 * the whole time, just copy the list and work off
+		 * that.
+		 */
+		new_cluster_list = true;
+		use_cluster_list = list_create(slurm_destroy_char);
 		slurm_mutex_lock(&as_mysql_cluster_list_lock);
+		itr = list_iterator_create(as_mysql_cluster_list);
+		while ((cluster_name = list_next(itr)))
+			list_append(use_cluster_list, xstrdup(cluster_name));
+		list_iterator_destroy(itr);
+		slurm_mutex_unlock(&as_mysql_cluster_list_lock);
+	}
 
 	itr = list_iterator_create(use_cluster_list);
 	while ((cluster_name = list_next(itr))) {
@@ -2481,8 +2576,9 @@ extern int as_mysql_jobacct_process_archive(mysql_conn_t *mysql_conn,
 			break;
 	}
 	list_iterator_destroy(itr);
-	if (use_cluster_list == as_mysql_cluster_list)
-		slurm_mutex_unlock(&as_mysql_cluster_list_lock);
+
+	if (new_cluster_list)
+		FREE_NULL_LIST(use_cluster_list);
 
 	return rc;
 }
@@ -2562,9 +2658,9 @@ extern int as_mysql_jobacct_process_archive_load(
 	buffer = create_buf(data, data_size);
 
 	safe_unpack16(&ver, buffer);
-	if (debug_flags & DEBUG_FLAG_DB_USAGE)
+	if (debug_flags & DEBUG_FLAG_DB_ARCHIVE)
 		DB_DEBUG(mysql_conn->conn,
-			 "Version in assoc_mgr_state header is %u", ver);
+			 "Version in archive header is %u", ver);
 	/* Don't verify the lower limit as we should be keeping all
 	   older versions around here just to support super old
 	   archive files since they don't get regenerated all the
@@ -2618,7 +2714,7 @@ got_sql:
 		error("No data to load");
 		return SLURM_ERROR;
 	}
-	if (debug_flags & DEBUG_FLAG_DB_USAGE)
+	if (debug_flags & DEBUG_FLAG_DB_ARCHIVE)
 		DB_DEBUG(mysql_conn->conn, "query\n%s", data);
 	error_code = mysql_db_query_check_after(mysql_conn, data);
 	xfree(data);
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_assoc.c b/src/plugins/accounting_storage/mysql/as_mysql_assoc.c
index f50b10ba3..1dc945ab5 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_assoc.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_assoc.c
@@ -49,19 +49,17 @@ char *assoc_req_inx[] = {
 	"acct",
 	"`partition`",
 	"shares",
-	"grp_cpu_mins",
-	"grp_cpu_run_mins",
-	"grp_cpus",
+	"grp_tres_mins",
+	"grp_tres_run_mins",
+	"grp_tres",
 	"grp_jobs",
-	"grp_mem",
-	"grp_nodes",
 	"grp_submit_jobs",
 	"grp_wall",
-	"max_cpu_mins_pj",
-	"max_cpu_run_mins",
-	"max_cpus_pj",
+	"max_tres_mins_pj",
+	"max_tres_run_mins",
+	"max_tres_pj",
+	"max_tres_pn",
 	"max_jobs",
-	"max_nodes_pj",
 	"max_submit_jobs",
 	"max_wall_pj",
 	"parent_acct",
@@ -78,19 +76,17 @@ enum {
 	ASSOC_REQ_ACCT,
 	ASSOC_REQ_PART,
 	ASSOC_REQ_FS,
-	ASSOC_REQ_GCM,
-	ASSOC_REQ_GCRM,
-	ASSOC_REQ_GC,
+	ASSOC_REQ_GTM,
+	ASSOC_REQ_GTRM,
+	ASSOC_REQ_GT,
 	ASSOC_REQ_GJ,
-	ASSOC_REQ_GMEM,
-	ASSOC_REQ_GN,
 	ASSOC_REQ_GSJ,
 	ASSOC_REQ_GW,
-	ASSOC_REQ_MCMPJ,
-	ASSOC_REQ_MCRM,
-	ASSOC_REQ_MCPJ,
+	ASSOC_REQ_MTMPJ,
+	ASSOC_REQ_MTRM,
+	ASSOC_REQ_MTPJ,
+	ASSOC_REQ_MTPN,
 	ASSOC_REQ_MJ,
-	ASSOC_REQ_MNPJ,
 	ASSOC_REQ_MSJ,
 	ASSOC_REQ_MWPJ,
 	ASSOC_REQ_PARENT,
@@ -102,19 +98,19 @@ enum {
 };
 
 static char *get_parent_limits_select =
-	"select @par_id, @mj, @msj, @mcpj, "
-	"@mnpj, @mwpj, @mcmpj, @mcrm, "
+	"select @par_id, @mj, @msj, "
+	"@mwpj, @mtpj, @mtpn, @mtmpj, @mtrm, "
 	"@def_qos_id, @qos, @delta_qos;";
 
 enum {
 	ASSOC2_REQ_PARENT_ID,
 	ASSOC2_REQ_MJ,
 	ASSOC2_REQ_MSJ,
-	ASSOC2_REQ_MCPJ,
-	ASSOC2_REQ_MNPJ,
 	ASSOC2_REQ_MWPJ,
-	ASSOC2_REQ_MCMPJ,
-	ASSOC2_REQ_MCRM,
+	ASSOC2_REQ_MTPJ,
+	ASSOC2_REQ_MTPN,
+	ASSOC2_REQ_MTMPJ,
+	ASSOC2_REQ_MTRM,
 	ASSOC2_REQ_DEF_QOS,
 	ASSOC2_REQ_QOS,
 	ASSOC2_REQ_DELTA_QOS,
@@ -146,6 +142,13 @@ static char *massoc_req_inx[] = {
 	"lft",
 	"rgt",
 	"qos",
+	"grp_tres_mins",
+	"grp_tres_run_mins",
+	"grp_tres",
+	"max_tres_mins_pj",
+	"max_tres_run_mins",
+	"max_tres_pj",
+	"max_tres_pn",
 };
 
 enum {
@@ -157,6 +160,13 @@ enum {
 	MASSOC_LFT,
 	MASSOC_RGT,
 	MASSOC_QOS,
+	MASSOC_GTM,
+	MASSOC_GTRM,
+	MASSOC_GT,
+	MASSOC_MTMPJ,
+	MASSOC_MTRM,
+	MASSOC_MTPJ,
+	MASSOC_MTPN,
 	MASSOC_COUNT
 };
 
@@ -183,8 +193,8 @@ enum {
 
 static int _assoc_sort_cluster(void *r1, void *r2)
 {
-	slurmdb_association_rec_t *rec_a = *(slurmdb_association_rec_t **)r1;
-	slurmdb_association_rec_t *rec_b = *(slurmdb_association_rec_t **)r2;
+	slurmdb_assoc_rec_t *rec_a = *(slurmdb_assoc_rec_t **)r1;
+	slurmdb_assoc_rec_t *rec_b = *(slurmdb_assoc_rec_t **)r2;
 	int diff;
 
 	diff = strcmp(rec_a->cluster, rec_b->cluster);
@@ -200,7 +210,7 @@ static int _assoc_sort_cluster(void *r1, void *r2)
  * changed while running the function.
  */
 static int _reset_default_assoc(mysql_conn_t *mysql_conn,
-				slurmdb_association_rec_t *assoc,
+				slurmdb_assoc_rec_t *assoc,
 				char **query,
 				bool add_to_update)
 {
@@ -241,9 +251,9 @@ static int _reset_default_assoc(mysql_conn_t *mysql_conn,
 		xfree(sel_query);
 
 		while ((row = mysql_fetch_row(result))) {
-			slurmdb_association_rec_t *mod_assoc = xmalloc(
-				sizeof(slurmdb_association_rec_t));
-			slurmdb_init_association_rec(mod_assoc, 0);
+			slurmdb_assoc_rec_t *mod_assoc = xmalloc(
+				sizeof(slurmdb_assoc_rec_t));
+			slurmdb_init_assoc_rec(mod_assoc, 0);
 
 			mod_assoc->cluster = xstrdup(assoc->cluster);
 			mod_assoc->id = slurm_atoul(row[0]);
@@ -252,7 +262,7 @@ static int _reset_default_assoc(mysql_conn_t *mysql_conn,
 					      SLURMDB_MODIFY_ASSOC,
 					      mod_assoc)
 			    != SLURM_SUCCESS) {
-				slurmdb_destroy_association_rec(mod_assoc);
+				slurmdb_destroy_assoc_rec(mod_assoc);
 				error("couldn't add to the update list");
 				rc = SLURM_ERROR;
 				break;
@@ -264,6 +274,66 @@ end_it:
 	return rc;
 }
 
+/* assoc_mgr_lock_t should be clear before coming in here. */
+static int _check_coord_qos(mysql_conn_t *mysql_conn, char *cluster_name,
+			    char *account, char *coord_name, List qos_list)
+{
+	char *query;
+	bitstr_t *request_qos, *valid_qos;
+	MYSQL_RES *result;
+	MYSQL_ROW row;
+	int rc = SLURM_SUCCESS;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
+
+	if (!qos_list || !list_count(qos_list))
+		return SLURM_SUCCESS;
+
+	/* If there is a variable cleared here we need to make
+	   sure we get the parent's information, if any. */
+	query = xstrdup_printf(
+		"call get_coord_qos('%s', '%s', '%s', '%s');",
+		assoc_table, account,
+		cluster_name, coord_name);
+	debug4("%d(%s:%d) query\n%s",
+	       mysql_conn->conn, THIS_FILE, __LINE__, query);
+	if (!(result = mysql_db_query_ret(mysql_conn, query, 1))) {
+		xfree(query);
+		return SLURM_ERROR;
+	}
+	xfree(query);
+
+	if (!(row = mysql_fetch_row(result)) || !row[0]) {
+		mysql_free_result(result);
+		return SLURM_ERROR;
+	}
+
+	/* First set the values of the valid ones this coordinator has
+	   access to.
+	*/
+
+	assoc_mgr_lock(&locks);
+	valid_qos = bit_alloc(g_qos_count);
+	request_qos = bit_alloc(g_qos_count);
+	assoc_mgr_unlock(&locks);
+
+	set_qos_bitstr_from_string(valid_qos, row[0]);
+
+	mysql_free_result(result);
+
+	/* Now set the ones they are requesting */
+	set_qos_bitstr_from_list(request_qos, qos_list);
+
+	/* If they are authorized their list should be in the super set */
+	if (!bit_super_set(request_qos, valid_qos))
+		rc = SLURM_ERROR;
+
+	FREE_NULL_BITMAP(valid_qos);
+	FREE_NULL_BITMAP(request_qos);
+
+	return rc;
+}
+
 /* This needs to happen to make since 2.1 code doesn't have enough
  * smarts to figure out it isn't adding a default account if just
  * adding an association to the mix.
@@ -568,7 +638,7 @@ static uint32_t _get_parent_id(
 }
 
 static int _set_assoc_lft_rgt(
-	mysql_conn_t *mysql_conn, slurmdb_association_rec_t *assoc)
+	mysql_conn_t *mysql_conn, slurmdb_assoc_rec_t *assoc)
 {
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
@@ -604,26 +674,14 @@ static int _set_assoc_lft_rgt(
 }
 
 static int _set_assoc_limits_for_add(
-	mysql_conn_t *mysql_conn, slurmdb_association_rec_t *assoc)
+	mysql_conn_t *mysql_conn, slurmdb_assoc_rec_t *assoc)
 {
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 	char *query = NULL;
 	char *parent = NULL;
 	char *qos_delta = NULL;
-
-	enum {
-		ASSOC_REQ_PARENT_ID,
-		ASSOC_REQ_MJ,
-		ASSOC_REQ_MSJ,
-		ASSOC_REQ_MCPJ,
-		ASSOC_REQ_MNPJ,
-		ASSOC_REQ_MWPJ,
-		ASSOC_REQ_MCMPJ,
-		ASSOC_REQ_MCRM,
-		ASSOC_REQ_QOS,
-		ASSOC_REQ_DELTA_QOS,
-	};
+	uint32_t tres_str_flags = TRES_STR_FLAG_REMOVE;
 
 	xassert(assoc);
 
@@ -654,23 +712,31 @@ static int _set_assoc_limits_for_add(
 	else if (assoc->def_qos_id == INFINITE)
 		assoc->def_qos_id = 0;
 
-	if (row[ASSOC2_REQ_MCMPJ]
-	    && assoc->max_cpu_mins_pj == (uint64_t)INFINITE)
-		assoc->max_cpu_mins_pj = slurm_atoull(row[ASSOC2_REQ_MCMPJ]);
-	if (row[ASSOC2_REQ_MCRM]
-	    && assoc->max_cpu_run_mins == (uint64_t)INFINITE)
-		assoc->max_cpu_run_mins = slurm_atoull(row[ASSOC2_REQ_MCRM]);
-	if (row[ASSOC2_REQ_MCPJ] && assoc->max_cpus_pj == INFINITE)
-		assoc->max_cpus_pj = slurm_atoul(row[ASSOC2_REQ_MCPJ]);
 	if (row[ASSOC2_REQ_MJ] && assoc->max_jobs == INFINITE)
 		assoc->max_jobs = slurm_atoul(row[ASSOC2_REQ_MJ]);
-	if (row[ASSOC2_REQ_MNPJ] && assoc->max_nodes_pj == INFINITE)
-		assoc->max_nodes_pj = slurm_atoul(row[ASSOC2_REQ_MNPJ]);
 	if (row[ASSOC2_REQ_MSJ] && assoc->max_submit_jobs == INFINITE)
 		assoc->max_submit_jobs = slurm_atoul(row[ASSOC2_REQ_MSJ]);
 	if (row[ASSOC2_REQ_MWPJ] && assoc->max_wall_pj == INFINITE)
 		assoc->max_wall_pj = slurm_atoul(row[ASSOC2_REQ_MWPJ]);
 
+	/* For the tres limits we just concatted the limits going up
+	 * the heirarchy slurmdb_tres_list_from_string will just skip
+	 * over any reoccuring limit to give us the first one per
+	 * TRES.
+	 */
+	slurmdb_combine_tres_strings(
+		&assoc->max_tres_pj, row[ASSOC2_REQ_MTPJ],
+		tres_str_flags);
+	slurmdb_combine_tres_strings(
+		&assoc->max_tres_pn, row[ASSOC2_REQ_MTPN],
+		tres_str_flags);
+	slurmdb_combine_tres_strings(
+		&assoc->max_tres_mins_pj, row[ASSOC2_REQ_MTMPJ],
+		tres_str_flags);
+	slurmdb_combine_tres_strings(
+		&assoc->max_tres_run_mins, row[ASSOC2_REQ_MTRM],
+		tres_str_flags);
+
 	if (assoc->qos_list) {
 		int set = 0;
 		char *tmp_char = NULL;
@@ -720,7 +786,7 @@ end_it:
  * a previous change to it's parent.
  */
 static int _modify_unset_users(mysql_conn_t *mysql_conn,
-			       slurmdb_association_rec_t *assoc,
+			       slurmdb_assoc_rec_t *assoc,
 			       char *acct,
 			       uint32_t lft, uint32_t rgt,
 			       List ret_list, int moved_parent)
@@ -729,6 +795,7 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 	MYSQL_ROW row;
 	char *query = NULL, *object = NULL;
 	int i;
+	uint32_t tres_str_flags = TRES_STR_FLAG_REMOVE | TRES_STR_FLAG_NO_NULL;
 
 	char *assoc_inx[] = {
 		"id_assoc",
@@ -737,11 +804,11 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 		"`partition`",
 		"max_jobs",
 		"max_submit_jobs",
-		"max_nodes_pj",
-		"max_cpus_pj",
+		"max_tres_pj",
+		"max_tres_pn",
 		"max_wall_pj",
-		"max_cpu_mins_pj",
-		"max_cpu_run_mins",
+		"max_tres_mins_pj",
+		"max_tres_run_mins",
 		"def_qos_id",
 		"qos",
 		"delta_qos",
@@ -756,11 +823,11 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 		ASSOC_PART,
 		ASSOC_MJ,
 		ASSOC_MSJ,
-		ASSOC_MNPJ,
-		ASSOC_MCPJ,
+		ASSOC_MTPJ,
+		ASSOC_MTPN,
 		ASSOC_MWPJ,
-		ASSOC_MCMPJ,
-		ASSOC_MCRM,
+		ASSOC_MTMPJ,
+		ASSOC_MTRM,
 		ASSOC_DEF_QOS,
 		ASSOC_QOS,
 		ASSOC_DELTA_QOS,
@@ -799,11 +866,12 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 	xfree(query);
 
 	while ((row = mysql_fetch_row(result))) {
-		slurmdb_association_rec_t *mod_assoc = NULL;
+		slurmdb_assoc_rec_t *mod_assoc = NULL;
 		int modified = 0;
+		char *tmp_char = NULL;
 
-		mod_assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-		slurmdb_init_association_rec(mod_assoc, 0);
+		mod_assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+		slurmdb_init_assoc_rec(mod_assoc, 0);
 		mod_assoc->id = slurm_atoul(row[ASSOC_ID]);
 		mod_assoc->cluster = xstrdup(assoc->cluster);
 
@@ -822,33 +890,50 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 			modified = 1;
 		}
 
-		if (!row[ASSOC_MNPJ] && assoc->max_nodes_pj != NO_VAL) {
-			mod_assoc->max_nodes_pj = assoc->max_nodes_pj;
+		if (!row[ASSOC_MWPJ] && assoc->max_wall_pj != NO_VAL) {
+			mod_assoc->max_wall_pj = assoc->max_wall_pj;
 			modified = 1;
 		}
 
-		if (!row[ASSOC_MCPJ] && assoc->max_cpus_pj != NO_VAL) {
-			mod_assoc->max_cpus_pj = assoc->max_cpus_pj;
+		if (assoc->max_tres_pj) {
+			tmp_char = xstrdup(row[ASSOC_MTPJ]);
+			slurmdb_combine_tres_strings(
+				&tmp_char, assoc->max_tres_pj,
+				tres_str_flags);
+			mod_assoc->max_tres_pj = tmp_char;
+			tmp_char = NULL;
 			modified = 1;
 		}
 
-		if (!row[ASSOC_MWPJ] && assoc->max_wall_pj != NO_VAL) {
-			mod_assoc->max_wall_pj = assoc->max_wall_pj;
+		if (assoc->max_tres_pn) {
+			tmp_char = xstrdup(row[ASSOC_MTPN]);
+			slurmdb_combine_tres_strings(
+				&tmp_char, assoc->max_tres_pn,
+				tres_str_flags);
+			mod_assoc->max_tres_pn = tmp_char;
+			tmp_char = NULL;
 			modified = 1;
 		}
 
-		if (!row[ASSOC_MCMPJ]
-		    && assoc->max_cpu_mins_pj != (uint64_t)NO_VAL) {
-			mod_assoc->max_cpu_mins_pj = assoc->max_cpu_mins_pj;
+		if (assoc->max_tres_mins_pj) {
+			tmp_char = xstrdup(row[ASSOC_MTMPJ]);
+			slurmdb_combine_tres_strings(
+				&tmp_char, assoc->max_tres_mins_pj,
+				tres_str_flags);
+			mod_assoc->max_tres_mins_pj = tmp_char;
+			tmp_char = NULL;
 			modified = 1;
 		}
 
-		if (!row[ASSOC_MCRM]
-		    && assoc->max_cpu_run_mins != (uint64_t)NO_VAL) {
-			mod_assoc->max_cpu_run_mins = assoc->max_cpu_run_mins;
+		if (assoc->max_tres_run_mins) {
+			tmp_char = xstrdup(row[ASSOC_MTRM]);
+			slurmdb_combine_tres_strings(
+				&tmp_char, assoc->max_tres_run_mins,
+				tres_str_flags);
+			mod_assoc->max_tres_run_mins = tmp_char;
+			tmp_char = NULL;
 			modified = 1;
 		}
-
 		if (!row[ASSOC_QOS][0] && assoc->qos_list) {
 			List delta_qos_list = NULL;
 			char *qos_char = NULL, *delta_char = NULL;
@@ -895,7 +980,7 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 			    || !list_count(assoc->qos_list))
 				modified = 1;
 			else {
-				list_destroy(mod_assoc->qos_list);
+				FREE_NULL_LIST(mod_assoc->qos_list);
 				mod_assoc->qos_list = NULL;
 			}
 		}
@@ -915,7 +1000,7 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 						    slurm_atoul(row[ASSOC_LFT]),
 						    slurm_atoul(row[ASSOC_RGT]),
 						    ret_list, moved_parent);
-				slurmdb_destroy_association_rec(mod_assoc);
+				slurmdb_destroy_assoc_rec(mod_assoc);
 				continue;
 			}
 			/* We do want to send all user accounts though */
@@ -937,19 +1022,19 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 			list_append(ret_list, object);
 
 			if (moved_parent)
-				slurmdb_destroy_association_rec(mod_assoc);
+				slurmdb_destroy_assoc_rec(mod_assoc);
 			else
 				if (addto_update_list(mysql_conn->update_list,
 						      SLURMDB_MODIFY_ASSOC,
 						      mod_assoc)
 				    != SLURM_SUCCESS) {
-					slurmdb_destroy_association_rec(
+					slurmdb_destroy_assoc_rec(
 						mod_assoc);
 					error("couldn't add to "
 					      "the update list");
 				}
 		} else
-			slurmdb_destroy_association_rec(mod_assoc);
+			slurmdb_destroy_assoc_rec(mod_assoc);
 
 	}
 	mysql_free_result(result);
@@ -959,8 +1044,8 @@ static int _modify_unset_users(mysql_conn_t *mysql_conn,
 
 /* when doing a select on this all the select should have a prefix of
  * t1. Returns "where" clause which needs to be xfreed. */
-static char *_setup_association_cond_qos(slurmdb_association_cond_t *assoc_cond,
-					 char *cluster_name)
+static char *_setup_assoc_cond_qos(slurmdb_assoc_cond_t *assoc_cond,
+				   char *cluster_name)
 {
 	int set = 0;
 	ListIterator itr = NULL;
@@ -1010,8 +1095,8 @@ static char *_setup_association_cond_qos(slurmdb_association_cond_t *assoc_cond,
 }
 
 /* When doing a select on this all the select should have a prefix of t1. */
-static int _setup_association_cond_limits(
-	slurmdb_association_cond_t *assoc_cond,
+static int _setup_assoc_cond_limits(
+	slurmdb_assoc_cond_t *assoc_cond,
 	const char *prefix, char **extra)
 {
 	int set = 0;
@@ -1062,267 +1147,6 @@ static int _setup_association_cond_limits(
 		xstrcat(*extra, ")");
 	}
 
-	if (assoc_cond->fairshare_list
-	    && list_count(assoc_cond->fairshare_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->fairshare_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			if (!strncasecmp(object, "parent", 6))
-				xstrfmtcat(*extra, "%s.shares='%u'",
-					   prefix, SLURMDB_FS_USE_PARENT);
-			else
-				xstrfmtcat(*extra, "%s.shares='%s'",
-					   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
-	if (assoc_cond->grp_cpu_mins_list
-	    && list_count(assoc_cond->grp_cpu_mins_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->grp_cpu_mins_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "%s.grp_cpu_mins='%s'",
-				   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
-	if (assoc_cond->grp_cpu_run_mins_list
-	    && list_count(assoc_cond->grp_cpu_run_mins_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->grp_cpu_run_mins_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "%s.grp_cpu_run_mins='%s'",
-				   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
-	if (assoc_cond->grp_cpus_list
-	    && list_count(assoc_cond->grp_cpus_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->grp_cpus_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "%s.grp_cpus='%s'",
-				   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
-	if (assoc_cond->grp_jobs_list
-	    && list_count(assoc_cond->grp_jobs_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->grp_jobs_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "%s.grp_jobs='%s'",
-				   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
-	if (assoc_cond->grp_mem_list
-	    && list_count(assoc_cond->grp_mem_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->grp_mem_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "%s.grp_mem='%s'",
-				   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
-	if (assoc_cond->grp_nodes_list
-	    && list_count(assoc_cond->grp_nodes_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->grp_nodes_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "%s.grp_nodes='%s'",
-				   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
-	if (assoc_cond->grp_submit_jobs_list
-	    && list_count(assoc_cond->grp_submit_jobs_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->grp_submit_jobs_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "%s.grp_submit_jobs='%s'",
-				   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
-	if (assoc_cond->grp_wall_list
-	    && list_count(assoc_cond->grp_wall_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->grp_wall_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "%s.grp_wall='%s'",
-				   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
-	if (assoc_cond->max_cpu_mins_pj_list
-	    && list_count(assoc_cond->max_cpu_mins_pj_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->max_cpu_mins_pj_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "%s.max_cpu_mins_pj='%s'",
-				   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
-	if (assoc_cond->max_cpu_run_mins_list
-	    && list_count(assoc_cond->max_cpu_run_mins_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->max_cpu_run_mins_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "%s.max_cpu_run_mins='%s'",
-				   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
-	if (assoc_cond->max_cpus_pj_list
-	    && list_count(assoc_cond->max_cpus_pj_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->max_cpus_pj_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "%s.max_cpus_pj='%s'",
-				   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
-	if (assoc_cond->max_jobs_list
-	    && list_count(assoc_cond->max_jobs_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->max_jobs_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "%s.max_jobs='%s'",
-				   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
-	if (assoc_cond->max_nodes_pj_list
-	    && list_count(assoc_cond->max_nodes_pj_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->max_nodes_pj_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "%s.max_nodes_pj='%s'",
-				   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
-	if (assoc_cond->max_submit_jobs_list
-	    && list_count(assoc_cond->max_submit_jobs_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->max_submit_jobs_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra, "%s.max_submit_jobs='%s'",
-				   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
-	if (assoc_cond->max_wall_pj_list
-	    && list_count(assoc_cond->max_wall_pj_list)) {
-		set = 0;
-		xstrcat(*extra, " && (");
-		itr = list_iterator_create(assoc_cond->max_wall_pj_list);
-		while ((object = list_next(itr))) {
-			if (set)
-				xstrcat(*extra, " || ");
-			xstrfmtcat(*extra,
-				   "%s.max_wall_pj='%s'",
-				   prefix, object);
-			set = 1;
-		}
-		list_iterator_destroy(itr);
-		xstrcat(*extra, ")");
-	}
-
 	if (assoc_cond->user_list && list_count(assoc_cond->user_list)) {
 		set = 0;
 		xstrcat(*extra, " && (");
@@ -1391,7 +1215,7 @@ static int _setup_association_cond_limits(
 
 static int _process_modify_assoc_results(mysql_conn_t *mysql_conn,
 					 MYSQL_RES *result,
-					 slurmdb_association_rec_t *assoc,
+					 slurmdb_assoc_rec_t *assoc,
 					 slurmdb_user_rec_t *user,
 					 char *cluster_name, char *sent_vals,
 					 bool is_admin, bool same_user,
@@ -1415,7 +1239,8 @@ static int _process_modify_assoc_results(mysql_conn_t *mysql_conn,
 	vals = xstrdup(sent_vals);
 
 	while ((row = mysql_fetch_row(result))) {
-		slurmdb_association_rec_t *mod_assoc = NULL;
+		MYSQL_RES *result2 = NULL;
+		slurmdb_assoc_rec_t *mod_assoc = NULL, alt_assoc;
 		int account_type=0;
 		/* If parent changes these also could change
 		   so we need to keep track of the latest
@@ -1423,7 +1248,11 @@ static int _process_modify_assoc_results(mysql_conn_t *mysql_conn,
 		*/
 		uint32_t lft = slurm_atoul(row[MASSOC_LFT]);
 		uint32_t rgt = slurm_atoul(row[MASSOC_RGT]);
-		char *account = row[MASSOC_ACCT];
+		char *orig_acct, *account;
+
+		orig_acct = account = row[MASSOC_ACCT];
+
+		slurmdb_init_assoc_rec(&alt_assoc, 0);
 
 		/* Here we want to see if the person
 		 * is a coord of the parent account
@@ -1477,6 +1306,28 @@ static int _process_modify_assoc_results(mysql_conn_t *mysql_conn,
 
 				rc = ESLURM_ACCESS_DENIED;
 				goto end_it;
+			} else if (_check_coord_qos(mysql_conn, cluster_name,
+						    account, user->name,
+						    assoc->qos_list)
+				   == SLURM_ERROR) {
+				assoc_mgr_lock_t locks = {
+					NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+					NO_LOCK, NO_LOCK, NO_LOCK };
+				char *requested_qos;
+
+				assoc_mgr_lock(&locks);
+				requested_qos = get_qos_complete_str(
+					assoc_mgr_qos_list, assoc->qos_list);
+				assoc_mgr_unlock(&locks);
+				error("Coordinator %s(%d) does not have the "
+				      "access to all the qos requested (%s), "
+				      "so they can't modify account "
+				      "%s with it.",
+				      user->name, user->uid, requested_qos,
+				      account);
+				xfree(requested_qos);
+				rc = ESLURM_ACCESS_DENIED;
+				goto end_it;
 			}
 		}
 
@@ -1538,8 +1389,7 @@ static int _process_modify_assoc_results(mysql_conn_t *mysql_conn,
 			xstrfmtcat(name_char, "(id_assoc=%s", row[MASSOC_ID]);
 
 		/* Only do this when not dealing with the root association. */
-		if (strcmp(account, "root") || row[MASSOC_USER][0]) {
-			MYSQL_RES *result2;
+		if (strcmp(orig_acct, "root") || row[MASSOC_USER][0]) {
 			MYSQL_ROW row2;
 			/* If there is a variable cleared here we need to make
 			   sure we get the parent's information, if any. */
@@ -1561,69 +1411,102 @@ static int _process_modify_assoc_results(mysql_conn_t *mysql_conn,
 			if ((row2 = mysql_fetch_row(result2))) {
 				if (assoc->def_qos_id == INFINITE
 				    && row2[ASSOC2_REQ_DEF_QOS])
-					assoc->def_qos_id = slurm_atoul(
+					alt_assoc.def_qos_id = slurm_atoul(
 						row2[ASSOC2_REQ_DEF_QOS]);
 
 				if ((assoc->max_jobs == INFINITE)
 				    && row2[ASSOC2_REQ_MJ])
-					assoc->max_jobs = slurm_atoul(
+					alt_assoc.max_jobs = slurm_atoul(
 						row2[ASSOC2_REQ_MJ]);
 				if ((assoc->max_submit_jobs == INFINITE)
 				    && row2[ASSOC2_REQ_MSJ])
-					assoc->max_submit_jobs = slurm_atoul(
+					alt_assoc.max_submit_jobs = slurm_atoul(
 						row2[ASSOC2_REQ_MSJ]);
-				if ((assoc->max_cpus_pj == INFINITE)
-				    && row2[ASSOC2_REQ_MCPJ])
-					assoc->max_cpus_pj = slurm_atoul(
-						row2[ASSOC2_REQ_MCPJ]);
-				if ((assoc->max_nodes_pj == INFINITE)
-				    && row2[ASSOC2_REQ_MNPJ])
-					assoc->max_nodes_pj = slurm_atoul(
-						row2[ASSOC2_REQ_MNPJ]);
 				if ((assoc->max_wall_pj == INFINITE)
 				    && row2[ASSOC2_REQ_MWPJ])
-					assoc->max_wall_pj = slurm_atoul(
+					alt_assoc.max_wall_pj = slurm_atoul(
 						row2[ASSOC2_REQ_MWPJ]);
-				if ((assoc->max_cpu_mins_pj ==
-				     (uint64_t)INFINITE)
-				    && row2[ASSOC2_REQ_MCMPJ])
-					assoc->max_cpu_mins_pj = slurm_atoull(
-						row2[ASSOC2_REQ_MCMPJ]);
-				if ((assoc->max_cpu_run_mins ==
-				     (uint64_t)INFINITE)
-				    && row2[ASSOC2_REQ_MCRM])
-					assoc->max_cpu_run_mins = slurm_atoull(
-						row2[ASSOC2_REQ_MCRM]);
+
+				/* We don't have to copy these strings
+				 * or check for there existance,
+				 * slurmdb_combine_tres_strings will
+				 * do this for us below.
+				 */
+				if (row2[ASSOC2_REQ_MTPJ][0])
+					alt_assoc.max_tres_pj =
+						row2[ASSOC2_REQ_MTPJ];
+				if (row2[ASSOC2_REQ_MTPN][0])
+					alt_assoc.max_tres_pn =
+						row2[ASSOC2_REQ_MTPN];
+				if (row2[ASSOC2_REQ_MTMPJ][0])
+					alt_assoc.max_tres_mins_pj =
+						row2[ASSOC2_REQ_MTMPJ];
+				if (row2[ASSOC2_REQ_MTRM][0])
+					alt_assoc.max_tres_run_mins =
+						row2[ASSOC2_REQ_MTRM];
 			}
-			mysql_free_result(result2);
 		}
-		mod_assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-		slurmdb_init_association_rec(mod_assoc, 0);
+		mod_assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+		slurmdb_init_assoc_rec(mod_assoc, 0);
 		mod_assoc->id = slurm_atoul(row[MASSOC_ID]);
 		mod_assoc->cluster = xstrdup(cluster_name);
 
-		mod_assoc->def_qos_id = assoc->def_qos_id;
+		if (alt_assoc.def_qos_id != NO_VAL)
+			mod_assoc->def_qos_id = alt_assoc.def_qos_id;
+		else
+			mod_assoc->def_qos_id = assoc->def_qos_id;
 
 		mod_assoc->is_def = assoc->is_def;
 
 		mod_assoc->shares_raw = assoc->shares_raw;
 
-		mod_assoc->grp_cpus = assoc->grp_cpus;
-		mod_assoc->grp_cpu_mins = assoc->grp_cpu_mins;
-		mod_assoc->grp_cpu_run_mins = assoc->grp_cpu_run_mins;
+		mod_tres_str(&mod_assoc->grp_tres,
+			     assoc->grp_tres, row[MASSOC_GT],
+			     NULL, "grp_tres", &vals, mod_assoc->id, 1);
+		mod_tres_str(&mod_assoc->grp_tres_mins,
+			     assoc->grp_tres_mins, row[MASSOC_GTM],
+			     NULL, "grp_tres_mins", &vals, mod_assoc->id, 1);
+		mod_tres_str(&mod_assoc->grp_tres_run_mins,
+			     assoc->grp_tres_run_mins, row[MASSOC_GTRM],
+			     NULL, "grp_tres_run_mins", &vals,
+			     mod_assoc->id, 1);
+
 		mod_assoc->grp_jobs = assoc->grp_jobs;
-		mod_assoc->grp_mem = assoc->grp_mem;
-		mod_assoc->grp_nodes = assoc->grp_nodes;
 		mod_assoc->grp_submit_jobs = assoc->grp_submit_jobs;
 		mod_assoc->grp_wall = assoc->grp_wall;
 
-		mod_assoc->max_cpus_pj = assoc->max_cpus_pj;
-		mod_assoc->max_cpu_mins_pj = assoc->max_cpu_mins_pj;
-		mod_assoc->max_cpu_run_mins = assoc->max_cpu_run_mins;
-		mod_assoc->max_jobs = assoc->max_jobs;
-		mod_assoc->max_nodes_pj = assoc->max_nodes_pj;
-		mod_assoc->max_submit_jobs = assoc->max_submit_jobs;
-		mod_assoc->max_wall_pj = assoc->max_wall_pj;
+		mod_tres_str(&mod_assoc->max_tres_pj,
+			     assoc->max_tres_pj, row[MASSOC_MTPJ],
+			     alt_assoc.max_tres_pj, "max_tres_pj",
+			     &vals, mod_assoc->id, 1);
+		mod_tres_str(&mod_assoc->max_tres_pn,
+			     assoc->max_tres_pn, row[MASSOC_MTPN],
+			     alt_assoc.max_tres_pn, "max_tres_pn",
+			     &vals, mod_assoc->id, 1);
+		mod_tres_str(&mod_assoc->max_tres_mins_pj,
+			     assoc->max_tres_mins_pj, row[MASSOC_MTMPJ],
+			     alt_assoc.max_tres_mins_pj, "max_tres_mins_pj",
+			     &vals, mod_assoc->id, 1);
+		mod_tres_str(&mod_assoc->max_tres_run_mins,
+			     assoc->max_tres_run_mins, row[MASSOC_MTRM],
+			     alt_assoc.max_tres_run_mins, "max_tres_run_mins",
+			     &vals, mod_assoc->id, 1);
+
+		if (result2)
+			mysql_free_result(result2);
+
+		if (alt_assoc.max_jobs != NO_VAL)
+			mod_assoc->max_jobs = alt_assoc.max_jobs;
+		else
+			mod_assoc->max_jobs = assoc->max_jobs;
+		if (alt_assoc.max_submit_jobs != NO_VAL)
+			mod_assoc->max_submit_jobs = alt_assoc.max_submit_jobs;
+		else
+			mod_assoc->max_submit_jobs = assoc->max_submit_jobs;
+		if (alt_assoc.max_wall_pj != NO_VAL)
+			mod_assoc->max_wall_pj = alt_assoc.max_wall_pj;
+		else
+			mod_assoc->max_wall_pj = assoc->max_wall_pj;
 
 		/* no need to get the parent id since if we moved
 		 * parent id's we will get it when we send the total list */
@@ -1714,8 +1597,8 @@ static int _process_modify_assoc_results(mysql_conn_t *mysql_conn,
 			/* Use fresh one here so we don't have to
 			   worry about dealing with bad values.
 			*/
-			slurmdb_association_rec_t tmp_assoc;
-			slurmdb_init_association_rec(&tmp_assoc, 0);
+			slurmdb_assoc_rec_t tmp_assoc;
+			slurmdb_init_assoc_rec(&tmp_assoc, 0);
 			tmp_assoc.is_def = 1;
 			tmp_assoc.cluster = cluster_name;
 			tmp_assoc.acct = row[MASSOC_ACCT];
@@ -1729,13 +1612,13 @@ static int _process_modify_assoc_results(mysql_conn_t *mysql_conn,
 			}
 		}
 
-		if (moved_parent)
-			slurmdb_destroy_association_rec(mod_assoc);
+		if (!vals || !vals[0] || moved_parent)
+			slurmdb_destroy_assoc_rec(mod_assoc);
 		else if (addto_update_list(mysql_conn->update_list,
 					   SLURMDB_MODIFY_ASSOC,
 					   mod_assoc) != SLURM_SUCCESS) {
 			error("couldn't add to the update list");
-			slurmdb_destroy_association_rec(mod_assoc);
+			slurmdb_destroy_assoc_rec(mod_assoc);
 		}
 	}
 
@@ -1751,7 +1634,7 @@ static int _process_modify_assoc_results(mysql_conn_t *mysql_conn,
 	if (rc != SLURM_SUCCESS)
 		goto end_it;
 
-	if (vals) {
+	if (vals && vals[0]) {
 		char *user_name = uid_to_string((uid_t) user->uid);
 		rc = modify_common(mysql_conn, DBD_MODIFY_ASSOCS, now,
 				   user_name, assoc_table, name_char, vals,
@@ -1766,8 +1649,8 @@ static int _process_modify_assoc_results(mysql_conn_t *mysql_conn,
 	if (moved_parent) {
 		List local_assoc_list = NULL;
 		ListIterator local_itr = NULL;
-		slurmdb_association_rec_t *local_assoc = NULL;
-		slurmdb_association_cond_t local_assoc_cond;
+		slurmdb_assoc_rec_t *local_assoc = NULL;
+		slurmdb_assoc_cond_t local_assoc_cond;
 		/* now we need to send the update of the new parents and
 		 * limits, so just to be safe, send the whole
 		 * tree because we could have some limits that
@@ -1780,12 +1663,12 @@ static int _process_modify_assoc_results(mysql_conn_t *mysql_conn,
 		 */
 
 		memset(&local_assoc_cond, 0,
-		       sizeof(slurmdb_association_cond_t));
+		       sizeof(slurmdb_assoc_cond_t));
 		local_assoc_cond.cluster_list = list_create(NULL);
 		list_append(local_assoc_cond.cluster_list, cluster_name);
 		local_assoc_list = as_mysql_get_assocs(
 			mysql_conn, user->uid, &local_assoc_cond);
-		list_destroy(local_assoc_cond.cluster_list);
+		FREE_NULL_LIST(local_assoc_cond.cluster_list);
 		if (!local_assoc_list)
 			goto end_it;
 		/* NOTE: you can not use list_pop, or list_push
@@ -1804,7 +1687,7 @@ static int _process_modify_assoc_results(mysql_conn_t *mysql_conn,
 				list_remove(local_itr);
 		}
 		list_iterator_destroy(local_itr);
-		list_destroy(local_assoc_list);
+		FREE_NULL_LIST(local_assoc_list);
 	}
 
 	if (reset_query) {
@@ -1843,7 +1726,7 @@ static int _process_remove_assoc_results(mysql_conn_t *mysql_conn,
 		goto skip_process;
 
 	while ((row = mysql_fetch_row(result))) {
-		slurmdb_association_rec_t *rem_assoc = NULL;
+		slurmdb_assoc_rec_t *rem_assoc = NULL;
 		uint32_t lft;
 
 		if (!is_admin) {
@@ -1907,14 +1790,14 @@ static int _process_remove_assoc_results(mysql_conn_t *mysql_conn,
 		if (lft < smallest_lft)
 			smallest_lft = lft;
 
-		rem_assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-		slurmdb_init_association_rec(rem_assoc, 0);
+		rem_assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+		slurmdb_init_assoc_rec(rem_assoc, 0);
 		rem_assoc->id = slurm_atoul(row[RASSOC_ID]);
 		rem_assoc->cluster = xstrdup(cluster_name);
 		if (addto_update_list(mysql_conn->update_list,
 				      SLURMDB_REMOVE_ASSOC,
 				      rem_assoc) != SLURM_SUCCESS) {
-			slurmdb_destroy_association_rec(rem_assoc);
+			slurmdb_destroy_assoc_rec(rem_assoc);
 			error("couldn't add to the update list");
 		}
 
@@ -1940,7 +1823,7 @@ end_it:
 
 static int _cluster_get_assocs(mysql_conn_t *mysql_conn,
 			       slurmdb_user_rec_t *user,
-			       slurmdb_association_cond_t *assoc_cond,
+			       slurmdb_assoc_cond_t *assoc_cond,
 			       char *cluster_name,
 			       char *fields, char *sent_extra,
 			       bool is_admin, List sent_list)
@@ -1953,11 +1836,11 @@ static int _cluster_get_assocs(mysql_conn_t *mysql_conn,
 	uint32_t parent_def_qos_id = 0;
 	uint32_t parent_mj = INFINITE;
 	uint32_t parent_msj = INFINITE;
-	uint32_t parent_mcpj = INFINITE;
-	uint32_t parent_mnpj = INFINITE;
 	uint32_t parent_mwpj = INFINITE;
-	uint64_t parent_mcmpj = (uint64_t)INFINITE;
-	uint64_t parent_mcrm = (uint64_t)INFINITE;
+	char *parent_mtpj = NULL;
+	char *parent_mtpn = NULL;
+	char *parent_mtmpj = NULL;
+	char *parent_mtrm = NULL;
 	char *parent_acct = NULL;
 	char *parent_qos = NULL;
 	char *parent_delta_qos = NULL;
@@ -2036,7 +1919,7 @@ static int _cluster_get_assocs(mysql_conn_t *mysql_conn,
 		}
 	}
 
-	qos_extra = _setup_association_cond_qos(assoc_cond, cluster_name);
+	qos_extra = _setup_assoc_cond_qos(assoc_cond, cluster_name);
 
 
 	//START_TIMER;
@@ -2063,11 +1946,11 @@ static int _cluster_get_assocs(mysql_conn_t *mysql_conn,
 		return SLURM_SUCCESS;
 	}
 
-	assoc_list = list_create(slurmdb_destroy_association_rec);
+	assoc_list = list_create(slurmdb_destroy_assoc_rec);
 	delta_qos_list = list_create(slurm_destroy_char);
 	while ((row = mysql_fetch_row(result))) {
-		slurmdb_association_rec_t *assoc =
-			xmalloc(sizeof(slurmdb_association_rec_t));
+		slurmdb_assoc_rec_t *assoc =
+			xmalloc(sizeof(slurmdb_assoc_rec_t));
 		MYSQL_RES *result2 = NULL;
 		MYSQL_ROW row2;
 
@@ -2093,35 +1976,17 @@ static int _cluster_get_assocs(mysql_conn_t *mysql_conn,
 		else
 			assoc->grp_submit_jobs = INFINITE;
 
-		if (row[ASSOC_REQ_GC])
-			assoc->grp_cpus = slurm_atoul(row[ASSOC_REQ_GC]);
-		else
-			assoc->grp_cpus = INFINITE;
-
-		if (row[ASSOC_REQ_GMEM])
-			assoc->grp_mem = slurm_atoul(row[ASSOC_REQ_GMEM]);
-		else
-			assoc->grp_mem = INFINITE;
-
-		if (row[ASSOC_REQ_GN])
-			assoc->grp_nodes = slurm_atoul(row[ASSOC_REQ_GN]);
-		else
-			assoc->grp_nodes = INFINITE;
 		if (row[ASSOC_REQ_GW])
 			assoc->grp_wall = slurm_atoul(row[ASSOC_REQ_GW]);
 		else
 			assoc->grp_wall = INFINITE;
 
-		if (row[ASSOC_REQ_GCM])
-			assoc->grp_cpu_mins = slurm_atoull(row[ASSOC_REQ_GCM]);
-		else
-			assoc->grp_cpu_mins = INFINITE;
-
-		if (row[ASSOC_REQ_GCRM])
-			assoc->grp_cpu_run_mins =
-				slurm_atoull(row[ASSOC_REQ_GCRM]);
-		else
-			assoc->grp_cpu_run_mins = INFINITE;
+		if (row[ASSOC_REQ_GT][0])
+			assoc->grp_tres = xstrdup(row[ASSOC_REQ_GT]);
+		if (row[ASSOC_REQ_GTM][0])
+			assoc->grp_tres_mins = xstrdup(row[ASSOC_REQ_GTM]);
+		if (row[ASSOC_REQ_GTRM][0])
+			assoc->grp_tres_run_mins = xstrdup(row[ASSOC_REQ_GTRM]);
 
 		parent_acct = row[ASSOC_REQ_ACCT];
 		if (!without_parent_info
@@ -2187,49 +2052,41 @@ static int _cluster_get_assocs(mysql_conn_t *mysql_conn,
 				else
 					parent_msj = INFINITE;
 
-				if (row2[ASSOC2_REQ_MCPJ])
-					parent_mcpj = slurm_atoul(
-						row2[ASSOC2_REQ_MCPJ]);
-				else
-					parent_mcpj = INFINITE;
-
-				if (row2[ASSOC2_REQ_MNPJ])
-					parent_mnpj = slurm_atoul(
-						row2[ASSOC2_REQ_MNPJ]);
-				else
-					parent_mnpj = INFINITE;
-
 				if (row2[ASSOC2_REQ_MWPJ])
 					parent_mwpj = slurm_atoul(
 						row2[ASSOC2_REQ_MWPJ]);
 				else
 					parent_mwpj = INFINITE;
 
-				if (row2[ASSOC2_REQ_MCMPJ])
-					parent_mcmpj = slurm_atoull(
-						row2[ASSOC2_REQ_MCMPJ]);
-				else
-					parent_mcmpj = INFINITE;
+				xfree(parent_mtpj);
+				if (row2[ASSOC2_REQ_MTPJ][0])
+					parent_mtpj = xstrdup(
+						row2[ASSOC2_REQ_MTPJ]);
 
-				if (row2[ASSOC2_REQ_MCRM])
-					parent_mcrm = slurm_atoull(
-						row2[ASSOC2_REQ_MCRM]);
-				else
-					parent_mcrm = (uint64_t)INFINITE;
+				xfree(parent_mtpn);
+				if (row2[ASSOC2_REQ_MTPN][0])
+					parent_mtpn = xstrdup(
+						row2[ASSOC2_REQ_MTPN]);
+
+				xfree(parent_mtmpj);
+				if (row2[ASSOC2_REQ_MTMPJ][0])
+					parent_mtmpj = xstrdup(
+						row2[ASSOC2_REQ_MTMPJ]);
+
+				xfree(parent_mtrm);
+				if (row2[ASSOC2_REQ_MTRM][0])
+					parent_mtrm = xstrdup(
+						row2[ASSOC2_REQ_MTRM]);
 
 				xfree(parent_qos);
 				if (row2[ASSOC2_REQ_QOS][0])
 					parent_qos =
 						xstrdup(row2[ASSOC2_REQ_QOS]);
-				else
-					parent_qos = NULL;
 
 				xfree(parent_delta_qos);
 				if (row2[ASSOC2_REQ_DELTA_QOS][0])
 					parent_delta_qos = xstrdup(
 						row2[ASSOC2_REQ_DELTA_QOS]);
-				else
-					parent_delta_qos = NULL;
 			}
 			last_acct = parent_acct;
 			last_cluster = cluster_name;
@@ -2253,32 +2110,44 @@ static int _cluster_get_assocs(mysql_conn_t *mysql_conn,
 		else
 			assoc->max_submit_jobs = parent_msj;
 
-		if (row[ASSOC_REQ_MCPJ])
-			assoc->max_cpus_pj = slurm_atoul(row[ASSOC_REQ_MCPJ]);
-		else
-			assoc->max_cpus_pj = parent_mcpj;
-
-		if (row[ASSOC_REQ_MNPJ])
-			assoc->max_nodes_pj = slurm_atoul(row[ASSOC_REQ_MNPJ]);
-		else
-			assoc->max_nodes_pj = parent_mnpj;
-
 		if (row[ASSOC_REQ_MWPJ])
 			assoc->max_wall_pj = slurm_atoul(row[ASSOC_REQ_MWPJ]);
 		else
 			assoc->max_wall_pj = parent_mwpj;
 
-		if (row[ASSOC_REQ_MCMPJ])
-			assoc->max_cpu_mins_pj = slurm_atoull(
-				row[ASSOC_REQ_MCMPJ]);
-		else
-			assoc->max_cpu_mins_pj = parent_mcmpj;
+		if (row[ASSOC_REQ_MTPJ][0])
+			assoc->max_tres_pj = xstrdup(row[ASSOC_REQ_MTPJ]);
 
-		if (row[ASSOC_REQ_MCRM])
-			assoc->max_cpu_run_mins = slurm_atoull(
-				row[ASSOC_REQ_MCRM]);
-		else
-			assoc->max_cpu_run_mins = parent_mcrm;
+		if (row[ASSOC_REQ_MTPN][0])
+			assoc->max_tres_pn = xstrdup(row[ASSOC_REQ_MTPN]);
+
+		if (row[ASSOC_REQ_MTMPJ][0])
+			assoc->max_tres_mins_pj = xstrdup(row[ASSOC_REQ_MTMPJ]);
+
+		if (row[ASSOC_REQ_MTRM][0])
+			assoc->max_tres_run_mins = xstrdup(row[ASSOC_REQ_MTRM]);
+
+		/* For the TRES limits we just concatenated the limits going
+		 * up the hierarchy; slurmdb_tres_list_from_string will skip
+		 * over any recurring limit to give us the first one found
+		 * per TRES.
+		 */
+		slurmdb_combine_tres_strings(
+			&assoc->max_tres_pj, parent_mtpj,
+			TRES_STR_FLAG_NONE);
+		xfree(parent_mtpj);
+		slurmdb_combine_tres_strings(
+			&assoc->max_tres_pn, parent_mtpn,
+			TRES_STR_FLAG_NONE);
+		xfree(parent_mtpn);
+		slurmdb_combine_tres_strings(
+			&assoc->max_tres_mins_pj, parent_mtmpj,
+			TRES_STR_FLAG_NONE);
+		xfree(parent_mtmpj);
+		slurmdb_combine_tres_strings(
+			&assoc->max_tres_run_mins, parent_mtrm,
+			TRES_STR_FLAG_NONE);
+		xfree(parent_mtrm);
 
 		assoc->qos_list = list_create(slurm_destroy_char);
 
@@ -2367,7 +2236,7 @@ static int _cluster_get_assocs(mysql_conn_t *mysql_conn,
 	}
 	mysql_free_result(result);
 
-	list_destroy(delta_qos_list);
+	FREE_NULL_LIST(delta_qos_list);
 
 	xfree(parent_delta_qos);
 	xfree(parent_qos);
@@ -2379,7 +2248,7 @@ static int _cluster_get_assocs(mysql_conn_t *mysql_conn,
 				   assoc_cond->usage_end);
 
 	list_transfer(sent_list, assoc_list);
-	list_destroy(assoc_list);
+	FREE_NULL_LIST(assoc_list);
 	return SLURM_SUCCESS;
 }
 
@@ -2403,16 +2272,16 @@ extern int as_mysql_get_modified_lfts(mysql_conn_t *mysql_conn,
 	xfree(query);
 
 	while ((row = mysql_fetch_row(result))) {
-		slurmdb_association_rec_t *assoc =
-			xmalloc(sizeof(slurmdb_association_rec_t));
-		slurmdb_init_association_rec(assoc, 0);
+		slurmdb_assoc_rec_t *assoc =
+			xmalloc(sizeof(slurmdb_assoc_rec_t));
+		slurmdb_init_assoc_rec(assoc, 0);
 		assoc->id = slurm_atoul(row[0]);
 		assoc->lft = slurm_atoul(row[1]);
 		assoc->cluster = xstrdup(cluster_name);
 		if (addto_update_list(mysql_conn->update_list,
 				      SLURMDB_MODIFY_ASSOC,
 				      assoc) != SLURM_SUCCESS)
-			slurmdb_destroy_association_rec(assoc);
+			slurmdb_destroy_assoc_rec(assoc);
 	}
 	mysql_free_result(result);
 
@@ -2420,12 +2289,12 @@ extern int as_mysql_get_modified_lfts(mysql_conn_t *mysql_conn,
 }
 
 extern int as_mysql_add_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
-			       List association_list)
+			       List assoc_list)
 {
 	ListIterator itr = NULL;
 	int rc = SLURM_SUCCESS;
 	int i=0;
-	slurmdb_association_rec_t *object = NULL;
+	slurmdb_assoc_rec_t *object = NULL;
 	char *cols = NULL, *vals = NULL, *txn_query = NULL;
 	char *extra = NULL, *query = NULL, *update = NULL, *tmp_extra = NULL;
 	char *parent = NULL;
@@ -2441,8 +2310,9 @@ extern int as_mysql_add_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 	char *last_parent = NULL, *last_cluster = NULL;
 	List local_cluster_list = NULL;
 	List added_user_list = NULL;
+	bool is_coord = false;
 
-	if (!association_list) {
+	if (!assoc_list) {
 		error("No association list given");
 		return SLURM_ERROR;
 	}
@@ -2450,11 +2320,12 @@ extern int as_mysql_add_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return ESLURM_DB_CONNECTION;
 
-	if (!is_user_min_admin_level(mysql_conn, uid, SLURMDB_ADMIN_OPERATOR)) {
+	if (!is_user_min_admin_level(mysql_conn, uid,
+				     SLURMDB_ADMIN_OPERATOR)) {
 		ListIterator itr2 = NULL;
 		slurmdb_user_rec_t user;
 		slurmdb_coord_rec_t *coord = NULL;
-		slurmdb_association_rec_t *object = NULL;
+		slurmdb_assoc_rec_t *object = NULL;
 
 		memset(&user, 0, sizeof(slurmdb_user_rec_t));
 		user.uid = uid;
@@ -2465,7 +2336,7 @@ extern int as_mysql_add_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 			return ESLURM_ACCESS_DENIED;
 		}
 
-		itr = list_iterator_create(association_list);
+		itr = list_iterator_create(assoc_list);
 		itr2 = list_iterator_create(user.coord_accts);
 		while ((object = list_next(itr))) {
 			char *account = "root";
@@ -2489,14 +2360,15 @@ extern int as_mysql_add_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 			      user.name, user.uid);
 			return ESLURM_ACCESS_DENIED;
 		}
+		is_coord = true;
 	}
 
 	local_cluster_list = list_create(NULL);
 	user_name = uid_to_string((uid_t) uid);
 	/* these need to be in a specific order */
-	list_sort(association_list, (ListCmpF)_assoc_sort_cluster);
+	list_sort(assoc_list, (ListCmpF)_assoc_sort_cluster);
 
-	itr = list_iterator_create(association_list);
+	itr = list_iterator_create(assoc_list);
 	while ((object = list_next(itr))) {
 		if (!object->cluster || !object->cluster[0]
 		    || !object->acct || !object->acct[0]) {
@@ -2506,6 +2378,30 @@ extern int as_mysql_add_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 			continue;
 		}
 
+		if (is_coord && _check_coord_qos(mysql_conn, object->cluster,
+						 object->acct, user_name,
+						 object->qos_list)
+		    == SLURM_ERROR) {
+			assoc_mgr_lock_t locks = {
+				NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+				NO_LOCK, NO_LOCK, NO_LOCK };
+			char *requested_qos;
+
+			assoc_mgr_lock(&locks);
+			requested_qos = get_qos_complete_str(
+				assoc_mgr_qos_list, object->qos_list);
+			assoc_mgr_unlock(&locks);
+			error("Coordinator %s(%d) does not have the "
+			      "access to all the qos requested (%s), "
+			      "so they can't add to account "
+			      "%s with it.",
+			      user_name, uid, requested_qos,
+			      object->acct);
+			xfree(requested_qos);
+			rc = ESLURM_ACCESS_DENIED;
+			break;
+		}
+
 		/* When adding if this isn't a default might as well
 		   force it to be 0 to avoid confusion since
 		   uninitialized it is NO_VAL.
@@ -2557,8 +2453,8 @@ extern int as_mysql_add_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 			list_append(added_user_list, object->user);
 		}
 
-		setup_association_limits(object, &cols, &vals, &extra,
-					 QOS_LEVEL_NONE, 1);
+		setup_assoc_limits(object, &cols, &vals, &extra,
+				   QOS_LEVEL_NONE, 1);
 
 		xstrcat(tmp_char, aassoc_req_inx[0]);
 		for(i=1; i<AASSOC_COUNT; i++)
@@ -2903,7 +2799,7 @@ extern int as_mysql_add_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 	}
 
 	/* now reset all the other defaults accordingly. (if needed) */
-	itr = list_iterator_create(association_list);
+	itr = list_iterator_create(assoc_list);
 	while ((object = list_next(itr))) {
 		if ((object->is_def != 1) || !object->cluster
 		    || !object->acct || !object->user)
@@ -2947,9 +2843,9 @@ extern int as_mysql_add_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 		/* 	xfree(query); */
 
 		/* 	while ((row = mysql_fetch_row(result))) { */
-		/* 	      slurmdb_association_rec_t *mod_assoc = xmalloc( */
-		/* 			sizeof(slurmdb_association_rec_t)); */
-		/* 		slurmdb_init_association_rec(mod_assoc, 0); */
+		/* 	      slurmdb_assoc_rec_t *mod_assoc = xmalloc( */
+		/* 			sizeof(slurmdb_assoc_rec_t)); */
+		/* 		slurmdb_init_assoc_rec(mod_assoc, 0); */
 
 		/* 		mod_assoc->id = slurm_atoul(row[0]); */
 		/* 		mod_assoc->is_def = 0; */
@@ -2958,7 +2854,7 @@ extern int as_mysql_add_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 		/* 				      SLURMDB_MODIFY_ASSOC, */
 		/* 				      mod_assoc) */
 		/* 		    != SLURM_SUCCESS) { */
-		/* 			slurmdb_destroy_association_rec( */
+		/* 			slurmdb_destroy_assoc_rec( */
 		/* 				mod_assoc); */
 		/* 			error("couldn't add to " */
 		/* 			      "the update list"); */
@@ -3000,8 +2896,8 @@ end_it:
 		if (moved_parent) {
 			List assoc_list = NULL;
 			ListIterator itr = NULL;
-			slurmdb_association_rec_t *assoc = NULL;
-			slurmdb_association_cond_t assoc_cond;
+			slurmdb_assoc_rec_t *assoc = NULL;
+			slurmdb_assoc_cond_t assoc_cond;
 			/* now we need to send the update of the new parents and
 			 * limits, so just to be safe, send the whole
 			 * tree because we could have some limits that
@@ -3013,11 +2909,11 @@ end_it:
 			 * want to rewrite code to make it happen
 			 */
 			memset(&assoc_cond, 0,
-			       sizeof(slurmdb_association_cond_t));
+			       sizeof(slurmdb_assoc_cond_t));
 			assoc_cond.cluster_list = local_cluster_list;
 			if (!(assoc_list =
 			      as_mysql_get_assocs(mysql_conn, uid, NULL))) {
-				list_destroy(local_cluster_list);
+				FREE_NULL_LIST(local_cluster_list);
 				return rc;
 			}
 			/* NOTE: you can not use list_pop, or list_push
@@ -3036,20 +2932,20 @@ end_it:
 					list_remove(itr);
 			}
 			list_iterator_destroy(itr);
-			list_destroy(assoc_list);
+			FREE_NULL_LIST(assoc_list);
 		}
 	} else {
 		FREE_NULL_LIST(added_user_list);
 		xfree(txn_query);
 		reset_mysql_conn(mysql_conn);
 	}
-	list_destroy(local_cluster_list);
+	FREE_NULL_LIST(local_cluster_list);
 	return rc;
 }
 
 extern List as_mysql_modify_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
-				   slurmdb_association_cond_t *assoc_cond,
-				   slurmdb_association_rec_t *assoc)
+				   slurmdb_assoc_cond_t *assoc_cond,
+				   slurmdb_assoc_rec_t *assoc)
 {
 	ListIterator itr = NULL;
 	List ret_list = NULL;
@@ -3095,7 +2991,7 @@ extern List as_mysql_modify_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 				   only allowed to change the default
 				   account, and default QOS.
 				*/
-				slurmdb_init_association_rec(assoc, 1);
+				slurmdb_init_assoc_rec(assoc, 1);
 
 				assoc->is_def = is_def;
 				assoc->def_qos_id = def_qos_id;
@@ -3115,10 +3011,10 @@ is_same_user:
 	    || assoc_cond->with_sub_accts)
 		prefix = "t2";
 
-	(void) _setup_association_cond_limits(assoc_cond, prefix, &extra);
+	(void) _setup_assoc_cond_limits(assoc_cond, prefix, &extra);
 
 	/* This needs to be here to make sure we only modify the
-	   correct set of associations The first clause was already
+	   correct set of assocs The first clause was already
 	   taken care of above. */
 	if (assoc_cond->user_list && !list_count(assoc_cond->user_list)) {
 		debug4("no user specified looking at users");
@@ -3128,14 +3024,14 @@ is_same_user:
 		xstrcat(extra, " && user = '' ");
 	}
 
-	setup_association_limits(assoc, &tmp_char1, &tmp_char2,
-				 &vals, QOS_LEVEL_MODIFY, 0);
+	setup_assoc_limits(assoc, &tmp_char1, &tmp_char2,
+			   &vals, QOS_LEVEL_MODIFY, 0);
 	xfree(tmp_char1);
 	xfree(tmp_char2);
 
-	if (!extra || (!vals && !assoc->parent_acct
-		       && (!assoc->qos_list || !list_count(assoc->qos_list)))) {
+	if (!extra || (!vals && !assoc->parent_acct)) {
 		xfree(vals);
+		xfree(extra);
 		errno = SLURM_NO_CHANGE_IN_DATA;
 		error("Nothing to change");
 		return NULL;
@@ -3154,7 +3050,7 @@ is_same_user:
 
 	itr = list_iterator_create(use_cluster_list);
 	while ((cluster_name = list_next(itr))) {
-		char *qos_extra = _setup_association_cond_qos(
+		char *qos_extra = _setup_assoc_cond_qos(
 			assoc_cond, cluster_name);
 
 		xstrfmtcat(query, "select distinct %s "
@@ -3170,7 +3066,7 @@ is_same_user:
 			xfree(query);
 			if (mysql_errno(mysql_conn->db_conn)
 			    != ER_NO_SUCH_TABLE) {
-				list_destroy(ret_list);
+				FREE_NULL_LIST(ret_list);
 				ret_list = NULL;
 			}
 			break;
@@ -3186,7 +3082,7 @@ is_same_user:
 		    || (rc == ESLURM_SAME_PARENT_ACCOUNT)) {
 			continue;
 		} else if (rc != SLURM_SUCCESS) {
-			list_destroy(ret_list);
+			FREE_NULL_LIST(ret_list);
 			ret_list = NULL;
 			break;
 		}
@@ -3214,7 +3110,7 @@ is_same_user:
 }
 
 extern List as_mysql_remove_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
-				   slurmdb_association_cond_t *assoc_cond)
+				   slurmdb_assoc_cond_t *assoc_cond)
 {
 	ListIterator itr = NULL;
 	List ret_list = NULL;
@@ -3254,7 +3150,7 @@ extern List as_mysql_remove_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 	    || assoc_cond->with_sub_accts)
 		prefix = "t2";
 
-	(void)_setup_association_cond_limits(assoc_cond, prefix, &extra);
+	(void)_setup_assoc_cond_limits(assoc_cond, prefix, &extra);
 
 	xstrcat(object, rassoc_req_inx[0]);
 	for(i=1; i<RASSOC_COUNT; i++)
@@ -3269,7 +3165,7 @@ extern List as_mysql_remove_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 
 	itr = list_iterator_create(use_cluster_list);
 	while ((cluster_name = list_next(itr))) {
-		char *qos_extra = _setup_association_cond_qos(
+		char *qos_extra = _setup_assoc_cond_qos(
 			assoc_cond, cluster_name);
 
 		query = xstrdup_printf("select distinct t1.lft, t1.rgt from "
@@ -3285,7 +3181,7 @@ extern List as_mysql_remove_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 			xfree(query);
 			if (mysql_errno(mysql_conn->db_conn)
 			    != ER_NO_SUCH_TABLE) {
-				list_destroy(ret_list);
+				FREE_NULL_LIST(ret_list);
 				ret_list = NULL;
 			}
 			break;
@@ -3319,7 +3215,7 @@ extern List as_mysql_remove_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 			      mysql_conn, query, 0))) {
 			xfree(query);
 			xfree(name_char);
-			list_destroy(ret_list);
+			FREE_NULL_LIST(ret_list);
 			ret_list = NULL;
 			break;
 		}
@@ -3333,7 +3229,7 @@ extern List as_mysql_remove_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 		mysql_free_result(result);
 
 		if (rc != SLURM_SUCCESS) {
-			list_destroy(ret_list);
+			FREE_NULL_LIST(ret_list);
 			ret_list = NULL;
 			break;
 		}
@@ -3362,7 +3258,7 @@ extern List as_mysql_remove_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
 }
 
 extern List as_mysql_get_assocs(mysql_conn_t *mysql_conn, uid_t uid,
-				slurmdb_association_cond_t *assoc_cond)
+				slurmdb_assoc_cond_t *assoc_cond)
 {
 	//DEF_TIMERS;
 	char *extra = NULL;
@@ -3409,7 +3305,7 @@ extern List as_mysql_get_assocs(mysql_conn_t *mysql_conn, uid_t uid,
 	    || assoc_cond->with_sub_accts)
 		prefix = "t2";
 
-	(void) _setup_association_cond_limits(assoc_cond, prefix, &extra);
+	(void) _setup_assoc_cond_limits(assoc_cond, prefix, &extra);
 
 	if (assoc_cond->cluster_list && list_count(assoc_cond->cluster_list))
 		use_cluster_list = assoc_cond->cluster_list;
@@ -3419,7 +3315,7 @@ empty:
 	for(i=1; i<ASSOC_REQ_COUNT; i++) {
 		xstrfmtcat(tmp, ", t1.%s", assoc_req_inx[i]);
 	}
-	assoc_list = list_create(slurmdb_destroy_association_rec);
+	assoc_list = list_create(slurmdb_destroy_assoc_rec);
 
 	if (use_cluster_list == as_mysql_cluster_list)
 		slurm_mutex_lock(&as_mysql_cluster_list_lock);
@@ -3430,7 +3326,7 @@ empty:
 					      cluster_name, tmp, extra,
 					      is_admin, assoc_list))
 		    != SLURM_SUCCESS) {
-			list_destroy(assoc_list);
+			FREE_NULL_LIST(assoc_list);
 			assoc_list = NULL;
 			break;
 		}
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_assoc.h b/src/plugins/accounting_storage/mysql/as_mysql_assoc.h
index efabb6130..b758fdbb8 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_assoc.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_assoc.h
@@ -47,16 +47,16 @@ extern int as_mysql_get_modified_lfts(mysql_conn_t *mysql_conn,
 
 extern int as_mysql_add_assocs(mysql_conn_t *mysql_conn,
 			       uint32_t uid,
-			       List association_list);
+			       List assoc_list);
 
 extern List as_mysql_modify_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
-				   slurmdb_association_cond_t *assoc_cond,
-				   slurmdb_association_rec_t *assoc);
+				   slurmdb_assoc_cond_t *assoc_cond,
+				   slurmdb_assoc_rec_t *assoc);
 
 extern List as_mysql_remove_assocs(mysql_conn_t *mysql_conn, uint32_t uid,
-				   slurmdb_association_cond_t *assoc_cond);
+				   slurmdb_assoc_cond_t *assoc_cond);
 
 extern List as_mysql_get_assocs(mysql_conn_t *mysql_conn, uid_t uid,
-				slurmdb_association_cond_t *assoc_cond);
+				slurmdb_assoc_cond_t *assoc_cond);
 
 #endif
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_cluster.c b/src/plugins/accounting_storage/mysql/as_mysql_cluster.c
index 3c4b255ec..f64ea144c 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_cluster.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_cluster.c
@@ -37,6 +37,7 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
+#include "as_mysql_tres.h"
 #include "as_mysql_assoc.h"
 #include "as_mysql_cluster.h"
 #include "as_mysql_usage.h"
@@ -129,12 +130,15 @@ extern int as_mysql_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 	int affect_rows = 0;
 	int added = 0;
 	List assoc_list = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return ESLURM_DB_CONNECTION;
 
-	assoc_list = list_create(slurmdb_destroy_association_rec);
+	if (!is_user_min_admin_level(mysql_conn, uid, SLURMDB_ADMIN_SUPER_USER))
+		return ESLURM_ACCESS_DENIED;
+
+	assoc_list = list_create(slurmdb_destroy_assoc_rec);
 
 	user_name = uid_to_string((uid_t) uid);
 	/* Since adding tables make it so you can't roll back, if
@@ -173,9 +177,9 @@ extern int as_mysql_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 		xstrfmtcat(vals, "%ld, %ld, 'root'", now, now);
 		xstrfmtcat(extra, ", mod_time=%ld", now);
 		if (object->root_assoc)
-			setup_association_limits(object->root_assoc, &cols,
-						 &vals, &extra,
-						 QOS_LEVEL_SET, 1);
+			setup_assoc_limits(object->root_assoc, &cols,
+					   &vals, &extra,
+					   QOS_LEVEL_SET, 1);
 		xstrfmtcat(query,
 			   "insert into %s (creation_time, mod_time, "
 			   "name, classification) "
@@ -279,8 +283,8 @@ extern int as_mysql_add_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 		 * association.  This gets popped off so we need to
 		 * read it every time here.
 		 */
-		assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-		slurmdb_init_association_rec(assoc, 0);
+		assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+		slurmdb_init_assoc_rec(assoc, 0);
 		list_append(assoc_list, assoc);
 
 		assoc->cluster = xstrdup(object->name);
@@ -298,7 +302,7 @@ end_it:
 	list_iterator_destroy(itr);
 	xfree(user_name);
 
-	list_destroy(assoc_list);
+	FREE_NULL_LIST(assoc_list);
 
 	if (!added)
 		reset_mysql_conn(mysql_conn);
@@ -323,7 +327,7 @@ extern List as_mysql_modify_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 	bool clust_reg = false;
 
 	/* If you need to alter the default values of the cluster use
-	 * modify_associations since this is used only for registering
+	 * modify_assocs since this is used only for registering
 	 * the controller when it loads
 	 */
 
@@ -335,6 +339,12 @@ extern List as_mysql_modify_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
 
+	if (!is_user_min_admin_level(mysql_conn, uid,
+				     SLURMDB_ADMIN_SUPER_USER)) {
+		errno = ESLURM_ACCESS_DENIED;
+		return NULL;
+	}
+
 	/* force to only do non-deleted clusters */
 	cluster_cond->with_deleted = 0;
 	_setup_cluster_cond_limits(cluster_cond, &extra);
@@ -450,7 +460,7 @@ extern List as_mysql_modify_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 		xfree(user_name);
 		if (rc == SLURM_ERROR) {
 			error("Couldn't modify cluster 1");
-			list_destroy(ret_list);
+			FREE_NULL_LIST(ret_list);
 			ret_list = NULL;
 			goto end_it;
 		}
@@ -489,6 +499,12 @@ extern List as_mysql_remove_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
 
+	if (!is_user_min_admin_level(
+		    mysql_conn, uid, SLURMDB_ADMIN_SUPER_USER)) {
+		errno = ESLURM_ACCESS_DENIED;
+		return NULL;
+	}
+
 	/* force to only do non-deleted clusters */
 	cluster_cond->with_deleted = 0;
 	_setup_cluster_cond_limits(cluster_cond, &extra);
@@ -555,7 +571,7 @@ extern List as_mysql_remove_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 	xfree(assoc_char);
 
 	if (rc != SLURM_SUCCESS) {
-		list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 		return NULL;
 	}
 	if (!jobs_running) {
@@ -565,7 +581,7 @@ extern List as_mysql_remove_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 		xfree(query);
 		if (rc != SLURM_SUCCESS) {
 			reset_mysql_conn(mysql_conn);
-			list_destroy(ret_list);
+			FREE_NULL_LIST(ret_list);
 			return NULL;
 		}
 
@@ -573,8 +589,7 @@ extern List as_mysql_remove_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 		memset(&wckey_cond, 0, sizeof(slurmdb_wckey_cond_t));
 		wckey_cond.cluster_list = ret_list;
 		tmp_list = as_mysql_remove_wckeys(mysql_conn, uid, &wckey_cond);
-		if (tmp_list)
-			list_destroy(tmp_list);
+		FREE_NULL_LIST(tmp_list);
 
 		itr = list_iterator_create(ret_list);
 		while ((object = list_next(itr))) {
@@ -591,7 +606,7 @@ extern List as_mysql_remove_clusters(mysql_conn_t *mysql_conn, uint32_t uid,
 
 		if (rc != SLURM_SUCCESS) {
 			reset_mysql_conn(mysql_conn);
-			list_destroy(ret_list);
+			FREE_NULL_LIST(ret_list);
 			errno = rc;
 			return NULL;
 		}
@@ -613,10 +628,10 @@ extern List as_mysql_get_clusters(mysql_conn_t *mysql_conn, uid_t uid,
 	int i=0;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
-	slurmdb_association_cond_t assoc_cond;
+	slurmdb_assoc_cond_t assoc_cond;
 	ListIterator assoc_itr = NULL;
 	slurmdb_cluster_rec_t *cluster = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	List assoc_list = NULL;
 
 	/* if this changes you will need to edit the corresponding enum */
@@ -628,7 +643,7 @@ extern List as_mysql_get_clusters(mysql_conn_t *mysql_conn, uid_t uid,
 		"rpc_version",
 		"dimensions",
 		"flags",
-		"plugin_id_select",
+		"plugin_id_select"
 	};
 	enum {
 		CLUSTER_REQ_NAME,
@@ -678,7 +693,7 @@ empty:
 
 	cluster_list = list_create(slurmdb_destroy_cluster_rec);
 
-	memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+	memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 
 	if (cluster_cond) {
 		/* I don't think we want the with_usage flag here.
@@ -691,7 +706,6 @@ empty:
 	while ((row = mysql_fetch_row(result))) {
 		MYSQL_RES *result2 = NULL;
 		MYSQL_ROW row2;
-
 		cluster = xmalloc(sizeof(slurmdb_cluster_rec_t));
 		list_append(cluster_list, cluster);
 
@@ -699,15 +713,6 @@ empty:
 
 		list_append(assoc_cond.cluster_list, cluster->name);
 
-		/* get the usage if requested */
-		if (cluster_cond && cluster_cond->with_usage) {
-			as_mysql_get_usage(
-				mysql_conn, uid, cluster,
-				DBD_GET_CLUSTER_USAGE,
-				cluster_cond->usage_start,
-				cluster_cond->usage_end);
-		}
-
 		cluster->classification = slurm_atoul(row[CLUSTER_REQ_CLASS]);
 		cluster->control_host = xstrdup(row[CLUSTER_REQ_CH]);
 		cluster->control_port = slurm_atoul(row[CLUSTER_REQ_CP]);
@@ -718,28 +723,37 @@ empty:
 			slurm_atoul(row[CLUSTER_REQ_PI_SELECT]);
 
 		query = xstrdup_printf(
-			"select cpu_count, cluster_nodes from "
+			"select tres, cluster_nodes from "
 			"\"%s_%s\" where time_end=0 and node_name='' limit 1",
 			cluster->name, event_table);
-		debug4("%d(%s:%d) query\n%s",
-		       mysql_conn->conn, THIS_FILE, __LINE__, query);
-		if (!(result2 = mysql_db_query_ret(
-			      mysql_conn, query, 0))) {
+		if (debug_flags & DEBUG_FLAG_DB_TRES)
+			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
+		if (!(result2 = mysql_db_query_ret(mysql_conn, query, 0))) {
 			xfree(query);
 			continue;
 		}
 		xfree(query);
 		if ((row2 = mysql_fetch_row(result2))) {
-			cluster->cpu_count = slurm_atoul(row2[0]);
+			cluster->tres_str = xstrdup(row2[0]);
 			if (row2[1] && row2[1][0])
 				cluster->nodes = xstrdup(row2[1]);
 		}
 		mysql_free_result(result2);
+
+		/* get the usage if requested */
+		if (cluster_cond && cluster_cond->with_usage) {
+			as_mysql_get_usage(
+				mysql_conn, uid, cluster,
+				DBD_GET_CLUSTER_USAGE,
+				cluster_cond->usage_start,
+				cluster_cond->usage_end);
+		}
+
 	}
 	mysql_free_result(result);
 
 	if (!list_count(assoc_cond.cluster_list)) {
-		list_destroy(assoc_cond.cluster_list);
+		FREE_NULL_LIST(assoc_cond.cluster_list);
 		return cluster_list;
 	}
 
@@ -750,9 +764,9 @@ empty:
 	list_append(assoc_cond.user_list, "");
 
 	assoc_list = as_mysql_get_assocs(mysql_conn, uid, &assoc_cond);
-	list_destroy(assoc_cond.cluster_list);
-	list_destroy(assoc_cond.acct_list);
-	list_destroy(assoc_cond.user_list);
+	FREE_NULL_LIST(assoc_cond.cluster_list);
+	FREE_NULL_LIST(assoc_cond.acct_list);
+	FREE_NULL_LIST(assoc_cond.user_list);
 
 	if (!assoc_list)
 		return cluster_list;
@@ -779,7 +793,7 @@ empty:
 	if (list_count(assoc_list))
 		error("I have %d left over associations",
 		      list_count(assoc_list));
-	list_destroy(assoc_list);
+	FREE_NULL_LIST(assoc_list);
 
 	return cluster_list;
 }
@@ -803,24 +817,24 @@ extern List as_mysql_get_cluster_events(mysql_conn_t *mysql_conn, uint32_t uid,
 	/* if this changes you will need to edit the corresponding enum */
 	char *event_req_inx[] = {
 		"cluster_nodes",
-		"cpu_count",
 		"node_name",
 		"state",
 		"time_start",
 		"time_end",
 		"reason",
 		"reason_uid",
+		"tres",
 	};
 
 	enum {
 		EVENT_REQ_CNODES,
-		EVENT_REQ_CPU,
 		EVENT_REQ_NODE,
 		EVENT_REQ_STATE,
 		EVENT_REQ_START,
 		EVENT_REQ_END,
 		EVENT_REQ_REASON,
 		EVENT_REQ_REASON_UID,
+		EVENT_REQ_TRES,
 		EVENT_REQ_COUNT
 	};
 
@@ -837,11 +851,11 @@ extern List as_mysql_get_cluster_events(mysql_conn_t *mysql_conn, uint32_t uid,
 			xstrcat(extra, " where (");
 
 		if (event_cond->cpus_max) {
-			xstrfmtcat(extra, "cpu_count between %u and %u)",
+			xstrfmtcat(extra, "count between %u and %u)",
 				   event_cond->cpus_min, event_cond->cpus_max);
 
 		} else {
-			xstrfmtcat(extra, "cpu_count='%u')",
+			xstrfmtcat(extra, "count='%u')",
 				   event_cond->cpus_min);
 
 		}
@@ -986,7 +1000,7 @@ empty:
 			xfree(query);
 			if (mysql_errno(mysql_conn->db_conn)
 			    != ER_NO_SUCH_TABLE) {
-				list_destroy(ret_list);
+				FREE_NULL_LIST(ret_list);
 				ret_list = NULL;
 			}
 			break;
@@ -1007,7 +1021,6 @@ empty:
 			} else
 				event->event_type = SLURMDB_EVENT_CLUSTER;
 
-			event->cpu_count = slurm_atoul(row[EVENT_REQ_CPU]);
 			event->state = slurm_atoul(row[EVENT_REQ_STATE]);
 			event->period_start = slurm_atoul(row[EVENT_REQ_START]);
 			event->period_end = slurm_atoul(row[EVENT_REQ_END]);
@@ -1020,6 +1033,9 @@ empty:
 			if (row[EVENT_REQ_CNODES] && row[EVENT_REQ_CNODES][0])
 				event->cluster_nodes =
 					xstrdup(row[EVENT_REQ_CNODES]);
+
+			if (row[EVENT_REQ_TRES] && row[EVENT_REQ_TRES][0])
+				event->tres_str = xstrdup(row[EVENT_REQ_TRES]);
 		}
 		mysql_free_result(result);
 	}
@@ -1038,7 +1054,6 @@ extern int as_mysql_node_down(mysql_conn_t *mysql_conn,
 			      time_t event_time, char *reason,
 			      uint32_t reason_uid)
 {
-	uint16_t cpus;
 	int rc = SLURM_SUCCESS;
 	char *query = NULL;
 	char *my_reason;
@@ -1058,10 +1073,10 @@ extern int as_mysql_node_down(mysql_conn_t *mysql_conn,
 		return SLURM_ERROR;
 	}
 
-	if (slurmctld_conf.fast_schedule && !slurmdbd_conf)
-		cpus = node_ptr->config_ptr->cpus;
-	else
-		cpus = node_ptr->cpus;
+	if (!node_ptr->tres_str) {
+		error("node ptr has no tres_list!");
+		return SLURM_ERROR;
+	}
 
 	query = xstrdup_printf("select state, reason from \"%s_%s\" where "
 			       "time_end=0 and node_name='%s';",
@@ -1093,8 +1108,8 @@ extern int as_mysql_node_down(mysql_conn_t *mysql_conn,
 	}
 	mysql_free_result(result);
 
-	debug2("inserting %s(%s) with %u cpus",
-	       node_ptr->name, mysql_conn->cluster_name, cpus);
+	debug2("inserting %s(%s) with tres of '%s'",
+	       node_ptr->name, mysql_conn->cluster_name, node_ptr->tres_str);
 
 	query = xstrdup_printf(
 		"update \"%s_%s\" set time_end=%ld where "
@@ -1111,14 +1126,14 @@ extern int as_mysql_node_down(mysql_conn_t *mysql_conn,
 	 */
 	xstrfmtcat(query,
 		   "insert into \"%s_%s\" "
-		   "(node_name, state, cpu_count, time_start, "
+		   "(node_name, state, tres, time_start, "
 		   "reason, reason_uid) "
-		   "values ('%s', %u, %u, %ld, '%s', %u) "
+		   "values ('%s', %u, '%s', %ld, '%s', %u) "
 		   "on duplicate key update time_end=0;",
 		   mysql_conn->cluster_name, event_table,
 		   node_ptr->name, node_ptr->node_state,
-		   cpus, event_time, my_reason, reason_uid);
-	debug4("%d(%s:%d) query\n%s",
+		   node_ptr->tres_str, event_time, my_reason, reason_uid);
+	debug2("%d(%s:%d) query\n%s",
 	       mysql_conn->conn, THIS_FILE, __LINE__, query);
 	rc = mysql_db_query(mysql_conn, query);
 	xfree(query);
@@ -1219,6 +1234,7 @@ extern int as_mysql_fini_ctld(mysql_conn_t *mysql_conn,
 	int rc = SLURM_SUCCESS;
 	time_t now = time(NULL);
 	char *query = NULL;
+	bool free_it = false;
 
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return ESLURM_DB_CONNECTION;
@@ -1246,20 +1262,22 @@ extern int as_mysql_fini_ctld(mysql_conn_t *mysql_conn,
 	    || (slurmdbd_conf && !slurmdbd_conf->track_ctld))
 		return rc;
 
-	/* If cpus is 0 we can get the current number of cpus by
-	   sending 0 for the cpus param in the as_mysql_cluster_cpus
+	/* If tres is NULL we can get the current number of tres by
+	   sending NULL for the tres param in the as_mysql_cluster_tres
 	   function.
 	*/
-	if (!cluster_rec->cpu_count) {
-		cluster_rec->cpu_count = as_mysql_cluster_cpus(
-			mysql_conn, cluster_rec->control_host, 0, now);
+	if (!cluster_rec->tres_str) {
+		free_it = true;
+		as_mysql_cluster_tres(
+			mysql_conn, cluster_rec->control_host,
+			&cluster_rec->tres_str, now);
 	}
 
-	/* Since as_mysql_cluster_cpus could change the
+	/* Since as_mysql_cluster_tres could change the
 	   last_affected_rows we can't group this with the above
 	   return.
 	*/
-	if (!cluster_rec->cpu_count)
+	if (!cluster_rec->tres_str)
 		return rc;
 
 	/* If we affected things we need to now drain the nodes in the
@@ -1270,11 +1288,14 @@ extern int as_mysql_fini_ctld(mysql_conn_t *mysql_conn,
 	 * info.
 	 */
 	query = xstrdup_printf(
-		"insert into \"%s_%s\" (cpu_count, state, "
-		"time_start, reason) "
-		"values ('%u', %u, %ld, 'slurmctld disconnect')",
+		"insert into \"%s_%s\" (tres, state, time_start, reason) "
+		"values ('%s', %u, %ld, 'slurmctld disconnect');",
 		cluster_rec->name, event_table,
-		cluster_rec->cpu_count, NODE_STATE_DOWN, (long)now);
+		cluster_rec->tres_str, NODE_STATE_DOWN, (long)now);
+
+	if (free_it)
+		xfree(cluster_rec->tres_str);
+
 	if (debug_flags & DEBUG_FLAG_DB_EVENT)
 		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
 	rc = mysql_db_query(mysql_conn, query);
@@ -1283,8 +1304,8 @@ extern int as_mysql_fini_ctld(mysql_conn_t *mysql_conn,
 	return rc;
 }
 
-extern int as_mysql_cluster_cpus(mysql_conn_t *mysql_conn,
-				 char *cluster_nodes, uint32_t cpus,
+extern int as_mysql_cluster_tres(mysql_conn_t *mysql_conn,
+				 char *cluster_nodes, char **tres_str_in,
 				 time_t event_time)
 {
 	char* query;
@@ -1293,6 +1314,8 @@ extern int as_mysql_cluster_cpus(mysql_conn_t *mysql_conn,
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 
+	xassert(tres_str_in);
+
  	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return ESLURM_DB_CONNECTION;
 
@@ -1303,7 +1326,7 @@ extern int as_mysql_cluster_cpus(mysql_conn_t *mysql_conn,
 
 	/* Record the processor count */
 	query = xstrdup_printf(
-		"select cpu_count, cluster_nodes from \"%s_%s\" where "
+		"select tres, cluster_nodes from \"%s_%s\" where "
 		"time_end=0 and node_name='' and state=0 limit 1",
 		mysql_conn->cluster_name, event_table);
 	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
@@ -1330,7 +1353,7 @@ extern int as_mysql_cluster_cpus(mysql_conn_t *mysql_conn,
 		 * may not be up when we run this in the controller or
 		 * in the slurmdbd.
 		 */
-		if (!cpus) {
+		if (!*tres_str_in) {
 			rc = 0;
 			goto end_it;
 		}
@@ -1339,18 +1362,21 @@ extern int as_mysql_cluster_cpus(mysql_conn_t *mysql_conn,
 		goto add_it;
 	}
 
-	/* If cpus is 0 we want to return the cpu count for this cluster */
-	if (!cpus) {
-		rc = atoi(row[0]);
+	/* If tres is NULL we want to return the tres for this cluster */
+	if (!*tres_str_in) {
+		*tres_str_in = xstrdup(row[0]);
 		goto end_it;
-	}
-
-	if (slurm_atoul(row[0]) == cpus) {
+	} else if (xstrcmp(*tres_str_in, row[0])) {
+		debug("%s has changed tres from %s to %s",
+		      mysql_conn->cluster_name,
+		      row[0], *tres_str_in);
+	} else {
 		if (debug_flags & DEBUG_FLAG_DB_EVENT)
 			DB_DEBUG(mysql_conn->conn,
-				 "we have the same cpu count as before for %s, "
+				 "We have the same tres as before for %s, "
 				 "no need to update the database.",
 				 mysql_conn->cluster_name);
+
 		if (cluster_nodes) {
 			if (!row[1][0]) {
 				debug("Adding cluster nodes '%s' to "
@@ -1374,14 +1400,12 @@ extern int as_mysql_cluster_cpus(mysql_conn_t *mysql_conn,
 						 "update the database.");
 				goto update_it;
 			}
-		} else
-			goto end_it;
-	} else {
-		debug("%s has changed from %s cpus to %u",
-		      mysql_conn->cluster_name, row[0], cpus);
+		}
+
+		goto end_it;
 	}
 
-	/* reset all the entries for this cluster since the cpus
+	/* reset all the entries for this cluster since the tres
 	   changed some of the downed nodes may have gone away.
 	   Request them again with ACCOUNTING_FIRST_REG */
 	query = xstrdup_printf(
@@ -1394,11 +1418,11 @@ extern int as_mysql_cluster_cpus(mysql_conn_t *mysql_conn,
 		goto end_it;
 add_it:
 	query = xstrdup_printf(
-		"insert into \"%s_%s\" (cluster_nodes, cpu_count, "
+		"insert into \"%s_%s\" (cluster_nodes, tres, "
 		"time_start, reason) "
-		"values ('%s', %u, %ld, 'Cluster processor count')",
+		"values ('%s', '%s', %ld, 'Cluster Registered TRES');",
 		mysql_conn->cluster_name, event_table,
-		cluster_nodes, cpus, event_time);
+		cluster_nodes, *tres_str_in, event_time);
 	(void) mysql_db_query(mysql_conn, query);
 	xfree(query);
 update_it:
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_cluster.h b/src/plugins/accounting_storage/mysql/as_mysql_cluster.h
index 8ed6eca43..2485e1462 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_cluster.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_cluster.h
@@ -72,7 +72,8 @@ extern int as_mysql_register_ctld(mysql_conn_t *mysql_conn,
 extern int as_mysql_fini_ctld(mysql_conn_t *mysql_conn,
 			      slurmdb_cluster_rec_t *cluster_rec);
 
-extern int as_mysql_cluster_cpus(mysql_conn_t *mysql_conn,
-				 char *cluster_nodes, uint32_t cpus,
+extern int as_mysql_cluster_tres(mysql_conn_t *mysql_conn,
+				 char *cluster_nodes, char **tres_str_in,
 				 time_t event_time);
+
 #endif
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_convert.c b/src/plugins/accounting_storage/mysql/as_mysql_convert.c
new file mode 100644
index 000000000..2d64476f7
--- /dev/null
+++ b/src/plugins/accounting_storage/mysql/as_mysql_convert.c
@@ -0,0 +1,953 @@
+/*****************************************************************************\
+ *  as_mysql_convert.c - functions dealing with converting from tables in
+ *                    slurm <= 14.11.
+ *****************************************************************************
+ *
+ *  Copyright (C) 2015 SchedMD LLC.
+ *  Written by Danny Auble <da@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "as_mysql_convert.h"
+
+static int _rename_usage_columns(mysql_conn_t *mysql_conn, char *table)
+{
+	MYSQL_ROW row;
+	MYSQL_RES *result = NULL;
+	char *query = NULL;
+	int rc = SLURM_SUCCESS;
+
+	query = xstrdup_printf(
+		"show columns from %s where field like '%%cpu_%%' "
+		"|| field like 'id_assoc' || field like 'id_wckey';",
+		table);
+
+	debug4("(%s:%d) query\n%s", THIS_FILE, __LINE__, query);
+	result = mysql_db_query_ret(mysql_conn, query, 0);
+	xfree(query);
+
+	if (!result)
+		return SLURM_ERROR;
+
+	while ((row = mysql_fetch_row(result))) {
+		char *new_char = xstrdup(row[0]);
+		xstrsubstitute(new_char, "cpu_", "");
+		xstrsubstitute(new_char, "_assoc", "");
+		xstrsubstitute(new_char, "_wckey", "");
+
+		if (!query)
+			query = xstrdup_printf("alter table %s ", table);
+		else
+			xstrcat(query, ", ");
+
+		if (!strcmp("id", new_char))
+			xstrfmtcat(query, "change %s %s int unsigned not null",
+				   row[0], new_char);
+		else
+			xstrfmtcat(query,
+				   "change %s %s bigint unsigned default "
+				   "0 not null",
+				   row[0], new_char);
+		xfree(new_char);
+	}
+	mysql_free_result(result);
+
+	if (query) {
+		debug4("(%s:%d) query\n%s", THIS_FILE, __LINE__, query);
+		if ((rc = mysql_db_query(mysql_conn, query)) != SLURM_SUCCESS)
+			error("Can't update %s %m", table);
+		xfree(query);
+	}
+
+	return rc;
+}
+
+static int _update_old_cluster_tables(mysql_conn_t *mysql_conn,
+				      char *cluster_name,
+				      char *count_col_name)
+{
+	/* These tables are the 14_11 defs plus things we added in 15.08 */
+	storage_field_t assoc_usage_table_fields_14_11[] = {
+		{ "creation_time", "int unsigned not null" },
+		{ "mod_time", "int unsigned default 0 not null" },
+		{ "deleted", "tinyint default 0 not null" },
+		{ "id_assoc", "int not null" },
+		{ "id_tres", "int default 1 not null" },
+		{ "time_start", "int unsigned not null" },
+		{ "alloc_cpu_secs", "bigint default 0 not null" },
+		{ "consumed_energy", "bigint unsigned default 0 not null" },
+		{ NULL, NULL}
+	};
+
+	storage_field_t cluster_usage_table_fields_14_11[] = {
+		{ "creation_time", "int unsigned not null" },
+		{ "mod_time", "int unsigned default 0 not null" },
+		{ "deleted", "tinyint default 0 not null" },
+		{ "id_tres", "int default 1 not null" },
+		{ "time_start", "int unsigned not null" },
+		{ count_col_name, "int default 0 not null" },
+		{ "alloc_cpu_secs", "bigint default 0 not null" },
+		{ "down_cpu_secs", "bigint default 0 not null" },
+		{ "pdown_cpu_secs", "bigint default 0 not null" },
+		{ "idle_cpu_secs", "bigint default 0 not null" },
+		{ "resv_cpu_secs", "bigint default 0 not null" },
+		{ "over_cpu_secs", "bigint default 0 not null" },
+		{ "consumed_energy", "bigint unsigned default 0 not null" },
+		{ NULL, NULL}
+	};
+
+	storage_field_t event_table_fields_14_11[] = {
+		{ "time_start", "int unsigned not null" },
+		{ "time_end", "int unsigned default 0 not null" },
+		{ "node_name", "tinytext default '' not null" },
+		{ "cluster_nodes", "text not null default ''" },
+		{ count_col_name, "int not null" },
+		{ "reason", "tinytext not null" },
+		{ "reason_uid", "int unsigned default 0xfffffffe not null" },
+		{ "state", "smallint unsigned default 0 not null" },
+		{ "tres", "text not null default ''" },
+		{ NULL, NULL}
+	};
+
+	storage_field_t job_table_fields_14_11[] = {
+		{ "job_db_inx", "int not null auto_increment" },
+		{ "mod_time", "int unsigned default 0 not null" },
+		{ "deleted", "tinyint default 0 not null" },
+		{ "account", "tinytext" },
+		{ "array_task_str", "text" },
+		{ "array_max_tasks", "int unsigned default 0 not null" },
+		{ "array_task_pending", "int unsigned default 0 not null" },
+		{ "cpus_req", "int unsigned not null" },
+		{ "cpus_alloc", "int unsigned not null" },
+		{ "derived_ec", "int unsigned default 0 not null" },
+		{ "derived_es", "text" },
+		{ "exit_code", "int unsigned default 0 not null" },
+		{ "job_name", "tinytext not null" },
+		{ "id_assoc", "int unsigned not null" },
+		{ "id_array_job", "int unsigned default 0 not null" },
+		{ "id_array_task", "int unsigned default 0xfffffffe not null" },
+		{ "id_block", "tinytext" },
+		{ "id_job", "int unsigned not null" },
+		{ "id_qos", "int unsigned default 0 not null" },
+		{ "id_resv", "int unsigned not null" },
+		{ "id_wckey", "int unsigned not null" },
+		{ "id_user", "int unsigned not null" },
+		{ "id_group", "int unsigned not null" },
+		{ "kill_requid", "int default -1 not null" },
+		{ "mem_req", "int unsigned default 0 not null" },
+		{ "nodelist", "text" },
+		{ "nodes_alloc", "int unsigned not null" },
+		{ "node_inx", "text" },
+		{ "partition", "tinytext not null" },
+		{ "priority", "int unsigned not null" },
+		{ "state", "smallint unsigned not null" },
+		{ "timelimit", "int unsigned default 0 not null" },
+		{ "time_submit", "int unsigned default 0 not null" },
+		{ "time_eligible", "int unsigned default 0 not null" },
+		{ "time_start", "int unsigned default 0 not null" },
+		{ "time_end", "int unsigned default 0 not null" },
+		{ "time_suspended", "int unsigned default 0 not null" },
+		{ "gres_req", "text not null default ''" },
+		{ "gres_alloc", "text not null default ''" },
+		{ "gres_used", "text not null default ''" },
+		{ "wckey", "tinytext not null default ''" },
+		{ "track_steps", "tinyint not null" },
+		{ "tres_alloc", "text not null default ''" },
+		{ NULL, NULL}
+	};
+
+	storage_field_t resv_table_fields_14_11[] = {
+		{ "id_resv", "int unsigned default 0 not null" },
+		{ "deleted", "tinyint default 0 not null" },
+		{ "assoclist", "text not null default ''" },
+		{ "cpus", "int unsigned not null" },
+		{ "flags", "smallint unsigned default 0 not null" },
+		{ "nodelist", "text not null default ''" },
+		{ "node_inx", "text not null default ''" },
+		{ "resv_name", "text not null" },
+		{ "time_start", "int unsigned default 0 not null"},
+		{ "time_end", "int unsigned default 0 not null" },
+		{ "tres", "text not null default ''" },
+		{ NULL, NULL}
+	};
+
+	storage_field_t step_table_fields_14_11[] = {
+		{ "job_db_inx", "int not null" },
+		{ "deleted", "tinyint default 0 not null" },
+		{ "cpus_alloc", "int unsigned not null" },
+		{ "exit_code", "int default 0 not null" },
+		{ "id_step", "int not null" },
+		{ "kill_requid", "int default -1 not null" },
+		{ "nodelist", "text not null" },
+		{ "nodes_alloc", "int unsigned not null" },
+		{ "node_inx", "text" },
+		{ "state", "smallint unsigned not null" },
+		{ "step_name", "text not null" },
+		{ "task_cnt", "int unsigned not null" },
+		{ "task_dist", "smallint default 0 not null" },
+		{ "time_start", "int unsigned default 0 not null" },
+		{ "time_end", "int unsigned default 0 not null" },
+		{ "time_suspended", "int unsigned default 0 not null" },
+		{ "user_sec", "int unsigned default 0 not null" },
+		{ "user_usec", "int unsigned default 0 not null" },
+		{ "sys_sec", "int unsigned default 0 not null" },
+		{ "sys_usec", "int unsigned default 0 not null" },
+		{ "max_pages", "int unsigned default 0 not null" },
+		{ "max_pages_task", "int unsigned default 0 not null" },
+		{ "max_pages_node", "int unsigned default 0 not null" },
+		{ "ave_pages", "double unsigned default 0.0 not null" },
+		{ "max_rss", "bigint unsigned default 0 not null" },
+		{ "max_rss_task", "int unsigned default 0 not null" },
+		{ "max_rss_node", "int unsigned default 0 not null" },
+		{ "ave_rss", "double unsigned default 0.0 not null" },
+		{ "max_vsize", "bigint unsigned default 0 not null" },
+		{ "max_vsize_task", "int unsigned default 0 not null" },
+		{ "max_vsize_node", "int unsigned default 0 not null" },
+		{ "ave_vsize", "double unsigned default 0.0 not null" },
+		{ "min_cpu", "int unsigned default 0xfffffffe not null" },
+		{ "min_cpu_task", "int unsigned default 0 not null" },
+		{ "min_cpu_node", "int unsigned default 0 not null" },
+		{ "ave_cpu", "double unsigned default 0.0 not null" },
+		{ "act_cpufreq", "double unsigned default 0.0 not null" },
+		{ "consumed_energy", "double unsigned default 0.0 not null" },
+		{ "req_cpufreq", "int unsigned default 0 not null" },
+		{ "max_disk_read", "double unsigned default 0.0 not null" },
+		{ "max_disk_read_task", "int unsigned default 0 not null" },
+		{ "max_disk_read_node", "int unsigned default 0 not null" },
+		{ "ave_disk_read", "double unsigned default 0.0 not null" },
+		{ "max_disk_write", "double unsigned default 0.0 not null" },
+		{ "max_disk_write_task", "int unsigned default 0 not null" },
+		{ "max_disk_write_node", "int unsigned default 0 not null" },
+		{ "ave_disk_write", "double unsigned default 0.0 not null" },
+		{ "tres_alloc", "text not null default ''" },
+		{ NULL, NULL}
+	};
+
+	storage_field_t wckey_usage_table_fields_14_11[] = {
+		{ "creation_time", "int unsigned not null" },
+		{ "mod_time", "int unsigned default 0 not null" },
+		{ "deleted", "tinyint default 0 not null" },
+		{ "id_wckey", "int not null" },
+		{ "id_tres", "int default 1 not null" },
+		{ "time_start", "int unsigned not null" },
+		{ "alloc_cpu_secs", "bigint default 0" },
+		{ "resv_cpu_secs", "bigint default 0" },
+		{ "over_cpu_secs", "bigint default 0" },
+		{ "consumed_energy", "bigint unsigned default 0 not null" },
+		{ NULL, NULL}
+	};
+
+	char table_name[200];
+
+	xassert(cluster_name);
+	xassert(count_col_name);
+
+	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
+		 cluster_name, assoc_day_table);
+	if (mysql_db_create_table(mysql_conn, table_name,
+				  assoc_usage_table_fields_14_11,
+				  ", primary key (id_assoc, "
+				  "id_tres, time_start))")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
+		 cluster_name, assoc_hour_table);
+	if (mysql_db_create_table(mysql_conn, table_name,
+				  assoc_usage_table_fields_14_11,
+				  ", primary key (id_assoc, "
+				  "id_tres, time_start))")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
+		 cluster_name, assoc_month_table);
+	if (mysql_db_create_table(mysql_conn, table_name,
+				  assoc_usage_table_fields_14_11,
+				  ", primary key (id_assoc, "
+				  "id_tres, time_start))")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
+		 cluster_name, cluster_day_table);
+	if (mysql_db_create_table(mysql_conn, table_name,
+				  cluster_usage_table_fields_14_11,
+				  ", primary key (id_tres, time_start))")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
+		 cluster_name, cluster_hour_table);
+	if (mysql_db_create_table(mysql_conn, table_name,
+				  cluster_usage_table_fields_14_11,
+				  ", primary key (id_tres, time_start))")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
+		 cluster_name, cluster_month_table);
+	if (mysql_db_create_table(mysql_conn, table_name,
+				  cluster_usage_table_fields_14_11,
+				  ", primary key (id_tres, time_start))")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
+		 cluster_name, event_table);
+	if (mysql_db_create_table(mysql_conn, table_name,
+				  event_table_fields_14_11,
+				  ", primary key (node_name(20), time_start))")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
+		 cluster_name, job_table);
+	if (mysql_db_create_table(mysql_conn, table_name,
+				  job_table_fields_14_11,
+				  ", primary key (job_db_inx), "
+				  "unique index (id_job, "
+				  "id_assoc, time_submit), "
+				  "key rollup (time_eligible, time_end), "
+				  "key wckey (id_wckey), "
+				  "key qos (id_qos), "
+				  "key association (id_assoc), "
+				  "key array_job (id_array_job), "
+				  "key reserv (id_resv), "
+				  "key sacct_def (id_user, time_start, "
+				  "time_end))")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
+		 cluster_name, resv_table);
+	if (mysql_db_create_table(mysql_conn, table_name,
+				  resv_table_fields_14_11,
+				  ", primary key (id_resv, time_start))")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
+		 cluster_name, step_table);
+	if (mysql_db_create_table(mysql_conn, table_name,
+				  step_table_fields_14_11,
+				  ", primary key (job_db_inx, id_step))")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
+		 cluster_name, wckey_day_table);
+	if (mysql_db_create_table(mysql_conn, table_name,
+				  wckey_usage_table_fields_14_11,
+				  ", primary key (id_wckey, "
+				  "id_tres, time_start))")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
+		 cluster_name, wckey_hour_table);
+	if (mysql_db_create_table(mysql_conn, table_name,
+				  wckey_usage_table_fields_14_11,
+				  ", primary key (id_wckey, "
+				  "id_tres, time_start))")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
+		 cluster_name, wckey_month_table);
+	if (mysql_db_create_table(mysql_conn, table_name,
+				  wckey_usage_table_fields_14_11,
+				  ", primary key (id_wckey, "
+				  "id_tres, time_start))")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	return SLURM_SUCCESS;
+}
+
+static int _update2_old_cluster_tables(mysql_conn_t *mysql_conn,
+				       char *cluster_name)
+{
+	/* These tables are the 14_11 defs plus things we added in 15.08 */
+
+	storage_field_t assoc_table_fields_14_11[] = {
+		{ "creation_time", "int unsigned not null" },
+		{ "mod_time", "int unsigned default 0 not null" },
+		{ "deleted", "tinyint default 0 not null" },
+		{ "is_def", "tinyint default 0 not null" },
+		{ "id_assoc", "int not null auto_increment" },
+		{ "user", "tinytext not null default ''" },
+		{ "acct", "tinytext not null" },
+		{ "partition", "tinytext not null default ''" },
+		{ "parent_acct", "tinytext not null default ''" },
+		{ "lft", "int not null" },
+		{ "rgt", "int not null" },
+		{ "shares", "int default 1 not null" },
+		{ "max_jobs", "int default NULL" },
+		{ "max_submit_jobs", "int default NULL" },
+		{ "max_cpus_pj", "int default NULL" },
+		{ "max_nodes_pj", "int default NULL" },
+		{ "max_tres_pj", "text not null default ''" },
+		{ "max_tres_mins_pj", "text not null default ''" },
+		{ "max_tres_run_mins", "text not null default ''" },
+		{ "max_wall_pj", "int default NULL" },
+		{ "max_cpu_mins_pj", "bigint default NULL" },
+		{ "max_cpu_run_mins", "bigint default NULL" },
+		{ "grp_jobs", "int default NULL" },
+		{ "grp_submit_jobs", "int default NULL" },
+		{ "grp_cpus", "int default NULL" },
+		{ "grp_mem", "int default NULL" },
+		{ "grp_nodes", "int default NULL" },
+		{ "grp_tres", "text not null default ''" },
+		{ "grp_tres_mins", "text not null default ''" },
+		{ "grp_tres_run_mins", "text not null default ''" },
+		{ "grp_wall", "int default NULL" },
+		{ "grp_cpu_mins", "bigint default NULL" },
+		{ "grp_cpu_run_mins", "bigint default NULL" },
+		{ "def_qos_id", "int default NULL" },
+		{ "qos", "blob not null default ''" },
+		{ "delta_qos", "blob not null default ''" },
+		{ NULL, NULL}
+	};
+
+	char table_name[200];
+
+	snprintf(table_name, sizeof(table_name), "\"%s_%s\"",
+		 cluster_name, assoc_table);
+	if (mysql_db_create_table(mysql_conn, table_name,
+				  assoc_table_fields_14_11,
+				  ", primary key (id_assoc), "
+				  "unique index (user(20), acct(20), "
+				  "`partition`(20)), "
+				  "key lft (lft), key account (acct(20)))")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	return SLURM_SUCCESS;
+}
+
+static int _update2_old_tables(mysql_conn_t *mysql_conn)
+{
+	/* These tables are the 14_11 defs plus things we added in 15.08 */
+	storage_field_t qos_table_fields_14_11[] = {
+		{ "creation_time", "int unsigned not null" },
+		{ "mod_time", "int unsigned default 0 not null" },
+		{ "deleted", "tinyint default 0" },
+		{ "id", "int not null auto_increment" },
+		{ "name", "tinytext not null" },
+		{ "description", "text" },
+		{ "flags", "int unsigned default 0" },
+		{ "grace_time", "int unsigned default NULL" },
+		{ "max_jobs_per_user", "int default NULL" },
+		{ "max_submit_jobs_per_user", "int default NULL" },
+		{ "max_tres_pj", "text not null default ''" },
+		{ "max_tres_pu", "text not null default ''" },
+		{ "max_tres_mins_pj", "text not null default ''" },
+		{ "max_tres_run_mins_pu", "text not null default ''" },
+		{ "min_tres_pj", "text not null default ''" },
+		{ "max_cpus_per_job", "int default NULL" },
+		{ "max_cpus_per_user", "int default NULL" },
+		{ "max_nodes_per_job", "int default NULL" },
+		{ "max_nodes_per_user", "int default NULL" },
+		{ "max_wall_duration_per_job", "int default NULL" },
+		{ "max_cpu_mins_per_job", "bigint default NULL" },
+		{ "max_cpu_run_mins_per_user", "bigint default NULL" },
+		{ "grp_jobs", "int default NULL" },
+		{ "grp_submit_jobs", "int default NULL" },
+		{ "grp_tres", "text not null default ''" },
+		{ "grp_tres_mins", "text not null default ''" },
+		{ "grp_tres_run_mins", "text not null default ''" },
+		{ "grp_cpus", "int default NULL" },
+		{ "grp_mem", "int default NULL" },
+		{ "grp_nodes", "int default NULL" },
+		{ "grp_wall", "int default NULL" },
+		{ "grp_cpu_mins", "bigint default NULL" },
+		{ "grp_cpu_run_mins", "bigint default NULL" },
+		{ "preempt", "text not null default ''" },
+		{ "preempt_mode", "int default 0" },
+		{ "priority", "int unsigned default 0" },
+		{ "usage_factor", "double default 1.0 not null" },
+		{ "usage_thres", "double default NULL" },
+		{ "min_cpus_per_job", "int unsigned default 1 not null" },
+		{ NULL, NULL}
+	};
+
+	if (mysql_db_create_table(mysql_conn, qos_table,
+				  qos_table_fields_14_11,
+				  ", primary key (id), "
+				  "unique index (name(20)))")
+	    == SLURM_ERROR)
+		return SLURM_ERROR;
+
+	return SLURM_SUCCESS;
+}
+
+static int _convert_assoc_table(mysql_conn_t *mysql_conn, char *cluster_name)
+{
+	char *query = NULL;
+	int rc;
+
+	query = xstrdup_printf(
+		"update \"%s_%s\" set grp_tres=concat_ws(',', "
+		"concat('%d=', grp_cpus), concat('%d=', grp_mem), "
+		"concat('%d=', grp_nodes)), "
+		"grp_tres_mins=concat_ws(',', concat('%d=', grp_cpu_mins)), "
+		"grp_tres_run_mins=concat_ws(',', "
+		"concat('%d=', grp_cpu_run_mins)), "
+		"max_tres_pj=concat_ws(',', concat('%d=', max_cpus_pj), "
+		"concat('%d=', max_nodes_pj)), "
+		"max_tres_mins_pj=concat_ws(',', "
+		"concat('%d=', max_cpu_mins_pj)), "
+		"max_tres_run_mins=concat_ws(',', "
+		"concat('%d=', max_cpu_run_mins)); ",
+		cluster_name, assoc_table,
+		TRES_CPU, TRES_MEM, TRES_NODE, TRES_CPU, TRES_CPU,
+		TRES_CPU, TRES_NODE, TRES_CPU, TRES_CPU);
+	debug4("(%s:%d) query\n%s", THIS_FILE, __LINE__, query);
+	if ((rc = mysql_db_query(mysql_conn, query)) != SLURM_SUCCESS)
+		error("Can't convert assoc_table for %s: %m", cluster_name);
+	xfree(query);
+
+	return rc;
+}
+
+/*
+ * _convert_qos_table - fold the pre-15.08 per-resource QOS limit columns
+ * (grp_cpus/grp_mem/grp_nodes, max/min *_per_job, max *_per_user, ...)
+ * into the new TRES string columns as "<tres_id>=<old value>" pairs.
+ * The QOS table is global (not per cluster), so no cluster prefix is
+ * needed.  NULL legacy columns are dropped by concat()/concat_ws().
+ *
+ * IN mysql_conn - open connection to the accounting database
+ * RET SLURM_SUCCESS, or the mysql_db_query() error code on failure
+ */
+static int _convert_qos_table(mysql_conn_t *mysql_conn)
+{
+	char *query = NULL;
+	int rc;
+
+	query = xstrdup_printf(
+		"update %s set grp_tres=concat_ws(',', "
+		"concat('%d=', grp_cpus), concat('%d=', grp_mem), "
+		"concat('%d=', grp_nodes)), "
+		"grp_tres_mins=concat_ws(',', concat('%d=', grp_cpu_mins)), "
+		"grp_tres_run_mins=concat_ws(',', "
+		"concat('%d=', grp_cpu_run_mins)), "
+		"max_tres_pj=concat_ws(',', concat('%d=', max_cpus_per_job), "
+		"concat('%d=', max_nodes_per_job)), "
+		"max_tres_pu=concat_ws(',', concat('%d=', max_cpus_per_user), "
+		"concat('%d=', max_nodes_per_user)), "
+		"min_tres_pj=concat_ws(',', concat('%d=', min_cpus_per_job)), "
+		"max_tres_mins_pj=concat_ws(',', "
+		"concat('%d=', max_cpu_mins_per_job)), "
+		"max_tres_run_mins_pu=concat_ws(',', "
+		"concat('%d=', max_cpu_run_mins_per_user)); ",
+		qos_table,
+		TRES_CPU, TRES_MEM, TRES_NODE, TRES_CPU, TRES_CPU,
+		TRES_CPU, TRES_NODE, TRES_CPU, TRES_NODE, TRES_CPU,
+		TRES_CPU, TRES_CPU),
+	debug4("(%s:%d) query\n%s", THIS_FILE, __LINE__, query);
+	if ((rc = mysql_db_query(mysql_conn, query)) != SLURM_SUCCESS)
+		error("Can't convert qos_table: %m");
+	xfree(query);
+
+	return rc;
+}
+
+/*
+ * _convert_event_table - populate the new tres column of a cluster's
+ * event table from the legacy CPU-count column.
+ *
+ * IN mysql_conn     - open connection to the accounting database
+ * IN cluster_name   - cluster whose "<cluster>_<event_table>" is rewritten
+ * IN count_col_name - name of the old column ("cpu_count" or "count",
+ *                     whichever the caller found via SHOW COLUMNS; it is
+ *                     interpolated into the SQL unquoted, which is safe
+ *                     only because it comes from a SHOW COLUMNS result)
+ * RET SLURM_SUCCESS, or the mysql_db_query() error code on failure
+ */
+static int _convert_event_table(mysql_conn_t *mysql_conn, char *cluster_name,
+				char *count_col_name)
+{
+	int rc = SLURM_SUCCESS;
+	char *query = xstrdup_printf(
+		"update \"%s_%s\" set tres=concat('%d=', %s);",
+		cluster_name, event_table, TRES_CPU, count_col_name);
+
+	debug4("(%s:%d) query\n%s", THIS_FILE, __LINE__, query);
+	if ((rc = mysql_db_query(mysql_conn, query)) != SLURM_SUCCESS)
+		error("Can't convert %s_%s info: %m",
+		      cluster_name, event_table);
+	xfree(query);
+
+	return rc;
+}
+
+/*
+ * _convert_cluster_usage_table - convert one cluster-wide rollup usage
+ * table: first rename its legacy columns (_rename_usage_columns), then
+ * migrate the old consumed_energy column into per-TRES rows keyed by
+ * id_tres=TRES_ENERGY.  Rows with zero energy are skipped; an existing
+ * (id_tres, time_start) row is refreshed via ON DUPLICATE KEY UPDATE.
+ *
+ * IN mysql_conn - open connection to the accounting database
+ * IN table      - fully-qualified table name, already wrapped in the
+ *                 double quotes MySQL needs (caller builds "\"cluster_x\"")
+ * RET SLURM_SUCCESS, or the first failing step's error code
+ */
+static int _convert_cluster_usage_table(mysql_conn_t *mysql_conn,
+					char *table)
+{
+	char *query = NULL;
+	int rc;
+
+	if ((rc = _rename_usage_columns(mysql_conn, table)) != SLURM_SUCCESS)
+		return rc;
+
+	/* INSERT ... SELECT from the table into itself: copies the energy
+	 * figure into a new row whose id_tres is TRES_ENERGY. */
+	query = xstrdup_printf("insert into %s (creation_time, mod_time, "
+			       "deleted, id_tres, time_start, alloc_secs) "
+			       "select creation_time, mod_time, deleted, "
+			       "%d, time_start, consumed_energy from %s where "
+			       "consumed_energy != 0 on duplicate key update "
+			       "mod_time=%ld, alloc_secs=VALUES(alloc_secs);",
+			       table, TRES_ENERGY, table, time(NULL));
+	debug4("(%s:%d) query\n%s", THIS_FILE, __LINE__, query);
+	if ((rc = mysql_db_query(mysql_conn, query)) != SLURM_SUCCESS)
+		error("Can't convert %s info: %m", table);
+	xfree(query);
+
+	return rc;
+}
+
+/*
+ * _convert_id_usage_table - same conversion as
+ * _convert_cluster_usage_table(), but for the per-id (assoc/wckey)
+ * rollup tables, which carry an extra "id" column that must be copied
+ * through to the new TRES_ENERGY rows.
+ *
+ * IN mysql_conn - open connection to the accounting database
+ * IN table      - fully-qualified, pre-quoted table name
+ * RET SLURM_SUCCESS, or the first failing step's error code
+ */
+static int _convert_id_usage_table(mysql_conn_t *mysql_conn, char *table)
+{
+	char *query = NULL;
+	int rc;
+
+	if ((rc = _rename_usage_columns(mysql_conn, table)) != SLURM_SUCCESS)
+		return rc;
+
+	query = xstrdup_printf("insert into %s (creation_time, mod_time, "
+			       "deleted, id, id_tres, time_start, alloc_secs) "
+			       "select creation_time, mod_time, deleted, id, "
+			       "%d, time_start, consumed_energy from %s where "
+			       "consumed_energy != 0 on duplicate key update "
+			       "mod_time=%ld, alloc_secs=VALUES(alloc_secs);",
+			       table, TRES_ENERGY, table, time(NULL));
+	debug4("(%s:%d) query\n%s", THIS_FILE, __LINE__, query);
+	if ((rc = mysql_db_query(mysql_conn, query)) != SLURM_SUCCESS)
+		error("Can't convert %s info: %m", table);
+	xfree(query);
+
+	return rc;
+}
+
+/*
+ * _convert_cluster_usage_tables - run the usage conversion over every
+ * rollup table (day/hour/month) of a cluster.  The cluster-wide tables
+ * go through _convert_cluster_usage_table(); the per-id assoc and wckey
+ * tables go through _convert_id_usage_table().  Stops at the first
+ * failure and returns its code.
+ *
+ * The original nine copy-pasted stanzas are replaced by two
+ * NULL-terminated table lists driving the same call sequence.
+ *
+ * IN mysql_conn   - open connection to the accounting database
+ * IN cluster_name - cluster whose tables are converted
+ * RET SLURM_SUCCESS, or the first failing conversion's error code
+ */
+static int _convert_cluster_usage_tables(mysql_conn_t *mysql_conn,
+					 char *cluster_name)
+{
+	char table[200];
+	int rc = SLURM_SUCCESS;
+	int i;
+	/* cluster-wide rollup tables (no id column) */
+	char *cluster_tables[] = {
+		cluster_day_table, cluster_hour_table, cluster_month_table,
+		NULL
+	};
+	/* assoc and wckey rollup tables (carry an extra id column) */
+	char *id_tables[] = {
+		assoc_day_table, assoc_hour_table, assoc_month_table,
+		wckey_day_table, wckey_hour_table, wckey_month_table,
+		NULL
+	};
+
+	for (i = 0; cluster_tables[i]; i++) {
+		snprintf(table, sizeof(table), "\"%s_%s\"",
+			 cluster_name, cluster_tables[i]);
+		if ((rc = _convert_cluster_usage_table(mysql_conn, table))
+		    != SLURM_SUCCESS)
+			return rc;
+	}
+
+	for (i = 0; id_tables[i]; i++) {
+		snprintf(table, sizeof(table), "\"%s_%s\"",
+			 cluster_name, id_tables[i]);
+		if ((rc = _convert_id_usage_table(mysql_conn, table))
+		    != SLURM_SUCCESS)
+			return rc;
+	}
+
+	return rc;
+}
+
+/*
+ * _convert_job_table - populate the new tres_alloc column of a cluster's
+ * job table from the legacy cpus_alloc column as "TRES_CPU=<count>".
+ *
+ * NOTE(review): the nested concat(concat(...)) is redundant — a single
+ * concat('%d=', cpus_alloc) would be equivalent (cf. _convert_step_table).
+ * Left as-is since the generated SQL behaves identically.
+ *
+ * IN mysql_conn   - open connection to the accounting database
+ * IN cluster_name - cluster whose "<cluster>_<job_table>" is rewritten
+ * RET SLURM_SUCCESS, or the mysql_db_query() error code on failure
+ */
+static int _convert_job_table(mysql_conn_t *mysql_conn, char *cluster_name)
+{
+	int rc = SLURM_SUCCESS;
+	char *query = xstrdup_printf("update \"%s_%s\" set tres_alloc="
+				     "concat(concat('%d=', cpus_alloc));",
+				     cluster_name, job_table, TRES_CPU);
+
+	debug4("(%s:%d) query\n%s", THIS_FILE, __LINE__, query);
+	if ((rc = mysql_db_query(mysql_conn, query)) != SLURM_SUCCESS)
+		error("Can't convert %s_%s info: %m",
+		      cluster_name, job_table);
+	xfree(query);
+
+	return rc;
+}
+
+/*
+ * _convert_step_table - populate the new tres_alloc column of a cluster's
+ * step table from the legacy cpus_alloc column as "TRES_CPU=<count>".
+ *
+ * IN mysql_conn   - open connection to the accounting database
+ * IN cluster_name - cluster whose "<cluster>_<step_table>" is rewritten
+ * RET SLURM_SUCCESS, or the mysql_db_query() error code on failure
+ */
+static int _convert_step_table(mysql_conn_t *mysql_conn, char *cluster_name)
+{
+	int rc = SLURM_SUCCESS;
+	char *query = xstrdup_printf(
+		"update \"%s_%s\" set tres_alloc=concat('%d=', cpus_alloc);",
+		cluster_name, step_table, TRES_CPU);
+
+	debug4("(%s:%d) query\n%s", THIS_FILE, __LINE__, query);
+	if ((rc = mysql_db_query(mysql_conn, query)) != SLURM_SUCCESS)
+		error("Can't convert %s_%s info: %m",
+		      cluster_name, step_table);
+	xfree(query);
+
+	return rc;
+}
+
+/*
+ * _convert_resv_table - populate the new tres column of a cluster's
+ * reservation table from the legacy cpus column as "TRES_CPU=<count>".
+ *
+ * IN mysql_conn   - open connection to the accounting database
+ * IN cluster_name - cluster whose "<cluster>_<resv_table>" is rewritten
+ * RET SLURM_SUCCESS, or the mysql_db_query() error code on failure
+ */
+static int _convert_resv_table(mysql_conn_t *mysql_conn, char *cluster_name)
+{
+	int rc = SLURM_SUCCESS;
+	char *query = xstrdup_printf(
+		"update \"%s_%s\" set tres=concat('%d=', cpus);",
+		cluster_name, resv_table, TRES_CPU);
+
+	debug4("(%s:%d) query\n%s", THIS_FILE, __LINE__, query);
+	if ((rc = mysql_db_query(mysql_conn, query)) != SLURM_SUCCESS)
+		error("Can't convert %s_%s info: %m",
+		      cluster_name, resv_table);
+	xfree(query);
+
+	return rc;
+}
+
+/*
+ * _convert2_tables - second-stage 14.11 -> 15.08 conversion: fold the
+ * legacy per-resource limit columns of every cluster's assoc table and
+ * the global QOS table into the new TRES string columns.
+ *
+ * IN mysql_conn - open connection to the accounting database
+ * RET SLURM_SUCCESS on success, 2 if the conversion has already been
+ *     done (old columns gone), an error code otherwise
+ *
+ * Fixes vs. the original: the two `(rc = call != SLURM_SUCCESS)`
+ * conditions had the closing paren misplaced, so rc became the boolean
+ * comparison (0/1) instead of the callee's error code; the MYSQL_RES
+ * leaked on the "no grp_cpus column" continue path; stale comments
+ * (default_acct/default_wckey, "event table") and a wrong error message
+ * referencing the count column are corrected.
+ */
+static int _convert2_tables(mysql_conn_t *mysql_conn)
+{
+	char *query;
+	MYSQL_RES *result = NULL;
+	int i = 0, rc = SLURM_SUCCESS;
+	ListIterator itr;
+	char *cluster_name;
+
+	/* no valid clusters, just return */
+	if (!(cluster_name = list_peek(as_mysql_total_cluster_list)))
+		return SLURM_SUCCESS;
+
+	/* See if the old grp_cpus column still exists.  If this
+	   conversion already ran it will have been removed.
+	*/
+	query = xstrdup_printf("show columns from \"%s_%s\" where "
+			       "Field='grp_cpus';",
+			       cluster_name, assoc_table);
+
+	debug4("(%s:%d) query\n%s", THIS_FILE, __LINE__, query);
+	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
+		xfree(query);
+		return SLURM_ERROR;
+	}
+	xfree(query);
+	i = mysql_num_rows(result);
+	mysql_free_result(result);
+	result = NULL;
+
+	/* 2 tells the caller the conversion already happened */
+	if (!i)
+		return 2;
+
+	info("Updating database tables, this may take some time, "
+	     "do not stop the process.");
+
+	/* make it up to date */
+	itr = list_iterator_create(as_mysql_total_cluster_list);
+	while ((cluster_name = list_next(itr))) {
+		query = xstrdup_printf("show columns from \"%s_%s\" where "
+				       "Field='grp_cpus';",
+				       cluster_name, assoc_table);
+
+		debug4("(%s:%d) query\n%s", THIS_FILE, __LINE__, query);
+		if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
+			xfree(query);
+			error("QUERY BAD: No grp_cpus col in assoc_table "
+			      "for cluster %s, this should never happen",
+			      cluster_name);
+			continue;
+		}
+		xfree(query);
+
+		if (!mysql_num_rows(result)) {
+			error("No grp_cpus col name in assoc_table "
+			      "for cluster %s, this should never happen",
+			      cluster_name);
+			mysql_free_result(result);	/* was leaked here */
+			continue;
+		}
+		/* nothing else is read from the result set */
+		mysql_free_result(result);
+		result = NULL;
+
+		/* make sure old tables are up to date */
+		if ((rc = _update2_old_cluster_tables(mysql_conn,
+						      cluster_name))
+		    != SLURM_SUCCESS)
+			break;
+
+		/* Convert the assoc table */
+		info("converting assoc table for %s", cluster_name);
+		if ((rc = _convert_assoc_table(mysql_conn, cluster_name))
+		    != SLURM_SUCCESS)
+			break;
+	}
+	list_iterator_destroy(itr);
+
+	/* make sure old non-cluster tables are up to date */
+	if ((rc = _update2_old_tables(mysql_conn)) != SLURM_SUCCESS)
+		return rc;
+
+	if ((rc = _convert_qos_table(mysql_conn)) != SLURM_SUCCESS)
+		return rc;
+
+	return rc;
+}
+
+/*
+ * as_mysql_convert_tables - upgrade pre-15.08 accounting tables to the
+ * TRES-based layout.  Runs the structural conversion first
+ * (_convert2_tables); if that reports the work is already done, returns
+ * early.  Otherwise, for every known cluster, converts the event, usage,
+ * job, reservation and step tables in turn, stopping on first failure.
+ *
+ * IN mysql_conn - open connection to the accounting database
+ * RET SLURM_SUCCESS, or the first failure's error code
+ *
+ * Fixes vs. the original: all six `(rc = call != SLURM_SUCCESS)`
+ * conditions had the closing paren misplaced, so rc became the boolean
+ * comparison result instead of the callee's error code; the MYSQL_RES
+ * leaked on the "no count col" continue path; the column name is now
+ * duplicated so the result set can be freed before it is used; the
+ * stale default_acct/default_wckey comment is corrected.
+ */
+extern int as_mysql_convert_tables(mysql_conn_t *mysql_conn)
+{
+	char *query;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	int i = 0, rc = SLURM_SUCCESS;
+	ListIterator itr;
+	char *cluster_name;
+
+	xassert(as_mysql_total_cluster_list);
+
+	if ((rc = _convert2_tables(mysql_conn)) == 2) {
+		debug2("It appears the table conversions have already "
+		       "taken place, hooray!");
+		return SLURM_SUCCESS;
+	} else if (rc != SLURM_SUCCESS)
+		return rc;
+
+	/* no valid clusters, just return */
+	if (!(cluster_name = list_peek(as_mysql_total_cluster_list)))
+		return SLURM_SUCCESS;
+
+	/* See if the old CPU count column still exists in the event
+	   table.  If this conversion already ran it will be gone.
+	*/
+	query = xstrdup_printf("show columns from \"%s_%s\" where "
+			       "Field='cpu_count' || Field='count';",
+			       cluster_name, event_table);
+
+	debug4("(%s:%d) query\n%s", THIS_FILE, __LINE__, query);
+	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
+		xfree(query);
+		return SLURM_ERROR;
+	}
+	xfree(query);
+	i = mysql_num_rows(result);
+	mysql_free_result(result);
+	result = NULL;
+
+	if (!i) {
+		info("Conversion done: success!");
+		return SLURM_SUCCESS;
+	}
+
+	/* make it up to date */
+	itr = list_iterator_create(as_mysql_total_cluster_list);
+	while ((cluster_name = list_next(itr))) {
+		char *count_col = NULL;
+
+		query = xstrdup_printf("show columns from \"%s_%s\" where "
+				       "Field='cpu_count' || Field='count';",
+				       cluster_name, event_table);
+
+		debug4("(%s:%d) query\n%s", THIS_FILE, __LINE__, query);
+		if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
+			xfree(query);
+			error("QUERY BAD: No count col name for cluster %s, "
+			      "this should never happen", cluster_name);
+			continue;
+		}
+		xfree(query);
+
+		if (!(row = mysql_fetch_row(result)) || !row[0] || !row[0][0]) {
+			error("No count col name for cluster %s, "
+			      "this should never happen", cluster_name);
+			mysql_free_result(result);	/* was leaked here */
+			continue;
+		}
+		/* copy the column name so the result set can be freed
+		 * before the conversions below run */
+		count_col = xstrdup(row[0]);
+		mysql_free_result(result);
+		result = NULL;
+
+		/* make sure old tables are up to date */
+		if ((rc = _update_old_cluster_tables(mysql_conn, cluster_name,
+						     count_col))
+		    != SLURM_SUCCESS) {
+			xfree(count_col);
+			break;
+		}
+
+		/* Convert the event table first */
+		info("converting event table for %s", cluster_name);
+		rc = _convert_event_table(mysql_conn, cluster_name, count_col);
+		xfree(count_col);
+		if (rc != SLURM_SUCCESS)
+			break;
+
+		/* Now convert the cluster usage tables */
+		info("converting cluster usage tables for %s", cluster_name);
+		if ((rc = _convert_cluster_usage_tables(mysql_conn,
+							cluster_name))
+		    != SLURM_SUCCESS)
+			break;
+
+		/* Now convert the job tables */
+		info("converting job table for %s", cluster_name);
+		if ((rc = _convert_job_table(mysql_conn, cluster_name))
+		    != SLURM_SUCCESS)
+			break;
+
+		/* Now convert the reservation tables */
+		info("converting reservation table for %s", cluster_name);
+		if ((rc = _convert_resv_table(mysql_conn, cluster_name))
+		    != SLURM_SUCCESS)
+			break;
+
+		/* Now convert the step tables */
+		info("converting step table for %s", cluster_name);
+		if ((rc = _convert_step_table(mysql_conn, cluster_name))
+		    != SLURM_SUCCESS)
+			break;
+	}
+	list_iterator_destroy(itr);
+
+	if (rc == SLURM_SUCCESS)
+		info("Conversion done: success!");
+
+	return rc;
+}
diff --git a/src/slurmd/slurmd/xcpu.h b/src/plugins/accounting_storage/mysql/as_mysql_convert.h
similarity index 71%
rename from src/slurmd/slurmd/xcpu.h
rename to src/plugins/accounting_storage/mysql/as_mysql_convert.h
index 5a4a42229..940dd7fe3 100644
--- a/src/slurmd/slurmd/xcpu.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_convert.h
@@ -1,10 +1,10 @@
 /*****************************************************************************\
- *  src/slurmd/slurmd/xcpu.h - xcpu-based process management functions
+ *  as_mysql_convert.h - functions dealing with converting from tables in
+ *                    slurm <= 14.11.
  *****************************************************************************
- *  Copyright (C) 2006 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Morris Jette <jette1@llnl.gov>.
- *  CODE-OCEC-09-009. All rights reserved.
+ *
+ *  Copyright (C) 2015 SchedMD LLC.
+ *  Written by Danny Auble <da@schedmd.com>
  *
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://slurm.schedmd.com/>.
@@ -15,15 +15,15 @@
  *  Software Foundation; either version 2 of the License, or (at your option)
  *  any later version.
  *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
  *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
+ *  version.  If you delete this exception statement from all source files in
  *  the program, then also delete it here.
  *
  *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
@@ -36,19 +36,15 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#ifndef _XCPU_H
-#define _XCPU_H
+#ifndef _HAVE_AS_MYSQL_CONVERT_H
+#define _HAVE_AS_MYSQL_CONVERT_H
 
-#ifdef HAVE_XCPU
+#include "accounting_storage_mysql.h"
 
-/* Identify every XCPU process in a specific node and signal it.
- * Return the process count */
-extern int xcpu_signal(int sig, char *nodes);
+extern int as_mysql_convert_event_table(mysql_conn_t *mysql_conn, char *table);
 
-#else
+extern int as_mysql_convert_usage_table(mysql_conn_t *mysql_conn, char *table);
 
-/* Just returns a zero */
-extern int xcpu_signal(int sig, char *nodes);
+extern int as_mysql_convert_tables(mysql_conn_t *mysql_conn);
 
 #endif
-#endif
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_job.c b/src/plugins/accounting_storage/mysql/as_mysql_job.c
index 42a814ca6..23b34a38f 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_job.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_job.c
@@ -45,6 +45,7 @@
 #include "src/common/node_select.h"
 #include "src/common/parse_time.h"
 #include "src/common/slurm_jobacct_gather.h"
+#include "src/common/slurm_time.h"
 
 #define BUFFER_SIZE 4096
 
@@ -112,7 +113,7 @@ static char *_get_user_from_associd(mysql_conn_t *mysql_conn,
 	}
 	xfree(query);
 
-	if ((row = mysql_fetch_row(result)))
+	if ((row = mysql_fetch_row(result)) && row[0][0])
 		user = xstrdup(row[0]);
 
 	mysql_free_result(result);
@@ -224,7 +225,7 @@ static uint32_t _get_wckeyid(mysql_conn_t *mysql_conn, char **name,
 						ACCOUNTING_ENFORCE_WCKEYS,
 						NULL);
 
-			list_destroy(wckey_list);
+			FREE_NULL_LIST(wckey_list);
 		}
 		xfree(user);
 		/* info("got wckeyid of %d", wckey_rec.id); */
@@ -248,7 +249,8 @@ extern int as_mysql_job_start(mysql_conn_t *mysql_conn,
 	int reinit = 0;
 	time_t begin_time, check_time, start_time, submit_time;
 	uint32_t wckeyid = 0;
-	int job_state, node_cnt = 0;
+	uint32_t job_state;
+	int node_cnt = 0;
 	uint32_t job_db_inx = job_ptr->db_index;
 	job_array_struct_t *array_recs = job_ptr->array_recs;
 
@@ -359,19 +361,19 @@ extern int as_mysql_job_start(mysql_conn_t *mysql_conn,
 			debug("Need to reroll usage from %s Job %u "
 			      "from %s started then and we are just "
 			      "now hearing about it.",
-			      slurm_ctime(&check_time),
+			      slurm_ctime2(&check_time),
 			      job_ptr->job_id, mysql_conn->cluster_name);
 		else if (begin_time)
 			debug("Need to reroll usage from %s Job %u "
 			      "from %s became eligible then and we are just "
 			      "now hearing about it.",
-			      slurm_ctime(&check_time),
+			      slurm_ctime2(&check_time),
 			      job_ptr->job_id, mysql_conn->cluster_name);
 		else
 			debug("Need to reroll usage from %s Job %u "
 			      "from %s was submitted then and we are just "
 			      "now hearing about it.",
-			      slurm_ctime(&check_time),
+			      slurm_ctime2(&check_time),
 			      job_ptr->job_id, mysql_conn->cluster_name);
 
 		global_last_rollup = check_time;
@@ -458,12 +460,12 @@ no_rollup_change:
 			begin_time = submit_time;
 		query = xstrdup_printf(
 			"insert into \"%s_%s\" "
-			"(id_job, id_array_job, id_array_task, "
+			"(id_job, mod_time, id_array_job, id_array_task, "
 			"id_assoc, id_qos, id_user, "
 			"id_group, nodelist, id_resv, timelimit, "
 			"time_eligible, time_submit, time_start, "
 			"job_name, track_steps, state, priority, cpus_req, "
-			"cpus_alloc, nodes_alloc, mem_req",
+			"nodes_alloc, mem_req",
 			mysql_conn->cluster_name, job_table);
 
 		if (wckeyid)
@@ -488,10 +490,16 @@ no_rollup_change:
 		else
 			xstrcat(query, ", array_task_str, array_task_pending");
 
+		if (job_ptr->tres_alloc_str)
+			xstrcat(query, ", tres_alloc");
+		if (job_ptr->tres_req_str)
+			xstrcat(query, ", tres_req");
+
 		xstrfmtcat(query,
-			   ") values (%u, %u, %u, %u, %u, %u, %u, "
+			   ") values (%u, UNIX_TIMESTAMP(), "
+			   "%u, %u, %u, %u, %u, %u, "
 			   "'%s', %u, %u, %ld, %ld, %ld, "
-			   "'%s', %u, %u, %u, %u, %u, %u, %u",
+			   "'%s', %u, %u, %u, %u, %u, %u",
 			   job_ptr->job_id, job_ptr->array_job_id,
 			   job_ptr->array_task_id, job_ptr->assoc_id,
 			   job_ptr->qos_id,
@@ -500,7 +508,7 @@ no_rollup_change:
 			   begin_time, submit_time, start_time,
 			   jname, track_steps, job_state,
 			   job_ptr->priority, job_ptr->details->min_cpus,
-			   job_ptr->total_cpus, node_cnt,
+			   node_cnt,
 			   job_ptr->details->pn_min_memory);
 
 		if (wckeyid)
@@ -527,23 +535,28 @@ no_rollup_change:
 		else
 			xstrcat(query, ", NULL, 0");
 
+		if (job_ptr->tres_alloc_str)
+			xstrfmtcat(query, ", '%s'", job_ptr->tres_alloc_str);
+		if (job_ptr->tres_req_str)
+			xstrfmtcat(query, ", '%s'", job_ptr->tres_req_str);
+
 		xstrfmtcat(query,
 			   ") on duplicate key update "
 			   "job_db_inx=LAST_INSERT_ID(job_db_inx), "
 			   "id_user=%u, id_group=%u, "
 			   "nodelist='%s', id_resv=%u, timelimit=%u, "
 			   "time_submit=%ld, time_eligible=%ld, "
-			   "time_start=%ld, "
+			   "time_start=%ld, mod_time=UNIX_TIMESTAMP(), "
 			   "job_name='%s', track_steps=%u, id_qos=%u, "
 			   "state=greatest(state, %u), priority=%u, "
-			   "cpus_req=%u, cpus_alloc=%u, nodes_alloc=%u, "
+			   "cpus_req=%u, nodes_alloc=%u, "
 			   "mem_req=%u, id_array_job=%u, id_array_task=%u",
 			   job_ptr->user_id, job_ptr->group_id, nodes,
 			   job_ptr->resv_id, job_ptr->time_limit,
 			   submit_time, begin_time, start_time,
 			   jname, track_steps, job_ptr->qos_id, job_state,
 			   job_ptr->priority, job_ptr->details->min_cpus,
-			   job_ptr->total_cpus, node_cnt,
+			   node_cnt,
 			   job_ptr->details->pn_min_memory,
 			   job_ptr->array_job_id,
 			   job_ptr->array_task_id);
@@ -574,6 +587,13 @@ no_rollup_change:
 			xstrfmtcat(query, ", array_task_str=NULL, "
 				   "array_task_pending=0");
 
+		if (job_ptr->tres_alloc_str)
+			xstrfmtcat(query, ", tres_alloc='%s'",
+				   job_ptr->tres_alloc_str);
+		if (job_ptr->tres_req_str)
+			xstrfmtcat(query, ", tres_req='%s'",
+				   job_ptr->tres_req_str);
+
 		if (debug_flags & DEBUG_FLAG_DB_JOB)
 			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
 	try_again:
@@ -582,8 +602,6 @@ no_rollup_change:
 			if (!reinit) {
 				error("It looks like the storage has gone "
 				      "away trying to reconnect");
-				mysql_db_close_db_connection(
-					mysql_conn);
 				/* reconnect */
 				check_connection(mysql_conn);
 				reinit = 1;
@@ -623,14 +641,22 @@ no_rollup_change:
 			xstrfmtcat(query, "array_task_str=NULL, "
 				   "array_task_pending=0, ");
 
+		if (job_ptr->tres_alloc_str)
+			xstrfmtcat(query, "tres_alloc='%s', ",
+				   job_ptr->tres_alloc_str);
+		if (job_ptr->tres_req_str)
+			xstrfmtcat(query, "tres_req='%s', ",
+				   job_ptr->tres_req_str);
+
 		xstrfmtcat(query, "time_start=%ld, job_name='%s', state=%u, "
-			   "cpus_alloc=%u, nodes_alloc=%u, id_qos=%u, "
+			   "nodes_alloc=%u, id_qos=%u, "
 			   "id_assoc=%u, id_resv=%u, "
 			   "timelimit=%u, mem_req=%u, "
 			   "id_array_job=%u, id_array_task=%u, "
-			   "time_eligible=%ld where job_db_inx=%d",
+			   "time_eligible=%ld, mod_time=UNIX_TIMESTAMP() "
+			   "where job_db_inx=%d",
 			   start_time, jname, job_state,
-			   job_ptr->total_cpus, node_cnt, job_ptr->qos_id,
+			   node_cnt, job_ptr->qos_id,
 			   job_ptr->assoc_id,
 			   job_ptr->resv_id, job_ptr->time_limit,
 			   job_ptr->details->pn_min_memory,
@@ -761,7 +787,7 @@ extern List as_mysql_modify_job(mysql_conn_t *mysql_conn, uint32_t uid,
 	xfree(vals);
 	if (rc == SLURM_ERROR) {
 		error("Couldn't modify job");
-		list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 		ret_list = NULL;
 	}
 
@@ -862,6 +888,7 @@ extern int as_mysql_job_complete(mysql_conn_t *mysql_conn,
 	 */
 
 	query = xstrdup_printf("update \"%s_%s\" set "
+			       "mod_time=UNIX_TIMESTAMP(), "
 			       "time_end=%ld, state=%d",
 			       mysql_conn->cluster_name, job_table,
 			       end_time, job_state);
@@ -899,7 +926,7 @@ extern int as_mysql_job_complete(mysql_conn_t *mysql_conn,
 extern int as_mysql_step_start(mysql_conn_t *mysql_conn,
 			       struct step_record *step_ptr)
 {
-	int cpus = 0, tasks = 0, nodes = 0, task_dist = 0;
+	int tasks = 0, nodes = 0, task_dist = 0;
 	int rc=SLURM_SUCCESS;
 	char node_list[BUFFER_SIZE];
 	char *node_inx = NULL, *step_name = NULL;
@@ -927,11 +954,10 @@ extern int as_mysql_step_start(mysql_conn_t *mysql_conn,
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return ESLURM_DB_CONNECTION;
 	if (slurmdbd_conf) {
-		cpus = step_ptr->cpu_count;
 		if (step_ptr->job_ptr->details)
 			tasks = step_ptr->job_ptr->details->num_tasks;
 		else
-			tasks = cpus;
+			tasks = step_ptr->cpu_count;
 		snprintf(node_list, BUFFER_SIZE, "%s",
 			 step_ptr->job_ptr->nodes);
 		nodes = step_ptr->step_layout->node_cnt;
@@ -948,7 +974,7 @@ extern int as_mysql_step_start(mysql_conn_t *mysql_conn,
 		   script was running.
 		*/
 		snprintf(node_list, BUFFER_SIZE, "%s", step_ptr->gres);
-		nodes = cpus = tasks = 1;
+		nodes = tasks = 1;
 	} else {
 		char *ionodes = NULL, *temp_nodes = NULL;
 		char temp_bit[BUF_SIZE];
@@ -960,9 +986,9 @@ extern int as_mysql_step_start(mysql_conn_t *mysql_conn,
 #ifdef HAVE_BG_L_P
 		/* Only L and P use this code */
 		if (step_ptr->job_ptr->details)
-			tasks = cpus = step_ptr->job_ptr->details->min_cpus;
+			tasks = step_ptr->job_ptr->details->min_cpus;
 		else
-			tasks = cpus = step_ptr->job_ptr->cpu_cnt;
+			tasks = step_ptr->job_ptr->cpu_cnt;
 		select_g_select_jobinfo_get(step_ptr->job_ptr->select_jobinfo,
 					    SELECT_JOBDATA_NODE_CNT,
 					    &nodes);
@@ -970,11 +996,25 @@ extern int as_mysql_step_start(mysql_conn_t *mysql_conn,
 #else
 		if (!step_ptr->step_layout
 		    || !step_ptr->step_layout->task_cnt) {
-			tasks = cpus = step_ptr->job_ptr->total_cpus;
+			if (step_ptr->cpu_count)
+				tasks = step_ptr->cpu_count;
+			else {
+				if (!(tasks = slurmdb_find_tres_count_in_string(
+					      step_ptr->tres_alloc_str,
+					      TRES_CPU))) {
+					if (!(tasks =
+					      slurmdb_find_tres_count_in_string(
+						      step_ptr->job_ptr->
+						      tres_alloc_str,
+						      TRES_CPU)))
+						tasks = step_ptr->job_ptr->
+							total_nodes;
+				}
+			}
+
 			nodes = step_ptr->job_ptr->total_nodes;
 			temp_nodes = step_ptr->job_ptr->nodes;
 		} else {
-			cpus = step_ptr->cpu_count;
 			tasks = step_ptr->step_layout->task_cnt;
 #ifdef HAVE_BGQ
 			select_g_select_jobinfo_get(step_ptr->select_jobinfo,
@@ -1023,21 +1063,27 @@ extern int as_mysql_step_start(mysql_conn_t *mysql_conn,
 	/* The stepid could be -2 so use %d not %u */
 	query = xstrdup_printf(
 		"insert into \"%s_%s\" (job_db_inx, id_step, time_start, "
-		"step_name, state, "
-		"cpus_alloc, nodes_alloc, task_cnt, nodelist, "
-		"node_inx, task_dist, req_cpufreq) "
-		"values (%d, %d, %d, '%s', %d, %d, %d, %d, "
-		"'%s', '%s', %d, %u) "
-		"on duplicate key update cpus_alloc=%d, nodes_alloc=%d, "
-		"task_cnt=%d, time_end=0, state=%d, "
-		"nodelist='%s', node_inx='%s', task_dist=%d, req_cpufreq=%u",
+		"step_name, state, tres_alloc, "
+		"nodes_alloc, task_cnt, nodelist, node_inx, "
+		"task_dist, req_cpufreq, req_cpufreq_min, req_cpufreq_gov) "
+		"values (%d, %d, %d, '%s', %d, '%s', %d, %d, "
+		"'%s', '%s', %d, %u, %u, %u) "
+		"on duplicate key update "
+		"nodes_alloc=%d, task_cnt=%d, time_end=0, state=%d, "
+		"nodelist='%s', node_inx='%s', task_dist=%d, "
+		"req_cpufreq=%u, req_cpufreq_min=%u, req_cpufreq_gov=%u,"
+		"tres_alloc='%s';",
 		mysql_conn->cluster_name, step_table,
 		step_ptr->job_ptr->db_index,
 		step_ptr->step_id,
 		(int)start_time, step_name,
-		JOB_RUNNING, cpus, nodes, tasks, node_list, node_inx, task_dist,
-		step_ptr->cpu_freq, cpus, nodes, tasks, JOB_RUNNING,
-		node_list, node_inx, task_dist, step_ptr->cpu_freq);
+		JOB_RUNNING, step_ptr->tres_alloc_str,
+		nodes, tasks, node_list, node_inx, task_dist,
+		step_ptr->cpu_freq_max, step_ptr->cpu_freq_min,
+		step_ptr->cpu_freq_gov, nodes, tasks, JOB_RUNNING,
+		node_list, node_inx, task_dist, step_ptr->cpu_freq_max,
+		step_ptr->cpu_freq_min, step_ptr->cpu_freq_gov,
+		step_ptr->tres_alloc_str);
 	if (debug_flags & DEBUG_FLAG_DB_STEP)
 		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
 	rc = mysql_db_query(mysql_conn, query);
@@ -1091,9 +1137,24 @@ extern int as_mysql_step_complete(mysql_conn_t *mysql_conn,
 		/* Only L and P use this code */
 		tasks = step_ptr->job_ptr->details->min_cpus;
 #else
-		if (!step_ptr->step_layout || !step_ptr->step_layout->task_cnt)
-			tasks = step_ptr->job_ptr->total_cpus;
-		else
+		if (!step_ptr->step_layout
+		    || !step_ptr->step_layout->task_cnt) {
+			if (step_ptr->cpu_count)
+				tasks = step_ptr->cpu_count;
+			else {
+				if (!(tasks = slurmdb_find_tres_count_in_string(
+					      step_ptr->tres_alloc_str,
+					      TRES_CPU))) {
+					if (!(tasks =
+					      slurmdb_find_tres_count_in_string(
+						      step_ptr->job_ptr->
+						      tres_alloc_str,
+						      TRES_CPU)))
+						tasks = step_ptr->job_ptr->
+							total_nodes;
+				}
+			}
+		} else
 			tasks = step_ptr->step_layout->task_cnt;
 #endif
 	}
@@ -1176,7 +1237,7 @@ extern int as_mysql_step_complete(mysql_conn_t *mysql_conn,
 			   "max_pages_node=%u, ave_pages=%f, "
 			   "min_cpu=%u, min_cpu_task=%u, "
 			   "min_cpu_node=%u, ave_cpu=%f, "
-			   "act_cpufreq=%u, consumed_energy=%u",
+			   "act_cpufreq=%u, consumed_energy=%"PRIu64"",
 			   /* user seconds */
 			   jobacct->user_cpu_sec,
 			   /* user microseconds */
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_jobacct_process.c b/src/plugins/accounting_storage/mysql/as_mysql_jobacct_process.c
index 9ae2d0848..7b93f4d11 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_jobacct_process.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_jobacct_process.c
@@ -57,7 +57,6 @@ char *job_req_inx[] = {
 	"t1.account",
 	"t1.array_max_tasks",
 	"t1.array_task_str",
-	"t1.cpus_alloc",
 	"t1.cpus_req",
 	"t1.derived_ec",
 	"t1.derived_es",
@@ -94,6 +93,8 @@ char *job_req_inx[] = {
 	"t1.gres_alloc",
 	"t1.gres_req",
 	"t1.gres_used",
+	"t1.tres_alloc",
+	"t1.tres_req",
 	"t2.acct",
 	"t2.lft",
 	"t2.user"
@@ -103,7 +104,6 @@ enum {
 	JOB_REQ_ACCOUNT1,
 	JOB_REQ_ARRAY_MAX,
 	JOB_REQ_ARRAY_STR,
-	JOB_REQ_ALLOC_CPUS,
 	JOB_REQ_REQ_CPUS,
 	JOB_REQ_DERIVED_EC,
 	JOB_REQ_DERIVED_ES,
@@ -140,6 +140,8 @@ enum {
 	JOB_REQ_GRES_ALLOC,
 	JOB_REQ_GRES_REQ,
 	JOB_REQ_GRES_USED,
+	JOB_REQ_TRESA,
+	JOB_REQ_TRESR,
 	JOB_REQ_ACCOUNT,
 	JOB_REQ_LFT,
 	JOB_REQ_USER_NAME,
@@ -160,7 +162,6 @@ char *step_req_inx[] = {
 	"t1.kill_requid",
 	"t1.exit_code",
 	"t1.nodes_alloc",
-	"t1.cpus_alloc",
 	"t1.task_cnt",
 	"t1.task_dist",
 	"t1.user_sec",
@@ -193,7 +194,10 @@ char *step_req_inx[] = {
 	"t1.ave_cpu",
 	"t1.act_cpufreq",
 	"t1.consumed_energy",
-	"t1.req_cpufreq"
+	"t1.req_cpufreq_min",
+	"t1.req_cpufreq",
+	"t1.req_cpufreq_gov",
+	"t1.tres_alloc"
 };
 
 enum {
@@ -208,7 +212,6 @@ enum {
 	STEP_REQ_KILL_REQUID,
 	STEP_REQ_EXIT_CODE,
 	STEP_REQ_NODES,
-	STEP_REQ_CPUS,
 	STEP_REQ_TASKS,
 	STEP_REQ_TASKDIST,
 	STEP_REQ_USER_SEC,
@@ -241,7 +244,10 @@ enum {
 	STEP_REQ_AVE_CPU,
 	STEP_REQ_ACT_CPUFREQ,
 	STEP_REQ_CONSUMED_ENERGY,
-	STEP_REQ_REQ_CPUFREQ,
+	STEP_REQ_REQ_CPUFREQ_MIN,
+	STEP_REQ_REQ_CPUFREQ_MAX,
+	STEP_REQ_REQ_CPUFREQ_GOV,
+	STEP_REQ_TRES,
 	STEP_REQ_COUNT
 };
 
@@ -365,7 +371,7 @@ static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
 	slurmdb_step_rec_t *step = NULL;
 	time_t now = time(NULL);
 	List job_list = list_create(slurmdb_destroy_job_rec);
-	ListIterator itr = NULL;
+	ListIterator itr = NULL, itr2 = NULL;
 	List local_cluster_list = NULL;
 	int set = 0;
 	char *prefix="t2";
@@ -396,6 +402,7 @@ static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
 			      mysql_conn, query, 0))) {
 			xfree(extra);
 			xfree(query);
+			info("here 3");
 			rc = SLURM_ERROR;
 			goto end_it;
 		}
@@ -518,7 +525,6 @@ static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
 			list_append(job_list, job);
 		last_id = curr_id;
 
-		job->alloc_cpus = slurm_atoul(row[JOB_REQ_ALLOC_CPUS]);
 		if (row[JOB_REQ_GRES_ALLOC])
 			job->alloc_gres = xstrdup(row[JOB_REQ_GRES_ALLOC]);
 		else
@@ -611,7 +617,7 @@ static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
 				if (!(result2 = mysql_db_query_ret(
 					      mysql_conn,
 					      query, 0))) {
-					list_destroy(job_list);
+					FREE_NULL_LIST(job_list);
 					job_list = NULL;
 					break;
 				}
@@ -691,6 +697,11 @@ static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
 		job->qosid = slurm_atoul(row[JOB_REQ_QOS]);
 		job->show_full = 1;
 
+		if (row[JOB_REQ_TRESA])
+			job->tres_alloc_str = xstrdup(row[JOB_REQ_TRESA]);
+		if (row[JOB_REQ_TRESR])
+			job->tres_req_str = xstrdup(row[JOB_REQ_TRESR]);
+
 		if (only_pending || (job_cond && job_cond->without_steps))
 			goto skip_steps;
 
@@ -730,6 +741,7 @@ static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
 			if (set)
 				xstrcat(extra, ")");
 		}
+
 		query =	xstrdup_printf("select %s from \"%s_%s\" as t1 "
 				       "where t1.job_db_inx=%s",
 				       step_fields, cluster_name,
@@ -776,14 +788,11 @@ static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
 			step->state = slurm_atoul(step_row[STEP_REQ_STATE]);
 			step->exitcode =
 				slurm_atoul(step_row[STEP_REQ_EXIT_CODE]);
-			step->ncpus = slurm_atoul(step_row[STEP_REQ_CPUS]);
 			step->nnodes = slurm_atoul(step_row[STEP_REQ_NODES]);
 
 			step->ntasks = slurm_atoul(step_row[STEP_REQ_TASKS]);
 			step->task_dist =
 				slurm_atoul(step_row[STEP_REQ_TASKDIST]);
-			if (!step->ntasks)
-				step->ntasks = step->ncpus;
 
 			step->start = slurm_atoul(step_row[STEP_REQ_START]);
 
@@ -823,8 +832,13 @@ static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
 			if ((int)step->elapsed < 0)
 				step->elapsed = 0;
 
-			step->req_cpufreq =
-				slurm_atoul(step_row[STEP_REQ_REQ_CPUFREQ]);
+			step->req_cpufreq_min = slurm_atoul(
+				step_row[STEP_REQ_REQ_CPUFREQ_MIN]);
+			step->req_cpufreq_max = slurm_atoul(
+				step_row[STEP_REQ_REQ_CPUFREQ_MAX]);
+			step->req_cpufreq_gov =	slurm_atoul(
+				step_row[STEP_REQ_REQ_CPUFREQ_GOV]);
+
 			step->stepname = xstrdup(step_row[STEP_REQ_NAME]);
 			step->nodes = xstrdup(step_row[STEP_REQ_NODELIST]);
 			step->requid =
@@ -838,8 +852,8 @@ static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
 					step_row[STEP_REQ_USER_SEC]);
 				step->user_cpu_usec = slurm_atoul(
 					step_row[STEP_REQ_USER_USEC]);
-				step->sys_cpu_sec = slurm_atoul(
-					step_row[STEP_REQ_SYS_SEC]);
+				step->sys_cpu_sec =
+					slurm_atoul(step_row[STEP_REQ_SYS_SEC]);
 				step->sys_cpu_usec = slurm_atoul(
 					step_row[STEP_REQ_SYS_USEC]);
 				step->tot_cpu_sec +=
@@ -878,8 +892,8 @@ static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
 					atof(step_row[STEP_REQ_AVE_PAGES]);
 				step->stats.cpu_min_taskid = slurm_atoul(
 					step_row[STEP_REQ_MIN_CPU_TASK]);
-				step->stats.cpu_ave = atof(
-					step_row[STEP_REQ_AVE_CPU]);
+				step->stats.cpu_ave =
+					atof(step_row[STEP_REQ_AVE_CPU]);
 				step->stats.act_cpufreq =
 					atof(step_row[STEP_REQ_ACT_CPUFREQ]);
 				step->stats.consumed_energy = atof(
@@ -893,23 +907,34 @@ static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
 				step->stats.cpu_min_nodeid = slurm_atoul(
 					step_row[STEP_REQ_MIN_CPU_NODE]);
 			}
+
+			if (step_row[STEP_REQ_TRES])
+				step->tres_alloc_str =
+					xstrdup(step_row[STEP_REQ_TRES]);
 		}
 		mysql_free_result(step_result);
 
 		if (!job->track_steps) {
+			uint64_t j_cpus, s_cpus;
 			/* If we don't have track_steps we want to see
 			   if we have multiple steps.  If we only have
 			   1 step check the job name against the step
 			   name in most all cases it will be
 			   different.  If it is different print out
-			   the step separate.
+			   the step separate.  It could also be a single
+			   step/allocation where the job was allocated more than
+			   the step requested (eg. CR_Socket).
 			*/
 			if (list_count(job->steps) > 1)
 				job->track_steps = 1;
-			else if (step && step->stepname && job->jobname) {
-				if (strcmp(step->stepname, job->jobname))
+			else if (step &&
+				 (xstrcmp(step->stepname, job->jobname) ||
+				  ((j_cpus = slurmdb_find_tres_count_in_string(
+					job->tres_alloc_str, TRES_CPU)) &&
+				   (s_cpus = slurmdb_find_tres_count_in_string(
+					step->tres_alloc_str, TRES_CPU)) &&
+				   j_cpus != s_cpus)))
 					job->track_steps = 1;
-			}
 		}
 	skip_steps:
 		/* need to reset here to make the above test valid */
@@ -918,13 +943,15 @@ static int _cluster_get_jobs(mysql_conn_t *mysql_conn,
 	mysql_free_result(result);
 
 end_it:
-	if (local_cluster_list)
-		list_destroy(local_cluster_list);
+	if (itr2)
+		list_iterator_destroy(itr2);
+
+	FREE_NULL_LIST(local_cluster_list);
 
 	if (rc == SLURM_SUCCESS)
 		list_transfer(sent_list, job_list);
 
-	list_destroy(job_list);
+	FREE_NULL_LIST(job_list);
 	return rc;
 }
 
@@ -1049,7 +1076,7 @@ extern List setup_cluster_list_with_inx(mysql_conn_t *mysql_conn,
 	mysql_free_result(result);
 
 	if (!list_count(local_cluster_list)) {
-		list_destroy(local_cluster_list);
+		FREE_NULL_LIST(local_cluster_list);
 		local_cluster_list = NULL;
 		goto no_hosts;
 	}
@@ -1407,11 +1434,11 @@ extern int setup_job_cond_limits(mysql_conn_t *mysql_conn,
 			xstrcat(*extra, " where (");
 
 		if (job_cond->cpus_max) {
-			xstrfmtcat(*extra, "(t1.cpus_alloc between %u and %u))",
+			xstrfmtcat(*extra, "(t1.ext_1 between %u and %u))",
 				   job_cond->cpus_min, job_cond->cpus_max);
 
 		} else {
-			xstrfmtcat(*extra, "(t1.cpus_alloc='%u'))",
+			xstrfmtcat(*extra, "(t1.ext_1='%u'))",
 				   job_cond->cpus_min);
 
 		}
@@ -1539,6 +1566,8 @@ extern List as_mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
 	int only_pending = 0;
 	List use_cluster_list = as_mysql_cluster_list;
 	char *cluster_name;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
 
 	memset(&user, 0, sizeof(slurmdb_user_rec_t));
 	user.uid = uid;
@@ -1585,6 +1614,8 @@ extern List as_mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
 	else
 		slurm_mutex_lock(&as_mysql_cluster_list_lock);
 
+	assoc_mgr_lock(&locks);
+
 	job_list = list_create(slurmdb_destroy_job_rec);
 	itr = list_iterator_create(use_cluster_list);
 	while ((cluster_name = list_next(itr))) {
@@ -1598,6 +1629,8 @@ extern List as_mysql_jobacct_process_get_jobs(mysql_conn_t *mysql_conn,
 	}
 	list_iterator_destroy(itr);
 
+	assoc_mgr_unlock(&locks);
+
 	if (use_cluster_list == as_mysql_cluster_list)
 		slurm_mutex_unlock(&as_mysql_cluster_list_lock);
 
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_problems.c b/src/plugins/accounting_storage/mysql/as_mysql_problems.c
index c41c7def7..ac2055019 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_problems.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_problems.c
@@ -41,8 +41,8 @@
 #include "as_mysql_problems.h"
 #include "src/common/uid.h"
 
-static int _setup_association_cond_limits(
-	slurmdb_association_cond_t *assoc_cond,
+static int _setup_assoc_cond_limits(
+	slurmdb_assoc_cond_t *assoc_cond,
 	char **extra, bool user_query)
 {
 	int set = 0;
@@ -106,7 +106,7 @@ static int _setup_association_cond_limits(
 
 
 extern int as_mysql_acct_no_assocs(mysql_conn_t *mysql_conn,
-				   slurmdb_association_cond_t *assoc_cond,
+				   slurmdb_assoc_cond_t *assoc_cond,
 				   List ret_list)
 {
 	int rc = SLURM_SUCCESS;
@@ -155,7 +155,7 @@ extern int as_mysql_acct_no_assocs(mysql_conn_t *mysql_conn,
 	while ((row = mysql_fetch_row(result))) {
 		MYSQL_RES *result2 = NULL;
 		int cnt = 0;
-		slurmdb_association_rec_t *assoc = NULL;
+		slurmdb_assoc_rec_t *assoc = NULL;
 
 		/* See if we have at least 1 association in the system */
 		while ((cluster_name = list_next(itr))) {
@@ -183,7 +183,7 @@ extern int as_mysql_acct_no_assocs(mysql_conn_t *mysql_conn,
 		if (cnt)
 			continue;
 
-		assoc =	xmalloc(sizeof(slurmdb_association_rec_t));
+		assoc =	xmalloc(sizeof(slurmdb_assoc_rec_t));
 		list_append(ret_list, assoc);
 
 		assoc->id = SLURMDB_PROBLEM_ACCT_NO_ASSOC;
@@ -199,7 +199,7 @@ extern int as_mysql_acct_no_assocs(mysql_conn_t *mysql_conn,
 }
 
 extern int as_mysql_acct_no_users(mysql_conn_t *mysql_conn,
-				  slurmdb_association_cond_t *assoc_cond,
+				  slurmdb_assoc_cond_t *assoc_cond,
 				  List ret_list)
 {
 	int rc = SLURM_SUCCESS;
@@ -214,7 +214,7 @@ extern int as_mysql_acct_no_users(mysql_conn_t *mysql_conn,
 
 	xassert(ret_list);
 
-	_setup_association_cond_limits(assoc_cond, &extra, 0);
+	_setup_assoc_cond_limits(assoc_cond, &extra, 0);
 
 	/* if this changes you will need to edit the corresponding enum */
 	char *assoc_req_inx[] = {
@@ -274,8 +274,8 @@ extern int as_mysql_acct_no_users(mysql_conn_t *mysql_conn,
 	xfree(query);
 
 	while ((row = mysql_fetch_row(result))) {
-		slurmdb_association_rec_t *assoc =
-			xmalloc(sizeof(slurmdb_association_rec_t));
+		slurmdb_assoc_rec_t *assoc =
+			xmalloc(sizeof(slurmdb_assoc_rec_t));
 
 		list_append(ret_list, assoc);
 
@@ -299,7 +299,7 @@ extern int as_mysql_acct_no_users(mysql_conn_t *mysql_conn,
 
 extern int as_mysql_user_no_assocs_or_no_uid(
 	mysql_conn_t *mysql_conn,
-	slurmdb_association_cond_t *assoc_cond,
+	slurmdb_assoc_cond_t *assoc_cond,
 	List ret_list)
 {
 	int rc = SLURM_SUCCESS;
@@ -347,11 +347,11 @@ extern int as_mysql_user_no_assocs_or_no_uid(
 	while ((row = mysql_fetch_row(result))) {
 		MYSQL_RES *result2 = NULL;
 		int cnt = 0;
-		slurmdb_association_rec_t *assoc = NULL;
+		slurmdb_assoc_rec_t *assoc = NULL;
 		uid_t pw_uid;
 
 		if (uid_from_string (row[0], &pw_uid) < 0) {
-			assoc =	xmalloc(sizeof(slurmdb_association_rec_t));
+			assoc =	xmalloc(sizeof(slurmdb_assoc_rec_t));
 			list_append(ret_list, assoc);
 
 			assoc->id = SLURMDB_PROBLEM_USER_NO_UID;
@@ -386,7 +386,7 @@ extern int as_mysql_user_no_assocs_or_no_uid(
 		if (cnt)
 			continue;
 
-		assoc =	xmalloc(sizeof(slurmdb_association_rec_t));
+		assoc =	xmalloc(sizeof(slurmdb_assoc_rec_t));
 		list_append(ret_list, assoc);
 
 		assoc->id = SLURMDB_PROBLEM_USER_NO_ASSOC;
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_problems.h b/src/plugins/accounting_storage/mysql/as_mysql_problems.h
index 497014540..703bf7d76 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_problems.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_problems.h
@@ -44,13 +44,13 @@
 #include "accounting_storage_mysql.h"
 
 extern int as_mysql_acct_no_assocs(mysql_conn_t *mysql_conn,
-				slurmdb_association_cond_t *assoc_cond,
+				slurmdb_assoc_cond_t *assoc_cond,
 				List ret_list);
 extern int as_mysql_acct_no_users(mysql_conn_t *mysql_conn,
-			       slurmdb_association_cond_t *assoc_cond,
+			       slurmdb_assoc_cond_t *assoc_cond,
 			       List ret_list);
 extern int as_mysql_user_no_assocs_or_no_uid(
-	mysql_conn_t *mysql_conn, slurmdb_association_cond_t *assoc_cond,
+	mysql_conn_t *mysql_conn, slurmdb_assoc_cond_t *assoc_cond,
 	List ret_list);
 
 #endif
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_qos.c b/src/plugins/accounting_storage/mysql/as_mysql_qos.c
index f339d19bd..5ea1b51ed 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_qos.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_qos.c
@@ -39,6 +39,38 @@
 
 #include "as_mysql_qos.h"
 
+static char *mqos_req_inx[] = {
+	"id",
+	"name",
+	"preempt",
+	"grp_tres_mins",
+	"grp_tres_run_mins",
+	"grp_tres",
+	"max_tres_mins_pj",
+	"max_tres_run_mins_pu",
+	"max_tres_pj",
+	"max_tres_pn",
+	"max_tres_pu",
+	"min_tres_pj",
+};
+
+enum {
+	MQOS_ID,
+	MQOS_NAME,
+	MQOS_PREEMPT,
+	MQOS_GTM,
+	MQOS_GTRM,
+	MQOS_GT,
+	MQOS_MTMPJ,
+	MQOS_MTRM,
+	MQOS_MTPJ,
+	MQOS_MTPN,
+	MQOS_MTPU,
+	MQOS_MITPJ,
+	MQOS_COUNT
+};
+
+
 static int _preemption_loop(mysql_conn_t *mysql_conn, int begin_qosid,
 			    bitstr_t *preempt_bitstr)
 {
@@ -81,6 +113,10 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 			     char **extra, char **added_preempt,
 			     bool for_add)
 {
+	uint32_t tres_str_flags = TRES_STR_FLAG_REMOVE |
+		TRES_STR_FLAG_SORT_ID | TRES_STR_FLAG_SIMPLE |
+		TRES_STR_FLAG_NO_NULL;
+
 	if (!qos)
 		return SLURM_ERROR;
 
@@ -94,42 +130,18 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 			qos->flags = 0;
 		if (qos->grace_time == NO_VAL)
 			qos->grace_time = 0;
-		if (qos->grp_cpu_mins == (uint64_t)NO_VAL)
-			qos->grp_cpu_mins = (uint64_t)INFINITE;
-		if (qos->grp_cpu_run_mins == (uint64_t)NO_VAL)
-			qos->grp_cpu_run_mins = (uint64_t)INFINITE;
-		if (qos->grp_cpus == NO_VAL)
-			qos->grp_cpus = INFINITE;
 		if (qos->grp_jobs == NO_VAL)
 			qos->grp_jobs = INFINITE;
-		if (qos->grp_mem == NO_VAL)
-			qos->grp_mem = INFINITE;
-		if (qos->grp_nodes == NO_VAL)
-			qos->grp_nodes = INFINITE;
 		if (qos->grp_submit_jobs == NO_VAL)
 			qos->grp_submit_jobs = INFINITE;
 		if (qos->grp_wall == NO_VAL)
 			qos->grp_wall = INFINITE;
-		if (qos->max_cpu_mins_pj == (uint64_t)NO_VAL)
-			qos->max_cpu_mins_pj = (uint64_t)INFINITE;
-		if (qos->grp_cpu_run_mins == (uint64_t)NO_VAL)
-			qos->grp_cpu_run_mins = (uint64_t)INFINITE;
-		if (qos->max_cpus_pj == NO_VAL)
-			qos->max_cpus_pj = INFINITE;
-		if (qos->max_cpus_pu == NO_VAL)
-			qos->max_cpus_pu = INFINITE;
 		if (qos->max_jobs_pu == NO_VAL)
 			qos->max_jobs_pu = INFINITE;
-		if (qos->max_nodes_pj == NO_VAL)
-			qos->max_nodes_pj = INFINITE;
-		if (qos->max_nodes_pu == NO_VAL)
-			qos->max_nodes_pu = INFINITE;
 		if (qos->max_submit_jobs_pu == NO_VAL)
 			qos->max_submit_jobs_pu = INFINITE;
 		if (qos->max_wall_pj == NO_VAL)
 			qos->max_wall_pj = INFINITE;
-		if (qos->min_cpus_pj == NO_VAL)
-			qos->min_cpus_pj = 1;
 		if (qos->preempt_mode == (uint16_t)NO_VAL)
 			qos->preempt_mode = 0;
 		if (qos->priority == NO_VAL)
@@ -179,43 +191,6 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 		xstrfmtcat(*extra, ", grace_time=%u", qos->grace_time);
 	}
 
-	if (qos->grp_cpu_mins == (uint64_t)INFINITE) {
-		xstrcat(*cols, ", grp_cpu_mins");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", grp_cpu_mins=NULL");
-	} else if ((qos->grp_cpu_mins != (uint64_t)NO_VAL)
-		   && ((int64_t)qos->grp_cpu_mins >= 0)) {
-		xstrcat(*cols, ", grp_cpu_mins");
-		xstrfmtcat(*vals, ", %"PRIu64"",
-			   qos->grp_cpu_mins);
-		xstrfmtcat(*extra, ", grp_cpu_mins=%"PRIu64"",
-			   qos->grp_cpu_mins);
-	}
-
-	if (qos->grp_cpu_run_mins == (uint64_t)INFINITE) {
-		xstrcat(*cols, ", grp_cpu_run_mins");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", grp_cpu_run_mins=NULL");
-	} else if ((qos->grp_cpu_run_mins != (uint64_t)NO_VAL)
-		   && (int64_t)qos->grp_cpu_run_mins >= 0) {
-		xstrcat(*cols, ", grp_cpu_run_mins");
-		xstrfmtcat(*vals, ", %"PRIu64"",
-			   qos->grp_cpu_run_mins);
-		xstrfmtcat(*extra, ", grp_cpu_run_mins=%"PRIu64"",
-			   qos->grp_cpu_run_mins);
-	}
-
-	if (qos->grp_cpus == INFINITE) {
-		xstrcat(*cols, ", grp_cpus");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", grp_cpus=NULL");
-	} else if ((qos->grp_cpus != NO_VAL)
-		   && ((int32_t)qos->grp_cpus >= 0)) {
-		xstrcat(*cols, ", grp_cpus");
-		xstrfmtcat(*vals, ", %u", qos->grp_cpus);
-		xstrfmtcat(*extra, ", grp_cpus=%u", qos->grp_cpus);
-	}
-
 	if (qos->grp_jobs == INFINITE) {
 		xstrcat(*cols, ", grp_jobs");
 		xstrcat(*vals, ", NULL");
@@ -227,28 +202,6 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 		xstrfmtcat(*extra, ", grp_jobs=%u", qos->grp_jobs);
 	}
 
-	if (qos->grp_mem == INFINITE) {
-		xstrcat(*cols, ", grp_mem");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", grp_mem=NULL");
-	} else if ((qos->grp_mem != NO_VAL)
-		   && ((int32_t)qos->grp_mem >= 0)) {
-		xstrcat(*cols, ", grp_mem");
-		xstrfmtcat(*vals, ", %u", qos->grp_mem);
-		xstrfmtcat(*extra, ", grp_mem=%u", qos->grp_mem);
-	}
-
-	if (qos->grp_nodes == INFINITE) {
-		xstrcat(*cols, ", grp_nodes");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", grp_nodes=NULL");
-	} else if ((qos->grp_nodes != NO_VAL)
-		   && ((int32_t)qos->grp_nodes >= 0)) {
-		xstrcat(*cols, ", grp_nodes");
-		xstrfmtcat(*vals, ", %u", qos->grp_nodes);
-		xstrfmtcat(*extra, ", grp_nodes=%u", qos->grp_nodes);
-	}
-
 	if (qos->grp_submit_jobs == INFINITE) {
 		xstrcat(*cols, ", grp_submit_jobs");
 		xstrcat(*vals, ", NULL");
@@ -272,54 +225,6 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 		xstrfmtcat(*extra, ", grp_wall=%u", qos->grp_wall);
 	}
 
-	if (qos->max_cpu_mins_pj == (uint64_t)INFINITE) {
-		xstrcat(*cols, ", max_cpu_mins_per_job");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", max_cpu_mins_per_job=NULL");
-	} else if ((qos->max_cpu_mins_pj != (uint64_t)NO_VAL)
-		   && ((int64_t)qos->max_cpu_mins_pj >= 0)) {
-		xstrcat(*cols, ", max_cpu_mins_per_job");
-		xstrfmtcat(*vals, ", %"PRIu64"",
-			   qos->max_cpu_mins_pj);
-		xstrfmtcat(*extra, ", max_cpu_mins_per_job=%"PRIu64"",
-			   qos->max_cpu_mins_pj);
-	}
-
-	if (qos->max_cpu_run_mins_pu == (uint64_t)INFINITE) {
-		xstrcat(*cols, ", max_cpu_run_mins_per_user");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", max_cpu_run_mins_per_user=NULL");
-	} else if ((qos->max_cpu_run_mins_pu != (uint64_t)NO_VAL)
-		   && ((int64_t)qos->max_cpu_run_mins_pu >= 0)) {
-		xstrcat(*cols, ", max_cpu_run_mins_per_user");
-		xstrfmtcat(*vals, ", %"PRIu64"",
-			   qos->max_cpu_run_mins_pu);
-		xstrfmtcat(*extra, ", max_cpu_run_mins_per_user=%"PRIu64"",
-			   qos->max_cpu_run_mins_pu);
-	}
-
-	if (qos->max_cpus_pj == INFINITE) {
-		xstrcat(*cols, ", max_cpus_per_job");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", max_cpus_per_job=NULL");
-	} else if ((qos->max_cpus_pj != NO_VAL)
-		   && ((int32_t)qos->max_cpus_pj >= 0)) {
-		xstrcat(*cols, ", max_cpus_per_job");
-		xstrfmtcat(*vals, ", %u", qos->max_cpus_pj);
-		xstrfmtcat(*extra, ", max_cpus_per_job=%u", qos->max_cpus_pj);
-	}
-
-	if (qos->max_cpus_pu == INFINITE) {
-		xstrcat(*cols, ", max_cpus_per_user");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", max_cpus_per_user=NULL");
-	} else if ((qos->max_cpus_pu != NO_VAL)
-		   && ((int32_t)qos->max_cpus_pu >= 0)) {
-		xstrcat(*cols, ", max_cpus_per_user");
-		xstrfmtcat(*vals, ", %u", qos->max_cpus_pu);
-		xstrfmtcat(*extra, ", max_cpus_per_user=%u", qos->max_cpus_pu);
-	}
-
 	if (qos->max_jobs_pu == INFINITE) {
 		xstrcat(*cols, ", max_jobs_per_user");
 		xstrcat(*vals, ", NULL");
@@ -331,30 +236,6 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 		xstrfmtcat(*extra, ", max_jobs_per_user=%u", qos->max_jobs_pu);
 	}
 
-	if (qos->max_nodes_pj == INFINITE) {
-		xstrcat(*cols, ", max_nodes_per_job");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", max_nodes_per_job=NULL");
-	} else if ((qos->max_nodes_pj != NO_VAL)
-		   && ((int32_t)qos->max_nodes_pj >= 0)) {
-		xstrcat(*cols, ", max_nodes_per_job");
-		xstrfmtcat(*vals, ", %u", qos->max_nodes_pj);
-		xstrfmtcat(*extra, ", max_nodes_per_job=%u",
-			   qos->max_nodes_pj);
-	}
-
-	if (qos->max_nodes_pu == INFINITE) {
-		xstrcat(*cols, ", max_nodes_per_user");
-		xstrcat(*vals, ", NULL");
-		xstrcat(*extra, ", max_nodes_per_user=NULL");
-	} else if ((qos->max_nodes_pu != NO_VAL)
-		   && ((int32_t)qos->max_nodes_pu >= 0)) {
-		xstrcat(*cols, ", max_nodes_per_user");
-		xstrfmtcat(*vals, ", %u", qos->max_nodes_pu);
-		xstrfmtcat(*extra, ", max_nodes_per_user=%u",
-			   qos->max_nodes_pu);
-	}
-
 	if (qos->max_submit_jobs_pu == INFINITE) {
 		xstrcat(*cols, ", max_submit_jobs_per_user");
 		xstrcat(*vals, ", NULL");
@@ -379,17 +260,6 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 		xstrcat(*extra, ", max_wall_duration_per_job=NULL");
 	}
 
-	if (qos->min_cpus_pj == INFINITE) {
-		xstrcat(*cols, ", min_cpus_per_job");
-		xstrcat(*vals, ", 1");
-		xstrcat(*extra, ", min_cpus_per_job=1");
-	} else if ((qos->min_cpus_pj != NO_VAL)
-		   && ((int32_t)qos->min_cpus_pj >= 0)) {
-		xstrcat(*cols, ", min_cpus_per_job");
-		xstrfmtcat(*vals, ", %u", qos->min_cpus_pj);
-		xstrfmtcat(*extra, ", min_cpus_per_job=%u", qos->min_cpus_pj);
-	}
-
 	if (qos->preempt_list && list_count(qos->preempt_list)) {
 		char *preempt_val = NULL;
 		char *tmp_char = NULL, *last_preempt = NULL;
@@ -489,6 +359,128 @@ static int _setup_qos_limits(slurmdb_qos_rec_t *qos,
 		xstrfmtcat(*extra, ", usage_thres=%f", qos->usage_thres);
 	}
 
+	/* When modifying anything below this comment it happens in
+	 * the actual function since we have to wait until we hear
+	 * about the original first.
+	 * What we do to make it known something needs to be changed
+	 * is we cat "" onto extra which will inform the caller
+	 * something needs changing.
+	 */
+
+	if (qos->grp_tres) {
+		if (!for_add) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", grp_tres");
+		slurmdb_combine_tres_strings(
+			&qos->grp_tres, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", qos->grp_tres);
+		xstrfmtcat(*extra, ", grp_tres='%s'", qos->grp_tres);
+	}
+
+	if (qos->grp_tres_mins) {
+		if (!for_add) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", grp_tres_mins");
+		slurmdb_combine_tres_strings(
+			&qos->grp_tres_mins, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", qos->grp_tres_mins);
+		xstrfmtcat(*extra, ", grp_tres_mins='%s'",
+			   qos->grp_tres_mins);
+	}
+
+	if (qos->grp_tres_run_mins) {
+		if (!for_add) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", grp_tres_run_mins");
+		slurmdb_combine_tres_strings(
+			&qos->grp_tres_run_mins, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", qos->grp_tres_run_mins);
+		xstrfmtcat(*extra, ", grp_tres_run_mins='%s'",
+			   qos->grp_tres_run_mins);
+	}
+
+	if (qos->max_tres_pj) {
+		if (!for_add) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", max_tres_pj");
+		slurmdb_combine_tres_strings(
+			&qos->max_tres_pj, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", qos->max_tres_pj);
+		xstrfmtcat(*extra, ", max_tres_pj='%s'", qos->max_tres_pj);
+	}
+
+	if (qos->max_tres_pn) {
+		if (!for_add) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", max_tres_pn");
+		slurmdb_combine_tres_strings(
+			&qos->max_tres_pn, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", qos->max_tres_pn);
+		xstrfmtcat(*extra, ", max_tres_pn='%s'", qos->max_tres_pn);
+	}
+
+	if (qos->max_tres_pu) {
+		if (!for_add) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", max_tres_pu");
+		slurmdb_combine_tres_strings(
+			&qos->max_tres_pu, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", qos->max_tres_pu);
+		xstrfmtcat(*extra, ", max_tres_pu='%s'", qos->max_tres_pu);
+	}
+
+	if (qos->max_tres_mins_pj) {
+		if (!for_add) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", max_tres_mins_pj");
+		slurmdb_combine_tres_strings(
+			&qos->max_tres_mins_pj, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", qos->max_tres_mins_pj);
+		xstrfmtcat(*extra, ", max_tres_mins_pj='%s'",
+			   qos->max_tres_mins_pj);
+	}
+
+	if (qos->max_tres_run_mins_pu) {
+		if (!for_add) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", max_tres_run_mins_pu");
+		slurmdb_combine_tres_strings(
+			&qos->max_tres_run_mins_pu, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", qos->max_tres_run_mins_pu);
+		xstrfmtcat(*extra, ", max_tres_run_mins_pu='%s'",
+			   qos->max_tres_run_mins_pu);
+	}
+
+	if (qos->min_tres_pj) {
+		if (!for_add) {
+			xstrcat(*extra, "");
+			goto end_modify;
+		}
+		xstrcat(*cols, ", min_tres_pj");
+		slurmdb_combine_tres_strings(
+			&qos->min_tres_pj, NULL, tres_str_flags);
+		xstrfmtcat(*vals, ", '%s'", qos->min_tres_pj);
+		xstrfmtcat(*extra, ", min_tres_pj='%s'", qos->min_tres_pj);
+	}
+
+end_modify:
+
 	return SLURM_SUCCESS;
 
 }
@@ -510,6 +502,9 @@ extern int as_mysql_add_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return ESLURM_DB_CONNECTION;
 
+	if (!is_user_min_admin_level(mysql_conn, uid, SLURMDB_ADMIN_SUPER_USER))
+		return ESLURM_ACCESS_DENIED;
+
 	user_name = uid_to_string((uid_t) uid);
 	itr = list_iterator_create(qos_list);
 	while ((object = list_next(itr))) {
@@ -611,7 +606,7 @@ extern List as_mysql_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 	char *vals = NULL, *extra = NULL, *query = NULL, *name_char = NULL;
 	time_t now = time(NULL);
 	char *user_name = NULL;
-	int set = 0;
+	int set = 0, i;
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 	char *tmp_char1=NULL, *tmp_char2=NULL;
@@ -626,6 +621,12 @@ extern List as_mysql_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
 
+	if (!is_user_min_admin_level(mysql_conn, uid,
+				     SLURMDB_ADMIN_SUPER_USER)) {
+		errno = ESLURM_ACCESS_DENIED;
+		return NULL;
+	}
+
 	xstrcat(extra, "where deleted=0");
 
 	if (qos_cond->description_list
@@ -689,9 +690,15 @@ extern List as_mysql_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 		error("Nothing to change");
 		return NULL;
 	}
-	query = xstrdup_printf("select name, preempt, id from %s %s;",
-			       qos_table, extra);
+
+	object = xstrdup(mqos_req_inx[0]);
+	for (i = 1; i < MQOS_COUNT; i++)
+		xstrfmtcat(object, ", %s", mqos_req_inx[i]);
+
+	query = xstrdup_printf("select %s from %s %s;",
+			       object, qos_table, extra);
 	xfree(extra);
+	xfree(object);
 	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
 		xfree(query);
 		FREE_NULL_BITMAP(preempt_bitstr);
@@ -702,12 +709,12 @@ extern List as_mysql_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 	ret_list = list_create(slurm_destroy_char);
 	while ((row = mysql_fetch_row(result))) {
 		slurmdb_qos_rec_t *qos_rec = NULL;
-		uint32_t id = slurm_atoul(row[2]);
+		uint32_t id = slurm_atoul(row[MQOS_ID]);
 		if (preempt_bitstr) {
 			if (_preemption_loop(mysql_conn, id, preempt_bitstr))
 				break;
 		}
-		object = xstrdup(row[0]);
+		object = xstrdup(row[MQOS_NAME]);
 		list_append(ret_list, object);
 		if (!rc) {
 			xstrfmtcat(name_char, "(name='%s'", object);
@@ -721,27 +728,47 @@ extern List as_mysql_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 		qos_rec->id = id;
 		qos_rec->flags = qos->flags;
 
-		qos_rec->grp_cpus = qos->grp_cpus;
 		qos_rec->grace_time = qos->grace_time;
-		qos_rec->grp_cpu_mins = qos->grp_cpu_mins;
-		qos_rec->grp_cpu_run_mins = qos->grp_cpu_run_mins;
+
+		mod_tres_str(&qos_rec->grp_tres,
+			     qos->grp_tres, row[MQOS_GT],
+			     NULL, "grp_tres", &vals, qos_rec->id, 0);
+		mod_tres_str(&qos_rec->grp_tres_mins,
+			     qos->grp_tres_mins, row[MQOS_GTM],
+			     NULL, "grp_tres_mins", &vals, qos_rec->id, 0);
+		mod_tres_str(&qos_rec->grp_tres_run_mins,
+			     qos->grp_tres_run_mins, row[MQOS_GTRM],
+			     NULL, "grp_tres_run_mins", &vals,
+			     qos_rec->id, 0);
+
 		qos_rec->grp_jobs = qos->grp_jobs;
-		qos_rec->grp_mem = qos->grp_mem;
-		qos_rec->grp_nodes = qos->grp_nodes;
 		qos_rec->grp_submit_jobs = qos->grp_submit_jobs;
 		qos_rec->grp_wall = qos->grp_wall;
 
-		qos_rec->max_cpus_pj = qos->max_cpus_pj;
-		qos_rec->max_cpus_pu = qos->max_cpus_pu;
-		qos_rec->max_cpu_mins_pj = qos->max_cpu_mins_pj;
-		qos_rec->max_cpu_run_mins_pu = qos->max_cpu_run_mins_pu;
+		mod_tres_str(&qos_rec->max_tres_pj,
+			     qos->max_tres_pj, row[MQOS_MTPJ],
+			     NULL, "max_tres_pj", &vals, qos_rec->id, 0);
+		mod_tres_str(&qos_rec->max_tres_pn,
+			     qos->max_tres_pn, row[MQOS_MTPN],
+			     NULL, "max_tres_pn", &vals, qos_rec->id, 0);
+		mod_tres_str(&qos_rec->max_tres_pu,
+			     qos->max_tres_pu, row[MQOS_MTPU],
+			     NULL, "max_tres_pu", &vals, qos_rec->id, 0);
+		mod_tres_str(&qos_rec->max_tres_mins_pj,
+			     qos->max_tres_mins_pj, row[MQOS_MTMPJ],
+			     NULL, "max_tres_mins_pj", &vals, qos_rec->id, 0);
+		mod_tres_str(&qos_rec->max_tres_run_mins_pu,
+			     qos->max_tres_run_mins_pu, row[MQOS_MTRM],
+			     NULL, "max_tres_run_mins_pu", &vals,
+			     qos_rec->id, 0);
+
 		qos_rec->max_jobs_pu  = qos->max_jobs_pu;
-		qos_rec->max_nodes_pj = qos->max_nodes_pj;
-		qos_rec->max_nodes_pu = qos->max_nodes_pu;
 		qos_rec->max_submit_jobs_pu  = qos->max_submit_jobs_pu;
 		qos_rec->max_wall_pj = qos->max_wall_pj;
 
-		qos_rec->min_cpus_pj = qos->min_cpus_pj;
+		mod_tres_str(&qos_rec->min_tres_pj,
+			     qos->min_tres_pj, row[MQOS_MITPJ],
+			     NULL, "min_tres_pj", &vals, qos_rec->id, 0);
 
 		qos_rec->preempt_mode = qos->preempt_mode;
 		qos_rec->priority = qos->priority;
@@ -753,8 +780,9 @@ extern List as_mysql_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 			bool cleared = 0;
 
 			qos_rec->preempt_bitstr = bit_alloc(g_qos_count);
-			if (row[1] && row[1][0])
-				bit_unfmt(qos_rec->preempt_bitstr, row[1]+1);
+			if (row[MQOS_PREEMPT] && row[MQOS_PREEMPT][0])
+				bit_unfmt(qos_rec->preempt_bitstr,
+					  row[MQOS_PREEMPT]+1);
 
 			while ((new_preempt = list_next(new_preempt_itr))) {
 				if (new_preempt[0] == '-') {
@@ -795,7 +823,7 @@ extern List as_mysql_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 		xfree(vals);
 		xfree(name_char);
 		xfree(query);
-		list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 		ret_list = NULL;
 		errno = ESLURM_QOS_PREEMPTION_LOOP;
 		return ret_list;
@@ -821,7 +849,7 @@ extern List as_mysql_modify_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 	xfree(vals);
 	if (rc == SLURM_ERROR) {
 		error("Couldn't modify qos");
-		list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 		ret_list = NULL;
 	}
 
@@ -851,6 +879,12 @@ extern List as_mysql_remove_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
 
+	if (!is_user_min_admin_level(
+		    mysql_conn, uid, SLURMDB_ADMIN_SUPER_USER)) {
+		errno = ESLURM_ACCESS_DENIED;
+		return NULL;
+	}
+
 	xstrcat(extra, "where deleted=0");
 	if (qos_cond->description_list
 	    && list_count(qos_cond->description_list)) {
@@ -965,7 +999,7 @@ extern List as_mysql_remove_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 		reset_mysql_conn(mysql_conn);
 		xfree(assoc_char);
 		xfree(name_char);
-		list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 		return NULL;
 	}
 
@@ -987,7 +1021,7 @@ extern List as_mysql_remove_qos(mysql_conn_t *mysql_conn, uint32_t uid,
 	xfree(name_char);
 	xfree(user_name);
 	if (rc == SLURM_ERROR) {
-		list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 		return NULL;
 	}
 
@@ -1015,21 +1049,18 @@ extern List as_mysql_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		"id",
 		"flags",
 		"grace_time",
-		"grp_cpu_mins",
-		"grp_cpu_run_mins",
-		"grp_cpus",
+		"grp_tres_mins",
+		"grp_tres_run_mins",
+		"grp_tres",
 		"grp_jobs",
-		"grp_mem",
-		"grp_nodes",
 		"grp_submit_jobs",
 		"grp_wall",
-		"max_cpu_mins_per_job",
-		"max_cpu_run_mins_per_user",
-		"max_cpus_per_job",
-		"max_cpus_per_user",
+		"max_tres_mins_pj",
+		"max_tres_run_mins_pu",
+		"max_tres_pj",
+		"max_tres_pn",
+		"max_tres_pu",
 		"max_jobs_per_user",
-		"max_nodes_per_job",
-		"max_nodes_per_user",
 		"max_submit_jobs_per_user",
 		"max_wall_duration_per_job",
 		"substr(preempt, 1, length(preempt) - 1)",
@@ -1037,7 +1068,7 @@ extern List as_mysql_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		"priority",
 		"usage_factor",
 		"usage_thres",
-		"min_cpus_per_job",
+		"min_tres_pj",
 	};
 	enum {
 		QOS_REQ_NAME,
@@ -1045,21 +1076,18 @@ extern List as_mysql_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		QOS_REQ_ID,
 		QOS_REQ_FLAGS,
 		QOS_REQ_GRACE,
-		QOS_REQ_GCM,
-		QOS_REQ_GCRM,
-		QOS_REQ_GC,
+		QOS_REQ_GTM,
+		QOS_REQ_GTRM,
+		QOS_REQ_GT,
 		QOS_REQ_GJ,
-		QOS_REQ_GMEM,
-		QOS_REQ_GN,
 		QOS_REQ_GSJ,
 		QOS_REQ_GW,
-		QOS_REQ_MCMPJ,
-		QOS_REQ_MCRM,
-		QOS_REQ_MCPJ,
-		QOS_REQ_MCPU,
+		QOS_REQ_MTMPJ,
+		QOS_REQ_MTRM,
+		QOS_REQ_MTPJ,
+		QOS_REQ_MTPN,
+		QOS_REQ_MTPU,
 		QOS_REQ_MJPU,
-		QOS_REQ_MNPJ,
-		QOS_REQ_MNPU,
 		QOS_REQ_MSJPU,
 		QOS_REQ_MWPJ,
 		QOS_REQ_PREE,
@@ -1067,7 +1095,7 @@ extern List as_mysql_get_qos(mysql_conn_t *mysql_conn, uid_t uid,
 		QOS_REQ_PRIO,
 		QOS_REQ_UF,
 		QOS_REQ_UT,
-		QOS_REQ_MICPJ,
+		QOS_REQ_MITPJ,
 		QOS_REQ_COUNT
 	};
 
@@ -1170,30 +1198,17 @@ empty:
 		if (row[QOS_REQ_GRACE])
 			qos->grace_time = slurm_atoul(row[QOS_REQ_GRACE]);
 
-		if (row[QOS_REQ_GCM])
-			qos->grp_cpu_mins = slurm_atoull(row[QOS_REQ_GCM]);
-		else
-			qos->grp_cpu_mins = INFINITE;
-		if (row[QOS_REQ_GCRM])
-			qos->grp_cpu_run_mins = slurm_atoull(row[QOS_REQ_GCRM]);
-		else
-			qos->grp_cpu_run_mins = INFINITE;
-		if (row[QOS_REQ_GC])
-			qos->grp_cpus = slurm_atoul(row[QOS_REQ_GC]);
-		else
-			qos->grp_cpus = INFINITE;
+		if (row[QOS_REQ_GT][0])
+			qos->grp_tres = xstrdup(row[QOS_REQ_GT]);
+		if (row[QOS_REQ_GTM][0])
+			qos->grp_tres_mins = xstrdup(row[QOS_REQ_GTM]);
+		if (row[QOS_REQ_GTRM][0])
+			qos->grp_tres_run_mins = xstrdup(row[QOS_REQ_GTRM]);
+
 		if (row[QOS_REQ_GJ])
 			qos->grp_jobs = slurm_atoul(row[QOS_REQ_GJ]);
 		else
 			qos->grp_jobs = INFINITE;
-		if (row[QOS_REQ_GMEM])
-			qos->grp_mem = slurm_atoul(row[QOS_REQ_GMEM]);
-		else
-			qos->grp_mem = INFINITE;
-		if (row[QOS_REQ_GN])
-			qos->grp_nodes = slurm_atoul(row[QOS_REQ_GN]);
-		else
-			qos->grp_nodes = INFINITE;
 		if (row[QOS_REQ_GSJ])
 			qos->grp_submit_jobs = slurm_atoul(row[QOS_REQ_GSJ]);
 		else
@@ -1203,40 +1218,31 @@ empty:
 		else
 			qos->grp_wall = INFINITE;
 
-		if (row[QOS_REQ_MCMPJ])
-			qos->max_cpu_mins_pj = slurm_atoull(row[QOS_REQ_MCMPJ]);
-		else
-			qos->max_cpu_mins_pj = (uint64_t)INFINITE;
-		if (row[QOS_REQ_MCRM])
-			qos->max_cpu_run_mins_pu =
-				slurm_atoull(row[QOS_REQ_MCRM]);
-		else
-			qos->max_cpu_run_mins_pu = (uint64_t)INFINITE;
-		if (row[QOS_REQ_MCPJ])
-			qos->max_cpus_pj = slurm_atoul(row[QOS_REQ_MCPJ]);
-		else
-			qos->max_cpus_pj = INFINITE;
-		if (row[QOS_REQ_MCPU])
-			qos->max_cpus_pu = slurm_atoul(row[QOS_REQ_MCPU]);
-		else
-			qos->max_cpus_pu = INFINITE;
 		if (row[QOS_REQ_MJPU])
 			qos->max_jobs_pu = slurm_atoul(row[QOS_REQ_MJPU]);
 		else
 			qos->max_jobs_pu = INFINITE;
-		if (row[QOS_REQ_MNPJ])
-			qos->max_nodes_pj = slurm_atoul(row[QOS_REQ_MNPJ]);
-		else
-			qos->max_nodes_pj = INFINITE;
-		if (row[QOS_REQ_MNPU])
-			qos->max_nodes_pu = slurm_atoul(row[QOS_REQ_MNPU]);
-		else
-			qos->max_nodes_pu = INFINITE;
 		if (row[QOS_REQ_MSJPU])
 			qos->max_submit_jobs_pu =
 				slurm_atoul(row[QOS_REQ_MSJPU]);
 		else
 			qos->max_submit_jobs_pu = INFINITE;
+
+		if (row[QOS_REQ_MTPJ][0])
+			qos->max_tres_pj = xstrdup(row[QOS_REQ_MTPJ]);
+
+		if (row[QOS_REQ_MTPN][0])
+			qos->max_tres_pn = xstrdup(row[QOS_REQ_MTPN]);
+
+		if (row[QOS_REQ_MTPU][0])
+			qos->max_tres_pu = xstrdup(row[QOS_REQ_MTPU]);
+
+		if (row[QOS_REQ_MTMPJ][0])
+			qos->max_tres_mins_pj = xstrdup(row[QOS_REQ_MTMPJ]);
+
+		if (row[QOS_REQ_MTRM][0])
+			qos->max_tres_run_mins_pu = xstrdup(row[QOS_REQ_MTRM]);
+
 		if (row[QOS_REQ_MWPJ])
 			qos->max_wall_pj = slurm_atoul(row[QOS_REQ_MWPJ]);
 		else
@@ -1260,10 +1266,8 @@ empty:
 		else
 			qos->usage_thres = (double)INFINITE;
 
-		if (row[QOS_REQ_MICPJ])
-			qos->min_cpus_pj = slurm_atoul(row[QOS_REQ_MICPJ]);
-		else
-			qos->min_cpus_pj = INFINITE;
+		if (row[QOS_REQ_MITPJ][0])
+			qos->min_tres_pj = xstrdup(row[QOS_REQ_MITPJ]);
 	}
 	mysql_free_result(result);
 
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_resource.c b/src/plugins/accounting_storage/mysql/as_mysql_resource.c
index 47ef5157c..33186bb76 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_resource.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_resource.c
@@ -682,6 +682,9 @@ extern int as_mysql_add_res(mysql_conn_t *mysql_conn, uint32_t uid,
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return ESLURM_DB_CONNECTION;
 
+	if (!is_user_min_admin_level(mysql_conn, uid, SLURMDB_ADMIN_SUPER_USER))
+		return ESLURM_ACCESS_DENIED;
+
 	user_name = uid_to_string((uid_t) uid);
 	itr = list_iterator_create(res_list);
 	while ((object = list_next(itr))) {
@@ -877,6 +880,11 @@ extern List as_mysql_remove_res(mysql_conn_t *mysql_conn, uint32_t uid,
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
 
+	if (!is_user_min_admin_level(
+		    mysql_conn, uid, SLURMDB_ADMIN_SUPER_USER)) {
+		errno = ESLURM_ACCESS_DENIED;
+		return NULL;
+	}
 	/* force to only do non-deleted server resources */
 	res_cond->with_deleted = 0;
 
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_resv.c b/src/plugins/accounting_storage/mysql/as_mysql_resv.c
index 8cc14e096..657cbae29 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_resv.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_resv.c
@@ -91,12 +91,6 @@ static int _setup_resv_limits(slurmdb_reservation_rec_t *resv,
 		xstrfmtcat(*extra, ", assoclist='%s'", resv->assocs+start);
 	}
 
-	if (resv->cpus != (uint32_t)NO_VAL) {
-		xstrcat(*cols, ", cpus");
-		xstrfmtcat(*vals, ", %u", resv->cpus);
-		xstrfmtcat(*extra, ", cpus=%u", resv->cpus);
-	}
-
 	if (resv->flags != NO_VAL) {
 		xstrcat(*cols, ", flags");
 		xstrfmtcat(*vals, ", %u", resv->flags);
@@ -133,6 +127,11 @@ static int _setup_resv_limits(slurmdb_reservation_rec_t *resv,
 		xstrfmtcat(*extra, ", time_start=%ld", resv->time_start);
 	}
 
+	if (resv->tres_str) {
+		xstrcat(*cols, ", tres");
+		xstrfmtcat(*vals, ", '%s'", resv->tres_str);
+		xstrfmtcat(*extra, ", tres='%s'", resv->tres_str);
+	}
 
 	return SLURM_SUCCESS;
 }
@@ -213,7 +212,7 @@ extern int as_mysql_add_resv(mysql_conn_t *mysql_conn,
 {
 	int rc = SLURM_SUCCESS;
 	char *cols = NULL, *vals = NULL, *extra = NULL,
-		*query = NULL;//, *tmp_extra = NULL;
+		*query = NULL;
 
 	if (!resv) {
 		error("No reservation was given to edit");
@@ -221,15 +220,15 @@ extern int as_mysql_add_resv(mysql_conn_t *mysql_conn,
 	}
 
 	if (!resv->id) {
-		error("We need an id to edit a reservation.");
+		error("We need an id to add a reservation.");
 		return SLURM_ERROR;
 	}
 	if (!resv->time_start) {
-		error("We need a start time to edit a reservation.");
+		error("We need a start time to add a reservation.");
 		return SLURM_ERROR;
 	}
 	if (!resv->cluster || !resv->cluster[0]) {
-		error("We need a cluster name to edit a reservation.");
+		error("We need a cluster name to add a reservation.");
 		return SLURM_ERROR;
 	}
 
@@ -239,6 +238,7 @@ extern int as_mysql_add_resv(mysql_conn_t *mysql_conn,
 		   "insert into \"%s_%s\" (id_resv%s) values (%u%s) "
 		   "on duplicate key update deleted=0%s;",
 		   resv->cluster, resv_table, cols, resv->id, vals, extra);
+
 	if (debug_flags & DEBUG_FLAG_DB_RESV)
 		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
 
@@ -259,29 +259,30 @@ extern int as_mysql_modify_resv(mysql_conn_t *mysql_conn,
 	MYSQL_ROW row;
 	int rc = SLURM_SUCCESS;
 	char *cols = NULL, *vals = NULL, *extra = NULL,
-		*query = NULL;//, *tmp_extra = NULL;
+		*query = NULL;
 	time_t start = 0, now = time(NULL);
 	int i;
 	int set = 0;
+
 	char *resv_req_inx[] = {
 		"assoclist",
 		"time_start",
 		"time_end",
-		"cpus",
 		"resv_name",
 		"nodelist",
 		"node_inx",
-		"flags"
+		"flags",
+		"tres"
 	};
 	enum {
 		RESV_ASSOCS,
 		RESV_START,
 		RESV_END,
-		RESV_CPU,
 		RESV_NAME,
 		RESV_NODES,
 		RESV_NODE_INX,
 		RESV_FLAGS,
+		RESV_TRES,
 		RESV_COUNT
 	};
 
@@ -310,7 +311,7 @@ extern int as_mysql_modify_resv(mysql_conn_t *mysql_conn,
 	}
 
 	xstrfmtcat(cols, "%s", resv_req_inx[0]);
-	for(i=1; i<RESV_COUNT; i++) {
+	for (i=1; i<RESV_COUNT; i++) {
 		xstrfmtcat(cols, ", %s", resv_req_inx[i]);
 	}
 
@@ -380,11 +381,6 @@ try_again:
 	else if (row[RESV_ASSOCS] && row[RESV_ASSOCS][0])
 		resv->assocs = xstrdup(row[RESV_ASSOCS]);
 
-	if (resv->cpus != (uint32_t)NO_VAL)
-		set = 1;
-	else
-		resv->cpus = slurm_atoul(row[RESV_CPU]);
-
 	if (resv->flags != NO_VAL)
 		set = 1;
 	else
@@ -400,6 +396,11 @@ try_again:
 	if (!resv->time_end)
 		resv->time_end = slurm_atoul(row[RESV_END]);
 
+	if (resv->tres_str)
+		set = 1;
+	else if (row[RESV_TRES] && row[RESV_TRES][0])
+		resv->tres_str = xstrdup(row[RESV_TRES]);
+
 	mysql_free_result(result);
 
 	_setup_resv_limits(resv, &cols, &vals, &extra);
@@ -422,7 +423,7 @@ try_again:
 		 * entry. */
 		query = xstrdup_printf("update \"%s_%s\" set time_end=%ld "
 				       "where deleted=0 && id_resv=%u "
-				       "&& time_start=%ld;",
+				       "and time_start=%ld;",
 				       resv->cluster, resv_table,
 				       resv->time_start-1,
 				       resv->id, start);
@@ -518,25 +519,25 @@ extern List as_mysql_get_resvs(mysql_conn_t *mysql_conn, uid_t uid,
 	char *resv_req_inx[] = {
 		"id_resv",
 		"assoclist",
-		"cpus",
 		"flags",
 		"nodelist",
 		"node_inx",
 		"resv_name",
 		"time_start",
 		"time_end",
+		"tres"
 	};
 
 	enum {
 		RESV_REQ_ID,
 		RESV_REQ_ASSOCS,
-		RESV_REQ_CPUS,
 		RESV_REQ_FLAGS,
 		RESV_REQ_NODES,
 		RESV_REQ_NODE_INX,
 		RESV_REQ_NAME,
 		RESV_REQ_START,
 		RESV_REQ_END,
+		RESV_REQ_TRES,
 		RESV_REQ_COUNT
 	};
 
@@ -586,6 +587,7 @@ empty:
 
 	if (use_cluster_list == as_mysql_cluster_list)
 		slurm_mutex_lock(&as_mysql_cluster_list_lock);
+
 	itr = list_iterator_create(use_cluster_list);
 	while ((cluster_name = list_next(itr))) {
 		if (query)
@@ -593,7 +595,8 @@ empty:
 		//START_TIMER;
 		xstrfmtcat(query, "select distinct %s,'%s' as cluster "
 			   "from \"%s_%s\" as t1%s",
-			   tmp, cluster_name, cluster_name, resv_table, extra);
+			   tmp, cluster_name, cluster_name, resv_table,
+			   extra ? extra : "");
 	}
 	list_iterator_destroy(itr);
 	if (use_cluster_list == as_mysql_cluster_list)
@@ -608,8 +611,7 @@ empty:
 		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
 	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
 		xfree(query);
-		if (local_cluster_list)
-			list_destroy(local_cluster_list);
+		FREE_NULL_LIST(local_cluster_list);
 		return NULL;
 	}
 	xfree(query);
@@ -634,16 +636,15 @@ empty:
 		}
 		resv->name = xstrdup(row[RESV_REQ_NAME]);
 		resv->cluster = xstrdup(row[RESV_REQ_COUNT]);
-		resv->cpus = slurm_atoul(row[RESV_REQ_CPUS]);
 		resv->assocs = xstrdup(row[RESV_REQ_ASSOCS]);
 		resv->nodes = xstrdup(row[RESV_REQ_NODES]);
 		resv->time_start = start;
 		resv->time_end = slurm_atoul(row[RESV_REQ_END]);
 		resv->flags = slurm_atoul(row[RESV_REQ_FLAGS]);
+		resv->tres_str = xstrdup(row[RESV_REQ_TRES]);
 	}
 
-	if (local_cluster_list)
-		list_destroy(local_cluster_list);
+	FREE_NULL_LIST(local_cluster_list);
 
 	if (with_usage && resv_list && list_count(resv_list)) {
 		List job_list = as_mysql_jobacct_process_get_jobs(
@@ -680,9 +681,9 @@ empty:
 				if ((elapsed = (end - start)) < 1)
 					continue;
 
-				if (job->alloc_cpus)
-					resv->alloc_secs +=
-						elapsed * job->alloc_cpus;
+				slurmdb_transfer_tres_time(
+					&resv->tres_list, job->tres_alloc_str,
+					elapsed);
 			}
 			list_iterator_reset(itr2);
 			if (!set) {
@@ -694,14 +695,10 @@ empty:
 		list_iterator_destroy(itr2);
 		list_iterator_destroy(itr);
 	no_jobs:
-		if (job_list)
-			list_destroy(job_list);
+		FREE_NULL_LIST(job_list);
 	}
 
-	if (job_cond.resvid_list) {
-		list_destroy(job_cond.resvid_list);
-		job_cond.resvid_list = NULL;
-	}
+	FREE_NULL_LIST(job_cond.resvid_list);
 
 	/* free result after we use the list with resv id's in it. */
 	mysql_free_result(result);
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_rollup.c b/src/plugins/accounting_storage/mysql/as_mysql_rollup.c
index 308b792c4..1cfa43f76 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_rollup.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_rollup.c
@@ -41,42 +41,66 @@
 #include "as_mysql_rollup.h"
 #include "as_mysql_archive.h"
 #include "src/common/parse_time.h"
+#include "src/common/slurm_time.h"
+
+enum {
+	TIME_ALLOC,
+	TIME_DOWN,
+	TIME_PDOWN,
+	TIME_RESV
+};
+
+enum {
+	ASSOC_TABLES,
+	WCKEY_TABLES
+};
+
+typedef struct {
+	uint64_t count;
+	uint32_t id;
+	uint64_t time_alloc;
+	uint64_t time_down;
+	uint64_t time_idle;
+	uint64_t time_over;
+	uint64_t time_pd;
+	uint64_t time_resv;
+	uint64_t total_time;
+} local_tres_usage_t;
 
 typedef struct {
 	int id;
-	uint64_t a_cpu;
-	uint64_t energy;
+	List loc_tres;
 } local_id_usage_t;
 
 typedef struct {
+	time_t end;
 	int id; /*only needed for reservations */
-	uint64_t total_time;
-	uint64_t a_cpu;
-	int cpu_count;
-	uint64_t d_cpu;
-	uint64_t i_cpu;
-	uint64_t o_cpu;
-	uint64_t pd_cpu;
-	uint64_t r_cpu;
+	List loc_tres;
 	time_t start;
-	time_t end;
-	uint64_t energy;
 } local_cluster_usage_t;
 
 typedef struct {
-	uint64_t a_cpu;
+	time_t end;
 	int id;
 	List local_assocs; /* list of assocs to spread unused time
 			      over of type local_id_usage_t */
-	uint64_t total_time;
+	List loc_tres;
 	time_t start;
-	time_t end;
 } local_resv_usage_t;
 
+static void _destroy_local_tres_usage(void *object)
+{
+	local_tres_usage_t *a_usage = (local_tres_usage_t *)object;
+	if (a_usage) {
+		xfree(a_usage);
+	}
+}
+
 static void _destroy_local_id_usage(void *object)
 {
 	local_id_usage_t *a_usage = (local_id_usage_t *)object;
 	if (a_usage) {
+		FREE_NULL_LIST(a_usage->loc_tres);
 		xfree(a_usage);
 	}
 }
@@ -85,6 +109,7 @@ static void _destroy_local_cluster_usage(void *object)
 {
 	local_cluster_usage_t *c_usage = (local_cluster_usage_t *)object;
 	if (c_usage) {
+		FREE_NULL_LIST(c_usage->loc_tres);
 		xfree(c_usage);
 	}
 }
@@ -93,12 +118,237 @@ static void _destroy_local_resv_usage(void *object)
 {
 	local_resv_usage_t *r_usage = (local_resv_usage_t *)object;
 	if (r_usage) {
-		if (r_usage->local_assocs)
-			list_destroy(r_usage->local_assocs);
+		FREE_NULL_LIST(r_usage->local_assocs);
+		FREE_NULL_LIST(r_usage->loc_tres);
 		xfree(r_usage);
 	}
 }
 
+static int _find_loc_tres(void *x, void *key)
+{
+	local_tres_usage_t *loc_tres = (local_tres_usage_t *)x;
+	uint32_t tres_id = *(uint32_t *)key;
+
+	if (loc_tres->id == tres_id)
+		return 1;
+	return 0;
+}
+
+static int _find_id_usage(void *x, void *key)
+{
+	local_id_usage_t *loc = (local_id_usage_t *)x;
+	uint32_t id = *(uint32_t *)key;
+
+	if (loc->id == id)
+		return 1;
+	return 0;
+}
+
+static void _remove_job_tres_time_from_cluster(List c_tres, List j_tres,
+					       int seconds)
+{
+	ListIterator c_itr;
+	local_tres_usage_t *loc_c_tres, *loc_j_tres;
+	uint64_t time;
+
+	if ((seconds <= 0) || !c_tres || !j_tres ||
+	    !list_count(c_tres) || !list_count(j_tres))
+		return;
+
+	c_itr = list_iterator_create(c_tres);
+	while ((loc_c_tres = list_next(c_itr))) {
+		if (!(loc_j_tres = list_find_first(
+			      j_tres, _find_loc_tres, &loc_c_tres->id)))
+			continue;
+		time = seconds * loc_j_tres->count;
+
+		if (time >= loc_c_tres->total_time)
+			loc_c_tres->total_time = 0;
+		else
+			loc_c_tres->total_time -= time;
+	}
+	list_iterator_destroy(c_itr);
+}
+
+
+static local_tres_usage_t *_add_time_tres(List tres_list, int type, uint32_t id,
+					  uint64_t time, bool times_count)
+{
+	local_tres_usage_t *loc_tres;
+
+	if (!time)
+		return NULL;
+
+	loc_tres = list_find_first(tres_list, _find_loc_tres, &id);
+
+	if (!loc_tres) {
+		if (times_count)
+			return NULL;
+		loc_tres = xmalloc(sizeof(local_tres_usage_t));
+		loc_tres->id = id;
+		list_append(tres_list, loc_tres);
+	}
+
+	if (times_count) {
+		if (!loc_tres->count)
+			return NULL;
+		time *= loc_tres->count;
+	}
+
+	switch (type) {
+	case TIME_ALLOC:
+		loc_tres->time_alloc += time;
+		break;
+	case TIME_DOWN:
+		loc_tres->time_down += time;
+		break;
+	case TIME_PDOWN:
+		loc_tres->time_pd += time;
+		break;
+	case TIME_RESV:
+		loc_tres->time_resv += time;
+		break;
+	default:
+		error("_add_time_tres: unknown type %d given", type);
+		xassert(0);
+		break;
+	}
+
+	return loc_tres;
+}
+
+static void _add_time_tres_list(List tres_list_out, List tres_list_in, int type,
+				uint64_t time_in, bool times_count)
+{
+	ListIterator itr;
+	local_tres_usage_t *loc_tres;
+
+	xassert(tres_list_in);
+	xassert(tres_list_out);
+
+	itr = list_iterator_create(tres_list_in);
+	while ((loc_tres = list_next(itr)))
+		_add_time_tres(tres_list_out, type,
+			       loc_tres->id,
+			       time_in ? time_in : loc_tres->total_time,
+			       times_count);
+	list_iterator_destroy(itr);
+}
+
+static void _add_job_alloc_time_to_cluster(List c_tres_list, List j_tres)
+{
+	ListIterator c_itr = list_iterator_create(c_tres_list);
+	local_tres_usage_t *loc_c_tres, *loc_j_tres;
+
+	while ((loc_c_tres = list_next(c_itr))) {
+		if (!(loc_j_tres = list_find_first(
+			      j_tres, _find_loc_tres, &loc_c_tres->id)))
+			continue;
+		loc_c_tres->time_alloc += loc_j_tres->time_alloc;
+	}
+	list_iterator_destroy(c_itr);
+}
+
+static void _setup_cluster_tres(List tres_list, uint32_t id,
+				uint64_t count, int seconds)
+{
+	local_tres_usage_t *loc_tres =
+		list_find_first(tres_list, _find_loc_tres, &id);
+
+	if (!loc_tres) {
+		loc_tres = xmalloc(sizeof(local_tres_usage_t));
+		loc_tres->id = id;
+		list_append(tres_list, loc_tres);
+	}
+
+	loc_tres->count = count;
+	loc_tres->total_time += seconds * loc_tres->count;
+}
+
+static void _add_tres_2_list(List tres_list, char *tres_str, int seconds)
+{
+	char *tmp_str = tres_str;
+	int id;
+	uint64_t count;
+
+	xassert(tres_list);
+
+	if (!tres_str || !tres_str[0])
+		return;
+
+	while (tmp_str) {
+		id = atoi(tmp_str);
+		if (id < 1) {
+			error("_add_tres_2_list: no id "
+			      "found at %s instead", tmp_str);
+			break;
+		}
+
+		/* We don't run rollup on a node basis
+		 * because they are shared resources on
+		 * many systems so it will almost always
+		 * have over committed resources.
+		 */
+		if (id != TRES_NODE) {
+			if (!(tmp_str = strchr(tmp_str, '='))) {
+				error("_add_tres_2_list: no value found");
+				xassert(0);
+				break;
+			}
+			count = slurm_atoull(++tmp_str);
+			_setup_cluster_tres(tres_list, id, count, seconds);
+		}
+
+		if (!(tmp_str = strchr(tmp_str, ',')))
+			break;
+		tmp_str++;
+	}
+
+	return;
+}
+
+static void _add_tres_time_2_list(List tres_list, char *tres_str,
+				  int type, int seconds, bool times_count)
+{
+	char *tmp_str = tres_str;
+	int id;
+	uint64_t time, count;
+	local_tres_usage_t *loc_tres;
+
+	xassert(tres_list);
+
+	if (!tres_str || !tres_str[0])
+		return;
+
+	while (tmp_str) {
+		id = atoi(tmp_str);
+		if (id < 1) {
+			error("_add_tres_time_2_list: no id "
+			      "found at %s", tmp_str);
+			break;
+		}
+		if (!(tmp_str = strchr(tmp_str, '='))) {
+			error("_add_tres_time_2_list: no value found for "
+			      "id %d '%s'", id, tres_str);
+			xassert(0);
+			break;
+		}
+		count = slurm_atoull(++tmp_str);
+		time = count * seconds;
+
+		loc_tres = _add_time_tres(tres_list, type, id,
+					  time, times_count);
+		if (loc_tres && !loc_tres->count)
+			loc_tres->count = count;
+
+		if (!(tmp_str = strchr(tmp_str, ',')))
+			break;
+		tmp_str++;
+	}
+
+	return;
+}
+
 static int _process_purge(mysql_conn_t *mysql_conn,
 			  char *cluster_name,
 			  uint16_t archive_data,
@@ -150,46 +400,49 @@ static int _process_purge(mysql_conn_t *mysql_conn,
 
 	arch_cond.job_cond = &job_cond;
 	rc = as_mysql_jobacct_process_archive(mysql_conn, &arch_cond);
-	list_destroy(job_cond.cluster_list);
+	FREE_NULL_LIST(job_cond.cluster_list);
 
 	return rc;
 }
 
-static int _process_cluster_usage(mysql_conn_t *mysql_conn,
-				  char *cluster_name,
-				  time_t curr_start, time_t curr_end,
-				  time_t now, local_cluster_usage_t *c_usage)
+static void _setup_cluster_tres_usage(mysql_conn_t *mysql_conn,
+				      char *cluster_name,
+				      time_t curr_start, time_t curr_end,
+				      time_t now, time_t use_start,
+				      local_tres_usage_t *loc_tres,
+				      char **query)
 {
-	int rc = SLURM_SUCCESS;
-	char *query = NULL;
-	uint64_t total_used;
 	char start_char[20], end_char[20];
+	uint64_t total_used;
+
+	if (!loc_tres)
+		return;
 
-	if (!c_usage)
-		return rc;
 	/* Now put the lists into the usage tables */
 
 	/* sanity check to make sure we don't have more
 	   allocated cpus than possible. */
-	if (c_usage->total_time < c_usage->a_cpu) {
+	if (loc_tres->total_time
+	    && (loc_tres->total_time < loc_tres->time_alloc)) {
 		slurm_make_time_str(&curr_start, start_char,
 				    sizeof(start_char));
 		slurm_make_time_str(&curr_end, end_char,
 				    sizeof(end_char));
 		error("We have more allocated time than is "
 		      "possible (%"PRIu64" > %"PRIu64") for "
-		      "cluster %s(%d) from %s - %s",
-		      c_usage->a_cpu, c_usage->total_time,
-		      cluster_name, c_usage->cpu_count,
-		      start_char, end_char);
-		c_usage->a_cpu = c_usage->total_time;
+		      "cluster %s(%"PRIu64") from %s - %s tres %u",
+		      loc_tres->time_alloc, loc_tres->total_time,
+		      cluster_name, loc_tres->count,
+		      start_char, end_char, loc_tres->id);
+		loc_tres->time_alloc = loc_tres->total_time;
 	}
 
-	total_used = c_usage->a_cpu + c_usage->d_cpu + c_usage->pd_cpu;
+	total_used = loc_tres->time_alloc +
+		loc_tres->time_down + loc_tres->time_pd;
 
 	/* Make sure the total time we care about
 	   doesn't go over the limit */
-	if (c_usage->total_time < total_used) {
+	if (loc_tres->total_time && (loc_tres->total_time < total_used)) {
 		int64_t overtime;
 
 		slurm_make_time_str(&curr_start, start_char,
@@ -199,35 +452,43 @@ static int _process_cluster_usage(mysql_conn_t *mysql_conn,
 		error("We have more time than is "
 		      "possible (%"PRIu64"+%"PRIu64"+%"
 		      PRIu64")(%"PRIu64") > %"PRIu64" for "
-		      "cluster %s(%d) from %s - %s",
-		      c_usage->a_cpu, c_usage->d_cpu,
-		      c_usage->pd_cpu, total_used,
-		      c_usage->total_time,
-		      cluster_name, c_usage->cpu_count,
-		      start_char, end_char);
+		      "cluster %s(%"PRIu64") from %s - %s tres %u",
+		      loc_tres->time_alloc, loc_tres->time_down,
+		      loc_tres->time_pd, total_used,
+		      loc_tres->total_time,
+		      cluster_name, loc_tres->count,
+		      start_char, end_char, loc_tres->id);
 
 		/* First figure out how much actual down time
 		   we have and then how much
 		   planned down time we have. */
-		overtime = (int64_t)(c_usage->total_time -
-				     (c_usage->a_cpu + c_usage->d_cpu));
+		overtime = (int64_t)(loc_tres->total_time -
+				     (loc_tres->time_alloc +
+				      loc_tres->time_down));
 		if (overtime < 0) {
-			c_usage->d_cpu += overtime;
-			if ((int64_t)c_usage->d_cpu < 0)
-				c_usage->d_cpu = 0;
+			loc_tres->time_down += overtime;
+			if ((int64_t)loc_tres->time_down < 0)
+				loc_tres->time_down = 0;
 		}
 
-		overtime = (int64_t)(c_usage->total_time -
-				     (c_usage->a_cpu + c_usage->d_cpu
-				      + c_usage->pd_cpu));
+		overtime = (int64_t)(loc_tres->total_time -
+				     (loc_tres->time_alloc +
+				      loc_tres->time_down +
+				      loc_tres->time_pd));
 		if (overtime < 0) {
-			c_usage->pd_cpu += overtime;
-			if ((int64_t)c_usage->pd_cpu < 0)
-				c_usage->pd_cpu = 0;
+			loc_tres->time_pd += overtime;
+			if ((int64_t)loc_tres->time_pd < 0)
+				loc_tres->time_pd = 0;
 		}
 
-		total_used = c_usage->a_cpu +
-			c_usage->d_cpu + c_usage->pd_cpu;
+		total_used = loc_tres->time_alloc +
+			loc_tres->time_down + loc_tres->time_pd;
+		/* info("We now have (%"PRIu64"+%"PRIu64"+" */
+		/*      "%"PRIu64")(%"PRIu64") " */
+		/*       "?= %"PRIu64"", */
+		/*       loc_tres->time_alloc, loc_tres->time_down, */
+		/*       loc_tres->time_pd, total_used, */
+		/*       loc_tres->total_time); */
 	}
 	/* info("Cluster %s now has (%"PRIu64"+%"PRIu64"+" */
 	/*      "%"PRIu64")(%"PRIu64") ?= %"PRIu64"", */
@@ -236,82 +497,183 @@ static int _process_cluster_usage(mysql_conn_t *mysql_conn,
 	/*      c_usage->pd_cpu, total_used, */
 	/*      c_usage->total_time); */
 
-	c_usage->i_cpu = c_usage->total_time - total_used - c_usage->r_cpu;
+	loc_tres->time_idle = loc_tres->total_time -
+		total_used - loc_tres->time_resv;
 	/* sanity check just to make sure we have a
 	 * legitimate time after we calulated
 	 * idle/reserved time put extra in the over
 	 * commit field
 	 */
-	/* info("%s got idle of %lld", c_usage->name, */
-	/*      (int64_t)c_usage->i_cpu); */
-	if ((int64_t)c_usage->i_cpu < 0) {
-		/* info("got %d %d %d", c_usage->r_cpu, */
-		/*      c_usage->i_cpu, c_usage->o_cpu); */
-		c_usage->r_cpu += (int64_t)c_usage->i_cpu;
-		c_usage->o_cpu -= (int64_t)c_usage->i_cpu;
-		c_usage->i_cpu = 0;
-		if ((int64_t)c_usage->r_cpu < 0)
-			c_usage->r_cpu = 0;
+	/* info("%s got idle of %lld", loc_tres->name, */
+	/*      (int64_t)loc_tres->time_idle); */
+	if ((int64_t)loc_tres->time_idle < 0) {
+		/* info("got %d %d %d", loc_tres->time_resv, */
+		/*      loc_tres->time_idle, loc_tres->time_over); */
+		loc_tres->time_resv += (int64_t)loc_tres->time_idle;
+		loc_tres->time_over -= (int64_t)loc_tres->time_idle;
+		loc_tres->time_idle = 0;
+		if ((int64_t)loc_tres->time_resv < 0)
+			loc_tres->time_resv = 0;
 	}
 
 	/* info("cluster %s(%u) down %"PRIu64" alloc %"PRIu64" " */
 	/*      "resv %"PRIu64" idle %"PRIu64" over %"PRIu64" " */
 	/*      "total= %"PRIu64" ?= %"PRIu64" from %s", */
 	/*      cluster_name, */
-	/*      c_usage->cpu_count, c_usage->d_cpu, c_usage->a_cpu, */
-	/*      c_usage->r_cpu, c_usage->i_cpu, c_usage->o_cpu, */
-	/*      c_usage->d_cpu + c_usage->a_cpu + */
-	/*      c_usage->r_cpu + c_usage->i_cpu, */
-	/*      c_usage->total_time, */
-	/*      slurm_ctime(&c_usage->start)); */
-	/* info("to %s", slurm_ctime(&c_usage->end)); */
-	query = xstrdup_printf("insert into \"%s_%s\" "
-			       "(creation_time, "
-			       "mod_time, time_start, "
-			       "cpu_count, alloc_cpu_secs, "
-			       "down_cpu_secs, pdown_cpu_secs, "
-			       "idle_cpu_secs, over_cpu_secs, "
-			       "resv_cpu_secs, consumed_energy) "
-			       "values (%ld, %ld, %ld, %d, "
-			       "%"PRIu64", %"PRIu64", %"PRIu64", "
-			       "%"PRIu64", %"PRIu64", %"PRIu64", "
-			       "%"PRIu64")",
-			       cluster_name, cluster_hour_table,
-			       now, now,
-			       c_usage->start,
-			       c_usage->cpu_count,
-			       c_usage->a_cpu, c_usage->d_cpu,
-			       c_usage->pd_cpu, c_usage->i_cpu,
-			       c_usage->o_cpu, c_usage->r_cpu,
-			       c_usage->energy);
+	/*      loc_tres->count, loc_tres->time_down, */
+	/*      loc_tres->time_alloc, */
+	/*      loc_tres->time_resv, loc_tres->time_idle, */
+	/*      loc_tres->time_over, */
+	/*      loc_tres->time_down + loc_tres->time_alloc + */
+	/*      loc_tres->time_resv + loc_tres->time_idle, */
+	/*      loc_tres->total_time, */
+	/*      slurm_ctime2(&loc_tres->start)); */
+	/* info("to %s", slurm_ctime2(&loc_tres->end)); */
+	if (*query)
+		xstrfmtcat(*query, ", (%ld, %ld, %ld, %u, %"PRIu64", "
+			   "%"PRIu64", %"PRIu64", %"PRIu64", "
+			   "%"PRIu64", %"PRIu64", %"PRIu64")",
+			   now, now, use_start, loc_tres->id,
+			   loc_tres->count,
+			   loc_tres->time_alloc,
+			   loc_tres->time_down,
+			   loc_tres->time_pd,
+			   loc_tres->time_idle,
+			   loc_tres->time_over,
+			   loc_tres->time_resv);
+	else
+		xstrfmtcat(*query, "insert into \"%s_%s\" "
+			   "(creation_time, mod_time, "
+			   "time_start, id_tres, count, "
+			   "alloc_secs, down_secs, pdown_secs, "
+			   "idle_secs, over_secs, resv_secs) "
+			   "values (%ld, %ld, %ld, %u, %"PRIu64", "
+			   "%"PRIu64", %"PRIu64", %"PRIu64", "
+			   "%"PRIu64", %"PRIu64", %"PRIu64")",
+			   cluster_name, cluster_hour_table,
+			   now, now,
+			   use_start, loc_tres->id,
+			   loc_tres->count,
+			   loc_tres->time_alloc,
+			   loc_tres->time_down,
+			   loc_tres->time_pd,
+			   loc_tres->time_idle,
+			   loc_tres->time_over,
+			   loc_tres->time_resv);
+
+	return;
+}
+
+static int _process_cluster_usage(mysql_conn_t *mysql_conn,
+				  char *cluster_name,
+				  time_t curr_start, time_t curr_end,
+				  time_t now, local_cluster_usage_t *c_usage)
+{
+	int rc = SLURM_SUCCESS;
+	char *query = NULL;
+	ListIterator itr;
+	local_tres_usage_t *loc_tres;
+
+	if (!c_usage)
+		return rc;
+	/* Now put the lists into the usage tables */
+
+	xassert(c_usage->loc_tres);
+	itr = list_iterator_create(c_usage->loc_tres);
+	while ((loc_tres = list_next(itr))) {
+		_setup_cluster_tres_usage(mysql_conn, cluster_name,
+					  curr_start, curr_end, now,
+					  c_usage->start, loc_tres, &query);
+	}
+	list_iterator_destroy(itr);
+
+	if (!query)
+		return rc;
+
+	xstrfmtcat(query,
+		   " on duplicate key update "
+		   "mod_time=%ld, count=VALUES(count), "
+		   "alloc_secs=VALUES(alloc_secs), "
+		   "down_secs=VALUES(down_secs), "
+		   "pdown_secs=VALUES(pdown_secs), "
+		   "idle_secs=VALUES(idle_secs), "
+		   "over_secs=VALUES(over_secs), "
+		   "resv_secs=VALUES(resv_secs)",
+		   now);
 
 	/* Spacing out the inserts here instead of doing them
 	   all at once in the end proves to be faster.  Just FYI
 	   so we don't go testing again and again.
 	*/
-	if (query) {
-		xstrfmtcat(query,
-			   " on duplicate key update "
-			   "mod_time=%ld, cpu_count=VALUES(cpu_count), "
-			   "alloc_cpu_secs=VALUES(alloc_cpu_secs), "
-			   "down_cpu_secs=VALUES(down_cpu_secs), "
-			   "pdown_cpu_secs=VALUES(pdown_cpu_secs), "
-			   "idle_cpu_secs=VALUES(idle_cpu_secs), "
-			   "over_cpu_secs=VALUES(over_cpu_secs), "
-			   "resv_cpu_secs=VALUES(resv_cpu_secs), "
-			   "consumed_energy=VALUES(consumed_energy)",
-			   now);
-		if (debug_flags & DEBUG_FLAG_DB_USAGE)
-			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
-		rc = mysql_db_query(mysql_conn, query);
-		xfree(query);
-		if (rc != SLURM_SUCCESS)
-			error("Couldn't add cluster hour rollup");
-	}
+	if (debug_flags & DEBUG_FLAG_DB_USAGE)
+		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
+	rc = mysql_db_query(mysql_conn, query);
+	xfree(query);
+	if (rc != SLURM_SUCCESS)
+		error("Couldn't add cluster hour rollup");
 
 	return rc;
 }
 
+static void _create_id_usage_insert(char *cluster_name, int type,
+				    time_t curr_start, time_t now,
+				    local_id_usage_t *id_usage,
+				    char **query)
+{
+	local_tres_usage_t *loc_tres;
+	ListIterator itr;
+	bool first;
+	char *table = NULL, *id_name = NULL;
+
+	xassert(query);
+
+	switch (type) {
+	case ASSOC_TABLES:
+		id_name = "id_assoc";
+		table = assoc_hour_table;
+		break;
+	case WCKEY_TABLES:
+		id_name = "id_wckey";
+		table = wckey_hour_table;
+		break;
+	default:
+		error("_create_id_usage_insert: unknown type %d", type);
+		return;
+		break;
+	}
+
+	if (!id_usage->loc_tres || !list_count(id_usage->loc_tres)) {
+		error("%s %d doesn't have any tres", id_name, id_usage->id);
+		return;
+	}
+
+	first = 1;
+	itr = list_iterator_create(id_usage->loc_tres);
+	while ((loc_tres = list_next(itr))) {
+		if (!first) {
+			xstrfmtcat(*query,
+				   ", (%ld, %ld, %u, %ld, %u, %"PRIu64")",
+				   now, now,
+				   id_usage->id, curr_start, loc_tres->id,
+				   loc_tres->time_alloc);
+		} else {
+			xstrfmtcat(*query,
+				   "insert into \"%s_%s\" "
+				   "(creation_time, mod_time, id, "
+				   "time_start, id_tres, alloc_secs) "
+				   "values (%ld, %ld, %u, %ld, %u, %"PRIu64")",
+				   cluster_name, table, now, now,
+				   id_usage->id, curr_start, loc_tres->id,
+				   loc_tres->time_alloc);
+			first = 0;
+		}
+	}
+	list_iterator_destroy(itr);
+	xstrfmtcat(*query,
+		   " on duplicate key update mod_time=%ld, "
+		   "alloc_secs=VALUES(alloc_secs);", now);
+}
+
 static local_cluster_usage_t *_setup_cluster_usage(mysql_conn_t *mysql_conn,
 						   char *cluster_name,
 						   time_t curr_start,
@@ -323,23 +685,23 @@ static local_cluster_usage_t *_setup_cluster_usage(mysql_conn_t *mysql_conn,
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 	int i = 0;
-	ListIterator c_itr = NULL;
+	ListIterator d_itr = NULL;
 	local_cluster_usage_t *loc_c_usage;
 
 	char *event_req_inx[] = {
 		"node_name",
-		"cpu_count",
 		"time_start",
 		"time_end",
 		"state",
+		"tres",
 	};
 	char *event_str = NULL;
 	enum {
 		EVENT_REQ_NAME,
-		EVENT_REQ_CPU,
 		EVENT_REQ_START,
 		EVENT_REQ_END,
 		EVENT_REQ_STATE,
+		EVENT_REQ_TRES,
 		EVENT_REQ_COUNT
 	};
 
@@ -368,13 +730,16 @@ static local_cluster_usage_t *_setup_cluster_usage(mysql_conn_t *mysql_conn,
 		xfree(query);
 		return NULL;
 	}
+
 	xfree(query);
-	c_itr = list_iterator_create(cluster_down_list);
+
+	d_itr = list_iterator_create(cluster_down_list);
 	while ((row = mysql_fetch_row(result))) {
 		time_t row_start = slurm_atoul(row[EVENT_REQ_START]);
 		time_t row_end = slurm_atoul(row[EVENT_REQ_END]);
-		uint32_t row_cpu = slurm_atoul(row[EVENT_REQ_CPU]);
 		uint16_t state = slurm_atoul(row[EVENT_REQ_STATE]);
+		int seconds;
+
 		if (row_start < curr_start)
 			row_start = curr_start;
 
@@ -384,26 +749,27 @@ static local_cluster_usage_t *_setup_cluster_usage(mysql_conn_t *mysql_conn,
 		/* Don't worry about it if the time is less
 		 * than 1 second.
 		 */
-		if ((row_end - row_start) < 1)
+		if ((seconds = (row_end - row_start)) < 1)
 			continue;
 
 		/* this means we are a cluster registration
 		   entry */
 		if (!row[EVENT_REQ_NAME][0]) {
+			local_cluster_usage_t *loc_c_usage;
+
 			/* if the cpu count changes we will
 			 * only care about the last cpu count but
 			 * we will keep a total of the time for
 			 * all cpus to get the correct cpu time
 			 * for the entire period.
 			 */
+
 			if (state || !c_usage) {
 				loc_c_usage = xmalloc(
 					sizeof(local_cluster_usage_t));
-				loc_c_usage->cpu_count = row_cpu;
-				loc_c_usage->total_time =
-					(row_end - row_start) * row_cpu;
 				loc_c_usage->start = row_start;
-				loc_c_usage->end = row_end;
+				loc_c_usage->loc_tres =
+					list_create(_destroy_local_tres_usage);
 				/* If this has a state it
 				   means the slurmctld went
 				   down and we should put this
@@ -416,13 +782,14 @@ static local_cluster_usage_t *_setup_cluster_usage(mysql_conn_t *mysql_conn,
 						    loc_c_usage);
 				else
 					c_usage = loc_c_usage;
-				loc_c_usage = NULL;
-			} else {
-				c_usage->cpu_count = row_cpu;
-				c_usage->total_time +=
-					(row_end - row_start) * row_cpu;
-				c_usage->end = row_end;
-			}
+			} else
+				loc_c_usage = c_usage;
+
+			loc_c_usage->end = row_end;
+
+			_add_tres_2_list(loc_c_usage->loc_tres,
+					 row[EVENT_REQ_TRES], seconds);
+
 			continue;
 		}
 
@@ -440,25 +807,17 @@ static local_cluster_usage_t *_setup_cluster_usage(mysql_conn_t *mysql_conn,
 				local_end = c_usage->end;
 			seconds = (local_end - local_start);
 			if (seconds > 0) {
-				/* info("%p node %s adds " */
-				/*      "(%d)(%ld-%ld) * %d = %"PRIu64" " */
-				/*      "to %"PRIu64" (%s - %s)", */
-				/*      c_usage, */
-				/*      row[EVENT_REQ_NAME], */
-				/*      seconds, */
-				/*      local_end, local_start, */
-				/*      row_cpu, */
-				/*      seconds * (uint64_t)row_cpu, */
-				/*      c_usage->d_cpu, */
-				/*      slurm_ctime(&local_start), */
-				/*      slurm_ctime(&local_end)); */
-				c_usage->d_cpu += seconds * (uint64_t)row_cpu;
+				_add_tres_time_2_list(c_usage->loc_tres,
+						      row[EVENT_REQ_TRES],
+						      TIME_DOWN,
+						      seconds, 0);
+
 				/* Now remove this time if there was a
 				   disconnected slurmctld during the
 				   down time.
 				*/
-				list_iterator_reset(c_itr);
-				while ((loc_c_usage = list_next(c_itr))) {
+				list_iterator_reset(d_itr);
+				while ((loc_c_usage = list_next(d_itr))) {
 					int temp_end = row_end;
 					int temp_start = row_start;
 					if (loc_c_usage->start > local_start)
@@ -469,26 +828,22 @@ static local_cluster_usage_t *_setup_cluster_usage(mysql_conn_t *mysql_conn,
 					if (seconds < 1)
 						continue;
 
-					seconds *= row_cpu;
-					if (seconds >= loc_c_usage->total_time)
-						loc_c_usage->total_time = 0;
-					else
-						loc_c_usage->total_time -=
-							seconds;
-
+					_remove_job_tres_time_from_cluster(
+						loc_c_usage->loc_tres,
+						c_usage->loc_tres, seconds);
 					/* info("Node %s was down for " */
 					/*      "%d seconds while " */
 					/*      "cluster %s's slurmctld " */
-					/*      "wasn't responding %"PRIu64, */
+					/*      "wasn't responding", */
 					/*      row[EVENT_REQ_NAME], */
-					/*      seconds, cluster_name, */
-					/*      loc_c_usage->total_time); */
+					/*      seconds, cluster_name); */
 				}
 			}
 		}
 	}
 	mysql_free_result(result);
-	list_iterator_destroy(c_itr);
+
+	list_iterator_destroy(d_itr);
 
 	return c_usage;
 }
@@ -516,6 +871,11 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 	List wckey_usage_list = list_create(_destroy_local_id_usage);
 	List resv_usage_list = list_create(_destroy_local_resv_usage);
 	uint16_t track_wckey = slurm_get_track_wckey();
+	local_cluster_usage_t *loc_c_usage = NULL;
+	local_cluster_usage_t *c_usage = NULL;
+	local_resv_usage_t *r_usage = NULL;
+	local_id_usage_t *a_usage = NULL;
+	local_id_usage_t *w_usage = NULL;
 	/* char start_char[20], end_char[20]; */
 
 	char *job_req_inx[] = {
@@ -528,9 +888,9 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		"job.time_start",
 		"job.time_end",
 		"job.time_suspended",
-		"job.cpus_alloc",
 		"job.cpus_req",
 		"job.id_resv",
+		"job.tres_alloc",
 		"SUM(step.consumed_energy)"
 	};
 	char *job_str = NULL;
@@ -544,9 +904,9 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		JOB_REQ_START,
 		JOB_REQ_END,
 		JOB_REQ_SUSPENDED,
-		JOB_REQ_ACPU,
 		JOB_REQ_RCPU,
 		JOB_REQ_RESVID,
+		JOB_REQ_TRES,
 		JOB_REQ_ENERGY,
 		JOB_REQ_COUNT
 	};
@@ -565,8 +925,8 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 	char *resv_req_inx[] = {
 		"id_resv",
 		"assoclist",
-		"cpus",
 		"flags",
+		"tres",
 		"time_start",
 		"time_end"
 	};
@@ -574,8 +934,8 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 	enum {
 		RESV_REQ_ID,
 		RESV_REQ_ASSOCS,
-		RESV_REQ_CPU,
 		RESV_REQ_FLAGS,
+		RESV_REQ_TRES,
 		RESV_REQ_START,
 		RESV_REQ_END,
 		RESV_REQ_COUNT
@@ -599,8 +959,8 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		xstrfmtcat(resv_str, ", %s", resv_req_inx[i]);
 	}
 
-/* 	info("begin start %s", slurm_ctime(&curr_start)); */
-/* 	info("begin end %s", slurm_ctime(&curr_end)); */
+/* 	info("begin start %s", slurm_ctime2(&curr_start)); */
+/* 	info("begin end %s", slurm_ctime2(&curr_end)); */
 	a_itr = list_iterator_create(assoc_usage_list);
 	c_itr = list_iterator_create(cluster_down_list);
 	w_itr = list_iterator_create(wckey_usage_list);
@@ -610,18 +970,13 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		int last_wckeyid = -1;
 		int seconds = 0;
 		int tot_time = 0;
-		local_cluster_usage_t *loc_c_usage = NULL;
-		local_cluster_usage_t *c_usage = NULL;
-		local_resv_usage_t *r_usage = NULL;
-		local_id_usage_t *a_usage = NULL;
-		local_id_usage_t *w_usage = NULL;
 
 		if (debug_flags & DEBUG_FLAG_DB_USAGE)
 			DB_DEBUG(mysql_conn->conn,
 				 "%s curr hour is now %ld-%ld",
 				 cluster_name, curr_start, curr_end);
-/* 		info("start %s", slurm_ctime(&curr_start)); */
-/* 		info("end %s", slurm_ctime(&curr_end)); */
+/* 		info("start %s", slurm_ctime2(&curr_start)); */
+/* 		info("end %s", slurm_ctime2(&curr_end)); */
 
 		c_usage = _setup_cluster_usage(mysql_conn, cluster_name,
 					       curr_start, curr_end,
@@ -646,12 +1001,14 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
 		if (!(result = mysql_db_query_ret(
 			      mysql_conn, query, 0))) {
-			xfree(query);
-			_destroy_local_cluster_usage(c_usage);
-			return SLURM_ERROR;
+			rc = SLURM_ERROR;
+			goto end_it;
 		}
 		xfree(query);
 
+		if (c_usage)
+			xassert(c_usage->loc_tres);
+
 		/* If a reservation overlaps another reservation we
 		   total up everything here as if they didn't but when
 		   calculating the total time for a cluster we will
@@ -674,9 +1031,8 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		while ((row = mysql_fetch_row(result))) {
 			time_t row_start = slurm_atoul(row[RESV_REQ_START]);
 			time_t row_end = slurm_atoul(row[RESV_REQ_END]);
-			uint32_t row_cpu = slurm_atoul(row[RESV_REQ_CPU]);
 			uint32_t row_flags = slurm_atoul(row[RESV_REQ_FLAGS]);
-
+			int seconds;
 			if (row_start < curr_start)
 				row_start = curr_start;
 
@@ -686,7 +1042,7 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			/* Don't worry about it if the time is less
 			 * than 1 second.
 			 */
-			if ((row_end - row_start) < 1)
+			if ((seconds = (row_end - row_start)) < 1)
 				continue;
 
 			r_usage = xmalloc(sizeof(local_resv_usage_t));
@@ -695,8 +1051,12 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			r_usage->local_assocs = list_create(slurm_destroy_char);
 			slurm_addto_char_list(r_usage->local_assocs,
 					      row[RESV_REQ_ASSOCS]);
+			r_usage->loc_tres =
+				list_create(_destroy_local_tres_usage);
+
+			_add_tres_2_list(r_usage->loc_tres,
+					 row[RESV_REQ_TRES], seconds);
 
-			r_usage->total_time = (row_end - row_start) * row_cpu;
 			r_usage->start = row_start;
 			r_usage->end = row_end;
 			list_append(resv_usage_list, r_usage);
@@ -716,10 +1076,12 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			*/
 			if (!c_usage)
 				continue;
-			else if (row_flags & RESERVE_FLAG_MAINT)
-				c_usage->pd_cpu += r_usage->total_time;
-			else
-				c_usage->a_cpu += r_usage->total_time;
+
+			_add_time_tres_list(c_usage->loc_tres,
+					    r_usage->loc_tres,
+					    (row_flags & RESERVE_FLAG_MAINT) ?
+					    TIME_PDOWN : TIME_ALLOC, 0, 0);
+
 			/* slurm_make_time_str(&r_usage->start, start_char, */
 			/* 		    sizeof(start_char)); */
 			/* slurm_make_time_str(&r_usage->end, end_char, */
@@ -751,9 +1113,8 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
 		if (!(result = mysql_db_query_ret(
 			      mysql_conn, query, 0))) {
-			xfree(query);
-			_destroy_local_cluster_usage(c_usage);
-			return SLURM_ERROR;
+			rc = SLURM_ERROR;
+			goto end_it;
 		}
 		xfree(query);
 
@@ -767,8 +1128,8 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			time_t row_eligible = slurm_atoul(row[JOB_REQ_ELG]);
 			time_t row_start = slurm_atoul(row[JOB_REQ_START]);
 			time_t row_end = slurm_atoul(row[JOB_REQ_END]);
-			uint32_t row_acpu = slurm_atoul(row[JOB_REQ_ACPU]);
 			uint32_t row_rcpu = slurm_atoul(row[JOB_REQ_RCPU]);
+			List loc_tres = NULL;
 			uint64_t row_energy = 0;
 			int loc_seconds = 0;
 			seconds = 0;
@@ -809,9 +1170,8 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				if (!(result2 = mysql_db_query_ret(
 					      mysql_conn,
 					      query, 0))) {
-					xfree(query);
-					_destroy_local_cluster_usage(c_usage);
-					return SLURM_ERROR;
+					rc = SLURM_ERROR;
+					goto end_it;
 				}
 				xfree(query);
 				while ((row2 = mysql_fetch_row(result2))) {
@@ -846,13 +1206,14 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				a_usage->id = assoc_id;
 				list_append(assoc_usage_list, a_usage);
 				last_id = assoc_id;
+				/* a_usage->loc_tres is made later,
+				   don't do it here.
+				*/
 			}
 
-			a_usage->a_cpu += seconds * row_acpu;
-			a_usage->energy += row_energy;
-
+			/* Short circuit this so so we don't get a pointer. */
 			if (!track_wckey)
-				goto calc_cluster;
+				last_wckeyid = wckey_id;
 
 			/* do the wckey calculation */
 			if (last_wckeyid != wckey_id) {
@@ -867,15 +1228,41 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 					w_usage->id = wckey_id;
 					list_append(wckey_usage_list,
 						    w_usage);
+					w_usage->loc_tres = list_create(
+						_destroy_local_tres_usage);
 				}
-
 				last_wckeyid = wckey_id;
 			}
-			w_usage->a_cpu += seconds * row_acpu;
-			w_usage->energy += row_energy;
+
 			/* do the cluster allocated calculation */
 		calc_cluster:
 
+			if (!a_usage)
+				loc_tres = list_create(
+					_destroy_local_tres_usage);
+			else {
+				if (!a_usage->loc_tres)
+					a_usage->loc_tres = list_create(
+						_destroy_local_tres_usage);
+				loc_tres = a_usage->loc_tres;
+			}
+
+			_add_tres_time_2_list(loc_tres, row[JOB_REQ_TRES],
+					      TIME_ALLOC, seconds, 0);
+			if (w_usage)
+				_add_tres_time_2_list(w_usage->loc_tres,
+						      row[JOB_REQ_TRES],
+						      TIME_ALLOC, seconds, 0);
+
+			_add_time_tres(loc_tres,
+				       TIME_ALLOC, TRES_ENERGY,
+				       row_energy, 0);
+			if (w_usage)
+				_add_time_tres(
+					w_usage->loc_tres,
+					TIME_ALLOC, TRES_ENERGY,
+					row_energy, 0);
+
 			/* Now figure out there was a disconnected
 			   slurmctld durning this job.
 			*/
@@ -891,24 +1278,24 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				if (loc_seconds < 1)
 					continue;
 
-				loc_seconds *= row_acpu;
-				/* info(" Job %u was running for " */
+				_remove_job_tres_time_from_cluster(
+					loc_c_usage->loc_tres,
+					loc_tres,
+					loc_seconds);
+				/* info("Job %u was running for " */
 				/*      "%d seconds while " */
 				/*      "cluster %s's slurmctld " */
 				/*      "wasn't responding", */
 				/*      job_id, loc_seconds, cluster_name); */
-				if (loc_seconds >= loc_c_usage->total_time)
-					loc_c_usage->total_time = 0;
-				else {
-					loc_c_usage->total_time -=
-						loc_seconds * row_acpu;
-				}
 			}
 
 			/* first figure out the reservation */
 			if (resv_id) {
-				if (seconds <= 0)
+				if (seconds <= 0) {
+					if (!a_usage)
+						FREE_NULL_LIST(loc_tres);
 					continue;
+				}
 				/* Since we have already added the
 				   entire reservation as used time on
 				   the cluster we only need to
@@ -924,6 +1311,7 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 				   outside of the reservation. */
 				list_iterator_reset(r_itr);
 				while ((r_usage = list_next(r_itr))) {
+					int temp_end, temp_start;
 					/* since the reservation could
 					   have changed in some way,
 					   thus making a new
@@ -932,24 +1320,26 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 					   sure all the reservations
 					   are checked to see if such
 					   a thing has happened */
-					if (r_usage->id == resv_id) {
-						int temp_end = row_end;
-						int temp_start = row_start;
-						if (r_usage->start > temp_start)
-							temp_start =
-								r_usage->start;
-						if (r_usage->end < temp_end)
-							temp_end = r_usage->end;
-
-						if ((temp_end - temp_start)
-						    > 0) {
-							r_usage->a_cpu +=
-								(temp_end
-								 - temp_start)
-								* row_acpu;
-						}
-					}
+					if (r_usage->id != resv_id)
+						continue;
+					temp_end = row_end;
+					temp_start = row_start;
+					if (r_usage->start > temp_start)
+						temp_start =
+							r_usage->start;
+					if (r_usage->end < temp_end)
+						temp_end = r_usage->end;
+
+					loc_seconds = (temp_end - temp_start);
+
+					if (loc_seconds > 0)
+						_add_time_tres_list(
+							r_usage->loc_tres,
+							loc_tres, TIME_ALLOC,
+							loc_seconds, 1);
 				}
+				if (!a_usage)
+					FREE_NULL_LIST(loc_tres);
 				continue;
 			}
 
@@ -957,25 +1347,33 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			   registered.  This continue should rarely if
 			   ever happen.
 			*/
-			if (!c_usage)
+			if (!c_usage) {
+				if (!a_usage)
+					FREE_NULL_LIST(loc_tres);
 				continue;
+			}
 
 			if (row_start && (seconds > 0)) {
 				/* info("%d assoc %d adds " */
-				/*      "(%d)(%ld-%ld) * %d = %d " */
-				/*      "to %"PRIu64, */
+				/*      "(%d)(%d-%d) * %d = %d " */
+				/*      "to %d", */
 				/*      job_id, */
 				/*      a_usage->id, */
 				/*      seconds, */
 				/*      row_end, row_start, */
 				/*      row_acpu, */
 				/*      seconds * row_acpu, */
-				/*      c_usage->a_cpu); */
+				/*      row_acpu); */
 
-				c_usage->a_cpu += seconds * row_acpu;
-				c_usage->energy += row_energy;
+				_add_job_alloc_time_to_cluster(
+					c_usage->loc_tres,
+					loc_tres);
 			}
 
+			/* The loc_tres isn't needed after this */
+			if (!a_usage)
+				FREE_NULL_LIST(loc_tres);
+
 			/* now reserved time */
 			if (!row_start || (row_start >= c_usage->start)) {
 				int temp_end = row_start;
@@ -1007,8 +1405,10 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 					/*      loc_seconds, */
 					/*      row_rcpu); */
 
-					c_usage->r_cpu +=
-						loc_seconds * row_rcpu;
+					_add_time_tres(c_usage->loc_tres,
+						       TIME_RESV, TRES_CPU,
+						       loc_seconds * row_rcpu,
+						       0);
 				}
 			}
 		}
@@ -1019,103 +1419,97 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		*/
 		list_iterator_reset(r_itr);
 		while ((r_usage = list_next(r_itr))) {
-			int64_t idle = r_usage->total_time - r_usage->a_cpu;
-			char *assoc = NULL;
-			ListIterator tmp_itr = NULL;
+			ListIterator t_itr;
+			local_tres_usage_t *loc_tres;
 
-			if (idle <= 0)
+			if (!r_usage->loc_tres ||
+			    !list_count(r_usage->loc_tres))
 				continue;
 
-			/* now divide that time by the number of
-			   associations in the reservation and add
-			   them to each association */
-			seconds = idle / list_count(r_usage->local_assocs);
-/* 			info("resv %d got %d for seconds for %d assocs", */
-/* 			     r_usage->id, seconds, */
-/* 			     list_count(r_usage->local_assocs)); */
-			tmp_itr = list_iterator_create(r_usage->local_assocs);
-			while ((assoc = list_next(tmp_itr))) {
-				uint32_t associd = slurm_atoul(assoc);
-				if (last_id != associd) {
-					list_iterator_reset(a_itr);
-					while ((a_usage = list_next(a_itr))) {
-						if (a_usage->id == associd) {
-							last_id = a_usage->id;
-							break;
-						}
+			t_itr = list_iterator_create(r_usage->loc_tres);
+			while ((loc_tres = list_next(t_itr))) {
+				int64_t idle = loc_tres->total_time -
+					loc_tres->time_alloc;
+				char *assoc = NULL;
+				ListIterator tmp_itr = NULL;
+
+				if (idle <= 0)
+					break; /* since this will be
+						* the same for all TRES	*/
+
+				/* now divide that time by the number of
+				   associations in the reservation and add
+				   them to each association */
+				seconds = idle /
+					list_count(r_usage->local_assocs);
+				/* info("resv %d got %d seconds for TRES %u " */
+				/*      "for %d assocs", */
+				/*      r_usage->id, seconds, loc_tres->id, */
+				/*      list_count(r_usage->local_assocs)); */
+				tmp_itr = list_iterator_create(
+					r_usage->local_assocs);
+				while ((assoc = list_next(tmp_itr))) {
+					uint32_t associd = slurm_atoul(assoc);
+					if ((last_id != associd) &&
+					    !(a_usage = list_find_first(
+						      assoc_usage_list,
+						      _find_id_usage,
+						      &associd))) {
+						a_usage = xmalloc(
+							sizeof(local_id_usage_t));
+						a_usage->id = associd;
+						list_append(assoc_usage_list,
+							    a_usage);
+						last_id = associd;
+						a_usage->loc_tres = list_create(
+							_destroy_local_tres_usage);
 					}
-				}
 
-				if (!a_usage) {
-					a_usage = xmalloc(
-						sizeof(local_id_usage_t));
-					a_usage->id = associd;
-					list_append(assoc_usage_list, a_usage);
-					last_id = associd;
+					_add_time_tres(a_usage->loc_tres,
+						       TIME_ALLOC, loc_tres->id,
+						       seconds, 0);
 				}
-
-				a_usage->a_cpu += seconds;
+				list_iterator_destroy(tmp_itr);
 			}
-			list_iterator_destroy(tmp_itr);
+			list_iterator_destroy(t_itr);
 		}
 
 		/* now apply the down time from the slurmctld disconnects */
 		if (c_usage) {
 			list_iterator_reset(c_itr);
-			while ((loc_c_usage = list_next(c_itr)))
-				c_usage->d_cpu += loc_c_usage->total_time;
+			while ((loc_c_usage = list_next(c_itr))) {
+				local_tres_usage_t *loc_tres;
+				ListIterator tmp_itr = list_iterator_create(
+					loc_c_usage->loc_tres);
+				while ((loc_tres = list_next(tmp_itr)))
+					_add_time_tres(c_usage->loc_tres,
+						       TIME_DOWN,
+						       loc_tres->id,
+						       loc_tres->total_time,
+						       0);
+				list_iterator_destroy(tmp_itr);
+			}
 
 			if ((rc = _process_cluster_usage(
 				     mysql_conn, cluster_name, curr_start,
 				     curr_end, now, c_usage))
 			    != SLURM_SUCCESS) {
-				_destroy_local_cluster_usage(c_usage);
 				goto end_it;
 			}
 		}
 
 		list_iterator_reset(a_itr);
-		while ((a_usage = list_next(a_itr))) {
-			/* info("association (%d) %d alloc %"PRIu64, */
-			/*      a_usage->id, last_id, */
-			/*      a_usage->a_cpu); */
-			if (query) {
-				xstrfmtcat(query,
-					   ", (%ld, %ld, %d, %ld, %"PRIu64", "
-					   "%"PRIu64")",
-					   now, now,
-					   a_usage->id, curr_start,
-					   a_usage->a_cpu, a_usage->energy);
-			} else {
-				xstrfmtcat(query,
-					   "insert into \"%s_%s\" "
-					   "(creation_time, "
-					   "mod_time, id_assoc, time_start, "
-					   "alloc_cpu_secs, consumed_energy) "
-					   "values "
-					   "(%ld, %ld, %d, %ld, %"PRIu64", "
-					   "%"PRIu64")",
-					   cluster_name, assoc_hour_table,
-					   now, now,
-					   a_usage->id, curr_start,
-					   a_usage->a_cpu, a_usage->energy);
-			}
-		}
+		while ((a_usage = list_next(a_itr)))
+			_create_id_usage_insert(cluster_name, ASSOC_TABLES,
+						curr_start, now,
+						a_usage, &query);
 		if (query) {
-			xstrfmtcat(query,
-				   " on duplicate key update "
-				   "mod_time=%ld, "
-				   "alloc_cpu_secs=VALUES(alloc_cpu_secs), "
-				   "consumed_energy=VALUES(consumed_energy);",
-				   now);
-
 			if (debug_flags & DEBUG_FLAG_DB_USAGE)
 				DB_DEBUG(mysql_conn->conn, "query\n%s", query);
 			rc = mysql_db_query(mysql_conn, query);
 			xfree(query);
 			if (rc != SLURM_SUCCESS) {
 				error("Couldn't add assoc hour rollup");
-				_destroy_local_cluster_usage(c_usage);
 				goto end_it;
 			}
 		}
@@ -1124,53 +1518,31 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 			goto end_loop;
 
 		list_iterator_reset(w_itr);
-		while ((w_usage = list_next(w_itr))) {
-/* 			info("association (%d) %d alloc %d", */
-/* 			     w_usage->id, last_id, */
-/* 			     w_usage->a_cpu); */
-			if (query) {
-				xstrfmtcat(query,
-					   ", (%ld, %ld, %d, %ld, "
-					   "%"PRIu64", %"PRIu64")",
-					   now, now,
-					   w_usage->id, curr_start,
-					   w_usage->a_cpu, w_usage->energy);
-			} else {
-				xstrfmtcat(query,
-					   "insert into \"%s_%s\" "
-					   "(creation_time, "
-					   "mod_time, id_wckey, time_start, "
-					   "alloc_cpu_secs, consumed_energy) "
-					   "values "
-					   "(%ld, %ld, %d, %ld, "
-					   "%"PRIu64", %"PRIu64")",
-					   cluster_name, wckey_hour_table,
-					   now, now,
-					   w_usage->id, curr_start,
-					   w_usage->a_cpu, w_usage->energy);
-			}
-		}
+		while ((w_usage = list_next(w_itr)))
+			_create_id_usage_insert(cluster_name, WCKEY_TABLES,
+						curr_start, now,
+						w_usage, &query);
 		if (query) {
-			xstrfmtcat(query,
-				   " on duplicate key update "
-				   "mod_time=%ld, "
-				   "alloc_cpu_secs=VALUES(alloc_cpu_secs), "
-				   "consumed_energy=VALUES(consumed_energy);",
-				   now);
-
 			if (debug_flags & DEBUG_FLAG_DB_USAGE)
 				DB_DEBUG(mysql_conn->conn, "query\n%s", query);
 			rc = mysql_db_query(mysql_conn, query);
 			xfree(query);
 			if (rc != SLURM_SUCCESS) {
 				error("Couldn't add wckey hour rollup");
-				_destroy_local_cluster_usage(c_usage);
 				goto end_it;
 			}
 		}
 
 	end_loop:
 		_destroy_local_cluster_usage(c_usage);
+		_destroy_local_id_usage(a_usage);
+		_destroy_local_id_usage(w_usage);
+		_destroy_local_resv_usage(r_usage);
+		c_usage     = NULL;
+		r_usage     = NULL;
+		a_usage     = NULL;
+		w_usage     = NULL;
+
 		list_flush(assoc_usage_list);
 		list_flush(cluster_down_list);
 		list_flush(wckey_usage_list);
@@ -1179,21 +1551,30 @@ extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
 		curr_end = curr_start + add_sec;
 	}
 end_it:
+	xfree(query);
 	xfree(suspend_str);
 	xfree(job_str);
 	xfree(resv_str);
-	list_iterator_destroy(a_itr);
-	list_iterator_destroy(c_itr);
-	list_iterator_destroy(w_itr);
-	list_iterator_destroy(r_itr);
-
-	list_destroy(assoc_usage_list);
-	list_destroy(cluster_down_list);
-	list_destroy(wckey_usage_list);
-	list_destroy(resv_usage_list);
-
-/* 	info("stop start %s", slurm_ctime(&curr_start)); */
-/* 	info("stop end %s", slurm_ctime(&curr_end)); */
+	_destroy_local_cluster_usage(c_usage);
+	_destroy_local_id_usage(a_usage);
+	_destroy_local_id_usage(w_usage);
+	_destroy_local_resv_usage(r_usage);
+	if (a_itr)
+		list_iterator_destroy(a_itr);
+	if (c_itr)
+		list_iterator_destroy(c_itr);
+	if (w_itr)
+		list_iterator_destroy(w_itr);
+	if (r_itr)
+		list_iterator_destroy(r_itr);
+
+	FREE_NULL_LIST(assoc_usage_list);
+	FREE_NULL_LIST(cluster_down_list);
+	FREE_NULL_LIST(wckey_usage_list);
+	FREE_NULL_LIST(resv_usage_list);
+
+/* 	info("stop start %s", slurm_ctime2(&curr_start)); */
+/* 	info("stop end %s", slurm_ctime2(&curr_end)); */
 
 	/* go check to see if we archive and purge */
 
@@ -1203,10 +1584,11 @@ end_it:
 
 	return rc;
 }
-extern int as_mysql_daily_rollup(mysql_conn_t *mysql_conn,
-				 char *cluster_name,
-				 time_t start, time_t end,
-				 uint16_t archive_data)
+extern int as_mysql_nonhour_rollup(mysql_conn_t *mysql_conn,
+				   bool run_month,
+				   char *cluster_name,
+				   time_t start, time_t end,
+				   uint16_t archive_data)
 {
 	/* can't just add 86400 since daylight savings starts and ends every
 	 * once in a while
@@ -1218,39 +1600,49 @@ extern int as_mysql_daily_rollup(mysql_conn_t *mysql_conn,
 	time_t now = time(NULL);
 	char *query = NULL;
 	uint16_t track_wckey = slurm_get_track_wckey();
+	char *unit_name;
 
-	if (!localtime_r(&curr_start, &start_tm)) {
-		error("Couldn't get localtime from day start %ld", curr_start);
+	if (!slurm_localtime_r(&curr_start, &start_tm)) {
+		error("Couldn't get localtime from start %ld", curr_start);
 		return SLURM_ERROR;
 	}
+
 	start_tm.tm_sec = 0;
 	start_tm.tm_min = 0;
 	start_tm.tm_hour = 0;
-	start_tm.tm_mday++;
 	start_tm.tm_isdst = -1;
-	curr_end = mktime(&start_tm);
+
+	if (run_month) {
+		unit_name = "month";
+		start_tm.tm_mday = 1;
+		start_tm.tm_mon++;
+	} else {
+		unit_name = "day";
+		start_tm.tm_mday++;
+	}
+
+	curr_end = slurm_mktime(&start_tm);
 
 	while (curr_start < end) {
 		if (debug_flags & DEBUG_FLAG_DB_USAGE)
 			DB_DEBUG(mysql_conn->conn,
-				 "curr day is now %ld-%ld",
-				 curr_start, curr_end);
-/* 		info("start %s", slurm_ctime(&curr_start)); */
-/* 		info("end %s", slurm_ctime(&curr_end)); */
+				 "curr %s is now %ld-%ld",
+				 unit_name, curr_start, curr_end);
+/* 		info("start %s", slurm_ctime2(&curr_start)); */
+/* 		info("end %s", slurm_ctime2(&curr_end)); */
 		query = xstrdup_printf(
-			"insert into \"%s_%s\" (creation_time, mod_time, "
-			"id_assoc, "
-			"time_start, alloc_cpu_secs, consumed_energy) "
-			"select %ld, %ld, id_assoc, "
-			"%ld, @ASUM:=SUM(alloc_cpu_secs), "
-			"@ESUM:=SUM(consumed_energy) "
-			"from \"%s_%s\" where "
+			"insert into \"%s_%s\" (creation_time, mod_time, id, "
+			"id_tres, time_start, alloc_secs) "
+			"select %ld, %ld, id, id_tres, "
+			"%ld, @ASUM:=SUM(alloc_secs) from \"%s_%s\" where "
 			"(time_start < %ld && time_start >= %ld) "
-			"group by id_assoc on duplicate key update "
-			"mod_time=%ld, alloc_cpu_secs=@ASUM, "
-			"consumed_energy=@ESUM;",
-			cluster_name, assoc_day_table, now, now, curr_start,
-			cluster_name, assoc_hour_table,
+			"group by id, id_tres on duplicate key update "
+			"mod_time=%ld, alloc_secs=@ASUM;",
+			cluster_name,
+			run_month ? assoc_month_table : assoc_day_table,
+			now, now, curr_start,
+			cluster_name,
+			run_month ? assoc_day_table : assoc_hour_table,
 			curr_end, curr_start, now);
 		/* We group on deleted here so if there are no entries
 		   we don't get an error, just nothing is returned.
@@ -1258,47 +1650,48 @@ extern int as_mysql_daily_rollup(mysql_conn_t *mysql_conn,
 		*/
 		xstrfmtcat(query,
 			   "insert into \"%s_%s\" (creation_time, "
-			   "mod_time, time_start, cpu_count, "
-			   "alloc_cpu_secs, down_cpu_secs, pdown_cpu_secs, "
-			   "idle_cpu_secs, over_cpu_secs, resv_cpu_secs, "
-			   "consumed_energy) "
+			   "mod_time, time_start, id_tres, count, "
+			   "alloc_secs, down_secs, pdown_secs, "
+			   "idle_secs, over_secs, resv_secs) "
 			   "select %ld, %ld, "
-			   "%ld, @CPU:=MAX(cpu_count), "
-			   "@ASUM:=SUM(alloc_cpu_secs), "
-			   "@DSUM:=SUM(down_cpu_secs), "
-			   "@PDSUM:=SUM(pdown_cpu_secs), "
-			   "@ISUM:=SUM(idle_cpu_secs), "
-			   "@OSUM:=SUM(over_cpu_secs), "
-			   "@RSUM:=SUM(resv_cpu_secs), "
-			   "@ESUM:=SUM(consumed_energy) from \"%s_%s\" where "
+			   "%ld, id_tres, @CPU:=MAX(count), "
+			   "@ASUM:=SUM(alloc_secs), "
+			   "@DSUM:=SUM(down_secs), "
+			   "@PDSUM:=SUM(pdown_secs), "
+			   "@ISUM:=SUM(idle_secs), "
+			   "@OSUM:=SUM(over_secs), "
+			   "@RSUM:=SUM(resv_secs) from \"%s_%s\" where "
 			   "(time_start < %ld && time_start >= %ld) "
-			   "group by deleted "
+			   "group by deleted, id_tres "
 			   "on duplicate key update "
-			   "mod_time=%ld, cpu_count=@CPU, "
-			   "alloc_cpu_secs=@ASUM, down_cpu_secs=@DSUM, "
-			   "pdown_cpu_secs=@PDSUM, idle_cpu_secs=@ISUM, "
-			   "over_cpu_secs=@OSUM, resv_cpu_secs=@RSUM, "
-			   "consumed_energy=@ESUM;",
-			   cluster_name, cluster_day_table,
+			   "mod_time=%ld, count=@CPU, "
+			   "alloc_secs=@ASUM, down_secs=@DSUM, "
+			   "pdown_secs=@PDSUM, idle_secs=@ISUM, "
+			   "over_secs=@OSUM, resv_secs=@RSUM;",
+			   cluster_name,
+			   run_month ? cluster_month_table : cluster_day_table,
 			   now, now, curr_start,
-			   cluster_name, cluster_hour_table,
+			   cluster_name,
+			   run_month ? cluster_day_table : cluster_hour_table,
 			   curr_end, curr_start, now);
 		if (track_wckey) {
 			xstrfmtcat(query,
 				   "insert into \"%s_%s\" (creation_time, "
-				   "mod_time, id_wckey, time_start, "
-				   "alloc_cpu_secs, consumed_energy) "
+				   "mod_time, id, id_tres, time_start, "
+				   "alloc_secs) "
 				   "select %ld, %ld, "
-				   "id_wckey, %ld, @ASUM:=SUM(alloc_cpu_secs), "
-				   "@ESUM:=SUM(consumed_energy) "
+				   "id, id_tres, %ld, @ASUM:=SUM(alloc_secs) "
 				   "from \"%s_%s\" where (time_start < %ld && "
-				   "time_start >= %ld) "
-				   "group by id_wckey on duplicate key update "
-				   "mod_time=%ld, alloc_cpu_secs=@ASUM, "
-				   "consumed_energy=@ESUM;",
-				   cluster_name, wckey_day_table,
+				   "time_start >= %ld) group by id, id_tres "
+				   "on duplicate key update "
+				   "mod_time=%ld, alloc_secs=@ASUM;",
+				   cluster_name,
+				   run_month ? wckey_month_table :
+				   wckey_day_table,
 				   now, now, curr_start,
-				   cluster_name, wckey_hour_table,
+				   cluster_name,
+				   run_month ? wckey_day_table :
+				   wckey_hour_table,
 				   curr_end, curr_start, now);
 		}
 		if (debug_flags & DEBUG_FLAG_DB_USAGE)
@@ -1306,14 +1699,14 @@ extern int as_mysql_daily_rollup(mysql_conn_t *mysql_conn,
 		rc = mysql_db_query(mysql_conn, query);
 		xfree(query);
 		if (rc != SLURM_SUCCESS) {
-			error("Couldn't add day rollup");
+			error("Couldn't add %s rollup", unit_name);
 			return SLURM_ERROR;
 		}
 
 		curr_start = curr_end;
-		if (!localtime_r(&curr_start, &start_tm)) {
-			error("Couldn't get localtime from day start %ld",
-			      curr_start);
+		if (!slurm_localtime_r(&curr_start, &start_tm)) {
+			error("Couldn't get localtime from %s start %ld",
+			      unit_name, curr_start);
 			return SLURM_ERROR;
 		}
 		start_tm.tm_sec = 0;
@@ -1321,141 +1714,15 @@ extern int as_mysql_daily_rollup(mysql_conn_t *mysql_conn,
 		start_tm.tm_hour = 0;
 		start_tm.tm_mday++;
 		start_tm.tm_isdst = -1;
-		curr_end = mktime(&start_tm);
+		curr_end = slurm_mktime(&start_tm);
 	}
 
-/* 	info("stop start %s", slurm_ctime(&curr_start)); */
-/* 	info("stop end %s", slurm_ctime(&curr_end)); */
+/* 	info("stop start %s", slurm_ctime2(&curr_start)); */
+/* 	info("stop end %s", slurm_ctime2(&curr_end)); */
 
 	/* go check to see if we archive and purge */
 	rc = _process_purge(mysql_conn, cluster_name, archive_data,
+			    run_month ? SLURMDB_PURGE_MONTHS :
 			    SLURMDB_PURGE_DAYS);
 	return rc;
 }
-extern int as_mysql_monthly_rollup(mysql_conn_t *mysql_conn,
-				   char *cluster_name,
-				   time_t start, time_t end,
-				   uint16_t archive_data)
-{
-	int rc = SLURM_SUCCESS;
-	struct tm start_tm;
-	time_t curr_start = start;
-	time_t curr_end;
-	time_t now = time(NULL);
-	char *query = NULL;
-	uint16_t track_wckey = slurm_get_track_wckey();
-
-	if (!localtime_r(&curr_start, &start_tm)) {
-		error("Couldn't get localtime from month start %ld",
-		      curr_start);
-		return SLURM_ERROR;
-	}
-	start_tm.tm_sec = 0;
-	start_tm.tm_min = 0;
-	start_tm.tm_hour = 0;
-	start_tm.tm_mday = 1;
-	start_tm.tm_mon++;
-	start_tm.tm_isdst = -1;
-	curr_end = mktime(&start_tm);
-
-	while (curr_start < end) {
-		if (debug_flags & DEBUG_FLAG_DB_USAGE)
-			DB_DEBUG(mysql_conn->conn,
-				 "curr month is now %ld-%ld",
-				 curr_start, curr_end);
-/* 		info("start %s", slurm_ctime(&curr_start)); */
-/* 		info("end %s", slurm_ctime(&curr_end)); */
-		query = xstrdup_printf(
-			"insert into \"%s_%s\" (creation_time, "
-			"mod_time, id_assoc, "
-			"time_start, alloc_cpu_secs, consumed_energy) select "
-			"%ld, %ld, id_assoc, "
-			"%ld, @ASUM:=SUM(alloc_cpu_secs), "
-			"@ESUM:=SUM(consumed_energy) "
-			"from \"%s_%s\" where "
-			"(time_start < %ld && time_start >= %ld) "
-			"group by id_assoc on duplicate key update "
-			"mod_time=%ld, alloc_cpu_secs=@ASUM, "
-			"consumed_energy=@ESUM;",
-			cluster_name, assoc_month_table, now, now, curr_start,
-			cluster_name, assoc_day_table,
-			curr_end, curr_start, now);
-		/* We group on deleted here so if there are no entries
-		   we don't get an error, just nothing is returned.
-		   Else we get a bunch of NULL's
-		*/
-		xstrfmtcat(query,
-			   "insert into \"%s_%s\" (creation_time, "
-			   "mod_time, time_start, cpu_count, "
-			   "alloc_cpu_secs, down_cpu_secs, pdown_cpu_secs, "
-			   "idle_cpu_secs, over_cpu_secs, resv_cpu_secs, "
-			   "consumed_energy) "
-			   "select %ld, %ld, "
-			   "%ld, @CPU:=MAX(cpu_count), "
-			   "@ASUM:=SUM(alloc_cpu_secs), "
-			   "@DSUM:=SUM(down_cpu_secs), "
-			   "@PDSUM:=SUM(pdown_cpu_secs), "
-			   "@ISUM:=SUM(idle_cpu_secs), "
-			   "@OSUM:=SUM(over_cpu_secs), "
-			   "@RSUM:=SUM(resv_cpu_secs), "
-			   "@ESUM:=SUM(consumed_energy) from \"%s_%s\" where "
-			   "(time_start < %ld && time_start >= %ld) "
-			   "group by deleted "
-			   "on duplicate key update "
-			   "mod_time=%ld, cpu_count=@CPU, "
-			   "alloc_cpu_secs=@ASUM, down_cpu_secs=@DSUM, "
-			   "pdown_cpu_secs=@PDSUM, idle_cpu_secs=@ISUM, "
-			   "over_cpu_secs=@OSUM, resv_cpu_secs=@RSUM, "
-			   "consumed_energy=@ESUM;",
-			   cluster_name, cluster_month_table,
-			   now, now, curr_start,
-			   cluster_name, cluster_day_table,
-			   curr_end, curr_start, now);
-		if (track_wckey) {
-			xstrfmtcat(query,
-				   "insert into \"%s_%s\" "
-				   "(creation_time, mod_time, "
-				   "id_wckey, time_start, alloc_cpu_secs, "
-				   "consumed_energy) "
-				   "select %ld, %ld, id_wckey, %ld, "
-				   "@ASUM:=SUM(alloc_cpu_secs), "
-				   "@ESUM:=SUM(consumed_energy) "
-				   "from \"%s_%s\" where (time_start < %ld && "
-				   "time_start >= %ld) "
-				   "group by id_wckey on duplicate key update "
-				   "mod_time=%ld, alloc_cpu_secs=@ASUM, "
-				   "consumed_energy=@ESUM;",
-				   cluster_name, wckey_month_table,
-				   now, now, curr_start,
-				   cluster_name, wckey_day_table,
-				   curr_end, curr_start, now);
-		}
-		if (debug_flags & DEBUG_FLAG_DB_USAGE)
-			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
-		rc = mysql_db_query(mysql_conn, query);
-		xfree(query);
-		if (rc != SLURM_SUCCESS) {
-			error("Couldn't add day rollup");
-			return SLURM_ERROR;
-		}
-
-		curr_start = curr_end;
-		if (!localtime_r(&curr_start, &start_tm)) {
-			error("Couldn't get localtime from month start %ld",
-			      curr_start);
-		}
-		start_tm.tm_sec = 0;
-		start_tm.tm_min = 0;
-		start_tm.tm_hour = 0;
-		start_tm.tm_mday = 1;
-		start_tm.tm_mon++;
-		start_tm.tm_isdst = -1;
-		curr_end = mktime(&start_tm);
-	}
-
-	/* go check to see if we archive and purge */
-	rc = _process_purge(mysql_conn, cluster_name, archive_data,
-			    SLURMDB_PURGE_MONTHS);
-
-	return rc;
-}
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_rollup.h b/src/plugins/accounting_storage/mysql/as_mysql_rollup.h
index dd786b7c1..b72159bb9 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_rollup.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_rollup.h
@@ -45,18 +45,14 @@
 #include "accounting_storage_mysql.h"
 
 extern int as_mysql_hourly_rollup(mysql_conn_t *mysql_conn,
-				  char *cluster_name, 
+				  char *cluster_name,
 				  time_t start,
 				  time_t end,
 				  uint16_t archive_data);
-extern int as_mysql_daily_rollup(mysql_conn_t *mysql_conn,
-			      char *cluster_name,
-				 time_t start, 
-				 time_t end,
-				 uint16_t archive_data);
-extern int as_mysql_monthly_rollup(mysql_conn_t *mysql_conn,
-				char *cluster_name,
+extern int as_mysql_nonhour_rollup(mysql_conn_t *mysql_conn,
+				   bool run_month,
+				   char *cluster_name,
 				   time_t start,
 				   time_t end,
-				uint16_t archive_data);
+				   uint16_t archive_data);
 #endif
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_tres.c b/src/plugins/accounting_storage/mysql/as_mysql_tres.c
new file mode 100644
index 000000000..5469b4477
--- /dev/null
+++ b/src/plugins/accounting_storage/mysql/as_mysql_tres.c
@@ -0,0 +1,304 @@
+/*****************************************************************************\
+ *  as_mysql_tres.c - functions dealing with TRES (trackable resources).
+ *****************************************************************************
+ *
+ *  Copyright (C) 2015 SchedMD LLC.
+ *  Written by Danny Auble <da@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "as_mysql_tres.h"
+#include "as_mysql_usage.h"
+#include "src/common/xstring.h"
+
+extern int as_mysql_add_tres(mysql_conn_t *mysql_conn,
+			     uint32_t uid, List tres_list_in)
+{
+	ListIterator itr = NULL;
+	int rc = SLURM_SUCCESS;
+	slurmdb_tres_rec_t *object = NULL;
+	char *cols = NULL, *extra = NULL, *vals = NULL, *query = NULL,
+		*tmp_extra = NULL;
+	time_t now = time(NULL);
+	char *user_name = NULL;
+	int affect_rows = 0;
+
+	if (check_connection(mysql_conn) != SLURM_SUCCESS)
+		return ESLURM_DB_CONNECTION;
+
+	if (!is_user_min_admin_level(mysql_conn, uid, SLURMDB_ADMIN_OPERATOR))
+		return ESLURM_ACCESS_DENIED;
+
+	if (!tres_list_in) {
+		error("as_mysql_add_tres: Trying to add a blank list");
+		return SLURM_ERROR;
+	}
+
+	user_name = uid_to_string((uid_t) uid);
+	itr = list_iterator_create(tres_list_in);
+	while ((object = list_next(itr))) {
+		if (!object->type || !object->type[0]) {
+			error("We need a tres type.");
+			rc = SLURM_ERROR;
+			continue;
+		} else if ((!strcasecmp(object->type, "gres") ||
+			    !strcasecmp(object->type, "bb") ||
+			    !strcasecmp(object->type, "license"))) {
+			if (!object->name) {
+				error("%s type tres "
+				      "need to have a name, "
+				      "(i.e. Gres/GPU).  You gave none",
+				      object->type);
+				rc = SLURM_ERROR;
+				continue;
+			}
+		} else /* only the above have a name */
+			xfree(object->name);
+
+		xstrcat(cols, "creation_time, type");
+		xstrfmtcat(vals, "%ld, '%s'", now, object->type);
+		xstrfmtcat(extra, "type='%s'", object->type);
+		if (object->name) {
+			xstrcat(cols, ", name");
+			xstrfmtcat(vals, ", '%s'", object->name);
+			xstrfmtcat(extra, ", name='%s'", object->name);
+		}
+
+		xstrfmtcat(query,
+			   "insert into %s (%s) values (%s) "
+			   "on duplicate key update deleted=0;",
+			   tres_table, cols, vals);
+
+		if (debug_flags & DEBUG_FLAG_DB_TRES)
+			DB_DEBUG(mysql_conn->conn, "query\n%s", query);
+		object->id = mysql_db_insert_ret_id(mysql_conn, query);
+		xfree(query);
+		if (!object->id) {
+			error("Couldn't add tres %s%s%s", object->type,
+			      object->name ? "/" : "",
+			      object->name ? object->name : "");
+			xfree(cols);
+			xfree(extra);
+			xfree(vals);
+			break;
+		}
+
+		affect_rows = last_affected_rows(mysql_conn);
+
+		if (!affect_rows) {
+			debug2("nothing changed %d", affect_rows);
+			xfree(cols);
+			xfree(extra);
+			xfree(vals);
+			continue;
+		}
+
+		tmp_extra = slurm_add_slash_to_quotes(extra);
+
+		xstrfmtcat(query,
+			   "insert into %s "
+			   "(timestamp, action, name, actor, info, cluster) "
+			   "values (%ld, %u, 'id=%d', '%s', '%s', '%s');",
+			   txn_table,
+			   now, DBD_ADD_TRES, object->id, user_name,
+			   tmp_extra, mysql_conn->cluster_name);
+
+		xfree(tmp_extra);
+		xfree(cols);
+		xfree(extra);
+		xfree(vals);
+		debug4("query\n%s", query);
+		rc = mysql_db_query(mysql_conn, query);
+		xfree(query);
+		if (rc != SLURM_SUCCESS) {
+			error("Couldn't add txn");
+		} else {
+			if (addto_update_list(mysql_conn->update_list,
+					      SLURMDB_ADD_TRES,
+					      object) == SLURM_SUCCESS)
+				list_remove(itr);
+		}
+
+	}
+	list_iterator_destroy(itr);
+	xfree(user_name);
+
+	if (list_count(mysql_conn->update_list)) {
+		/* We only want to update the local cache DBD or ctld */
+		assoc_mgr_update(mysql_conn->update_list, 0);
+		list_flush(mysql_conn->update_list);
+	}
+
+	return rc;
+}
+
+extern List as_mysql_get_tres(mysql_conn_t *mysql_conn, uid_t uid,
+				slurmdb_tres_cond_t *tres_cond)
+{
+	char *query = NULL;
+	char *extra = NULL;
+	char *tmp = NULL;
+	List my_tres_list = NULL;
+	ListIterator itr = NULL;
+	char *object = NULL;
+	int set = 0;
+	int i=0;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	bool is_admin = false;
+
+	/* if this changes you will need to edit the corresponding enum */
+	char *tres_req_inx[] = {
+		"id",
+		"type",
+		"name"
+	};
+	enum {
+		SLURMDB_REQ_ID,
+		SLURMDB_REQ_TYPE,
+		SLURMDB_REQ_NAME,
+		SLURMDB_REQ_COUNT
+	};
+
+	if (check_connection(mysql_conn) != SLURM_SUCCESS)
+		return NULL;
+
+	if (!(is_admin = is_user_min_admin_level(
+		      mysql_conn, uid, SLURMDB_ADMIN_OPERATOR))) {
+		errno = ESLURM_ACCESS_DENIED;
+		return NULL;
+	}
+
+	if (!tres_cond) {
+		xstrcat(extra, "where deleted=0");
+		goto empty;
+	}
+
+	if (tres_cond->with_deleted)
+		xstrcat(extra, "where (deleted=0 || deleted=1)");
+	else
+		xstrcat(extra, "where deleted=0");
+
+	if (tres_cond->id_list
+	    && list_count(tres_cond->id_list)) {
+		set = 0;
+		xstrcat(extra, " && (");
+		itr = list_iterator_create(tres_cond->id_list);
+		while ((object = list_next(itr))) {
+			if (set)
+				xstrcat(extra, " || ");
+			xstrfmtcat(extra, "id='%s'", object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+	}
+
+	if (tres_cond->type_list
+	    && list_count(tres_cond->type_list)) {
+		set = 0;
+		xstrcat(extra, " && (");
+		itr = list_iterator_create(tres_cond->type_list);
+		while ((object = list_next(itr))) {
+			char *slash;
+			if (set)
+				xstrcat(extra, " || ");
+			if (!(slash = strchr(object, '/')))
+				xstrfmtcat(extra, "type='%s'", object);
+			else {
+				/* This means we have the name
+				 * attached, so split the string and
+				 * handle it this way, only on this type.
+				 */
+				char *name = slash;
+				*slash = '\0';
+				name++;
+				xstrfmtcat(extra, "(type='%s' && name='%s')",
+					   object, name);
+			}
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+	}
+
+	if (tres_cond->name_list
+	    && list_count(tres_cond->name_list)) {
+		set = 0;
+		xstrcat(extra, " && (");
+		itr = list_iterator_create(tres_cond->name_list);
+		while ((object = list_next(itr))) {
+			if (set)
+				xstrcat(extra, " || ");
+			xstrfmtcat(extra, "name='%s'", object);
+			set = 1;
+		}
+		list_iterator_destroy(itr);
+		xstrcat(extra, ")");
+	}
+
+empty:
+
+	xfree(tmp);
+	xstrfmtcat(tmp, "%s", tres_req_inx[i]);
+	for(i=1; i<SLURMDB_REQ_COUNT; i++) {
+		xstrfmtcat(tmp, ", %s", tres_req_inx[i]);
+	}
+
+	query = xstrdup_printf("select %s from %s %s", tmp, tres_table, extra);
+	xfree(tmp);
+	xfree(extra);
+
+	if (debug_flags & DEBUG_FLAG_DB_TRES)
+		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
+	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
+		xfree(query);
+		return NULL;
+	}
+	xfree(query);
+
+	my_tres_list = list_create(slurmdb_destroy_tres_rec);
+
+	while ((row = mysql_fetch_row(result))) {
+		slurmdb_tres_rec_t *tres =
+			xmalloc(sizeof(slurmdb_tres_rec_t));
+		list_append(my_tres_list, tres);
+
+		tres->id =  slurm_atoul(row[SLURMDB_REQ_ID]);
+		if (row[SLURMDB_REQ_TYPE] && row[SLURMDB_REQ_TYPE][0])
+			tres->type = xstrdup(row[SLURMDB_REQ_TYPE]);
+		if (row[SLURMDB_REQ_NAME] && row[SLURMDB_REQ_NAME][0])
+			tres->name = xstrdup(row[SLURMDB_REQ_NAME]);
+	}
+	mysql_free_result(result);
+
+	return my_tres_list;
+}
diff --git a/src/sbatch/mult_cluster.h b/src/plugins/accounting_storage/mysql/as_mysql_tres.h
similarity index 81%
rename from src/sbatch/mult_cluster.h
rename to src/plugins/accounting_storage/mysql/as_mysql_tres.h
index b6d765a64..f524807d2 100644
--- a/src/sbatch/mult_cluster.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_tres.h
@@ -1,10 +1,9 @@
 /*****************************************************************************\
- *  mult_cluster.h - definitions for sbatch to submit job to multiple clusters
+ *  as_mysql_tres.h - functions dealing with tres.
  *****************************************************************************
- *  Copyright (C) 2010 Lawrence Livermore National Security.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>,
- *  CODE-OCEC-09-009. All rights reserved.
+ *
+ *  Copyright (C) 2015 SchedMD LLC.
+ *  Written by Danny Auble <da@schedmd.com>
  *
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://slurm.schedmd.com/>.
@@ -36,11 +35,15 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#ifndef _HAVE_MULT_CLUSTER_H
-#define _HAVE_MULT_CLUSTER_H
+#ifndef _HAVE_AS_MYSQL_TRES_H
+#define _HAVE_AS_MYSQL_TRES_H
+
+#include "accounting_storage_mysql.h"
 
-#include "opt.h"
+extern int as_mysql_add_tres(mysql_conn_t *mysql_conn,
+			     uint32_t uid, List tres_list_in);
 
-extern int sbatch_set_first_avail_cluster(job_desc_msg_t *req);
+extern List as_mysql_get_tres(mysql_conn_t *mysql_conn, uid_t uid,
+				slurmdb_tres_cond_t *tres_cond);
 
 #endif
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_usage.c b/src/plugins/accounting_storage/mysql/as_mysql_usage.c
index 7763cb1f9..1a03ff510 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_usage.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_usage.c
@@ -37,13 +37,14 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
+#include "as_mysql_cluster.h"
 #include "as_mysql_usage.h"
 #include "as_mysql_rollup.h"
+#include "src/common/slurm_time.h"
 
 time_t global_last_rollup = 0;
 pthread_mutex_t rollup_lock = PTHREAD_MUTEX_INITIALIZER;
-
-static pthread_mutex_t usage_rollup_lock = PTHREAD_MUTEX_INITIALIZER;
+pthread_mutex_t usage_rollup_lock = PTHREAD_MUTEX_INITIALIZER;
 
 typedef struct {
 	uint16_t archive_data;
@@ -212,13 +213,13 @@ static void *_cluster_rollup_usage(void *arg)
 //	last_day = 1197033199;
 //	last_month = 1204358399;
 
-	if (!localtime_r(&last_hour, &start_tm)) {
+	if (!slurm_localtime_r(&last_hour, &start_tm)) {
 		error("Couldn't get localtime from hour start %ld", last_hour);
 		rc = SLURM_ERROR;
 		goto end_it;
 	}
 
-	if (!localtime_r(&my_time, &end_tm)) {
+	if (!slurm_localtime_r(&my_time, &end_tm)) {
 		error("Couldn't get localtime from hour end %ld", my_time);
 		rc = SLURM_ERROR;
 		goto end_it;
@@ -233,15 +234,15 @@ static void *_cluster_rollup_usage(void *arg)
 	start_tm.tm_sec = 0;
 	start_tm.tm_min = 0;
 	start_tm.tm_isdst = -1;
-	hour_start = mktime(&start_tm);
+	hour_start = slurm_mktime(&start_tm);
 
 	end_tm.tm_sec = 0;
 	end_tm.tm_min = 0;
 	end_tm.tm_isdst = -1;
-	hour_end = mktime(&end_tm);
+	hour_end = slurm_mktime(&end_tm);
 
-/* 	info("hour start %s", slurm_ctime(&hour_start)); */
-/* 	info("hour end %s", slurm_ctime(&hour_end)); */
+/* 	info("hour start %s", slurm_ctime2(&hour_start)); */
+/* 	info("hour end %s", slurm_ctime2(&hour_end)); */
 /* 	info("diff is %d", hour_end-hour_start); */
 
 	slurm_mutex_lock(&rollup_lock);
@@ -249,7 +250,7 @@ static void *_cluster_rollup_usage(void *arg)
 	slurm_mutex_unlock(&rollup_lock);
 
 	/* set up the day period */
-	if (!localtime_r(&last_day, &start_tm)) {
+	if (!slurm_localtime_r(&last_day, &start_tm)) {
 		error("Couldn't get localtime from day %ld", last_day);
 		rc = SLURM_ERROR;
 		goto end_it;
@@ -259,18 +260,18 @@ static void *_cluster_rollup_usage(void *arg)
 	start_tm.tm_min = 0;
 	start_tm.tm_hour = 0;
 	start_tm.tm_isdst = -1;
-	day_start = mktime(&start_tm);
+	day_start = slurm_mktime(&start_tm);
 
 	end_tm.tm_hour = 0;
 	end_tm.tm_isdst = -1;
-	day_end = mktime(&end_tm);
+	day_end = slurm_mktime(&end_tm);
 
-/* 	info("day start %s", slurm_ctime(&day_start)); */
-/* 	info("day end %s", slurm_ctime(&day_end)); */
+/* 	info("day start %s", slurm_ctime2(&day_start)); */
+/* 	info("day end %s", slurm_ctime2(&day_end)); */
 /* 	info("diff is %d", day_end-day_start); */
 
 	/* set up the month period */
-	if (!localtime_r(&last_month, &start_tm)) {
+	if (!slurm_localtime_r(&last_month, &start_tm)) {
 		error("Couldn't get localtime from month %ld", last_month);
 		rc = SLURM_ERROR;
 		goto end_it;
@@ -281,17 +282,17 @@ static void *_cluster_rollup_usage(void *arg)
 	start_tm.tm_hour = 0;
 	start_tm.tm_mday = 1;
 	start_tm.tm_isdst = -1;
-	month_start = mktime(&start_tm);
+	month_start = slurm_mktime(&start_tm);
 
 	end_tm.tm_sec = 0;
 	end_tm.tm_min = 0;
 	end_tm.tm_hour = 0;
 	end_tm.tm_mday = 1;
 	end_tm.tm_isdst = -1;
-	month_end = mktime(&end_tm);
+	month_end = slurm_mktime(&end_tm);
 
-/* 	info("month start %s", slurm_ctime(&month_start)); */
-/* 	info("month end %s", slurm_ctime(&month_end)); */
+/* 	info("month start %s", slurm_ctime2(&month_start)); */
+/* 	info("month end %s", slurm_ctime2(&month_end)); */
 /* 	info("diff is %d", month_end-month_start); */
 
 	if ((hour_end - hour_start) > 0) {
@@ -310,11 +311,11 @@ static void *_cluster_rollup_usage(void *arg)
 
 	if ((day_end - day_start) > 0) {
 		START_TIMER;
-		rc = as_mysql_daily_rollup(&mysql_conn,
-					   local_rollup->cluster_name,
-					   day_start,
-					   day_end,
-					   local_rollup->archive_data);
+		rc = as_mysql_nonhour_rollup(&mysql_conn, 0,
+					     local_rollup->cluster_name,
+					     day_start,
+					     day_end,
+					     local_rollup->archive_data);
 		snprintf(timer_str, sizeof(timer_str),
 			 "daily_rollup for %s", local_rollup->cluster_name);
 		END_TIMER3(timer_str, 5000000);
@@ -324,7 +325,7 @@ static void *_cluster_rollup_usage(void *arg)
 
 	if ((month_end - month_start) > 0) {
 		START_TIMER;
-		rc = as_mysql_monthly_rollup(&mysql_conn,
+		rc = as_mysql_nonhour_rollup(&mysql_conn, 1,
 					     local_rollup->cluster_name,
 					     month_start,
 					     month_end,
@@ -404,7 +405,111 @@ end_it:
 	return NULL;
 }
 
+/* assoc_mgr locks need to be unlocked before coming here */
+static int _get_object_usage(mysql_conn_t *mysql_conn,
+			     slurmdbd_msg_type_t type, char *my_usage_table,
+			     char *cluster_name, char *id_str,
+			     time_t start, time_t end, List *usage_list)
+{
+	char *tmp = NULL;
+	int i = 0;
+	MYSQL_RES *result = NULL;
+	MYSQL_ROW row;
+	char *query = NULL;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+
+	char *usage_req_inx[] = {
+		"t3.id_assoc",
+		"t1.id_tres",
+		"t1.time_start",
+		"t1.alloc_secs",
+	};
+	enum {
+		USAGE_ID,
+		USAGE_TRES,
+		USAGE_START,
+		USAGE_ALLOC,
+		USAGE_COUNT
+	};
+
+	if (type == DBD_GET_WCKEY_USAGE)
+		usage_req_inx[0] = "t1.id";
+
+	xstrfmtcat(tmp, "%s", usage_req_inx[i]);
+	for (i=1; i<USAGE_COUNT; i++) {
+		xstrfmtcat(tmp, ", %s", usage_req_inx[i]);
+	}
+
+	switch (type) {
+	case DBD_GET_ASSOC_USAGE:
+		query = xstrdup_printf(
+			"select %s from \"%s_%s\" as t1, "
+			"\"%s_%s\" as t2, \"%s_%s\" as t3 "
+			"where (t1.time_start < %ld && t1.time_start >= %ld) "
+			"&& t1.id=t2.id_assoc && (%s) && "
+			"t2.lft between t3.lft and t3.rgt "
+			"order by t3.id_assoc, time_start;",
+			tmp, cluster_name, my_usage_table,
+			cluster_name, assoc_table, cluster_name, assoc_table,
+			end, start, id_str);
+		break;
+	case DBD_GET_WCKEY_USAGE:
+		query = xstrdup_printf(
+			"select %s from \"%s_%s\" as t1 "
+			"where (time_start < %ld && time_start >= %ld) "
+			"&& (%s) order by id, time_start;",
+			tmp, cluster_name, my_usage_table, end, start, id_str);
+		break;
+	default:
+		error("Unknown usage type %d", type);
+		xfree(tmp);
+		return SLURM_ERROR;
+		break;
+	}
+	xfree(tmp);
+
+	if (debug_flags & DEBUG_FLAG_DB_USAGE)
+		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
+	result = mysql_db_query_ret(mysql_conn, query, 0);
+	xfree(query);
+
+	if (!result)
+		return SLURM_ERROR;
 
+	if (!(*usage_list))
+		(*usage_list) = list_create(slurmdb_destroy_accounting_rec);
+
+	assoc_mgr_lock(&locks);
+	while ((row = mysql_fetch_row(result))) {
+		slurmdb_tres_rec_t *tres_rec;
+		slurmdb_accounting_rec_t *accounting_rec =
+			xmalloc(sizeof(slurmdb_accounting_rec_t));
+
+		accounting_rec->tres_rec.id = slurm_atoul(row[USAGE_TRES]);
+		if ((tres_rec = list_find_first(
+			     assoc_mgr_tres_list, slurmdb_find_tres_in_list,
+			     &accounting_rec->tres_rec.id))) {
+			accounting_rec->tres_rec.name =
+				xstrdup(tres_rec->name);
+			accounting_rec->tres_rec.type =
+				xstrdup(tres_rec->type);
+		}
+
+		accounting_rec->id = slurm_atoul(row[USAGE_ID]);
+		accounting_rec->period_start = slurm_atoul(row[USAGE_START]);
+		accounting_rec->alloc_secs = slurm_atoull(row[USAGE_ALLOC]);
+
+		list_append(*usage_list, accounting_rec);
+	}
+	assoc_mgr_unlock(&locks);
+
+	mysql_free_result(result);
+
+	return SLURM_SUCCESS;
+}
+
+/* assoc_mgr locks need to be unlocked before you get here */
 static int _get_cluster_usage(mysql_conn_t *mysql_conn, uid_t uid,
 			      slurmdb_cluster_rec_t *cluster_rec,
 			      slurmdbd_msg_type_t type,
@@ -417,28 +522,30 @@ static int _get_cluster_usage(mysql_conn_t *mysql_conn, uid_t uid,
 	char *tmp = NULL;
 	char *my_usage_table = cluster_day_table;
 	char *query = NULL;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
 	char *cluster_req_inx[] = {
-		"alloc_cpu_secs",
-		"down_cpu_secs",
-		"pdown_cpu_secs",
-		"idle_cpu_secs",
-		"resv_cpu_secs",
-		"over_cpu_secs",
-		"cpu_count",
+		"id_tres",
+		"alloc_secs",
+		"down_secs",
+		"pdown_secs",
+		"idle_secs",
+		"resv_secs",
+		"over_secs",
+		"count",
 		"time_start",
-		"consumed_energy"
 	};
 
 	enum {
+		CLUSTER_TRES,
 		CLUSTER_ACPU,
 		CLUSTER_DCPU,
 		CLUSTER_PDCPU,
 		CLUSTER_ICPU,
 		CLUSTER_RCPU,
 		CLUSTER_OCPU,
-		CLUSTER_CPU_COUNT,
+		CLUSTER_CNT,
 		CLUSTER_START,
-		CLUSTER_ENERGY,
 		CLUSTER_COUNT
 	};
 
@@ -465,11 +572,10 @@ static int _get_cluster_usage(mysql_conn_t *mysql_conn, uid_t uid,
 		tmp, cluster_rec->name, my_usage_table, end, start);
 
 	xfree(tmp);
-	debug4("%d(%s:%d) query\n%s",
-	       mysql_conn->conn, THIS_FILE, __LINE__, query);
+	if (debug_flags & DEBUG_FLAG_DB_USAGE)
+		DB_DEBUG(mysql_conn->conn, "query\n%s", query);
 
-	if (!(result = mysql_db_query_ret(
-		      mysql_conn, query, 0))) {
+	if (!(result = mysql_db_query_ret(mysql_conn, query, 0))) {
 		xfree(query);
 		return SLURM_ERROR;
 	}
@@ -479,20 +585,34 @@ static int _get_cluster_usage(mysql_conn_t *mysql_conn, uid_t uid,
 		cluster_rec->accounting_list =
 			list_create(slurmdb_destroy_cluster_accounting_rec);
 
+	assoc_mgr_lock(&locks);
 	while ((row = mysql_fetch_row(result))) {
+		slurmdb_tres_rec_t *tres_rec;
 		slurmdb_cluster_accounting_rec_t *accounting_rec =
 			xmalloc(sizeof(slurmdb_cluster_accounting_rec_t));
+
+		accounting_rec->tres_rec.id = slurm_atoul(row[CLUSTER_TRES]);
+		accounting_rec->tres_rec.count = slurm_atoul(row[CLUSTER_CNT]);
+		if ((tres_rec = list_find_first(
+			     assoc_mgr_tres_list, slurmdb_find_tres_in_list,
+			     &accounting_rec->tres_rec.id))) {
+			accounting_rec->tres_rec.name =
+				xstrdup(tres_rec->name);
+			accounting_rec->tres_rec.type =
+				xstrdup(tres_rec->type);
+		}
+
 		accounting_rec->alloc_secs = slurm_atoull(row[CLUSTER_ACPU]);
 		accounting_rec->down_secs = slurm_atoull(row[CLUSTER_DCPU]);
 		accounting_rec->pdown_secs = slurm_atoull(row[CLUSTER_PDCPU]);
 		accounting_rec->idle_secs = slurm_atoull(row[CLUSTER_ICPU]);
 		accounting_rec->over_secs = slurm_atoull(row[CLUSTER_OCPU]);
 		accounting_rec->resv_secs = slurm_atoull(row[CLUSTER_RCPU]);
-		accounting_rec->cpu_count = slurm_atoul(row[CLUSTER_CPU_COUNT]);
 		accounting_rec->period_start = slurm_atoul(row[CLUSTER_START]);
-		accounting_rec->consumed_energy = slurm_atoull(row[CLUSTER_ENERGY]);
 		list_append(cluster_rec->accounting_list, accounting_rec);
 	}
+	assoc_mgr_unlock(&locks);
+
 	mysql_free_result(result);
 	return rc;
 }
@@ -500,40 +620,22 @@ static int _get_cluster_usage(mysql_conn_t *mysql_conn, uid_t uid,
 
 
 /* checks should already be done before this to see if this is a valid
-   user or not.
+   user or not.  The assoc_mgr locks should be unlocked before coming here.
 */
 extern int get_usage_for_list(mysql_conn_t *mysql_conn,
 			      slurmdbd_msg_type_t type, List object_list,
 			      char *cluster_name, time_t start, time_t end)
 {
 	int rc = SLURM_SUCCESS;
-	int i=0;
-	MYSQL_RES *result = NULL;
-	MYSQL_ROW row;
-	char *tmp = NULL;
 	char *my_usage_table = NULL;
-	char *query = NULL;
 	List usage_list = NULL;
 	char *id_str = NULL;
 	ListIterator itr = NULL, u_itr = NULL;
 	void *object = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	slurmdb_wckey_rec_t *wckey = NULL;
 	slurmdb_accounting_rec_t *accounting_rec = NULL;
 
-	/* Since for id in association table we
-	   use t3 and in wckey table we use t1 we can't define it here */
-	char **usage_req_inx = NULL;
-
-	enum {
-		USAGE_ID,
-		USAGE_START,
-		USAGE_ACPU,
-		USAGE_ENERGY,
-		USAGE_COUNT
-	};
-
-
 	if (!object_list) {
 		error("We need an object to set data for getting usage");
 		return SLURM_ERROR;
@@ -544,15 +646,6 @@ extern int get_usage_for_list(mysql_conn_t *mysql_conn,
 
 	switch (type) {
 	case DBD_GET_ASSOC_USAGE:
-	{
-		char *temp_usage[] = {
-			"t3.id_assoc",
-			"t1.time_start",
-			"t1.alloc_cpu_secs",
-			"t1.consumed_energy",
-		};
-		usage_req_inx = temp_usage;
-
 		itr = list_iterator_create(object_list);
 		while ((assoc = list_next(itr))) {
 			if (id_str)
@@ -565,30 +658,19 @@ extern int get_usage_for_list(mysql_conn_t *mysql_conn,
 
 		my_usage_table = assoc_day_table;
 		break;
-	}
 	case DBD_GET_WCKEY_USAGE:
-	{
-		char *temp_usage[] = {
-			"id_wckey",
-			"time_start",
-			"alloc_cpu_secs",
-			"consumed_energy"
-		};
-		usage_req_inx = temp_usage;
-
 		itr = list_iterator_create(object_list);
 		while ((wckey = list_next(itr))) {
 			if (id_str)
-				xstrfmtcat(id_str, " || id_wckey=%d",
+				xstrfmtcat(id_str, " || id=%d",
 					   wckey->id);
 			else
-				xstrfmtcat(id_str, "id_wckey=%d", wckey->id);
+				xstrfmtcat(id_str, "id=%d", wckey->id);
 		}
 		list_iterator_destroy(itr);
 
 		my_usage_table = wckey_day_table;
 		break;
-	}
 	default:
 		error("Unknown usage type %d", type);
 		return SLURM_ERROR;
@@ -601,63 +683,19 @@ extern int get_usage_for_list(mysql_conn_t *mysql_conn,
 		return SLURM_ERROR;
 	}
 
-	xfree(tmp);
-	i=0;
-	xstrfmtcat(tmp, "%s", usage_req_inx[i]);
-	for(i=1; i<USAGE_COUNT; i++) {
-		xstrfmtcat(tmp, ", %s", usage_req_inx[i]);
-	}
-	switch (type) {
-	case DBD_GET_ASSOC_USAGE:
-		query = xstrdup_printf(
-			"select %s from \"%s_%s\" as t1, "
-			"\"%s_%s\" as t2, \"%s_%s\" as t3 "
-			"where (t1.time_start < %ld && t1.time_start >= %ld) "
-			"&& t1.id_assoc=t2.id_assoc && (%s) && "
-			"t2.lft between t3.lft and t3.rgt "
-			"order by t3.id_assoc, time_start;",
-			tmp, cluster_name, my_usage_table,
-			cluster_name, assoc_table, cluster_name, assoc_table,
-			end, start, id_str);
-		break;
-	case DBD_GET_WCKEY_USAGE:
-		query = xstrdup_printf(
-			"select %s from \"%s_%s\" "
-			"where (time_start < %ld && time_start >= %ld) "
-			"&& (%s) order by id_wckey, time_start;",
-			tmp, cluster_name, my_usage_table, end, start, id_str);
-		break;
-	default:
-		error("Unknown usage type %d", type);
+	if (_get_object_usage(mysql_conn, type, my_usage_table, cluster_name,
+			      id_str, start, end, &usage_list)
+	    != SLURM_SUCCESS) {
 		xfree(id_str);
-		xfree(tmp);
 		return SLURM_ERROR;
-		break;
 	}
+
 	xfree(id_str);
-	xfree(tmp);
 
-	debug4("%d(%s:%d) query\n%s",
-	       mysql_conn->conn, THIS_FILE, __LINE__, query);
-	if (!(result = mysql_db_query_ret(
-		      mysql_conn, query, 0))) {
-		xfree(query);
+	if (!usage_list) {
+		error("No usage given back?  This should never happen");
 		return SLURM_ERROR;
 	}
-	xfree(query);
-
-	usage_list = list_create(slurmdb_destroy_accounting_rec);
-
-	while ((row = mysql_fetch_row(result))) {
-		slurmdb_accounting_rec_t *accounting_rec =
-			xmalloc(sizeof(slurmdb_accounting_rec_t));
-		accounting_rec->id = slurm_atoul(row[USAGE_ID]);
-		accounting_rec->period_start = slurm_atoul(row[USAGE_START]);
-		accounting_rec->alloc_secs = slurm_atoull(row[USAGE_ACPU]);
-		accounting_rec->consumed_energy = slurm_atoull(row[USAGE_ENERGY]);
-		list_append(usage_list, accounting_rec);
-	}
-	mysql_free_result(result);
 
 	u_itr = list_iterator_create(usage_list);
 	itr = list_iterator_create(object_list);
@@ -668,7 +706,7 @@ extern int get_usage_for_list(mysql_conn_t *mysql_conn,
 
 		switch (type) {
 		case DBD_GET_ASSOC_USAGE:
-			assoc = (slurmdb_association_rec_t *)object;
+			assoc = (slurmdb_assoc_rec_t *)object;
 			if (!assoc->accounting_list)
 				assoc->accounting_list = list_create(
 					slurmdb_destroy_accounting_rec);
@@ -716,96 +754,70 @@ extern int get_usage_for_list(mysql_conn_t *mysql_conn,
 		error("we have %d records not added "
 		      "to the association list",
 		      list_count(usage_list));
-	list_destroy(usage_list);
-
+	FREE_NULL_LIST(usage_list);
 
 	return rc;
 }
 
+/*   The assoc_mgr locks should be unlocked before coming here. */
 extern int as_mysql_get_usage(mysql_conn_t *mysql_conn, uid_t uid,
 			      void *in, slurmdbd_msg_type_t type,
 			      time_t start, time_t end)
 {
 	int rc = SLURM_SUCCESS;
-	int i=0, is_admin=1;
-	MYSQL_RES *result = NULL;
-	MYSQL_ROW row;
-	char *tmp = NULL;
+	int is_admin=1;
 	char *my_usage_table = NULL;
-	slurmdb_association_rec_t *slurmdb_assoc = in;
+	slurmdb_assoc_rec_t *slurmdb_assoc = in;
 	slurmdb_wckey_rec_t *slurmdb_wckey = in;
-	char *query = NULL;
 	char *username = NULL;
 	uint16_t private_data = 0;
-	List *my_list;
-	uint32_t id = NO_VAL;
+	List *my_list = NULL;
 	char *cluster_name = NULL;
-	char **usage_req_inx = NULL;
+	char *id_str = NULL;
 
-	enum {
-		USAGE_ID,
-		USAGE_START,
-		USAGE_ACPU,
-		USAGE_COUNT,
-		USAGE_ENERGY
-	};
+	if (check_connection(mysql_conn) != SLURM_SUCCESS)
+		return ESLURM_DB_CONNECTION;
 
 	switch (type) {
 	case DBD_GET_ASSOC_USAGE:
-	{
-		char *temp_usage[] = {
-			"t3.id_assoc",
-			"t1.time_start",
-			"t1.alloc_cpu_secs"
-		};
-		usage_req_inx = temp_usage;
-
-		id = slurmdb_assoc->id;
+		if (!slurmdb_assoc->id) {
+			error("We need an id to set data for getting usage");
+			return SLURM_ERROR;
+		}
+		id_str = xstrdup_printf("t3.id_assoc=%u", slurmdb_assoc->id);
 		cluster_name = slurmdb_assoc->cluster;
 		username = slurmdb_assoc->user;
 		my_list = &slurmdb_assoc->accounting_list;
 		my_usage_table = assoc_day_table;
 		break;
-	}
 	case DBD_GET_WCKEY_USAGE:
-	{
-		char *temp_usage[] = {
-			"id_wckey",
-			"time_start",
-			"alloc_cpu_secs"
-		};
-		usage_req_inx = temp_usage;
-
-		id = slurmdb_wckey->id;
+		if (!slurmdb_wckey->id) {
+			error("We need an id to set data for getting usage");
+			return SLURM_ERROR;
+		}
+		id_str = xstrdup_printf("id=%d", slurmdb_wckey->id);
 		cluster_name = slurmdb_wckey->cluster;
 		username = slurmdb_wckey->user;
 		my_list = &slurmdb_wckey->accounting_list;
 		my_usage_table = wckey_day_table;
 		break;
-	}
 	case DBD_GET_CLUSTER_USAGE:
-	{
-		return _get_cluster_usage(mysql_conn, uid, in,
-					  type, start, end);
+		rc = _get_cluster_usage(mysql_conn, uid, in,
+					type, start, end);
+		return rc;
 		break;
-	}
 	default:
 		error("Unknown usage type %d", type);
 		return SLURM_ERROR;
 		break;
 	}
 
-	if (!id) {
-		error("We need an id to set data for getting usage");
-		return SLURM_ERROR;
-	} else if (!cluster_name) {
+	if (!cluster_name) {
 		error("We need a cluster_name to set data for getting usage");
+		xfree(id_str);
 		return SLURM_ERROR;
 	}
 
-	if (check_connection(mysql_conn) != SLURM_SUCCESS)
-		return ESLURM_DB_CONNECTION;
-
 	private_data = slurm_get_private_data();
 	if (private_data & PRIVATE_DATA_USAGE) {
 		if (!(is_admin = is_user_min_admin_level(
@@ -851,6 +863,7 @@ extern int as_mysql_get_usage(mysql_conn_t *mysql_conn, uid_t uid,
 
 		bad_user:
 			errno = ESLURM_ACCESS_DENIED;
+			xfree(id_str);
 			return SLURM_ERROR;
 		}
 	}
@@ -858,64 +871,13 @@ is_user:
 
 	if (set_usage_information(&my_usage_table, type, &start, &end)
 	    != SLURM_SUCCESS) {
+		xfree(id_str);
 		return SLURM_ERROR;
 	}
 
-	xfree(tmp);
-	i=0;
-	xstrfmtcat(tmp, "%s", usage_req_inx[i]);
-	for(i=1; i<USAGE_COUNT; i++) {
-		xstrfmtcat(tmp, ", %s", usage_req_inx[i]);
-	}
-	switch (type) {
-	case DBD_GET_ASSOC_USAGE:
-		query = xstrdup_printf(
-			"select %s from \"%s_%s\" as t1, "
-			"\"%s_%s\" as t2, \"%s_%s\" as t3 "
-			"where (t1.time_start < %ld && t1.time_start >= %ld) "
-			"&& t1.id_assoc=t2.id_assoc && t3.id_assoc=%d && "
-			"t2.lft between t3.lft and t3.rgt "
-			"order by t3.id_assoc, time_start;",
-			tmp, cluster_name, my_usage_table,
-			cluster_name, cluster_name, assoc_table, assoc_table,
-			end, start, id);
-		break;
-	case DBD_GET_WCKEY_USAGE:
-		query = xstrdup_printf(
-			"select %s from \"%s_%s\" "
-			"where (time_start < %ld && time_start >= %ld) "
-			"&& id_wckey=%d order by id_wckey, time_start;",
-			tmp, cluster_name, my_usage_table, end, start, id);
-		break;
-	default:
-		error("Unknown usage type %d", type);
-		return SLURM_ERROR;
-		break;
-	}
-
-	xfree(tmp);
-	debug4("%d(%s:%d) query\n%s",
-	       mysql_conn->conn, THIS_FILE, __LINE__, query);
-	if (!(result = mysql_db_query_ret(
-		      mysql_conn, query, 0))) {
-		xfree(query);
-		return SLURM_ERROR;
-	}
-	xfree(query);
-
-	if (!(*my_list))
-		(*my_list) = list_create(slurmdb_destroy_accounting_rec);
-
-	while ((row = mysql_fetch_row(result))) {
-		slurmdb_accounting_rec_t *accounting_rec =
-			xmalloc(sizeof(slurmdb_accounting_rec_t));
-		accounting_rec->id = slurm_atoul(row[USAGE_ID]);
-		accounting_rec->period_start = slurm_atoul(row[USAGE_START]);
-		accounting_rec->alloc_secs = slurm_atoull(row[USAGE_ACPU]);
-		accounting_rec->consumed_energy = slurm_atoull(row[USAGE_ENERGY]);
-		list_append((*my_list), accounting_rec);
-	}
-	mysql_free_result(result);
+	_get_object_usage(mysql_conn, type, my_usage_table, cluster_name,
+			  id_str, start, end, my_list);
+	xfree(id_str);
 
 	return rc;
 }
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_usage.h b/src/plugins/accounting_storage/mysql/as_mysql_usage.h
index c61106aff..52943fb23 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_usage.h
+++ b/src/plugins/accounting_storage/mysql/as_mysql_usage.h
@@ -44,6 +44,7 @@
 
 extern time_t global_last_rollup;
 extern pthread_mutex_t rollup_lock;
+extern pthread_mutex_t usage_rollup_lock;
 
 extern int get_usage_for_list(mysql_conn_t *mysql_conn,
 			      slurmdbd_msg_type_t type, List object_list,
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_user.c b/src/plugins/accounting_storage/mysql/as_mysql_user.c
index 9bed9a4f6..c99a9bf36 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_user.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_user.c
@@ -87,7 +87,7 @@ static List _get_other_user_names_to_mod(mysql_conn_t *mysql_conn, uint32_t uid,
 	List ret_list = NULL;
 	ListIterator itr = NULL;
 
-	slurmdb_association_cond_t assoc_cond;
+	slurmdb_assoc_cond_t assoc_cond;
 	slurmdb_wckey_cond_t wckey_cond;
 
 	if (!user_cond->def_acct_list || !list_count(user_cond->def_acct_list))
@@ -96,7 +96,7 @@ static List _get_other_user_names_to_mod(mysql_conn_t *mysql_conn, uint32_t uid,
 	/* We have to use a different association_cond here because
 	   other things could be set here we don't care about in the
 	   user's. (So to be safe just move over the info we care about) */
-	memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+	memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 	assoc_cond.acct_list = user_cond->def_acct_list;
 	if (user_cond->assoc_cond) {
 		if (user_cond->assoc_cond->cluster_list)
@@ -108,7 +108,7 @@ static List _get_other_user_names_to_mod(mysql_conn_t *mysql_conn, uint32_t uid,
 	assoc_cond.only_defs = 1;
 	tmp_list = as_mysql_get_assocs(mysql_conn, uid, &assoc_cond);
 	if (tmp_list) {
-		slurmdb_association_rec_t *object = NULL;
+		slurmdb_assoc_rec_t *object = NULL;
 		itr = list_iterator_create(tmp_list);
 		while ((object = list_next(itr))) {
 			if (!ret_list)
@@ -116,8 +116,7 @@ static List _get_other_user_names_to_mod(mysql_conn_t *mysql_conn, uint32_t uid,
 			slurm_addto_char_list(ret_list, object->user);
 		}
 		list_iterator_destroy(itr);
-		list_destroy(tmp_list);
-		tmp_list = NULL;
+		FREE_NULL_LIST(tmp_list);
 	}
 
 no_assocs:
@@ -145,8 +144,7 @@ no_assocs:
 			slurm_addto_char_list(ret_list, object->user);
 		}
 		list_iterator_destroy(itr);
-		list_destroy(tmp_list);
-		tmp_list = NULL;
+		FREE_NULL_LIST(tmp_list);
 	}
 
 no_wckeys:
@@ -276,12 +274,30 @@ extern int as_mysql_add_users(mysql_conn_t *mysql_conn, uint32_t uid,
 	char *user_name = NULL;
 	char *extra = NULL, *tmp_extra = NULL;
 	int affect_rows = 0;
-	List assoc_list = list_create(slurmdb_destroy_association_rec);
+	List assoc_list = list_create(slurmdb_destroy_assoc_rec);
 	List wckey_list = list_create(slurmdb_destroy_wckey_rec);
 
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return ESLURM_DB_CONNECTION;
 
+	if (!is_user_min_admin_level(mysql_conn, uid, SLURMDB_ADMIN_OPERATOR)) {
+		slurmdb_user_rec_t user;
+
+		memset(&user, 0, sizeof(slurmdb_user_rec_t));
+		user.uid = uid;
+
+		if (!is_user_any_coord(mysql_conn, &user)) {
+			error("Only admins/operators/coordinators "
+			      "can add accounts");
+			return ESLURM_ACCESS_DENIED;
+		}
+		/* If the user is a coord of any acct they can add
+		 * accounts they are only able to make associations to
+		 * these accounts if they are coordinators of the
+		 * parent they are trying to add to
+		 */
+	}
+
 	user_name = uid_to_string((uid_t) uid);
 	itr = list_iterator_create(user_list);
 	while ((object = list_next(itr))) {
@@ -355,7 +371,7 @@ extern int as_mysql_add_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		   it set correctly.
 		*/
 		if (object->assoc_list) {
-			slurmdb_association_rec_t *assoc = NULL;
+			slurmdb_assoc_rec_t *assoc = NULL;
 			ListIterator assoc_itr =
 				list_iterator_create(object->assoc_list);
 			while ((assoc = list_next(assoc_itr))) {
@@ -413,14 +429,14 @@ extern int as_mysql_add_users(mysql_conn_t *mysql_conn, uint32_t uid,
 		     != SLURM_SUCCESS)
 			error("Problem adding user associations");
 	}
-	list_destroy(assoc_list);
+	FREE_NULL_LIST(assoc_list);
 
 	if (rc == SLURM_SUCCESS && list_count(wckey_list)) {
 		if ((rc = as_mysql_add_wckeys(mysql_conn, uid, wckey_list))
 		    != SLURM_SUCCESS)
 			error("Problem adding user wckeys");
 	}
-	list_destroy(wckey_list);
+	FREE_NULL_LIST(wckey_list);
 	return rc;
 }
 
@@ -627,8 +643,7 @@ extern List as_mysql_modify_users(mysql_conn_t *mysql_conn, uint32_t uid,
 	if (!(result = mysql_db_query_ret(
 		      mysql_conn, query, 0))) {
 		xfree(query);
-		if (ret_list)
-			list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 		return NULL;
 	}
 
@@ -677,8 +692,7 @@ no_user_table:
 		errno = ESLURM_ONE_CHANGE;
 		xfree(vals);
 		xfree(query);
-		if (ret_list)
-			list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 		return NULL;
 	}
 
@@ -697,17 +711,16 @@ no_user_table:
 	xfree(vals);
 	if (rc == SLURM_ERROR) {
 		error("Couldn't modify users");
-		list_destroy(ret_list);
-		ret_list = NULL;
+		FREE_NULL_LIST(ret_list);
 	}
 
 	if (user->default_acct) {
-		slurmdb_association_cond_t assoc_cond;
-		slurmdb_association_rec_t assoc;
+		slurmdb_assoc_cond_t assoc_cond;
+		slurmdb_assoc_rec_t assoc;
 		List tmp_list = NULL;
 
-		memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
-		slurmdb_init_association_rec(&assoc, 0);
+		memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
+		slurmdb_init_assoc_rec(&assoc, 0);
 		assoc.is_def = 1;
 		assoc_cond.acct_list = list_create(NULL);
 		list_append(assoc_cond.acct_list, user->default_acct);
@@ -718,11 +731,10 @@ no_user_table:
 				user_cond->assoc_cond->cluster_list;
 		tmp_list = as_mysql_modify_assocs(mysql_conn, uid,
 						  &assoc_cond, &assoc);
-		list_destroy(assoc_cond.acct_list);
+		FREE_NULL_LIST(assoc_cond.acct_list);
 
 		if (!tmp_list) {
-			list_destroy(ret_list);
-			ret_list = NULL;
+			FREE_NULL_LIST(ret_list);
 			goto end_it;
 		}
 		/* char *names = NULL; */
@@ -731,7 +743,7 @@ no_user_table:
 		/* 	info("%s", names); */
 		/* } */
 		/* list_iterator_destroy(itr); */
-		list_destroy(tmp_list);
+		FREE_NULL_LIST(tmp_list);
 	}
 
 	if (user->default_wckey) {
@@ -751,11 +763,10 @@ no_user_table:
 				user_cond->assoc_cond->cluster_list;
 		tmp_list = as_mysql_modify_wckeys(mysql_conn, uid,
 						  &wckey_cond, &wckey);
-		list_destroy(wckey_cond.name_list);
+		FREE_NULL_LIST(wckey_cond.name_list);
 
 		if (!tmp_list) {
-			list_destroy(ret_list);
-			ret_list = NULL;
+			FREE_NULL_LIST(ret_list);
 			goto end_it;
 		}
 		/* char *names = NULL; */
@@ -764,7 +775,7 @@ no_user_table:
 		/* 	info("%s", names); */
 		/* } */
 		/* list_iterator_destroy(itr); */
-		list_destroy(tmp_list);
+		FREE_NULL_LIST(tmp_list);
 	}
 end_it:
 	return ret_list;
@@ -786,7 +797,7 @@ extern List as_mysql_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 	MYSQL_RES *result = NULL;
 	MYSQL_ROW row;
 	slurmdb_user_cond_t user_coord_cond;
-	slurmdb_association_cond_t assoc_cond;
+	slurmdb_assoc_cond_t assoc_cond;
 	slurmdb_wckey_cond_t wckey_cond;
 	bool jobs_running = 0;
 
@@ -798,6 +809,11 @@ extern List as_mysql_remove_users(mysql_conn_t *mysql_conn, uint32_t uid,
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
 
+	if (!is_user_min_admin_level(mysql_conn, uid, SLURMDB_ADMIN_OPERATOR)) {
+		errno = ESLURM_ACCESS_DENIED;
+		return NULL;
+	}
+
 	if (user_cond->assoc_cond && user_cond->assoc_cond->user_list
 	    && list_count(user_cond->assoc_cond->user_list)) {
 		set = 0;
@@ -862,7 +878,7 @@ no_user_table:
 	xfree(query);
 
 	memset(&user_coord_cond, 0, sizeof(slurmdb_user_cond_t));
-	memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+	memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 	/* we do not need to free the objects we put in here since
 	   they are also placed in a list that will be freed
 	*/
@@ -894,17 +910,15 @@ no_user_table:
 	/* We need to remove these accounts from the coord's that have it */
 	coord_list = as_mysql_remove_coord(
 		mysql_conn, uid, NULL, &user_coord_cond);
-	if (coord_list)
-		list_destroy(coord_list);
+	FREE_NULL_LIST(coord_list);
 
 	/* We need to remove these users from the wckey table */
 	memset(&wckey_cond, 0, sizeof(slurmdb_wckey_cond_t));
 	wckey_cond.user_list = assoc_cond.user_list;
 	coord_list = as_mysql_remove_wckeys(mysql_conn, uid, &wckey_cond);
-	if (coord_list)
-		list_destroy(coord_list);
+	FREE_NULL_LIST(coord_list);
 
-	list_destroy(assoc_cond.user_list);
+	FREE_NULL_LIST(assoc_cond.user_list);
 
 	user_name = uid_to_string((uid_t) uid);
 	slurm_mutex_lock(&as_mysql_cluster_list_lock);
@@ -923,7 +937,7 @@ no_user_table:
 	xfree(user_name);
 	xfree(name_char);
 	if (rc == SLURM_ERROR) {
-		list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 		xfree(assoc_char);
 		return NULL;
 	}
@@ -937,7 +951,7 @@ no_user_table:
 	xfree(query);
 	if (rc != SLURM_SUCCESS) {
 		error("Couldn't remove user coordinators");
-		list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 		return NULL;
 	}
 
@@ -1058,8 +1072,8 @@ extern List as_mysql_remove_coord(mysql_conn_t *mysql_conn, uint32_t uid,
 				// happen
 				error("We are here with no coord accts");
 				errno = ESLURM_ACCESS_DENIED;
-				list_destroy(ret_list);
-				list_destroy(user_list);
+				FREE_NULL_LIST(ret_list);
+				FREE_NULL_LIST(user_list);
 				xfree(extra);
 				mysql_free_result(result);
 				return NULL;
@@ -1076,8 +1090,8 @@ extern List as_mysql_remove_coord(mysql_conn_t *mysql_conn, uint32_t uid,
 				      "ability to change this account (%s)",
 				      user.name, user.uid, row[1]);
 				errno = ESLURM_ACCESS_DENIED;
-				list_destroy(ret_list);
-				list_destroy(user_list);
+				FREE_NULL_LIST(ret_list);
+				FREE_NULL_LIST(user_list);
 				xfree(extra);
 				mysql_free_result(result);
 				return NULL;
@@ -1099,8 +1113,8 @@ extern List as_mysql_remove_coord(mysql_conn_t *mysql_conn, uint32_t uid,
 	xfree(user_name);
 	xfree(extra);
 	if (rc == SLURM_ERROR) {
-		list_destroy(ret_list);
-		list_destroy(user_list);
+		FREE_NULL_LIST(ret_list);
+		FREE_NULL_LIST(user_list);
 		errno = SLURM_ERROR;
 		return NULL;
 	}
@@ -1117,7 +1131,7 @@ extern List as_mysql_remove_coord(mysql_conn_t *mysql_conn, uint32_t uid,
 			slurmdb_destroy_user_rec(user_rec);
 	}
 	list_iterator_destroy(itr);
-	list_destroy(user_list);
+	FREE_NULL_LIST(user_list);
 
 	return ret_list;
 }
@@ -1184,14 +1198,14 @@ extern List as_mysql_get_users(mysql_conn_t *mysql_conn, uid_t uid,
 	if (user_list) {
 		if (!user_cond->assoc_cond)
 			user_cond->assoc_cond =
-				xmalloc(sizeof(slurmdb_association_rec_t));
+				xmalloc(sizeof(slurmdb_assoc_rec_t));
 
 		if (!user_cond->assoc_cond->user_list)
 			user_cond->assoc_cond->user_list = user_list;
 		else {
 			list_transfer(user_cond->assoc_cond->user_list,
 				      user_list);
-			list_destroy(user_list);
+			FREE_NULL_LIST(user_list);
 		}
 		user_list = NULL;
 	} else if ((user_cond->def_acct_list
@@ -1268,7 +1282,7 @@ empty:
 			      && user_cond->assoc_cond->only_defs))) {
 		ListIterator assoc_itr = NULL;
 		slurmdb_user_rec_t *user = NULL;
-		slurmdb_association_rec_t *assoc = NULL;
+		slurmdb_assoc_rec_t *assoc = NULL;
 		List assoc_list = NULL;
 
 		/* Make sure we don't get any non-user associations
@@ -1276,7 +1290,7 @@ empty:
 		 * defined */
 		if (!user_cond->assoc_cond)
 			user_cond->assoc_cond =
-				xmalloc(sizeof(slurmdb_association_cond_t));
+				xmalloc(sizeof(slurmdb_assoc_cond_t));
 
 		if (!user_cond->assoc_cond->user_list)
 			user_cond->assoc_cond->user_list = list_create(NULL);
@@ -1318,7 +1332,7 @@ empty:
 
 				if (!user->assoc_list)
 					user->assoc_list = list_create(
-						slurmdb_destroy_association_rec);
+						slurmdb_destroy_assoc_rec);
 				list_append(user->assoc_list, assoc);
 				list_remove(assoc_itr);
 			}
@@ -1326,7 +1340,7 @@ empty:
 		}
 		list_iterator_destroy(itr);
 		list_iterator_destroy(assoc_itr);
-		list_destroy(assoc_list);
+		FREE_NULL_LIST(assoc_list);
 	}
 
 get_wckeys:
@@ -1400,7 +1414,7 @@ get_wckeys:
 		list_iterator_destroy(itr);
 		list_iterator_destroy(wckey_itr);
 
-		list_destroy(wckey_list);
+		FREE_NULL_LIST(wckey_list);
 	}
 
 	return user_list;
diff --git a/src/plugins/accounting_storage/mysql/as_mysql_wckey.c b/src/plugins/accounting_storage/mysql/as_mysql_wckey.c
index 839a79d1d..0ab15b7f2 100644
--- a/src/plugins/accounting_storage/mysql/as_mysql_wckey.c
+++ b/src/plugins/accounting_storage/mysql/as_mysql_wckey.c
@@ -327,7 +327,7 @@ static int _cluster_remove_wckeys(mysql_conn_t *mysql_conn,
 	xfree(assoc_char);
 
 	if (rc == SLURM_ERROR) {
-		list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 		return SLURM_ERROR;
 	}
 
@@ -481,7 +481,7 @@ static int _cluster_get_wckeys(mysql_conn_t *mysql_conn,
 				   wckey_cond->usage_start,
 				   wckey_cond->usage_end);
 	list_transfer(sent_list, wckey_list);
-	list_destroy(wckey_list);
+	FREE_NULL_LIST(wckey_list);
 	return SLURM_SUCCESS;
 }
 
@@ -504,14 +504,18 @@ extern int as_mysql_add_wckeys(mysql_conn_t *mysql_conn, uint32_t uid,
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return ESLURM_DB_CONNECTION;
 
+	if (!is_user_min_admin_level(mysql_conn, uid, SLURMDB_ADMIN_OPERATOR))
+		return ESLURM_ACCESS_DENIED;
+
 	user_name = uid_to_string((uid_t) uid);
 	itr = list_iterator_create(wckey_list);
 	while ((object = list_next(itr))) {
 		if (!object->cluster || !object->cluster[0]
 		    || !object->user || !object->user[0]
 		    || !object->name) {
-			error("We need a wckey name, cluster, "
-			      "and user to add.");
+			error("We need a wckey name (%s), cluster (%s), "
+			      "and user (%s) to add.",
+			      object->name, object->cluster, object->user);
 			rc = SLURM_ERROR;
 			continue;
 		}
@@ -626,8 +630,7 @@ extern int as_mysql_add_wckeys(mysql_conn_t *mysql_conn, uint32_t uid,
 end_it:
 	if (rc == SLURM_SUCCESS)
 		_make_sure_users_have_default(mysql_conn, added_user_list);
-	if (added_user_list)
-		list_destroy(added_user_list);
+	FREE_NULL_LIST(added_user_list);
 
 	return rc;
 }
@@ -711,7 +714,7 @@ is_same_user:
 		slurm_mutex_unlock(&as_mysql_cluster_list_lock);
 
 	if (rc == SLURM_ERROR) {
-		list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 		ret_list = NULL;
 	}
 
@@ -737,6 +740,11 @@ extern List as_mysql_remove_wckeys(mysql_conn_t *mysql_conn,
 	if (check_connection(mysql_conn) != SLURM_SUCCESS)
 		return NULL;
 
+	if (!is_user_min_admin_level(mysql_conn, uid, SLURMDB_ADMIN_OPERATOR)) {
+		errno = ESLURM_ACCESS_DENIED;
+		return NULL;
+	}
+
 	(void) _setup_wckey_cond_limits(wckey_cond, &extra);
 
 	if (wckey_cond->cluster_list && list_count(wckey_cond->cluster_list))
@@ -767,7 +775,7 @@ empty:
 		slurm_mutex_unlock(&as_mysql_cluster_list_lock);
 
 	if (rc == SLURM_ERROR) {
-		list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 		return NULL;
 	}
 
@@ -841,7 +849,7 @@ empty:
 		if (_cluster_get_wckeys(mysql_conn, wckey_cond, tmp, extra,
 					cluster_name, wckey_list)
 		    != SLURM_SUCCESS) {
-			list_destroy(wckey_list);
+			FREE_NULL_LIST(wckey_list);
 			wckey_list = NULL;
 			break;
 		}
diff --git a/src/plugins/accounting_storage/none/Makefile.in b/src/plugins/accounting_storage/none/Makefile.in
index 2082d4df5..54e27ee18 100644
--- a/src/plugins/accounting_storage/none/Makefile.in
+++ b/src/plugins/accounting_storage/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/accounting_storage/none/accounting_storage_none.c b/src/plugins/accounting_storage/none/accounting_storage_none.c
index 31e55eb7a..ca11ae075 100644
--- a/src/plugins/accounting_storage/none/accounting_storage_none.c
+++ b/src/plugins/accounting_storage/none/accounting_storage_none.c
@@ -62,16 +62,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "Accounting storage NOT INVOKED plugin";
 const char plugin_type[] = "accounting_storage/none";
-const uint32_t plugin_version = 100;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -129,8 +125,14 @@ extern int acct_storage_p_add_clusters(void *db_conn, uint32_t uid,
 	return SLURM_SUCCESS;
 }
 
-extern int acct_storage_p_add_associations(void *db_conn, uint32_t uid,
-					   List association_list)
+extern int acct_storage_p_add_tres(void *db_conn,
+				     uint32_t uid, List tres_list)
+{
+	return SLURM_SUCCESS;
+}
+
+extern int acct_storage_p_add_assocs(void *db_conn, uint32_t uid,
+				     List assoc_list)
 {
 	return SLURM_SUCCESS;
 }
@@ -180,9 +182,9 @@ extern List acct_storage_p_modify_clusters(void *db_conn, uint32_t uid,
 	return SLURM_SUCCESS;
 }
 
-extern List acct_storage_p_modify_associations(void *db_conn, uint32_t uid,
-					      slurmdb_association_cond_t *assoc_q,
-					      slurmdb_association_rec_t *assoc)
+extern List acct_storage_p_modify_assocs(void *db_conn, uint32_t uid,
+					      slurmdb_assoc_cond_t *assoc_q,
+					      slurmdb_assoc_rec_t *assoc)
 {
 	return SLURM_SUCCESS;
 }
@@ -246,8 +248,8 @@ extern List acct_storage_p_remove_clusters(void *db_conn, uint32_t uid,
 	return SLURM_SUCCESS;
 }
 
-extern List acct_storage_p_remove_associations(void *db_conn, uint32_t uid,
-					      slurmdb_association_cond_t *assoc_q)
+extern List acct_storage_p_remove_assocs(void *db_conn, uint32_t uid,
+					      slurmdb_assoc_cond_t *assoc_q)
 {
 	return SLURM_SUCCESS;
 }
@@ -299,8 +301,14 @@ extern List acct_storage_p_get_config(void *db_conn, char *config_name)
 	return NULL;
 }
 
-extern List acct_storage_p_get_associations(void *db_conn, uid_t uid,
-					    slurmdb_association_cond_t *assoc_q)
+extern List acct_storage_p_get_tres(void *db_conn, uid_t uid,
+				      slurmdb_tres_cond_t *tres_cond)
+{
+	return NULL;
+}
+
+extern List acct_storage_p_get_assocs(void *db_conn, uid_t uid,
+					    slurmdb_assoc_cond_t *assoc_q)
 {
 	return NULL;
 }
@@ -312,7 +320,7 @@ extern List acct_storage_p_get_events(void *db_conn, uint32_t uid,
 }
 
 extern List acct_storage_p_get_problems(void *db_conn, uid_t uid,
-					slurmdb_association_cond_t *assoc_q)
+					slurmdb_assoc_cond_t *assoc_q)
 {
 	return NULL;
 }
@@ -335,8 +343,8 @@ extern List acct_storage_p_get_wckeys(void *db_conn, uid_t uid,
 	return NULL;
 }
 
-extern List acct_storage_p_get_reservations(void *mysql_conn, uid_t uid,
-					    slurmdb_reservation_cond_t *resv_cond)
+extern List acct_storage_p_get_reservations(
+	void *db_conn, uid_t uid, slurmdb_reservation_cond_t *resv_cond)
 {
 	return NULL;
 }
@@ -397,9 +405,9 @@ extern int clusteracct_storage_p_fini_ctld(void *db_conn,
 	return SLURM_SUCCESS;
 }
 
-extern int clusteracct_storage_p_cluster_cpus(void *db_conn,
+extern int clusteracct_storage_p_cluster_tres(void *db_conn,
 					      char *cluster_nodes,
-					      uint32_t cpus,
+					      List tres,
 					      time_t event_time)
 {
 	return SLURM_SUCCESS;
diff --git a/src/plugins/accounting_storage/slurmdbd/Makefile.in b/src/plugins/accounting_storage/slurmdbd/Makefile.in
index cbcaf3c69..595098074 100644
--- a/src/plugins/accounting_storage/slurmdbd/Makefile.in
+++ b/src/plugins/accounting_storage/slurmdbd/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -275,6 +278,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -324,8 +329,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -344,6 +353,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -387,6 +399,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -410,6 +423,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
index 29546c62b..40ea367ff 100644
--- a/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
+++ b/src/plugins/accounting_storage/slurmdbd/accounting_storage_slurmdbd.c
@@ -102,16 +102,12 @@ List job_list = NULL;
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "Accounting storage SLURMDBD plugin";
 const char plugin_type[] = "accounting_storage/slurmdbd";
-const uint32_t plugin_version = 100;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 static char *slurmdbd_auth_info = NULL;
 
@@ -139,6 +135,8 @@ static void _partial_free_dbd_job_start(void *object)
 		xfree(req->gres_alloc);
 		xfree(req->gres_req);
 		xfree(req->gres_used);
+		xfree(req->tres_alloc_str);
+		xfree(req->tres_req_str);
 	}
 }
 
@@ -163,6 +161,7 @@ static int _setup_job_start_msg(dbd_job_start_msg_t *req,
 	memset(req, 0, sizeof(dbd_job_start_msg_t));
 
 	req->account       = xstrdup(job_ptr->account);
+
 	req->assoc_id      = job_ptr->assoc_id;
 #ifdef HAVE_BG
 	select_g_select_jobinfo_get(job_ptr->select_jobinfo,
@@ -214,7 +213,6 @@ static int _setup_job_start_msg(dbd_job_start_msg_t *req,
 		req->node_inx = xstrdup(bit_fmt(temp_bit, sizeof(temp_bit),
 						job_ptr->node_bitmap));
 	}
-	req->alloc_cpus    = job_ptr->total_cpus;
 
 	if (!IS_JOB_PENDING(job_ptr) && job_ptr->part_ptr)
 		req->partition = xstrdup(job_ptr->part_ptr->name);
@@ -228,6 +226,8 @@ static int _setup_job_start_msg(dbd_job_start_msg_t *req,
 	req->resv_id       = job_ptr->resv_id;
 	req->priority      = job_ptr->priority;
 	req->timelimit     = job_ptr->time_limit;
+	req->tres_alloc_str= xstrdup(job_ptr->tres_alloc_str);
+	req->tres_req_str  = xstrdup(job_ptr->tres_req_str);
 	req->wckey         = xstrdup(job_ptr->wckey);
 	req->uid           = job_ptr->user_id;
 	req->qos_id        = job_ptr->qos_id;
@@ -288,14 +288,11 @@ static void *_set_db_inx_thread(void *no_data)
 		}
 		itr = list_iterator_create(job_list);
 		while ((job_ptr = list_next(itr))) {
-			if (!job_ptr->db_index && !job_ptr->resize_time) {
-				dbd_job_start_msg_t *req =
-					xmalloc(sizeof(dbd_job_start_msg_t));
-				if (_setup_job_start_msg(req, job_ptr)
-				    != SLURM_SUCCESS) {
-					_partial_destroy_dbd_job_start(req);
+			dbd_job_start_msg_t *req;
+
+			if (!IS_JOB_UPDATE_DB(job_ptr)) {
+				if (job_ptr->db_index || job_ptr->resize_time)
 					continue;
-				}
 
 				/* We set the db_index to NO_VAL here
 				 * to avoid a potential race condition
@@ -312,22 +309,31 @@ static void *_set_db_inx_thread(void *no_data)
 				 * it accordingly.
 				 */
 				job_ptr->db_index = NO_VAL;
+			}
 
-				/* we only want to destory the pointer
-				   here not the contents (except
-				   block_id) so call special function
-				   _partial_destroy_dbd_job_start.
-				*/
-				if (!local_job_list)
-					local_job_list = list_create(
-						_partial_destroy_dbd_job_start);
-				list_append(local_job_list, req);
-				/* Just so we don't have a crazy
-				   amount of messages at once.
-				*/
-				if (list_count(local_job_list) > 1000)
-					break;
+			req = xmalloc(sizeof(dbd_job_start_msg_t));
+			if (_setup_job_start_msg(req, job_ptr)
+			    != SLURM_SUCCESS) {
+				_partial_destroy_dbd_job_start(req);
+				if (job_ptr->db_index == NO_VAL)
+					job_ptr->db_index = 0;
+				continue;
 			}
+
+			/* We only want to destroy the pointer
+			   here, not the contents (except
+			   block_id), so call the special function
+			   _partial_destroy_dbd_job_start.
+			*/
+			if (!local_job_list)
+				local_job_list = list_create(
+					_partial_destroy_dbd_job_start);
+			list_append(local_job_list, req);
+			/* Just so we don't have a crazy
+			   number of messages at once.
+			*/
+			if (list_count(local_job_list) > 1000)
+				break;
 		}
 		list_iterator_destroy(itr);
 		unlock_slurmctld(job_read_lock);
@@ -346,7 +352,7 @@ static void *_set_db_inx_thread(void *no_data)
 			req.data = &send_msg;
 			rc = slurm_send_recv_slurmdbd_msg(
 				SLURM_PROTOCOL_VERSION, &req, &resp);
-			list_destroy(local_job_list);
+			FREE_NULL_LIST(local_job_list);
 			if (rc != SLURM_SUCCESS) {
 				error("slurmdbd: DBD_SEND_MULT_JOB_START "
 				      "failure: %m");
@@ -387,6 +393,8 @@ static void *_set_db_inx_thread(void *no_data)
 						 * the start needs to be sent
 						 * again. */
 						job_ptr->db_index = id_ptr->id;
+						job_ptr->job_state &=
+							(~JOB_UPDATE_DB);
 					}
 				}
 				list_iterator_destroy(itr);
@@ -488,11 +496,10 @@ extern int init ( void )
 
 extern int fini ( void )
 {
+	slurm_mutex_lock(&db_inx_lock);
 	if (running_db_inx)
 		debug("Waiting for db_inx thread to finish.");
 
-	slurm_mutex_lock(&db_inx_lock);
-
 	/* cancel the db_inx thread and then join the cleanup thread */
 	if (db_inx_handler_thread)
 		pthread_cancel(db_inx_handler_thread);
@@ -642,15 +649,40 @@ extern int acct_storage_p_add_clusters(void *db_conn, uint32_t uid,
 	return rc;
 }
 
-extern int acct_storage_p_add_associations(void *db_conn, uint32_t uid,
-					   List association_list)
+extern int acct_storage_p_add_tres(void *db_conn,
+				   uint32_t uid, List tres_list_in)
+{
+	slurmdbd_msg_t req;
+	dbd_list_msg_t get_msg;
+	int rc, resp_code;
+
+	/* This means we are updating views which don't apply in this plugin */
+	if (!tres_list_in)
+		return SLURM_SUCCESS;
+
+	memset(&get_msg, 0, sizeof(dbd_list_msg_t));
+	get_msg.my_list = tres_list_in;
+
+	req.msg_type = DBD_ADD_TRES;
+	req.data = &get_msg;
+	rc = slurm_send_slurmdbd_recv_rc_msg(SLURM_PROTOCOL_VERSION,
+					     &req, &resp_code);
+
+	if (resp_code != SLURM_SUCCESS)
+		rc = resp_code;
+
+	return rc;
+}
+
+extern int acct_storage_p_add_assocs(void *db_conn, uint32_t uid,
+					   List assoc_list)
 {
 	slurmdbd_msg_t req;
 	dbd_list_msg_t get_msg;
 	int rc, resp_code = SLURM_SUCCESS;
 
 	memset(&get_msg, 0, sizeof(dbd_list_msg_t));
-	get_msg.my_list = association_list;
+	get_msg.my_list = assoc_list;
 
 	req.msg_type = DBD_ADD_ASSOCS;
 	req.data = &get_msg;
@@ -879,10 +911,10 @@ extern List acct_storage_p_modify_clusters(void *db_conn, uint32_t uid,
 	return ret_list;
 }
 
-extern List acct_storage_p_modify_associations(
+extern List acct_storage_p_modify_assocs(
 	void *db_conn, uint32_t uid,
-	slurmdb_association_cond_t *assoc_cond,
-	slurmdb_association_rec_t *assoc)
+	slurmdb_assoc_cond_t *assoc_cond,
+	slurmdb_assoc_rec_t *assoc)
 {
 	slurmdbd_msg_t req;
 	dbd_modify_msg_t get_msg;
@@ -1294,9 +1326,9 @@ extern List acct_storage_p_remove_clusters(void *db_conn, uint32_t uid,
 	return ret_list;
 }
 
-extern List acct_storage_p_remove_associations(
+extern List acct_storage_p_remove_assocs(
 	void *db_conn, uint32_t uid,
-	slurmdb_association_cond_t *assoc_cond)
+	slurmdb_assoc_cond_t *assoc_cond)
 {
 	slurmdbd_msg_t req;
 	dbd_cond_msg_t get_msg;
@@ -1652,8 +1684,49 @@ extern List acct_storage_p_get_config(void *db_conn, char *config_name)
 	return ret_list;
 }
 
-extern List acct_storage_p_get_associations(
-	void *db_conn, uid_t uid, slurmdb_association_cond_t *assoc_cond)
+extern List acct_storage_p_get_tres(void *db_conn, uid_t uid,
+				    slurmdb_tres_cond_t *tres_cond)
+{
+	slurmdbd_msg_t req, resp;
+	dbd_cond_msg_t get_msg;
+	dbd_list_msg_t *got_msg;
+	int rc;
+	List ret_list = NULL;
+
+	memset(&get_msg, 0, sizeof(dbd_cond_msg_t));
+	get_msg.cond = tres_cond;
+
+	req.msg_type = DBD_GET_TRES;
+	req.data = &get_msg;
+	rc = slurm_send_recv_slurmdbd_msg(SLURM_PROTOCOL_VERSION, &req, &resp);
+
+	if (rc != SLURM_SUCCESS)
+		error("slurmdbd: DBD_GET_TRES failure: %m");
+	else if (resp.msg_type == DBD_RC) {
+		dbd_rc_msg_t *msg = resp.data;
+		if (msg->return_code == SLURM_SUCCESS) {
+			info("%s", msg->comment);
+			ret_list = list_create(NULL);
+		} else {
+			slurm_seterrno(msg->return_code);
+			error("%s", msg->comment);
+		}
+		slurmdbd_free_rc_msg(msg);
+	} else if (resp.msg_type != DBD_GOT_TRES) {
+		error("slurmdbd: response type not DBD_GOT_TRES: %u",
+		      resp.msg_type);
+	} else {
+		got_msg = (dbd_list_msg_t *) resp.data;
+		ret_list = got_msg->my_list;
+		got_msg->my_list = NULL;
+		slurmdbd_free_list_msg(got_msg);
+	}
+
+	return ret_list;
+}
+
+extern List acct_storage_p_get_assocs(
+	void *db_conn, uid_t uid, slurmdb_assoc_cond_t *assoc_cond)
 {
 	slurmdbd_msg_t req, resp;
 	dbd_cond_msg_t get_msg;
@@ -1735,7 +1808,7 @@ extern List acct_storage_p_get_events(void *db_conn, uint32_t uid,
 }
 
 extern List acct_storage_p_get_problems(void *db_conn, uid_t uid,
-					slurmdb_association_cond_t *assoc_cond)
+					slurmdb_assoc_cond_t *assoc_cond)
 {
 	slurmdbd_msg_t req, resp;
 	dbd_cond_msg_t get_msg;
@@ -1920,7 +1993,7 @@ extern List acct_storage_p_get_wckeys(void *db_conn, uid_t uid,
 }
 
 extern List acct_storage_p_get_reservations(
-	void *mysql_conn, uid_t uid,
+	void *db_conn, uid_t uid,
 	slurmdb_reservation_cond_t *resv_cond)
 {
 	slurmdbd_msg_t req, resp;
@@ -2016,7 +2089,7 @@ extern int acct_storage_p_get_usage(void *db_conn, uid_t uid,
 	slurmdbd_msg_t req, resp;
 	dbd_usage_msg_t get_msg;
 	dbd_usage_msg_t *got_msg;
-	slurmdb_association_rec_t *got_assoc = (slurmdb_association_rec_t *)in;
+	slurmdb_assoc_rec_t *got_assoc = (slurmdb_assoc_rec_t *)in;
 	slurmdb_wckey_rec_t *got_wckey = (slurmdb_wckey_rec_t *)in;
 	slurmdb_cluster_rec_t *got_cluster = (slurmdb_cluster_rec_t *)in;
 	List *my_list = NULL;
@@ -2069,7 +2142,7 @@ extern int acct_storage_p_get_usage(void *db_conn, uid_t uid,
 		got_msg = (dbd_usage_msg_t *) resp.data;
 		switch (type) {
 		case DBD_GET_ASSOC_USAGE:
-			got_assoc = (slurmdb_association_rec_t *)got_msg->rec;
+			got_assoc = (slurmdb_assoc_rec_t *)got_msg->rec;
 			(*my_list) = got_assoc->accounting_list;
 			got_assoc->accounting_list = NULL;
 			break;
@@ -2129,30 +2202,26 @@ extern int clusteracct_storage_p_node_down(void *db_conn,
 {
 	slurmdbd_msg_t msg;
 	dbd_node_state_msg_t req;
-	uint16_t cpus;
 	char *my_reason;
 
-	if (slurmctld_conf.fast_schedule)
-		cpus = node_ptr->config_ptr->cpus;
-	else
-		cpus = node_ptr->cpus;
-
 	if (reason)
 		my_reason = reason;
 	else
 		my_reason = node_ptr->reason;
 
 	memset(&req, 0, sizeof(dbd_node_state_msg_t));
-	req.cpu_count = cpus;
 	req.hostlist   = node_ptr->name;
 	req.new_state  = DBD_NODE_STATE_DOWN;
 	req.event_time = event_time;
 	req.reason     = my_reason;
 	req.reason_uid = reason_uid;
 	req.state      = node_ptr->node_state;
+	req.tres_str   = node_ptr->tres_str;
+
 	msg.msg_type   = DBD_NODE_STATE;
 	msg.data       = &req;
 
+	//info("sending a down message here");
 	if (slurm_send_slurmdbd_msg(SLURM_PROTOCOL_VERSION, &msg) < 0)
 		return SLURM_ERROR;
 
@@ -2174,28 +2243,33 @@ extern int clusteracct_storage_p_node_up(void *db_conn,
 	msg.msg_type   = DBD_NODE_STATE;
 	msg.data       = &req;
 
+	// info("sending an up message here");
 	if (slurm_send_slurmdbd_msg(SLURM_PROTOCOL_VERSION, &msg) < 0)
 		return SLURM_ERROR;
 
 	return SLURM_SUCCESS;
 }
 
-extern int clusteracct_storage_p_cluster_cpus(void *db_conn,
+extern int clusteracct_storage_p_cluster_tres(void *db_conn,
 					      char *cluster_nodes,
-					      uint32_t cpus,
+					      char *tres_str_in,
 					      time_t event_time)
 {
 	slurmdbd_msg_t msg;
-	dbd_cluster_cpus_msg_t req;
+	dbd_cluster_tres_msg_t req;
 	int rc = SLURM_ERROR;
 
-	debug2("Sending cpu count of %d for cluster", cpus);
-	memset(&req, 0, sizeof(dbd_cluster_cpus_msg_t));
+	if (!tres_str_in)
+		return rc;
+
+	debug2("Sending tres '%s' for cluster", tres_str_in);
+	memset(&req, 0, sizeof(dbd_cluster_tres_msg_t));
 	req.cluster_nodes = cluster_nodes;
-	req.cpu_count   = cpus;
-	req.event_time   = event_time;
-	msg.msg_type     = DBD_CLUSTER_CPUS;
-	msg.data         = &req;
+	req.event_time    = event_time;
+	req.tres_str      = tres_str_in;
+
+	msg.msg_type      = DBD_CLUSTER_TRES;
+	msg.data          = &req;
 
 	slurm_send_slurmdbd_recv_rc_msg(SLURM_PROTOCOL_VERSION, &msg, &rc);
 
@@ -2363,7 +2437,7 @@ extern int jobacct_storage_p_job_complete(void *db_conn,
 extern int jobacct_storage_p_step_start(void *db_conn,
 					struct step_record *step_ptr)
 {
-	uint32_t cpus = 0, tasks = 0, nodes = 0, task_dist = 0;
+	uint32_t tasks = 0, nodes = 0, task_dist = 0;
 	char node_list[BUFFER_SIZE];
 	slurmdbd_msg_t msg;
 	dbd_step_start_msg_t req;
@@ -2374,20 +2448,19 @@ extern int jobacct_storage_p_step_start(void *db_conn,
 #ifdef HAVE_BG_L_P
 
 	if (step_ptr->job_ptr->details)
-		tasks = cpus = step_ptr->job_ptr->details->min_cpus;
+		tasks = step_ptr->job_ptr->details->min_cpus;
 	else
-		tasks = cpus = step_ptr->job_ptr->cpu_cnt;
+		tasks = step_ptr->job_ptr->cpu_cnt;
 	select_g_select_jobinfo_get(step_ptr->job_ptr->select_jobinfo,
 				    SELECT_JOBDATA_NODE_CNT,
 				    &nodes);
 	temp_nodes = step_ptr->job_ptr->nodes;
 #else
 	if (!step_ptr->step_layout || !step_ptr->step_layout->task_cnt) {
-		cpus = tasks = step_ptr->job_ptr->total_cpus;
+		tasks = step_ptr->job_ptr->total_cpus;
 		nodes = step_ptr->job_ptr->total_nodes;
 		temp_nodes = step_ptr->job_ptr->nodes;
 	} else {
-		cpus = step_ptr->cpu_count;
 		tasks = step_ptr->step_layout->task_cnt;
 #ifdef HAVE_BGQ
 		select_g_select_jobinfo_get(step_ptr->select_jobinfo,
@@ -2449,9 +2522,13 @@ extern int jobacct_storage_p_step_start(void *db_conn,
 	if (step_ptr->step_layout)
 		req.task_dist   = step_ptr->step_layout->task_dist;
 	req.task_dist   = task_dist;
-	req.total_cpus = cpus;
+
 	req.total_tasks = tasks;
-	req.req_cpufreq = step_ptr->cpu_freq;
+
+	req.tres_alloc_str = step_ptr->tres_alloc_str;
+	req.req_cpufreq_min = step_ptr->cpu_freq_min;
+	req.req_cpufreq_max = step_ptr->cpu_freq_max;
+	req.req_cpufreq_gov = step_ptr->cpu_freq_gov;
 
 	msg.msg_type    = DBD_STEP_START;
 	msg.data        = &req;
@@ -2702,15 +2779,15 @@ extern int acct_storage_p_flush_jobs_on_cluster(void *db_conn,
 						time_t event_time)
 {
 	slurmdbd_msg_t msg;
-	dbd_cluster_cpus_msg_t req;
+	dbd_cluster_tres_msg_t req;
 
 	info("Ending any jobs in accounting that were running when controller "
 	     "went down on");
 
-	memset(&req, 0, sizeof(dbd_cluster_cpus_msg_t));
+	memset(&req, 0, sizeof(dbd_cluster_tres_msg_t));
 
-	req.cpu_count   = 0;
 	req.event_time   = event_time;
+	req.tres_str     = NULL;
 
 	msg.msg_type     = DBD_FLUSH_JOBS;
 	msg.data         = &req;
diff --git a/src/plugins/acct_gather_energy/Makefile.am b/src/plugins/acct_gather_energy/Makefile.am
index d5f11d3de..b70a343b9 100644
--- a/src/plugins/acct_gather_energy/Makefile.am
+++ b/src/plugins/acct_gather_energy/Makefile.am
@@ -1,3 +1,3 @@
 # Makefile for accounting gather energy plugins
 
-SUBDIRS = rapl ipmi none
+SUBDIRS = cray rapl ipmi none
diff --git a/src/plugins/acct_gather_energy/Makefile.in b/src/plugins/acct_gather_energy/Makefile.in
index 8ce1818d0..741421f25 100644
--- a/src/plugins/acct_gather_energy/Makefile.in
+++ b/src/plugins/acct_gather_energy/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -447,7 +461,7 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = rapl ipmi none
+SUBDIRS = cray rapl ipmi none
 all: all-recursive
 
 .SUFFIXES:
diff --git a/src/plugins/acct_gather_energy/cray/Makefile.am b/src/plugins/acct_gather_energy/cray/Makefile.am
new file mode 100644
index 000000000..c691e55c3
--- /dev/null
+++ b/src/plugins/acct_gather_energy/cray/Makefile.am
@@ -0,0 +1,15 @@
+# Makefile for acct_gather_energy/cray plugin
+
+AUTOMAKE_OPTIONS = foreign
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
+
+pkglib_LTLIBRARIES = acct_gather_energy_cray.la
+
+# cpu/core energy accounting plugin.
+acct_gather_energy_cray_la_SOURCES = acct_gather_energy_cray.c
+
+acct_gather_energy_cray_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+acct_gather_energy_cray_la_LIBADD  = -lm
diff --git a/src/plugins/acct_gather_energy/cray/Makefile.in b/src/plugins/acct_gather_energy/cray/Makefile.in
new file mode 100644
index 000000000..9450fd730
--- /dev/null
+++ b/src/plugins/acct_gather_energy/cray/Makefile.in
@@ -0,0 +1,814 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for acct_gather_energy/cray plugin
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/acct_gather_energy/cray
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+	$(top_srcdir)/auxdir/depcomp
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+  test -z "$$files" \
+    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+         $(am__cd) "$$dir" && rm -f $$files; }; \
+  }
+am__installdirs = "$(DESTDIR)$(pkglibdir)"
+LTLIBRARIES = $(pkglib_LTLIBRARIES)
+acct_gather_energy_cray_la_DEPENDENCIES =
+am_acct_gather_energy_cray_la_OBJECTS = acct_gather_energy_cray.lo
+acct_gather_energy_cray_la_OBJECTS =  \
+	$(am_acct_gather_energy_cray_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 = 
+acct_gather_energy_cray_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \
+	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
+	$(AM_CFLAGS) $(CFLAGS) $(acct_gather_energy_cray_la_LDFLAGS) \
+	$(LDFLAGS) -o $@
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo "  CC      " $@;
+am__v_CC_1 = 
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo "  CCLD    " $@;
+am__v_CCLD_1 = 
+SOURCES = $(acct_gather_energy_cray_la_SOURCES)
+DIST_SOURCES = $(acct_gather_energy_cray_la_SOURCES)
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
+pkglib_LTLIBRARIES = acct_gather_energy_cray.la
+
+# cpu/core energy accounting plugin.
+acct_gather_energy_cray_la_SOURCES = acct_gather_energy_cray.c
+acct_gather_energy_cray_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+acct_gather_energy_cray_la_LIBADD = -lm
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/acct_gather_energy/cray/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/plugins/acct_gather_energy/cray/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	list2=; for p in $$list; do \
+	  if test -f $$p; then \
+	    list2="$$list2 $$p"; \
+	  else :; fi; \
+	done; \
+	test -z "$$list2" || { \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(pkglibdir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" || exit 1; \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \
+	}
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	for p in $$list; do \
+	  $(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; \
+	locs=`for p in $$list; do echo $$p; done | \
+	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+	      sort -u`; \
+	test -z "$$locs" || { \
+	  echo rm -f $${locs}; \
+	  rm -f $${locs}; \
+	}
+
+acct_gather_energy_cray.la: $(acct_gather_energy_cray_la_OBJECTS) $(acct_gather_energy_cray_la_DEPENDENCIES) $(EXTRA_acct_gather_energy_cray_la_DEPENDENCIES) 
+	$(AM_V_CCLD)$(acct_gather_energy_cray_la_LINK) -rpath $(pkglibdir) $(acct_gather_energy_cray_la_OBJECTS) $(acct_gather_energy_cray_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/acct_gather_energy_cray.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+	for dir in "$(DESTDIR)$(pkglibdir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-pkglibLTLIBRARIES
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pkglibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-pkglibLTLIBRARIES cscopelist-am ctags \
+	ctags-am distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-pkglibLTLIBRARIES install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \
+	uninstall-pkglibLTLIBRARIES
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/acct_gather_energy/cray/acct_gather_energy_cray.c b/src/plugins/acct_gather_energy/cray/acct_gather_energy_cray.c
new file mode 100644
index 000000000..7ed6983a2
--- /dev/null
+++ b/src/plugins/acct_gather_energy/cray/acct_gather_energy_cray.c
@@ -0,0 +1,352 @@
+/*****************************************************************************\
+ *  acct_gather_energy_cray.c - slurm energy accounting plugin for cray.
+ *****************************************************************************
+ *  Copyright (C) 2015 SchedMD LLC
+ *  Written by Danny Auble <da@schedmd.com> who borrowed from the rapl
+ *  plugin of the same type
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+ *
+\*****************************************************************************/
+
+/*   acct_gather_energy_cray
+ * This plugin does not initiate a node-level thread.
+ * It will be used to get energy values from the cray bmc when available
+ */
+
+
+#include "src/common/slurm_xlator.h"
+#include "src/common/slurm_acct_gather_energy.h"
+
+
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *	<application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "jobacct" for SLURM job completion logging) and <method>
+ * is a description of how this plugin satisfies that application.  SLURM will
+ * only load job completion logging plugins if the plugin_type string has a
+ * prefix of "jobacct/".
+ *
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
+ */
+const char plugin_name[] = "AcctGatherEnergy CRAY plugin";
+const char plugin_type[] = "acct_gather_energy/cray";
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
+
+static acct_gather_energy_t *local_energy = NULL;
+static uint64_t debug_flags = 0;
+
+enum {
+	GET_ENERGY,
+	GET_POWER
+};
+
+static uint32_t _get_latest_stats(int type)
+{
+	uint32_t data = 0;
+	int fd;
+	FILE *fp = NULL;
+	char *file_name;
+	char sbuf[72];
+	int num_read;
+
+	switch (type) {
+	case GET_ENERGY:
+		file_name = "/sys/cray/pm_counters/energy";
+		break;
+	case GET_POWER:
+		file_name = "/sys/cray/pm_counters/power";
+		break;
+	default:
+		error("unknown type %d", type);
+		return 0;
+		break;
+	}
+
+	if (!(fp = fopen(file_name, "r"))) {
+		error("_get_latest_stats: unable to open %s", file_name);
+		return data;
+	}
+
+	fd = fileno(fp);
+	fcntl(fd, F_SETFD, FD_CLOEXEC);
+	num_read = read(fd, sbuf, (sizeof(sbuf) - 1));
+	if (num_read > 0) {
+		sbuf[num_read] = '\0';
+		sscanf(sbuf, "%u", &data);
+	}
+	fclose(fp);
+
+	return data;
+}
+
+static bool _run_in_daemon(void)
+{
+	static bool set = false;
+	static bool run = false;
+
+	if (!set) {
+		set = 1;
+		run = run_in_daemon("slurmd,slurmstepd");
+	}
+
+	return run;
+}
+
+static void _get_joules_task(acct_gather_energy_t *energy)
+{
+	uint64_t curr_energy, diff_energy = 0;
+	uint32_t curr_power;
+	time_t now;
+
+	if (energy->current_watts == NO_VAL)
+		return;
+
+	now = time(NULL);
+	curr_energy = (uint64_t) _get_latest_stats(GET_ENERGY);
+	curr_power = _get_latest_stats(GET_POWER);
+
+	if (energy->previous_consumed_energy) {
+		diff_energy = curr_energy - energy->previous_consumed_energy;
+
+		energy->consumed_energy += diff_energy;
+	} else
+		energy->base_consumed_energy = curr_energy;
+
+	energy->current_watts = curr_power;
+
+	if (!energy->base_watts || (energy->base_watts > curr_power))
+		energy->base_watts = curr_power;
+
+	if (debug_flags & DEBUG_FLAG_ENERGY)
+		info("_get_joules_task: %"PRIu64" Joules consumed over last"
+		     " %ld secs. Currently at %u watts, lowest watts %u",
+		     diff_energy,
+		     energy->poll_time ? now - energy->poll_time : 0,
+		     curr_power, energy->base_watts);
+
+	energy->previous_consumed_energy = curr_energy;
+	energy->poll_time = now;
+}
+
+static int _running_profile(void)
+{
+	static bool run = false;
+	static uint32_t profile_opt = ACCT_GATHER_PROFILE_NOT_SET;
+
+	if (profile_opt == ACCT_GATHER_PROFILE_NOT_SET) {
+		acct_gather_profile_g_get(ACCT_GATHER_PROFILE_RUNNING,
+					  &profile_opt);
+		if (profile_opt & ACCT_GATHER_PROFILE_ENERGY)
+			run = true;
+	}
+
+	return run;
+}
+
+static int _send_profile(void)
+{
+	uint64_t curr_watts;
+	acct_gather_profile_dataset_t dataset[] = {
+		{ "Power", PROFILE_FIELD_UINT64 },
+		{ NULL, PROFILE_FIELD_NOT_SET }
+	};
+
+	static int dataset_id = -1; /* id of the dataset for profile data */
+
+	if (!_running_profile())
+		return SLURM_SUCCESS;
+
+	if (debug_flags & DEBUG_FLAG_ENERGY)
+		info("_send_profile: consumed %d watts",
+		     local_energy->current_watts);
+
+	if (dataset_id < 0) {
+		dataset_id = acct_gather_profile_g_create_dataset(
+			"Energy", NO_PARENT, dataset);
+		if (debug_flags & DEBUG_FLAG_ENERGY)
+			debug("Energy: dataset created (id = %d)", dataset_id);
+		if (dataset_id == SLURM_ERROR) {
+			error("Energy: Failed to create the dataset for RAPL");
+			return SLURM_ERROR;
+		}
+	}
+
+	curr_watts = (uint64_t)local_energy->current_watts;
+
+	if (debug_flags & DEBUG_FLAG_PROFILE) {
+		info("PROFILE-Energy: power=%u", local_energy->current_watts);
+	}
+
+	return acct_gather_profile_g_add_sample_data(dataset_id,
+	                                             (void *)&curr_watts,
+						     local_energy->poll_time);
+}
+
+extern int acct_gather_energy_p_update_node_energy(void)
+{
+	int rc = SLURM_SUCCESS;
+
+	xassert(_run_in_daemon());
+
+	if (!local_energy || local_energy->current_watts == NO_VAL)
+		return rc;
+
+	_get_joules_task(local_energy);
+
+	return rc;
+}
+
+/*
+ * init() is called when the plugin is loaded, before any other functions
+ * are called.  Put global initialization here.
+ */
+extern int init(void)
+{
+	debug_flags = slurm_get_debug_flags();
+
+	/* put anything that requires the .conf being read in
+	   acct_gather_energy_p_conf_parse
+	*/
+
+	return SLURM_SUCCESS;
+}
+
+extern int fini(void)
+{
+	if (!_run_in_daemon())
+		return SLURM_SUCCESS;
+
+	acct_gather_energy_destroy(local_energy);
+	local_energy = NULL;
+	return SLURM_SUCCESS;
+}
+
+extern int acct_gather_energy_p_get_data(enum acct_energy_type data_type,
+					 void *data)
+{
+	int rc = SLURM_SUCCESS;
+	acct_gather_energy_t *energy = (acct_gather_energy_t *)data;
+	time_t *last_poll = (time_t *)data;
+
+	xassert(_run_in_daemon());
+
+	switch (data_type) {
+	case ENERGY_DATA_JOULES_TASK:
+		if (local_energy->current_watts == NO_VAL)
+			energy->consumed_energy = NO_VAL;
+		else
+			_get_joules_task(energy);
+		break;
+	case ENERGY_DATA_STRUCT:
+		memcpy(energy, local_energy, sizeof(acct_gather_energy_t));
+		break;
+	case ENERGY_DATA_LAST_POLL:
+		*last_poll = local_energy->poll_time;
+		break;
+	default:
+		error("acct_gather_energy_p_get_data: unknown enum %d",
+		      data_type);
+		rc = SLURM_ERROR;
+		break;
+	}
+	return rc;
+}
+
+extern int acct_gather_energy_p_set_data(enum acct_energy_type data_type,
+					 void *data)
+{
+	int rc = SLURM_SUCCESS;
+
+	xassert(_run_in_daemon());
+
+	switch (data_type) {
+	case ENERGY_DATA_RECONFIG:
+		debug_flags = slurm_get_debug_flags();
+		break;
+	case ENERGY_DATA_PROFILE:
+		_get_joules_task(local_energy);
+		_send_profile();
+		break;
+	default:
+		error("acct_gather_energy_p_set_data: unknown enum %d",
+		      data_type);
+		rc = SLURM_ERROR;
+		break;
+	}
+	return rc;
+}
+
+extern void acct_gather_energy_p_conf_options(s_p_options_t **full_options,
+					      int *full_options_cnt)
+{
+	return;
+}
+
+extern void acct_gather_energy_p_conf_set(s_p_hashtbl_t *tbl)
+{
+	static bool flag_init = 0;
+
+	if (!_run_in_daemon())
+		return;
+
+	if (!flag_init) {
+		flag_init = 1;
+		local_energy = acct_gather_energy_alloc(1);
+		if (!_get_latest_stats(GET_ENERGY))
+			local_energy->current_watts = NO_VAL;
+		else
+			_get_joules_task(local_energy);
+	}
+
+	debug("%s loaded", plugin_name);
+
+	return;
+}
+
+extern void acct_gather_energy_p_conf_values(List *data)
+{
+	return;
+}
diff --git a/src/plugins/acct_gather_energy/ipmi/Makefile.in b/src/plugins/acct_gather_energy/ipmi/Makefile.in
index 2fa56ea2c..572aaaa51 100644
--- a/src/plugins/acct_gather_energy/ipmi/Makefile.in
+++ b/src/plugins/acct_gather_energy/ipmi/Makefile.in
@@ -103,6 +103,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -111,10 +112,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -127,7 +130,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -291,6 +294,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -340,8 +345,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -360,6 +369,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -403,6 +415,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -426,6 +439,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi.c b/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi.c
index 17b5f46e0..406e5c22f 100644
--- a/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi.c
+++ b/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi.c
@@ -2,7 +2,7 @@
  *  acct_gather_energy_ipmi.c - slurm energy accounting plugin for ipmi.
  *****************************************************************************
  *  Copyright (C) 2012
- *  Written by Bull- Thomas Cadeau
+ *  Initially written by Thomas Cadeau @ Bull. Adapted by Yoann Blein @ Bull.
  *
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://slurm.schedmd.com/>.
@@ -82,7 +82,7 @@ slurmd_conf_t *conf = NULL;
 
 #define _DEBUG 1
 #define _DEBUG_ENERGY 1
-#define IPMI_VERSION 1		/* Data structure version number */
+#define IPMI_VERSION 2		/* Data structure version number */
 #define NBFIRSTREAD 3
 
 /*
@@ -107,17 +107,13 @@ slurmd_conf_t *conf = NULL;
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 
 const char plugin_name[] = "AcctGatherEnergy IPMI plugin";
 const char plugin_type[] = "acct_gather_energy/ipmi";
-const uint32_t plugin_version = 100;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 /*
  * freeipmi variable declaration
@@ -134,10 +130,31 @@ char *sensor_config_file = NULL;
 /*
  * internal variables
  */
-static uint32_t last_update_watt = 0;
 static time_t last_update_time = 0;
 static time_t previous_update_time = 0;
-static acct_gather_energy_t *local_energy = NULL;
+
+/* array of struct to track the status of multiple sensors */
+typedef struct sensor_status {
+	uint32_t id;
+	uint32_t last_update_watt;
+	acct_gather_energy_t energy;
+} sensor_status_t;
+static sensor_status_t *sensors = NULL;
+static uint16_t sensors_len = 0;
+static uint64_t *start_current_energies = NULL;
+
+/* array of struct describing the configuration of the sensors */
+typedef struct description {
+	const char* label;
+	uint16_t sensor_cnt;
+	uint16_t *sensor_idxs;
+} description_t;
+static description_t *descriptions;
+static uint16_t       descriptions_len;
+static const char *NODE_DESC = "Node";
+
+static int dataset_id = -1; /* id of the dataset for profile data */
+
 static slurm_ipmi_conf_t slurm_ipmi_conf;
 static uint64_t debug_flags = 0;
 static bool flag_energy_accounting_shutdown = false;
@@ -199,13 +216,10 @@ static int _running_profile(void)
  * _get_additional_consumption computes consumption between 2 times
  * method is set to third method strongly
  */
-static uint32_t _get_additional_consumption(time_t time0, time_t time1,
+static uint64_t _get_additional_consumption(time_t time0, time_t time1,
 					    uint32_t watt0, uint32_t watt1)
 {
-	uint32_t consumption;
-	consumption = (uint32_t) ((time1 - time0)*(watt1 + watt0)/2);
-
-	return consumption;
+	return (uint64_t) ((time1 - time0)*(watt1 + watt0)/2);
 }
 
 /*
@@ -308,53 +322,61 @@ static int _init_ipmi_config (void)
  */
 static int _check_power_sensor(void)
 {
-	unsigned int record_ids[] = {(int) slurm_ipmi_conf.power_sensor_num};
-	unsigned int record_ids_length = 1;
+	/* check the sensors list */
+	void *sensor_reading;
+	int rc;
 	int sensor_units;
-	void* sensor_reading;
-
-	if ((ipmi_monitoring_sensor_readings_by_record_id(
-		     ipmi_ctx,
-		     hostname,
-		     &ipmi_config,
-		     sensor_reading_flags,
-		     record_ids,
-		     record_ids_length,
-		     NULL, NULL)) != record_ids_length) {
+	uint16_t i;
+	unsigned int ids[sensors_len];
+
+	for (i = 0; i < sensors_len; ++i)
+		ids[i] = sensors[i].id;
+	rc = ipmi_monitoring_sensor_readings_by_record_id(ipmi_ctx,
+							  hostname,
+							  &ipmi_config,
+							  sensor_reading_flags,
+							  ids,
+							  sensors_len,
+							  NULL,
+							  NULL);
+	if (rc != sensors_len) {
 		error("ipmi_monitoring_sensor_readings_by_record_id: %s",
 		      ipmi_monitoring_ctx_errormsg(ipmi_ctx));
 		return SLURM_FAILURE;
 	}
 
-	if ((sensor_units = ipmi_monitoring_sensor_read_sensor_units(ipmi_ctx))
-	    < 0) {
-		error("ipmi_monitoring_sensor_read_sensor_units: %s",
-		      ipmi_monitoring_ctx_errormsg(ipmi_ctx));
-		return SLURM_FAILURE;
-	}
+	i = 0;
+	do {
+		/* check if the sensor unit is watts */
+		sensor_units =
+		    ipmi_monitoring_sensor_read_sensor_units(ipmi_ctx);
+		if (sensor_units < 0) {
+			error("ipmi_monitoring_sensor_read_sensor_units: %s",
+			      ipmi_monitoring_ctx_errormsg(ipmi_ctx));
+			return SLURM_FAILURE;
+		}
+		if (sensor_units != slurm_ipmi_conf.variable) {
+			error("Configured sensor is not in Watt, "
+			      "please check ipmi.conf");
+			return SLURM_FAILURE;
+		}
 
-	if (sensor_units != slurm_ipmi_conf.variable) {
-		error("Configured sensor is not in Watt, "
-		      "please check ipmi.conf");
-		return SLURM_FAILURE;
-	}
+		/* update current value of the sensor */
+		sensor_reading =
+		    ipmi_monitoring_sensor_read_sensor_reading(ipmi_ctx);
+		if (sensor_reading) {
+			sensors[i].last_update_watt =
+			    (uint32_t) (*((double *)sensor_reading));
+		} else {
+			error("ipmi read an empty value for power consumption");
+			return SLURM_FAILURE;
+		}
+		++i;
+	} while (ipmi_monitoring_sensor_iterator_next(ipmi_ctx));
 
-	ipmi_monitoring_sensor_iterator_first(ipmi_ctx);
-	if (ipmi_monitoring_sensor_read_record_id(ipmi_ctx) < 0) {
-		error("ipmi_monitoring_sensor_read_record_id: %s",
-		      ipmi_monitoring_ctx_errormsg(ipmi_ctx));
-		return SLURM_FAILURE;
-	}
+	previous_update_time = last_update_time;
+	last_update_time = time(NULL);
 
-	sensor_reading = ipmi_monitoring_sensor_read_sensor_reading(ipmi_ctx);
-	if (sensor_reading) {
-		last_update_watt = (uint32_t)(*((double *)sensor_reading));
-		previous_update_time = last_update_time;
-		last_update_time = time(NULL);
-	} else {
-		error("ipmi read an empty value for power consumption");
-		return SLURM_FAILURE;
-	}
 	return SLURM_SUCCESS;
 }
 
@@ -387,9 +409,9 @@ static int _find_power_sensor(void)
 
 	for (i = 0; i < sensor_count; i++,
 		     ipmi_monitoring_sensor_iterator_next(ipmi_ctx)) {
-		if ((sensor_units =
-		     ipmi_monitoring_sensor_read_sensor_units(ipmi_ctx))
-		    < 0) {
+		sensor_units =
+			ipmi_monitoring_sensor_read_sensor_units(ipmi_ctx);
+		if (sensor_units < 0) {
 			error("ipmi_monitoring_sensor_read_sensor_units: %s",
 			      ipmi_monitoring_ctx_errormsg(ipmi_ctx));
 			return SLURM_FAILURE;
@@ -398,19 +420,32 @@ static int _find_power_sensor(void)
 		if (sensor_units != slurm_ipmi_conf.variable)
 			continue;
 
-		if ((record_id =
-		     ipmi_monitoring_sensor_read_record_id(ipmi_ctx))
-		    < 0) {
+		record_id = ipmi_monitoring_sensor_read_record_id(ipmi_ctx);
+		if (record_id < 0) {
 			error("ipmi_monitoring_sensor_read_record_id: %s",
 			      ipmi_monitoring_ctx_errormsg(ipmi_ctx));
 			return SLURM_FAILURE;
 		}
-		slurm_ipmi_conf.power_sensor_num = (uint32_t) record_id;
-		sensor_reading = ipmi_monitoring_sensor_read_sensor_reading(
-			ipmi_ctx);
+
+		sensor_reading =
+			ipmi_monitoring_sensor_read_sensor_reading(ipmi_ctx);
 		if (sensor_reading) {
-			last_update_watt =
-				(uint32_t)(*((double *)sensor_reading));
+			/* we found a valid sensor, allocate room for its
+			 * status and its description as the main sensor */
+			sensors_len = 1;
+			sensors = xmalloc(sizeof(sensor_status_t));
+			sensors[0].id = (uint32_t)record_id;
+			sensors[0].last_update_watt =
+			    (uint32_t) (*((double *)sensor_reading));
+
+			descriptions_len = 1;
+			descriptions = xmalloc(sizeof(description_t));
+			descriptions[0].label = xstrdup(NODE_DESC);
+			descriptions[0].sensor_cnt = 1;
+			descriptions[0].sensor_idxs = xmalloc(sizeof(uint16_t));
+			descriptions[0].sensor_idxs[0] = 0;
+
+			previous_update_time = last_update_time;
 			last_update_time = time(NULL);
 		} else {
 			error("ipmi read an empty value for power consumption");
@@ -424,8 +459,7 @@ static int _find_power_sensor(void)
 	if (rc != SLURM_SUCCESS)
 		info("Power sensor not found.");
 	else if (debug_flags & DEBUG_FLAG_ENERGY)
-		info("Power sensor found: %d",
-		     slurm_ipmi_conf.power_sensor_num);
+		info("Power sensor found: %d", sensors_len);
 
 	return rc;
 }
@@ -435,41 +469,73 @@ static int _find_power_sensor(void)
  */
 static int _read_ipmi_values(void)
 {
-	unsigned int record_ids[] = {(int) slurm_ipmi_conf.power_sensor_num};
-	unsigned int record_ids_length = 1;
-	void* sensor_reading;
-
-	if ((ipmi_monitoring_sensor_readings_by_record_id(
-		     ipmi_ctx,
-		     hostname,
-		     &ipmi_config,
-		     sensor_reading_flags,
-		     record_ids,
-		     record_ids_length,
-		     NULL,NULL)) != record_ids_length) {
+	/* read sensors list */
+	void *sensor_reading;
+	int rc;
+	uint16_t i;
+	unsigned int ids[sensors_len];
+
+	for (i = 0; i < sensors_len; ++i)
+		ids[i] = sensors[i].id;
+	rc = ipmi_monitoring_sensor_readings_by_record_id(ipmi_ctx,
+	                                                  hostname,
+	                                                  &ipmi_config,
+	                                                  sensor_reading_flags,
+	                                                  ids,
+	                                                  sensors_len,
+	                                                  NULL,
+	                                                  NULL);
+	if (rc != sensors_len) {
 		error("ipmi_monitoring_sensor_readings_by_record_id: %s",
 		      ipmi_monitoring_ctx_errormsg(ipmi_ctx));
 		return SLURM_FAILURE;
 	}
-	ipmi_monitoring_sensor_iterator_first(ipmi_ctx);
-	if (ipmi_monitoring_sensor_read_record_id(ipmi_ctx) < 0) {
-		error("ipmi_monitoring_sensor_read_record_id: %s",
-		      ipmi_monitoring_ctx_errormsg(ipmi_ctx));
-		return SLURM_FAILURE;
-	}
-	sensor_reading = ipmi_monitoring_sensor_read_sensor_reading(ipmi_ctx);
-	if (sensor_reading) {
-		last_update_watt = (uint32_t)(*((double *)sensor_reading));
-		previous_update_time = last_update_time;
-		last_update_time = time(NULL);
-	} else {
-		error("ipmi read an empty value for power consumption");
-		return SLURM_FAILURE;
-	}
+
+	i = 0;
+	do {
+		sensor_reading =
+		    ipmi_monitoring_sensor_read_sensor_reading(ipmi_ctx);
+		if (sensor_reading) {
+			sensors[i].last_update_watt =
+			    (uint32_t) (*((double *)sensor_reading));
+		} else {
+			error("ipmi read an empty value for power consumption");
+			return SLURM_FAILURE;
+		}
+		++i;
+	} while (ipmi_monitoring_sensor_iterator_next(ipmi_ctx));
+
+	previous_update_time = last_update_time;
+	last_update_time = time(NULL);
 
 	return SLURM_SUCCESS;
 }
 
+/* updates the given energy according to the last watt reading of the sensor */
+static void _update_energy(acct_gather_energy_t *e, uint32_t last_update_watt)
+{
+	if (e->current_watts) {
+		e->base_watts = e->current_watts;
+		e->current_watts = last_update_watt;
+		if (previous_update_time == 0)
+			e->base_consumed_energy = 0;
+		else
+			e->base_consumed_energy =
+				_get_additional_consumption(
+					previous_update_time,
+					last_update_time,
+					e->base_watts,
+					e->current_watts);
+		e->previous_consumed_energy = e->consumed_energy;
+		e->consumed_energy += e->base_consumed_energy;
+	} else {
+		e->consumed_energy = 0;
+		e->base_watts = 0;
+		e->current_watts = last_update_watt;
+	}
+	e->poll_time = time(NULL);
+}
+
 /*
  * _thread_update_node_energy calls _read_ipmi_values and updates all values
  * for node consumption
@@ -477,46 +543,31 @@ static int _read_ipmi_values(void)
 static int _thread_update_node_energy(void)
 {
 	int rc = SLURM_SUCCESS;
-
-	if (local_energy->current_watts == NO_VAL)
-		return rc;
+	uint16_t i;
 
 	rc = _read_ipmi_values();
 
 	if (rc == SLURM_SUCCESS) {
-		if (local_energy->current_watts != 0) {
-			local_energy->base_watts = local_energy->current_watts;
-			local_energy->current_watts = last_update_watt;
-			if (previous_update_time == 0)
-				local_energy->base_consumed_energy = 0;
-			else
-				local_energy->base_consumed_energy =
-					_get_additional_consumption(
-						previous_update_time,
-						last_update_time,
-						local_energy->base_watts,
-						local_energy->current_watts);
-			local_energy->previous_consumed_energy =
-				local_energy->consumed_energy;
-			local_energy->consumed_energy +=
-				local_energy->base_consumed_energy;
+		/* sensors list */
+		for (i = 0; i < sensors_len; ++i) {
+			if (sensors[i].energy.current_watts == NO_VAL)
+				return rc;
+			_update_energy(&sensors[i].energy,
+			               sensors[i].last_update_watt);
 		}
+
 		if (previous_update_time == 0)
 			previous_update_time = last_update_time;
-		if (local_energy->current_watts == 0) {
-			local_energy->consumed_energy = 0;
-			local_energy->base_watts = 0;
-			local_energy->current_watts = last_update_watt;
-		}
-		local_energy->poll_time = time(NULL);
 	}
+
 	if (debug_flags & DEBUG_FLAG_ENERGY) {
-		info("ipmi-thread = %d sec, current %d Watts, "
-		     "consumed %d Joules %d new",
-		     (int) (last_update_time - previous_update_time),
-		     local_energy->current_watts,
-		     local_energy->consumed_energy,
-		     local_energy->base_consumed_energy);
+		for (i = 0; i < sensors_len; ++i)
+			info("ipmi-thread: sensor %u current_watts: %u, "
+			     "consumed %"PRIu64" Joules %"PRIu64" new",
+			     sensors[i].id,
+			     sensors[i].energy.current_watts,
+			     sensors[i].energy.consumed_energy,
+			     sensors[i].energy.base_consumed_energy);
 	}
 
 	return rc;
@@ -530,6 +581,7 @@ static int _thread_init(void)
 	static bool first = true;
 	static bool first_init = SLURM_FAILURE;
 	int rc = SLURM_SUCCESS;
+	uint16_t i;
 
 	if (!first)
 		return first_init;
@@ -539,12 +591,17 @@ static int _thread_init(void)
 		//TODO verbose error?
 		rc = SLURM_FAILURE;
 	} else {
-		if ((slurm_ipmi_conf.power_sensor_num == -1
-		     && _find_power_sensor() != SLURM_SUCCESS)
+		if ((sensors_len == 0 && _find_power_sensor() != SLURM_SUCCESS)
 		    || _check_power_sensor() != SLURM_SUCCESS) {
-			local_energy->current_watts = NO_VAL;
+			/* no valid sensors found */
+			for (i = 0; i < sensors_len; ++i) {
+				sensors[i].energy.current_watts = NO_VAL;
+			}
 		} else {
-			local_energy->current_watts = last_update_watt;
+			for (i = 0; i < sensors_len; ++i) {
+				sensors[i].energy.current_watts =
+					sensors[i].last_update_watt;
+			}
 		}
 		if (slurm_ipmi_conf.reread_sdr_cache)
 			//IPMI cache is reread only on initialisation
@@ -552,8 +609,6 @@ static int _thread_init(void)
 			sensor_reading_flags ^=
 				IPMI_MONITORING_SENSOR_READING_FLAGS_REREAD_SDR_CACHE;
 	}
-	local_energy->consumed_energy = 0;
-	local_energy->base_watts = 0;
 	slurm_mutex_unlock(&ipmi_mutex);
 
 	if (rc != SLURM_SUCCESS)
@@ -570,25 +625,53 @@ static int _thread_init(void)
 
 static int _ipmi_send_profile(void)
 {
-	acct_energy_data_t ener;
+	uint16_t i, j;
+	uint64_t data[descriptions_len];
+	uint32_t id;
 
 	if (!_running_profile())
 		return SLURM_SUCCESS;
 
-	if (debug_flags & DEBUG_FLAG_ENERGY)
-		info("_ipmi_send_profile: consumed %d watts",
-		     local_energy->current_watts);
-
-	memset(&ener, 0, sizeof(acct_energy_data_t));
-	/*TODO function to calculate Average CPUs Frequency*/
-	/*ener->cpu_freq = // read /proc/...*/
-	ener.cpu_freq = 1;
-	ener.time = time(NULL);
-	ener.power = local_energy->current_watts;
-	acct_gather_profile_g_add_sample_data(
-		ACCT_GATHER_PROFILE_ENERGY, &ener);
+	if (dataset_id < 0) {
+		acct_gather_profile_dataset_t dataset[descriptions_len+1];
+		for (i = 0; i < descriptions_len; i++) {
+			dataset[i].name = xstrdup_printf(
+				"%sPower", descriptions[i].label);
+			dataset[i].type = PROFILE_FIELD_UINT64;
+		}
+		dataset[i].name = NULL;
+		dataset[i].type = PROFILE_FIELD_NOT_SET;
+		dataset_id = acct_gather_profile_g_create_dataset(
+			"Energy", NO_PARENT, dataset);
+		for (i = 0; i < descriptions_len; ++i)
+			xfree(dataset[i].name);
+		if (debug_flags & DEBUG_FLAG_ENERGY)
+			debug("Energy: dataset created (id = %d)", dataset_id);
+		if (dataset_id == SLURM_ERROR) {
+			error("Energy: Failed to create the dataset for IPMI");
+			return SLURM_ERROR;
+		}
+	}
 
-	return SLURM_ERROR;
+	/* pack an array of uint64_t with current power of sensors */
+	memset(data, 0, sizeof(data));
+	for (i = 0; i < descriptions_len; ++i) {
+		for (j = 0; j < descriptions[i].sensor_cnt; ++j) {
+			id = descriptions[i].sensor_idxs[j];
+			data[i] += sensors[id].energy.current_watts;
+		}
+	}
+
+	if (debug_flags & DEBUG_FLAG_PROFILE) {
+		for (i = 0; i < descriptions_len; i++) {
+			id = descriptions[i].sensor_idxs[j];
+			info("PROFILE-Energy: %sPower=%d",
+			     descriptions[i].label,
+			     sensors[id].energy.current_watts);
+		}
+	}
+	return acct_gather_profile_g_add_sample_data(dataset_id, (void *)data,
+						     last_update_time);
 }
 
 
@@ -703,62 +786,108 @@ static void *_thread_launcher(void *no_data)
 
 static int _get_joules_task(uint16_t delta)
 {
-	acct_gather_energy_t *last_energy = NULL;
-	time_t now;
+	time_t now = time(NULL);
 	static bool first = true;
-	static uint32_t start_current_energy = 0;
-	uint32_t adjustment = 0;
+	uint64_t adjustment = 0;
+	uint16_t i;
+	acct_gather_energy_t *new, *old;
 
-	last_energy = local_energy;
-	local_energy = NULL;
+	/* sensors list */
+	acct_gather_energy_t *energies;
+	uint16_t sensor_cnt;
 
-	if (slurm_get_node_energy(NULL, delta, &local_energy)) {
+	if (slurm_get_node_energy(NULL, delta, &sensor_cnt, &energies)) {
 		error("_get_joules_task: can't get info from slurmd");
-		local_energy = last_energy;
 		return SLURM_ERROR;
 	}
-	now = time(NULL);
+	if (first) {
+		sensors_len = sensor_cnt;
+		sensors = xmalloc(sizeof(sensor_status_t) * sensors_len);
+		start_current_energies =
+			xmalloc(sizeof(uint64_t) * sensors_len);
+	}
 
-	local_energy->previous_consumed_energy = last_energy->consumed_energy;
+	if (sensor_cnt != sensors_len) {
+		error("_get_joules_task: received %u sensors, %u expected",
+		      sensor_cnt, sensors_len);
+		acct_gather_energy_destroy(energies);
+		return SLURM_ERROR;
+	}
 
-	if (slurm_ipmi_conf.adjustment)
-		adjustment = _get_additional_consumption(
-			local_energy->poll_time, now,
-			local_energy->current_watts,
-			local_energy->current_watts);
 
-	if (!first) {
-		local_energy->consumed_energy -= start_current_energy;
+	for (i = 0; i < sensor_cnt; ++i) {
+		new = &energies[i];
+		old = &sensors[i].energy;
+		new->previous_consumed_energy = old->consumed_energy;
 
-		local_energy->base_consumed_energy =
-			(local_energy->consumed_energy
-			 - last_energy->consumed_energy)
-			+ adjustment;
-	} else {
-		/* This is just for the step, so take all the pervious
-		   consumption out of the mix.
-		*/
-		start_current_energy =
-			local_energy->consumed_energy + adjustment;
-		local_energy->base_consumed_energy = 0;
-		first = false;
-	}
+		if (slurm_ipmi_conf.adjustment)
+			adjustment = _get_additional_consumption(
+				new->poll_time, now,
+				new->current_watts,
+				new->current_watts);
 
-	local_energy->consumed_energy = local_energy->previous_consumed_energy
-		+ local_energy->base_consumed_energy;
+		if (!first) {
+			new->consumed_energy -= start_current_energies[i];
+			new->base_consumed_energy = adjustment +
+				(new->consumed_energy - old->consumed_energy);
+		} else {
+			/* This is just for the step, so take all the previous
+			   consumption out of the mix.
+			   */
+			start_current_energies[i] =
+				new->consumed_energy + adjustment;
+			new->base_consumed_energy = 0;
+		}
 
-	acct_gather_energy_destroy(last_energy);
+		new->consumed_energy = new->previous_consumed_energy
+			+ new->base_consumed_energy;
+		memcpy(old, new, sizeof(acct_gather_energy_t));
 
-	if (debug_flags & DEBUG_FLAG_ENERGY)
-		info("_get_joules_task: consumed %u Joules "
-		     "(received %u(%u watts) from slurmd)",
-		     local_energy->consumed_energy,
-		     local_energy->base_consumed_energy,
-		     local_energy->current_watts);
+		if (debug_flags & DEBUG_FLAG_ENERGY)
+			info("_get_joules_task: consumed %"PRIu64" Joules "
+			     "(received %"PRIu64"(%u watts) from slurmd)",
+			     new->consumed_energy,
+			     new->base_consumed_energy,
+			     new->current_watts);
+	}
+
+	acct_gather_energy_destroy(energies);
+
+	first = false;
 
 	return SLURM_SUCCESS;
 }
 
+static void _get_node_energy(acct_gather_energy_t *energy)
+{
+	uint16_t i, j, id;
+	acct_gather_energy_t *e;
+
+	/* find the "Node" description */
+	for (i = 0; i < descriptions_len; ++i)
+		if (xstrcmp(descriptions[i].label, NODE_DESC) == 0)
+			break;
+	/* not found, init is not finished or there is no watt sensors */
+	if (i >= descriptions_len)
+		return;
+
+	/* sum the energy of all sensors described for "Node" */
+	memset(energy, 0, sizeof(acct_gather_energy_t));
+	for (j = 0; j < descriptions[i].sensor_cnt; ++j) {
+		id = descriptions[i].sensor_idxs[j];
+		e = &sensors[id].energy;
+		energy->base_consumed_energy += e->base_consumed_energy;
+		energy->base_watts += e->base_watts;
+		energy->consumed_energy += e->consumed_energy;
+		energy->current_watts += e->current_watts;
+		energy->previous_consumed_energy += e->previous_consumed_energy;
+		/* node poll_time is computed as the oldest poll_time of
+		   the sensors */
+		if (energy->poll_time == 0 || energy->poll_time > e->poll_time)
+			energy->poll_time = e->poll_time;
+	}
+}
+
 /*
  * init() is called when the plugin is loaded, before any other functions
  * are called.  Put global initialization here.
@@ -775,6 +904,8 @@ extern int init(void)
 
 extern int fini(void)
 {
+	uint16_t i;
+
 	if (!_run_in_daemon())
 		return SLURM_SUCCESS;
 
@@ -787,8 +918,15 @@ extern int fini(void)
 		pthread_join(cleanup_handler_thread, NULL);
 	slurm_mutex_unlock(&ipmi_mutex);
 
-	acct_gather_energy_destroy(local_energy);
-	local_energy = NULL;
+	xfree(sensors);
+	xfree(start_current_energies);
+
+	for (i = 0; i < descriptions_len; ++i) {
+		xfree(descriptions[i].label);
+		xfree(descriptions[i].sensor_idxs);
+	}
+	xfree(descriptions);
+
 	return SLURM_SUCCESS;
 }
 
@@ -803,39 +941,46 @@ extern int acct_gather_energy_p_update_node_energy(void)
 extern int acct_gather_energy_p_get_data(enum acct_energy_type data_type,
 					 void *data)
 {
+	uint16_t i;
 	int rc = SLURM_SUCCESS;
 	acct_gather_energy_t *energy = (acct_gather_energy_t *)data;
 	time_t *last_poll = (time_t *)data;
+	uint16_t *sensor_cnt = (uint16_t *)data;
 
 	xassert(_run_in_daemon());
 
 	switch (data_type) {
-	case ENERGY_DATA_JOULES_TASK:
+	case ENERGY_DATA_NODE_ENERGY:
 		slurm_mutex_lock(&ipmi_mutex);
-		if (_is_thread_launcher()) {
-			if (_thread_init() == SLURM_SUCCESS)
-				_thread_update_node_energy();
-		} else
-			_get_joules_task(10); /* Since we don't have
-						 access to the
-						 frequency here just
-						 send in something.
-					      */
-		memcpy(energy, local_energy, sizeof(acct_gather_energy_t));
+		_get_node_energy(energy);
+		slurm_mutex_unlock(&ipmi_mutex);
+		break;
+	case ENERGY_DATA_LAST_POLL:
+		slurm_mutex_lock(&ipmi_mutex);
+		*last_poll = last_update_time;
 		slurm_mutex_unlock(&ipmi_mutex);
 		break;
+	case ENERGY_DATA_SENSOR_CNT:
+		*sensor_cnt = sensors_len;
+		break;
 	case ENERGY_DATA_STRUCT:
 		slurm_mutex_lock(&ipmi_mutex);
-		memcpy(energy, local_energy, sizeof(acct_gather_energy_t));
+		for (i = 0; i < sensors_len; ++i)
+			memcpy(&energy[i], &sensors[i].energy,
+				sizeof(acct_gather_energy_t));
 		slurm_mutex_unlock(&ipmi_mutex);
-		if (debug_flags & DEBUG_FLAG_ENERGY) {
-			info("_get_joules_node_ipmi = consumed %d Joules",
-			     energy->consumed_energy);
-		}
 		break;
-	case ENERGY_DATA_LAST_POLL:
+	case ENERGY_DATA_JOULES_TASK:
 		slurm_mutex_lock(&ipmi_mutex);
-		*last_poll = local_energy->poll_time;
+		if (_is_thread_launcher()) {
+			if (_thread_init() == SLURM_SUCCESS)
+				_thread_update_node_energy();
+		} else {
+			_get_joules_task(10);
+		}
+		for (i = 0; i < sensors_len; ++i)
+			memcpy(&energy[i], &sensors[i].energy,
+				sizeof(acct_gather_energy_t));
 		slurm_mutex_unlock(&ipmi_mutex);
 		break;
 	default:
@@ -874,6 +1019,113 @@ extern int acct_gather_energy_p_set_data(enum acct_energy_type data_type,
 	return rc;
 }
 
+/* Parse the sensor descriptions stored in slurm_ipmi_conf.power_sensors.
+ * Expected format: comma-separated sensor ids and semicolon-separated
+ * sensor descriptions. Also expects a mandatory description with label
+ * "Node". */
+static int _parse_sensor_descriptions(void)
+{
+	/* TODO: error propagation */
+
+	const char *sep1 = ";";
+	const char *sep2 = ",";
+	char *str_desc_list, *str_desc, *str_id, *mid, *endptr;
+	char *saveptr1, *saveptr2; // pointers for strtok_r storage
+	uint16_t i, j, k;
+	uint16_t id;
+	uint16_t *idx;
+	description_t *d;
+	bool found;
+
+	if (!slurm_ipmi_conf.power_sensors || !slurm_ipmi_conf.power_sensors[0])
+		return SLURM_SUCCESS;
+
+	/* count the number of descriptions */
+	str_desc_list = xstrdup(slurm_ipmi_conf.power_sensors);
+	descriptions_len = 0;
+	str_desc = strtok_r(str_desc_list, sep1, &saveptr1);
+	while (str_desc) {
+		++descriptions_len;
+		str_desc = strtok_r(NULL, sep1, &saveptr1);
+	}
+
+	descriptions = xmalloc(sizeof(description_t) * descriptions_len);
+
+	/* parse descriptions */
+	strcpy(str_desc_list, slurm_ipmi_conf.power_sensors);
+	i = 0;
+	str_desc = strtok_r(str_desc_list, sep1, &saveptr1);
+	while (str_desc) {
+		mid = xstrchr(str_desc, '=');
+		if (!mid || mid == str_desc) {
+			goto error;
+		}
+		/* label */
+		*mid = '\0';
+		d = &descriptions[i];
+		d->label = xstrdup(str_desc);
+		/* associated sensors */
+		++mid;
+		str_id = strtok_r(mid, sep2, &saveptr2);
+		/* parse sensor ids of the current description */
+		while (str_id) {
+			id = strtol(str_id, &endptr, 10);
+			if (*endptr != '\0')
+				goto error;
+			d->sensor_cnt++;
+			xrealloc(d->sensor_idxs,
+				 sizeof(uint16_t) * d->sensor_cnt);
+			d->sensor_idxs[d->sensor_cnt - 1] = id;
+			str_id = strtok_r(NULL, sep2, &saveptr2);
+		}
+		++i;
+		str_desc = strtok_r(NULL, sep1, &saveptr1);
+	}
+	xfree(str_desc_list);
+
+	/* Ensure that the "Node" description is provided */
+	found = false;
+	for (i = 0; i < descriptions_len && !found; ++i)
+		found = (xstrcasecmp(descriptions[i].label, NODE_DESC) == 0);
+	if (!found)
+		goto error;
+
+	/* Here we have the list of descriptions with sensor ids in the
+	 * sensor_idxs field instead of their indexes. We still have to
+	 * gather the unique sensor ids and replace sensor_idxs by their
+	 * indexes in the sensors array */
+	for (i = 0; i < descriptions_len; ++i) {
+		for (j = 0; j < descriptions[i].sensor_cnt; ++j) {
+			idx = &descriptions[i].sensor_idxs[j];
+			found = false;
+			for (k = 0; k < sensors_len && !found; ++k)
+				found = (*idx == sensors[k].id);
+			if (found) {
+				*idx = k - 1;
+			} else {
+				++sensors_len;
+				xrealloc(sensors, sensors_len
+					 * sizeof(sensor_status_t));
+				sensors[sensors_len - 1].id = *idx;
+				*idx = sensors_len - 1;;
+			}
+		}
+	}
+
+	return SLURM_SUCCESS;
+
+error:
+	error("Configuration of EnergyIPMIPowerSensors is malformed. "
+	      "Make sure that the expected format is respected and that "
+	      "the \"Node\" label is provided.");
+	for (i = 0; i < descriptions_len; ++i) {
+		xfree(descriptions[i].label);
+		xfree(descriptions[i].sensor_idxs);
+	}
+	xfree(descriptions); descriptions = NULL;
+	return SLURM_ERROR;
+}
+
 extern void acct_gather_energy_p_conf_options(s_p_options_t **full_options,
 					      int *full_options_cnt)
 {
@@ -907,7 +1159,7 @@ extern void acct_gather_energy_p_conf_options(s_p_options_t **full_options,
 		{"EnergyIPMIEntitySensorNames", S_P_BOOLEAN},
 		{"EnergyIPMIFrequency", S_P_UINT32},
 		{"EnergyIPMICalcAdjustment", S_P_BOOLEAN},
-		{"EnergyIPMIPowerSensor", S_P_UINT32},
+		{"EnergyIPMIPowerSensors", S_P_STRING},
 		{"EnergyIPMITimeout", S_P_UINT32},
 		{"EnergyIPMIVariable", S_P_STRING},
 		{NULL} };
@@ -1003,8 +1255,8 @@ extern void acct_gather_energy_p_conf_set(s_p_hashtbl_t *tbl)
 				     "EnergyIPMICalcAdjustment", tbl))
 			slurm_ipmi_conf.adjustment = false;
 
-		s_p_get_uint32(&slurm_ipmi_conf.power_sensor_num,
-			       "EnergyIPMIPowerSensor", tbl);
+		s_p_get_string(&slurm_ipmi_conf.power_sensors,
+			       "EnergyIPMIPowerSensors", tbl);
 
 		s_p_get_uint32(&slurm_ipmi_conf.timeout,
 			       "EnergyIPMITimeout", tbl);
@@ -1012,7 +1264,13 @@ extern void acct_gather_energy_p_conf_set(s_p_hashtbl_t *tbl)
 		if (s_p_get_string(&tmp_char, "EnergyIPMIVariable", tbl)) {
 			if (!strcmp(tmp_char, "Temp"))
 				slurm_ipmi_conf.variable =
-					IPMI_MONITORING_SENSOR_TYPE_TEMPERATURE;
+					IPMI_MONITORING_SENSOR_UNITS_CELSIUS;
+			else if (!strcmp(tmp_char, "Voltage"))
+				slurm_ipmi_conf.variable =
+					IPMI_MONITORING_SENSOR_UNITS_VOLTS;
+			else if (!strcmp(tmp_char, "Fan"))
+				slurm_ipmi_conf.variable =
+					IPMI_MONITORING_SENSOR_UNITS_RPM;
 			xfree(tmp_char);
 		}
 	}
@@ -1021,10 +1279,9 @@ extern void acct_gather_energy_p_conf_set(s_p_hashtbl_t *tbl)
 		return;
 
 	if (!flag_init) {
-		local_energy = acct_gather_energy_alloc();
-		local_energy->consumed_energy=0;
-		local_energy->base_consumed_energy=0;
-		local_energy->base_watts=0;
+		/* try to parse the PowerSensors settings */
+		_parse_sensor_descriptions();
+
 		flag_init = true;
 		if (_is_thread_launcher()) {
 			pthread_attr_t attr;
@@ -1195,9 +1452,9 @@ extern void acct_gather_energy_p_conf_values(List *data)
 	list_append(*data, key_pair);
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
-	key_pair->name = xstrdup("EnergyIPMIPowerSensor");
-	key_pair->value = xstrdup_printf(
-		"%u", slurm_ipmi_conf.power_sensor_num);
+	key_pair->name = xstrdup("EnergyIPMIPowerSensors");
+	key_pair->value =
+	    xstrdup_printf("%s", slurm_ipmi_conf.power_sensors);
 	list_append(*data, key_pair);
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
@@ -1208,9 +1465,15 @@ extern void acct_gather_energy_p_conf_values(List *data)
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("EnergyIPMIVariable");
 	switch (slurm_ipmi_conf.variable) {
-	case IPMI_MONITORING_SENSOR_TYPE_TEMPERATURE:
+	case IPMI_MONITORING_SENSOR_UNITS_CELSIUS:
 		key_pair->value = xstrdup("Temp");
 		break;
+	case IPMI_MONITORING_SENSOR_UNITS_RPM:
+		key_pair->value = xstrdup("Fan");
+		break;
+	case IPMI_MONITORING_SENSOR_UNITS_VOLTS:
+		key_pair->value = xstrdup("Voltage");
+		break;
 	case IPMI_MONITORING_SENSOR_UNITS_WATTS:
 		key_pair->value = xstrdup("Watts");
 		break;
diff --git a/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi_config.c b/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi_config.c
index 23d1d3225..0883e7ed9 100644
--- a/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi_config.c
+++ b/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi_config.c
@@ -61,6 +61,8 @@ extern void reset_slurm_ipmi_conf(slurm_ipmi_conf_t *slurm_ipmi_conf)
 {
 	if (slurm_ipmi_conf) {
 		slurm_ipmi_conf->power_sensor_num = -1;
+		xfree(slurm_ipmi_conf->power_sensors);
+		slurm_ipmi_conf->power_sensors = NULL;
 		slurm_ipmi_conf->freq = DEFAULT_IPMI_FREQ;
 		slurm_ipmi_conf->adjustment = false;
 		slurm_ipmi_conf->timeout = TIMEOUT;
diff --git a/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi_config.h b/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi_config.h
index e104f92c1..1cf7a65cc 100644
--- a/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi_config.h
+++ b/src/plugins/acct_gather_energy/ipmi/acct_gather_energy_ipmi_config.h
@@ -136,6 +136,7 @@ typedef struct slurm_ipmi_conf {
 	/* Options for SLURM IPMI plugin*/
 	/* sensor num (only for power) */
 	uint32_t power_sensor_num;
+	char *power_sensors;
 	/* Out-of-band Communication Configuration */
 	/* Indicate the IPMI protocol version to use
 	 * IPMI_MONITORING_PROTOCOL_VERSION_1_5 = 0x00,
diff --git a/src/plugins/acct_gather_energy/none/Makefile.in b/src/plugins/acct_gather_energy/none/Makefile.in
index da01346a1..a80b3209e 100644
--- a/src/plugins/acct_gather_energy/none/Makefile.in
+++ b/src/plugins/acct_gather_energy/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/acct_gather_energy/none/acct_gather_energy_none.c b/src/plugins/acct_gather_energy/none/acct_gather_energy_none.c
index 95b80a344..84b78c5ff 100644
--- a/src/plugins/acct_gather_energy/none/acct_gather_energy_none.c
+++ b/src/plugins/acct_gather_energy/none/acct_gather_energy_none.c
@@ -78,16 +78,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "AcctGatherEnergy NONE plugin";
 const char plugin_type[] = "acct_gather_energy/none";
-const uint32_t plugin_version = 100;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
diff --git a/src/plugins/acct_gather_energy/rapl/Makefile.in b/src/plugins/acct_gather_energy/rapl/Makefile.in
index 5791884fd..06869291d 100644
--- a/src/plugins/acct_gather_energy/rapl/Makefile.in
+++ b/src/plugins/acct_gather_energy/rapl/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/acct_gather_energy/rapl/acct_gather_energy_rapl.c b/src/plugins/acct_gather_energy/rapl/acct_gather_energy_rapl.c
index 1b68e8e7a..d86111db2 100644
--- a/src/plugins/acct_gather_energy/rapl/acct_gather_energy_rapl.c
+++ b/src/plugins/acct_gather_energy/rapl/acct_gather_energy_rapl.c
@@ -120,20 +120,18 @@ union {
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "AcctGatherEnergy RAPL plugin";
 const char plugin_type[] = "acct_gather_energy/rapl";
-const uint32_t plugin_version = 100;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 static acct_gather_energy_t *local_energy = NULL;
 static uint64_t debug_flags = 0;
 
+static int dataset_id = -1; /* id of the dataset for profile data */
+
 /* one cpu in the package */
 static int pkg2cpu[MAX_PKGS] = {[0 ... MAX_PKGS-1] = -1};
 static int pkg_fd[MAX_PKGS] = {[0 ... MAX_PKGS-1] = -1};
@@ -355,24 +353,25 @@ static void _get_joules_task(acct_gather_energy_t *energy)
 	if (debug_flags & DEBUG_FLAG_ENERGY)
 		info("RAPL Result %"PRIu64" = %.6f Joules", result, ret);
 
-	if (energy->consumed_energy != 0) {
+	if (energy->consumed_energy) {
 		uint16_t node_freq;
-		energy->consumed_energy = (uint32_t)ret - energy->base_watts;
+		energy->consumed_energy =
+			(uint64_t)ret - energy->base_consumed_energy;
 		energy->current_watts =
 			(uint32_t)ret - energy->previous_consumed_energy;
 		node_freq = slurm_get_acct_gather_node_freq();
 		if (node_freq)	/* Prevent divide by zero */
-			local_energy->current_watts /= (float)node_freq;
-	}
-	if (energy->consumed_energy == 0) {
+			energy->current_watts /= (float)node_freq;
+	} else {
 		energy->consumed_energy = 1;
-		energy->base_watts = (uint32_t)ret;
+		energy->base_consumed_energy = (uint64_t)ret;
 	}
-	energy->previous_consumed_energy = (uint32_t)ret;
+	energy->previous_consumed_energy = (uint64_t)ret;
 	energy->poll_time = time(NULL);
 
 	if (debug_flags & DEBUG_FLAG_ENERGY)
-		info("_get_joules_task: current %.6f Joules, consumed %u",
+		info("_get_joules_task: current %.6f Joules, "
+		     "consumed %"PRIu64"",
 		     ret, energy->consumed_energy);
 }
 
@@ -393,25 +392,38 @@ static int _running_profile(void)
 
 static int _send_profile(void)
 {
-	acct_energy_data_t ener;
+	uint64_t curr_watts;
+	acct_gather_profile_dataset_t dataset[] = {
+		{ "Power", PROFILE_FIELD_UINT64 },
+		{ NULL, PROFILE_FIELD_NOT_SET }
+	};
 
 	if (!_running_profile())
 		return SLURM_SUCCESS;
 
 	if (debug_flags & DEBUG_FLAG_ENERGY)
-		info("_send_profile: consumed %d watts",
+		info("_send_profile: consumed %u watts",
 		     local_energy->current_watts);
 
-	memset(&ener, 0, sizeof(acct_energy_data_t));
-	/*TODO function to calculate Average CPUs Frequency*/
-	/*ener->cpu_freq = // read /proc/...*/
-	ener.cpu_freq = 1;
-	ener.time = time(NULL);
-	ener.power = local_energy->current_watts;
-	acct_gather_profile_g_add_sample_data(
-		ACCT_GATHER_PROFILE_ENERGY, &ener);
+	if (dataset_id < 0) {
+		dataset_id = acct_gather_profile_g_create_dataset(
+			"Energy", NO_PARENT, dataset);
+		if (debug_flags & DEBUG_FLAG_ENERGY)
+			debug("Energy: dataset created (id = %d)", dataset_id);
+		if (dataset_id == SLURM_ERROR) {
+			error("Energy: Failed to create the dataset for RAPL");
+			return SLURM_ERROR;
+		}
+	}
+
+	curr_watts = (uint64_t)local_energy->current_watts;
+	if (debug_flags & DEBUG_FLAG_PROFILE) {
+		info("PROFILE-Energy: power=%u", local_energy->current_watts);
+	}
 
-	return SLURM_ERROR;
+	return acct_gather_profile_g_add_sample_data(dataset_id,
+	                                             (void *)&curr_watts,
+						     local_energy->poll_time);
 }
 
 extern int acct_gather_energy_p_update_node_energy(void)
@@ -420,7 +432,7 @@ extern int acct_gather_energy_p_update_node_energy(void)
 
 	xassert(_run_in_daemon());
 
-	if (!local_energy || local_energy->current_watts == NO_VAL)
+	if (local_energy->current_watts == NO_VAL)
 		return rc;
 
 	_get_joules_task(local_energy);
@@ -470,6 +482,7 @@ extern int acct_gather_energy_p_get_data(enum acct_energy_type data_type,
 	int rc = SLURM_SUCCESS;
 	acct_gather_energy_t *energy = (acct_gather_energy_t *)data;
 	time_t *last_poll = (time_t *)data;
+	uint16_t *sensor_cnt = (uint16_t *)data;
 
 	xassert(_run_in_daemon());
 
@@ -480,12 +493,16 @@ extern int acct_gather_energy_p_get_data(enum acct_energy_type data_type,
 		else
 			_get_joules_task(energy);
 		break;
+	case ENERGY_DATA_NODE_ENERGY:
 	case ENERGY_DATA_STRUCT:
 		memcpy(energy, local_energy, sizeof(acct_gather_energy_t));
 		break;
 	case ENERGY_DATA_LAST_POLL:
 		*last_poll = local_energy->poll_time;
 		break;
+	case ENERGY_DATA_SENSOR_CNT:
+		*sensor_cnt = 1;
+		break;
 	default:
 		error("acct_gather_energy_p_get_data: unknown enum %d",
 		      data_type);
@@ -537,7 +554,7 @@ extern void acct_gather_energy_p_conf_set(s_p_hashtbl_t *tbl)
 	for (i = 0; i < nb_pkg; i++)
 		pkg_fd[i] = _open_msr(pkg2cpu[i]);
 
-	local_energy = acct_gather_energy_alloc();
+	local_energy = acct_gather_energy_alloc(1);
 
 	result = _read_msr(pkg_fd[0], MSR_RAPL_POWER_UNIT);
 	if (result == 0)
diff --git a/src/plugins/acct_gather_filesystem/Makefile.in b/src/plugins/acct_gather_filesystem/Makefile.in
index 9e2df34f6..1e8a8008c 100644
--- a/src/plugins/acct_gather_filesystem/Makefile.in
+++ b/src/plugins/acct_gather_filesystem/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/acct_gather_filesystem/lustre/Makefile.in b/src/plugins/acct_gather_filesystem/lustre/Makefile.in
index ec5c63a77..f1d1aecfe 100644
--- a/src/plugins/acct_gather_filesystem/lustre/Makefile.in
+++ b/src/plugins/acct_gather_filesystem/lustre/Makefile.in
@@ -103,6 +103,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -111,10 +112,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -127,7 +130,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -278,6 +281,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -327,8 +332,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -347,6 +356,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -390,6 +402,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -413,6 +426,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/acct_gather_filesystem/lustre/acct_gather_filesystem_lustre.c b/src/plugins/acct_gather_filesystem/lustre/acct_gather_filesystem_lustre.c
index c2af42c3d..579d4d62c 100644
--- a/src/plugins/acct_gather_filesystem/lustre/acct_gather_filesystem_lustre.c
+++ b/src/plugins/acct_gather_filesystem/lustre/acct_gather_filesystem_lustre.c
@@ -88,18 +88,13 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 
 const char plugin_name[] = "AcctGatherFilesystem LUSTRE plugin";
 const char plugin_type[] = "acct_gather_filesystem/lustre";
-const uint32_t plugin_version = 100;
-
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 typedef struct {
 	time_t last_update_time;
@@ -275,57 +270,83 @@ static int _read_lustre_counters(void)
  */
 static int _update_node_filesystem(void)
 {
-	static acct_filesystem_data_t fls;
-	static acct_filesystem_data_t current;
 	static acct_filesystem_data_t previous;
+	static int dataset_id = -1;
 	static bool first = true;
-	int cc;
+	acct_filesystem_data_t current;
+
+	enum {
+		FIELD_READ,
+		FIELD_READMB,
+		FIELD_WRITE,
+		FIELD_WRITEMB,
+		FIELD_CNT
+	};
+
+	acct_gather_profile_dataset_t dataset[] = {
+		{ "Reads", PROFILE_FIELD_UINT64 },
+		{ "ReadMB", PROFILE_FIELD_DOUBLE },
+		{ "Writes", PROFILE_FIELD_UINT64 },
+		{ "WriteMB", PROFILE_FIELD_DOUBLE },
+		{ NULL, PROFILE_FIELD_NOT_SET }
+	};
+
+	union {
+		double d;
+		uint64_t u64;
+	} data[FIELD_CNT];
 
 	slurm_mutex_lock(&lustre_lock);
 
-	cc = _read_lustre_counters();
-	if (cc != SLURM_SUCCESS) {
+	if (_read_lustre_counters() != SLURM_SUCCESS) {
 		error("%s: Cannot read lustre counters", __func__);
 		slurm_mutex_unlock(&lustre_lock);
 		return SLURM_FAILURE;
 	}
 
 	if (first) {
-		/* First time initialize the counters and return.
-		 */
+		dataset_id = acct_gather_profile_g_create_dataset("Network",
+			NO_PARENT, dataset);
+		if (dataset_id == SLURM_ERROR) {
+			error("FileSystem: Failed to create the dataset "
+			      "for Lustre");
+			return SLURM_ERROR;
+		}
+
 		previous.reads = lustre_se.all_lustre_nb_reads;
 		previous.writes = lustre_se.all_lustre_nb_writes;
-		previous.read_size
-			= (double)lustre_se.all_lustre_read_bytes/1048576.0;
-		previous.write_size
-			= (double)lustre_se.all_lustre_write_bytes/1048576.0;
+		previous.read_size = (double)lustre_se.all_lustre_read_bytes;
+		previous.write_size = (double)lustre_se.all_lustre_write_bytes;
 
 		first = false;
-		memset(&lustre_se, 0, sizeof(lustre_sens_t));
-		slurm_mutex_unlock(&lustre_lock);
+	}
 
-		return SLURM_SUCCESS;
+	if (dataset_id < 0) {
+		slurm_mutex_unlock(&lustre_lock);
+		return SLURM_ERROR;
 	}
 
-	/* Compute the current values read from all lustre-xxxx
-	 * directories
-	 */
+	/* Compute the current values read from all lustre-xxxx directories */
 	current.reads = lustre_se.all_lustre_nb_reads;
 	current.writes = lustre_se.all_lustre_nb_writes;
-	current.read_size = (double)lustre_se.all_lustre_read_bytes/1048576.0;
-	current.write_size = (double)lustre_se.all_lustre_write_bytes/1048576.0;
-
-	/* Now compute the difference between the two snapshots
-	 * and send it to hdf5 log.
-	 */
-	fls.reads = fls.reads + (current.reads - previous.reads);
-	fls.writes = fls.writes + (current.writes - previous.writes);
-	fls.read_size = fls.read_size
-		+ (current.read_size - previous.read_size);
-	fls.write_size = fls.write_size
-		+ (current.write_size - previous.write_size);
-
-	acct_gather_profile_g_add_sample_data(ACCT_GATHER_PROFILE_LUSTRE, &fls);
+	current.read_size = (double)lustre_se.all_lustre_read_bytes;
+	current.write_size = (double)lustre_se.all_lustre_write_bytes;
+
+	/* record sample */
+	data[FIELD_READ].u64 = current.reads - previous.reads;
+	data[FIELD_READMB].d = (current.read_size - previous.read_size) /
+		(1 << 20);
+	data[FIELD_WRITE].u64 = current.writes - previous.writes;
+	data[FIELD_WRITEMB].d = (current.write_size - previous.write_size) /
+		(1 << 20);
+
+	if (debug_flags & DEBUG_FLAG_PROFILE) {
+		char str[256];
+		info("PROFILE-Lustre: %s", acct_gather_profile_dataset_str(
+			     dataset, data, str, sizeof(str)));
+	}
+	acct_gather_profile_g_add_sample_data(dataset_id, (void *)data,
+					      lustre_se.update_time);
 
 	/* Save current as previous and clean up the working
 	 * data structure.
@@ -333,10 +354,6 @@ static int _update_node_filesystem(void)
 	memcpy(&previous, &current, sizeof(acct_filesystem_data_t));
 	memset(&lustre_se, 0, sizeof(lustre_sens_t));
 
-	info("%s: num reads %"PRIu64" nums write %"PRIu64" "
-	     "read %f MB wrote %f MB",
-	     __func__, fls.reads, fls.writes, fls.read_size, fls.write_size);
-
 	slurm_mutex_unlock(&lustre_lock);
 
 	return SLURM_SUCCESS;
diff --git a/src/plugins/acct_gather_filesystem/none/Makefile.in b/src/plugins/acct_gather_filesystem/none/Makefile.in
index 7fbf3b186..03b774a45 100644
--- a/src/plugins/acct_gather_filesystem/none/Makefile.in
+++ b/src/plugins/acct_gather_filesystem/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -275,6 +278,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -324,8 +329,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -344,6 +353,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -387,6 +399,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -410,6 +423,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/acct_gather_filesystem/none/acct_gather_filesystem_none.c b/src/plugins/acct_gather_filesystem/none/acct_gather_filesystem_none.c
index b05de563c..61897cf12 100644
--- a/src/plugins/acct_gather_filesystem/none/acct_gather_filesystem_none.c
+++ b/src/plugins/acct_gather_filesystem/none/acct_gather_filesystem_none.c
@@ -77,16 +77,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "AcctGatherFilesystem NONE plugin";
 const char plugin_type[] = "acct_gather_filesystem/none";
-const uint32_t plugin_version = 100;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
diff --git a/src/plugins/acct_gather_infiniband/Makefile.in b/src/plugins/acct_gather_infiniband/Makefile.in
index bd6a2eba0..c4cc87c12 100644
--- a/src/plugins/acct_gather_infiniband/Makefile.in
+++ b/src/plugins/acct_gather_infiniband/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/acct_gather_infiniband/none/Makefile.in b/src/plugins/acct_gather_infiniband/none/Makefile.in
index b7c78085f..9c506e49b 100644
--- a/src/plugins/acct_gather_infiniband/none/Makefile.in
+++ b/src/plugins/acct_gather_infiniband/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -275,6 +278,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -324,8 +329,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -344,6 +353,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -387,6 +399,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -410,6 +423,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/acct_gather_infiniband/none/acct_gather_infiniband_none.c b/src/plugins/acct_gather_infiniband/none/acct_gather_infiniband_none.c
index 942eb41cf..93dcb588e 100644
--- a/src/plugins/acct_gather_infiniband/none/acct_gather_infiniband_none.c
+++ b/src/plugins/acct_gather_infiniband/none/acct_gather_infiniband_none.c
@@ -77,17 +77,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "AcctGatherInfiniband NONE plugin";
 const char plugin_type[] = "acct_gather_infiniband/none";
-const uint32_t plugin_version = 100;
-
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 /*
  * init() is called when the plugin is loaded, before any other functions
  * are called.  Put global initialization here.
diff --git a/src/plugins/acct_gather_infiniband/ofed/Makefile.in b/src/plugins/acct_gather_infiniband/ofed/Makefile.in
index e7f412e1f..7b4dbe18f 100644
--- a/src/plugins/acct_gather_infiniband/ofed/Makefile.in
+++ b/src/plugins/acct_gather_infiniband/ofed/Makefile.in
@@ -103,6 +103,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -111,10 +112,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -127,7 +130,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -287,6 +290,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -336,8 +341,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -356,6 +365,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -399,6 +411,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -422,6 +435,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/acct_gather_infiniband/ofed/acct_gather_infiniband_ofed.c b/src/plugins/acct_gather_infiniband/ofed/acct_gather_infiniband_ofed.c
index 632a3cf94..ed6c02512 100644
--- a/src/plugins/acct_gather_infiniband/ofed/acct_gather_infiniband_ofed.c
+++ b/src/plugins/acct_gather_infiniband/ofed/acct_gather_infiniband_ofed.c
@@ -98,17 +98,13 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 
 const char plugin_name[] = "AcctGatherInfiniband OFED plugin";
 const char plugin_type[] = "acct_gather_infiniband/ofed";
-const uint32_t plugin_version = 100;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 typedef struct {
 	uint32_t port;
@@ -141,6 +137,8 @@ static slurm_ofed_conf_t ofed_conf;
 static uint64_t debug_flags = 0;
 static pthread_mutex_t ofed_lock = PTHREAD_MUTEX_INITIALIZER;
 
+static int dataset_id = -1; /* id of the dataset for profile data */
+
 static uint8_t *_slurm_pma_query_via(void *rcvbuf, ib_portid_t * dest, int port,
 				     unsigned timeout, unsigned id,
 				     const struct ibmad_port *srcport)
@@ -264,20 +262,50 @@ static int _read_ofed_values(void)
  */
 static int _update_node_infiniband(void)
 {
-	acct_network_data_t net;
-	int rc = SLURM_SUCCESS;
+	int rc;
+
+	enum {
+		FIELD_PACKIN,
+		FIELD_PACKOUT,
+		FIELD_MBIN,
+		FIELD_MBOUT,
+		FIELD_CNT
+	};
+
+	acct_gather_profile_dataset_t dataset[] = {
+		{ "PacketsIn", PROFILE_FIELD_UINT64 },
+		{ "PacketsOut", PROFILE_FIELD_UINT64 },
+		{ "InMB", PROFILE_FIELD_DOUBLE },
+		{ "OutMB", PROFILE_FIELD_DOUBLE },
+		{ NULL, PROFILE_FIELD_NOT_SET }
+	};
+
+	union {
+		double d;
+		uint64_t u64;
+	} data[FIELD_CNT];
+
+	if (dataset_id < 0) {
+		dataset_id = acct_gather_profile_g_create_dataset("Network",
+			NO_PARENT, dataset);
+		if (debug_flags & DEBUG_FLAG_INFINIBAND)
+			debug("IB: dataset created (id = %d)", dataset_id);
+		if (dataset_id == SLURM_ERROR) {
+			error("IB: Failed to create the dataset for ofed");
+			return SLURM_ERROR;
+		}
+	}
 
 	slurm_mutex_lock(&ofed_lock);
-	rc = _read_ofed_values();
-
-	memset(&net, 0, sizeof(acct_network_data_t));
+	if ((rc = _read_ofed_values()) != SLURM_SUCCESS) {
+		slurm_mutex_unlock(&ofed_lock);
+		return rc;
+	}
 
-	net.packets_in = ofed_sens.rcvpkts;
-	net.packets_out = ofed_sens.xmtpkts;
-	net.size_in = (double) ofed_sens.rcvdata / 1048576;
-	net.size_out = (double) ofed_sens.xmtdata / 1048576;
-	acct_gather_profile_g_add_sample_data(ACCT_GATHER_PROFILE_NETWORK,
-					      &net);
+	data[FIELD_PACKIN].u64 = ofed_sens.rcvpkts;
+	data[FIELD_PACKOUT].u64 = ofed_sens.xmtpkts;
+	data[FIELD_MBIN].d = (double) ofed_sens.rcvdata / (1 << 20);
+	data[FIELD_MBOUT].d = (double) ofed_sens.xmtdata / (1 << 20);
 
 	if (debug_flags & DEBUG_FLAG_INFINIBAND) {
 		info("ofed-thread = %d sec, transmitted %"PRIu64" bytes, "
@@ -287,7 +315,13 @@ static int _update_node_infiniband(void)
 	}
 	slurm_mutex_unlock(&ofed_lock);
 
-	return rc;
+	if (debug_flags & DEBUG_FLAG_PROFILE) {
+		char str[256];
+		info("PROFILE-Network: %s", acct_gather_profile_dataset_str(
+			     dataset, data, str, sizeof(str)));
+	}
+	return acct_gather_profile_g_add_sample_data(dataset_id, (void *)data,
+						     ofed_sens.update_time);
 }
 
 static bool _run_in_daemon(void)
diff --git a/src/plugins/acct_gather_profile/Makefile.in b/src/plugins/acct_gather_profile/Makefile.in
index 0e19d92d9..bd0af2182 100644
--- a/src/plugins/acct_gather_profile/Makefile.in
+++ b/src/plugins/acct_gather_profile/Makefile.in
@@ -99,6 +99,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -107,10 +108,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -123,7 +126,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -252,6 +255,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -301,8 +306,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -321,6 +330,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -364,6 +376,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -387,6 +400,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/acct_gather_profile/hdf5/Makefile.in b/src/plugins/acct_gather_profile/hdf5/Makefile.in
index b4eaee42e..ca0408e80 100644
--- a/src/plugins/acct_gather_profile/hdf5/Makefile.in
+++ b/src/plugins/acct_gather_profile/hdf5/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -342,6 +345,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -391,8 +396,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -411,6 +420,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -454,6 +466,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -477,6 +490,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/acct_gather_profile/hdf5/acct_gather_profile_hdf5.c b/src/plugins/acct_gather_profile/hdf5/acct_gather_profile_hdf5.c
index 28cd8a9bc..d68be3a5c 100644
--- a/src/plugins/acct_gather_profile/hdf5/acct_gather_profile_hdf5.c
+++ b/src/plugins/acct_gather_profile/hdf5/acct_gather_profile_hdf5.c
@@ -4,10 +4,12 @@
  *****************************************************************************
  *  Copyright (C) 2013 Bull S. A. S.
  *		Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois.
- *  Written by Rod Schultz <rod.schultz@bull.com>
  *
  *  Portions Copyright (C) 2013 SchedMD LLC.
- *  Written by Danny Auble <da@schedmd.com>
+ *
+ *  Initially written by Rod Schultz <rod.schultz@bull.com> @ Bull
+ *  and Danny Auble <da@schedmd.com> @ SchedMD.
+ *  Adapted by Yoann Blein <yoann.blein@bull.net> @ Bull.
  *
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://www.schedmd.com/slurmdocs/>.
@@ -58,9 +60,17 @@
 #include "src/common/slurm_acct_gather_profile.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_protocol_defs.h"
+#include "src/common/slurm_time.h"
 #include "src/slurmd/common/proctrack.h"
 #include "hdf5_api.h"
 
+#define HDF5_CHUNK_SIZE 10
+/* Compression level, a value of 0 through 9. Level 0 is faster but offers the
+ * least compression; level 9 is slower but offers maximum compression.
+ * A setting of -1 indicates that no compression is desired. */
+/* TODO: Make this configurable with a parameter */
+#define HDF5_COMPRESS 0
+
 /*
  * These variables are required by the generic plugin interface.  If they
  * are not found in the plugin, the plugin loader will ignore it.
@@ -83,24 +93,23 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "AcctGatherProfile hdf5 plugin";
 const char plugin_type[] = "acct_gather_profile/hdf5";
-const uint32_t plugin_version = 100;
-
-hid_t typTOD;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 typedef struct {
 	char *dir;
 	uint32_t def;
 } slurm_hdf5_conf_t;
 
+typedef struct {
+	hid_t  table_id;
+	size_t type_size;
+} table_t;
+
 // Global HDF5 Variables
 //	The HDF5 file and base objects will remain open for the duration of the
 //	step. This avoids reconstruction on every acct_gather_sample and
@@ -116,6 +125,13 @@ static slurm_hdf5_conf_t hdf5_conf;
 static uint64_t debug_flags = 0;
 static uint32_t g_profile_running = ACCT_GATHER_PROFILE_NOT_SET;
 static stepd_step_rec_t *g_job = NULL;
+static time_t step_start_time;
+
+static hid_t *groups = NULL;
+static size_t groups_len = 0;
+static table_t *tables = NULL;
+static size_t   tables_max_len = 0;
+static size_t   tables_cur_len = 0;
 
 static void _reset_slurm_profile_conf(void)
 {
@@ -139,22 +155,6 @@ static uint32_t _determine_profile(void)
 	return profile;
 }
 
-static int _get_taskid_from_pid(pid_t pid, uint32_t *gtid)
-{
-	int tx;
-
-	xassert(g_job);
-
-	for (tx=0; tx<g_job->node_tasks; tx++) {
-		if (g_job->task[tx]->pid == pid) {
-			*gtid = g_job->task[tx]->gtid;
-			return SLURM_SUCCESS;
-		}
-	}
-
-	return SLURM_ERROR;
-}
-
 static int _create_directories(void)
 {
 	int rc;
@@ -199,17 +199,6 @@ static int _create_directories(void)
 	return SLURM_SUCCESS;
 }
 
-static bool _do_profile(uint32_t profile, uint32_t req_profiles)
-{
-	if (req_profiles <= ACCT_GATHER_PROFILE_NONE)
-		return false;
-	if ((profile == ACCT_GATHER_PROFILE_NOT_SET)
-	    || (req_profiles & profile))
-		return true;
-
-	return false;
-}
-
 static bool _run_in_daemon(void)
 {
 	static bool set = false;
@@ -234,11 +223,17 @@ extern int init(void)
 
 	debug_flags = slurm_get_debug_flags();
 
+	/* Move HDF5 trace printing to log file instead of stderr */
+	H5Eset_auto(H5E_DEFAULT, (herr_t (*)(hid_t, void *))H5Eprint,
+	            log_fp());
+
 	return SLURM_SUCCESS;
 }
 
 extern int fini(void)
 {
+	xfree(tables);
+	xfree(groups);
 	xfree(hdf5_conf.dir);
 	return SLURM_SUCCESS;
 }
@@ -306,8 +301,7 @@ extern int acct_gather_profile_p_node_step_start(stepd_step_rec_t* job)
 {
 	int rc = SLURM_SUCCESS;
 
-	time_t start_time;
-	char    *profile_file_name;
+	char *profile_file_name;
 	char *profile_str;
 
 	xassert(_run_in_daemon());
@@ -346,7 +340,6 @@ extern int acct_gather_profile_p_node_step_start(stepd_step_rec_t* job)
 	}
 
 	// Create a new file using the default properties.
-	profile_init();
 	file_id = H5Fcreate(profile_file_name, H5F_ACC_TRUNC, H5P_DEFAULT,
 			    H5P_DEFAULT);
 	if (chown(profile_file_name, (uid_t)g_job->uid,
@@ -360,10 +353,9 @@ extern int acct_gather_profile_p_node_step_start(stepd_step_rec_t* job)
 		return SLURM_FAILURE;
 	}
 	/* fd_set_close_on_exec(file_id); Not supported for HDF5 */
-	sprintf(group_node, "/%s_%s", GRP_NODE, g_job->node_name);
-	gid_node = H5Gcreate(file_id, group_node, H5P_DEFAULT,
-			     H5P_DEFAULT, H5P_DEFAULT);
-	if (gid_node < 1) {
+	sprintf(group_node, "/%s", g_job->node_name);
+	gid_node = make_group(file_id, group_node);
+	if (gid_node < 0) {
 		H5Fclose(file_id);
 		file_id = -1;
 		info("PROFILE: Failed to create Node group");
@@ -371,9 +363,11 @@ extern int acct_gather_profile_p_node_step_start(stepd_step_rec_t* job)
 	}
 	put_string_attribute(gid_node, ATTR_NODENAME, g_job->node_name);
 	put_int_attribute(gid_node, ATTR_NTASKS, g_job->node_tasks);
-	start_time = time(NULL);
+	put_int_attribute(gid_node, ATTR_CPUPERTASK, g_job->cpus_per_task);
+
+	step_start_time = time(NULL);
 	put_string_attribute(gid_node, ATTR_STARTTIME,
-			     slurm_ctime(&start_time));
+			     slurm_ctime2(&step_start_time));
 
 	return rc;
 }
@@ -397,6 +391,7 @@ extern int acct_gather_profile_p_child_forked(void)
 extern int acct_gather_profile_p_node_step_end(void)
 {
 	int rc = SLURM_SUCCESS;
+	size_t i;
 
 	xassert(_run_in_daemon());
 
@@ -415,6 +410,15 @@ extern int acct_gather_profile_p_node_step_end(void)
 	if (debug_flags & DEBUG_FLAG_PROFILE)
 		info("PROFILE: node_step_end (shutdown)");
 
+	/* close tables */
+	for (i = 0; i < tables_cur_len; ++i) {
+		H5PTclose(tables[i].table_id);
+	}
+	/* close groups */
+	for (i = 0; i < groups_len; ++i) {
+		H5Gclose(groups[i]);
+	}
+
 	if (gid_totals > 0)
 		H5Gclose(gid_totals);
 	if (gid_samples > 0)
@@ -454,181 +458,165 @@ extern int acct_gather_profile_p_task_start(uint32_t taskid)
 
 extern int acct_gather_profile_p_task_end(pid_t taskpid)
 {
-	hid_t   gid_task;
-	char 	group_task[MAX_GROUP_NAME+1];
-	uint32_t task_id;
-	int rc = SLURM_SUCCESS;
-
-	xassert(_run_in_daemon());
-	xassert(g_job);
-
-	if (g_job->stepid == NO_VAL)
-		return rc;
-
-	xassert(g_profile_running != ACCT_GATHER_PROFILE_NOT_SET);
-
-	if (!_do_profile(ACCT_GATHER_PROFILE_NOT_SET, g_profile_running))
-		return rc;
-
-	if (_get_taskid_from_pid(taskpid, &task_id) != SLURM_SUCCESS)
-		return SLURM_FAILURE;
-	if (file_id == -1) {
-		info("PROFILE: add_task_data, HDF5 file is not open");
-		return SLURM_FAILURE;
-	}
-	if (gid_tasks < 0) {
-		gid_tasks = make_group(gid_node, GRP_TASKS);
-		if (gid_tasks < 1) {
-			info("PROFILE: Failed to create Tasks group");
-			return SLURM_FAILURE;
-		}
-	}
-	sprintf(group_task, "%s_%d", GRP_TASK, task_id);
-	gid_task = get_group(gid_tasks, group_task);
-	if (gid_task == -1) {
-		gid_task = make_group(gid_tasks, group_task);
-		if (gid_task < 0) {
-			info("Failed to open tasks %s", group_task);
-			return SLURM_FAILURE;
-		}
-		put_int_attribute(gid_task, ATTR_TASKID, task_id);
-	}
-	put_int_attribute(gid_task, ATTR_CPUPERTASK, g_job->cpus_per_task);
-
 	if (debug_flags & DEBUG_FLAG_PROFILE)
 		info("PROFILE: task_end");
-	return rc;
+	return SLURM_SUCCESS;
 }
 
-extern int acct_gather_profile_p_add_sample_data(uint32_t type, void *data)
+extern int acct_gather_profile_p_create_group(const char* name)
 {
-	hid_t   g_sample_grp;
-	char    group[MAX_GROUP_NAME+1];
-	char 	group_sample[MAX_GROUP_NAME+1];
-	static uint32_t sample_no = 0;
-	uint32_t task_id = 0;
-	void *send_profile = NULL;
-	char *type_name = NULL;
-
-	profile_task_t  profile_task;
-	profile_network_t  profile_network;
-	profile_energy_t  profile_energy;
-	profile_io_t  profile_io;
-
-	struct jobacctinfo *jobacct = (struct jobacctinfo *)data;
-	acct_network_data_t *net = (acct_network_data_t *)data;
-	acct_energy_data_t *ener = (acct_energy_data_t *)data;
-	struct lustre_data *lus = (struct lustre_data *)data;
-
-	xassert(_run_in_daemon());
-	xassert(g_job);
+	hid_t gid_group = make_group(gid_node, name);
+	if (gid_group < 0) {
+		return SLURM_ERROR;
+	}
 
-	if (g_job->stepid == NO_VAL)
-		return SLURM_SUCCESS;
+	/* store the group to keep track of it */
+	groups = xrealloc(groups, (groups_len + 1) * sizeof(hid_t));
+	groups[groups_len] = gid_group;
+	++groups_len;
 
-	xassert(g_profile_running != ACCT_GATHER_PROFILE_NOT_SET);
+	return gid_group;
+}
 
-	if (!_do_profile(type, g_profile_running))
-		return SLURM_SUCCESS;
+extern int acct_gather_profile_p_create_dataset(
+	const char* name, int parent, acct_gather_profile_dataset_t *dataset)
+{
+	size_t type_size;
+	size_t offset, field_size;
+	hid_t dtype_id;
+	hid_t field_id;
+	hid_t table_id;
+	acct_gather_profile_dataset_t *dataset_loc = dataset;
 
-	switch (type) {
-	case ACCT_GATHER_PROFILE_ENERGY:
-		snprintf(group, sizeof(group), "%s", GRP_ENERGY);
+	if (g_profile_running <= ACCT_GATHER_PROFILE_NONE)
+		return SLURM_ERROR;
 
-		memset(&profile_energy, 0, sizeof(profile_energy_t));
-		profile_energy.time = ener->time;
-		profile_energy.cpu_freq = ener->cpu_freq;
-		profile_energy.power = ener->power;
+	debug("acct_gather_profile_p_create_dataset %s", name);
+
+	/* compute the size of the type needed to create the table */
+	type_size = sizeof(uint64_t) * 2; /* size for time field */
+	while (dataset_loc && (dataset_loc->type != PROFILE_FIELD_NOT_SET)) {
+		switch (dataset_loc->type) {
+		case PROFILE_FIELD_UINT64:
+			type_size += sizeof(uint64_t);
+			break;
+		case PROFILE_FIELD_DOUBLE:
+			type_size += sizeof(double);
+			break;
+		case PROFILE_FIELD_NOT_SET:
+			break;
+		}
+		dataset_loc++;
+	}
 
-		send_profile = &profile_energy;
-		break;
-	case ACCT_GATHER_PROFILE_TASK:
-		if (_get_taskid_from_pid(jobacct->pid, &task_id)
-		    != SLURM_SUCCESS)
-			return SLURM_ERROR;
+	/* create the datatype for the dataset */
+	if ((dtype_id = H5Tcreate(H5T_COMPOUND, type_size)) < 0) {
+		debug3("PROFILE: failed to create datatype for table %s",
+		       name);
+		return SLURM_ERROR;
+	}
 
-		snprintf(group, sizeof(group), "%s_%u", GRP_TASK, task_id);
+	/* insert fields */
+	if (H5Tinsert(dtype_id, "ElapsedTime", sizeof(uint64_t),
+		      H5T_NATIVE_UINT64) < 0)
+		return SLURM_ERROR;
+	if (H5Tinsert(dtype_id, "EpochTime", 0, H5T_NATIVE_UINT64) < 0)
+		return SLURM_ERROR;
 
-		memset(&profile_task, 0, sizeof(profile_task_t));
-		profile_task.time = time(NULL);
-		profile_task.cpu_freq = jobacct->act_cpufreq;
-		profile_task.cpu_time = jobacct->tot_cpu;
-		profile_task.cpu_utilization = jobacct->tot_cpu;
-		profile_task.pages = jobacct->tot_pages;
-		profile_task.read_size = jobacct->tot_disk_read;
-		profile_task.rss = jobacct->tot_rss;
-		profile_task.vm_size = jobacct->tot_vsize;
-		profile_task.write_size = jobacct->tot_disk_write;
+	dataset_loc = dataset;
+
+	offset = sizeof(uint64_t) * 2;
+	while (dataset_loc && (dataset_loc->type != PROFILE_FIELD_NOT_SET)) {
+		switch (dataset_loc->type) {
+		case PROFILE_FIELD_UINT64:
+			field_id = H5T_NATIVE_UINT64;
+			field_size = sizeof(uint64_t);
+			break;
+		case PROFILE_FIELD_DOUBLE:
+			field_id = H5T_NATIVE_DOUBLE;
+			field_size = sizeof(double);
+			break;
+		case PROFILE_FIELD_NOT_SET:
+			break;
+		}
+		if (H5Tinsert(dtype_id, dataset_loc->name,
+			      offset, field_id) < 0)
+			return SLURM_ERROR;
+		offset += field_size;
+		dataset_loc++;
+	}
 
-		send_profile = &profile_task;
-		break;
-	case ACCT_GATHER_PROFILE_LUSTRE:
-		snprintf(group, sizeof(group), "%s", GRP_LUSTRE);
+	/* create the table */
+	if (parent < 0)
+		parent = gid_node; /* default parent is the node group */
+	table_id = H5PTcreate_fl(parent, name, dtype_id, HDF5_CHUNK_SIZE,
+	                         HDF5_COMPRESS);
+	if (table_id < 0) {
+		error("PROFILE: Impossible to create the table %s", name);
+		H5Tclose(dtype_id);
+		return SLURM_ERROR;
+	}
+	H5Tclose(dtype_id); /* close the datatype since H5PT keeps a copy */
+
+	/* resize the tables array if full */
+	if (tables_cur_len == tables_max_len) {
+		if (tables_max_len == 0)
+			++tables_max_len;
+		tables_max_len *= 2;
+		tables = xrealloc(tables, tables_max_len * sizeof(table_t));
+	}
 
-		memset(&profile_io, 0, sizeof(profile_io_t));
-		profile_io.time = time(NULL);
-		profile_io.reads = lus->reads;
-		profile_io.read_size = lus->read_size;
-		profile_io.writes = lus->writes;
-		profile_io.write_size = lus->write_size;
+	/* reserve a new table */
+	tables[tables_cur_len].table_id  = table_id;
+	tables[tables_cur_len].type_size = type_size;
+	++tables_cur_len;
 
-		send_profile = &profile_io;
+	return tables_cur_len - 1;
+}
 
-		break;
-	case ACCT_GATHER_PROFILE_NETWORK:
+extern int acct_gather_profile_p_add_sample_data(int table_id, void *data,
+						 time_t sample_time)
+{
+	table_t *ds = &tables[table_id];
+	uint8_t send_data[ds->type_size];
+	int header_size = 0;
+	debug("acct_gather_profile_p_add_sample_data %d", table_id);
 
-		snprintf(group, sizeof(group), "%s", GRP_NETWORK);
+	if (file_id < 0) {
+		debug("PROFILE: Trying to add data but profiling is over");
+		return SLURM_SUCCESS;
+	}
 
-		memset(&profile_network, 0, sizeof(profile_network_t));
-		profile_network.time = time(NULL);
-		profile_network.packets_in = net->packets_in;
-		profile_network.size_in = net->size_in;
-		profile_network.packets_out = net->packets_out;
-		profile_network.size_out = net->size_out;
+	if (table_id < 0 || table_id >= tables_cur_len) {
+		error("PROFILE: trying to add samples to an invalid table %d",
+		      table_id);
+		return SLURM_ERROR;
+	}
 
-		send_profile = &profile_network;
+	/* ensure that we have to record something */
+	xassert(_run_in_daemon());
+	xassert(g_job);
+	if (g_job->stepid == NO_VAL)
+		return SLURM_SUCCESS;
+	xassert(g_profile_running != ACCT_GATHER_PROFILE_NOT_SET);
 
-		break;
-	default:
-		error("acct_gather_profile_p_add_sample_data: "
-		      "Unknown type %d sent", type);
+	if (g_profile_running <= ACCT_GATHER_PROFILE_NONE)
 		return SLURM_ERROR;
-	}
 
-	type_name = acct_gather_profile_type_to_string(type);
+	/* prepend timestampe and relative time */
+	((uint64_t *)send_data)[0] = difftime(sample_time, step_start_time);
+	header_size += sizeof(uint64_t);
+	((uint64_t *)send_data)[1] = sample_time;
+	header_size += sizeof(uint64_t);
 
-	if (debug_flags & DEBUG_FLAG_PROFILE)
-		info("PROFILE: add_sample_data Group-%s Type=%s",
-		     group, type_name);
-
-	if (file_id == -1) {
-		if (debug_flags & DEBUG_FLAG_PROFILE) {
-			// This can happen from samples from the gather threads
-			// before the step actually starts.
-			info("PROFILE: add_sample_data, HDF5 file not open");
-		}
-		return SLURM_FAILURE;
-	}
-	if (gid_samples < 0) {
-		gid_samples = make_group(gid_node, GRP_SAMPLES);
-		if (gid_samples < 1) {
-			info("PROFILE: failed to create TimeSeries group");
-			return SLURM_FAILURE;
-		}
-	}
-	g_sample_grp = get_group(gid_samples, group);
-	if (g_sample_grp < 0) {
-		g_sample_grp = make_group(gid_samples, group);
-		if (g_sample_grp < 0) {
-			info("PROFILE: failed to open TimeSeries %s", group);
-			return SLURM_FAILURE;
-		}
-		put_string_attribute(g_sample_grp, ATTR_DATATYPE, type_name);
+	memcpy(send_data + header_size, data, ds->type_size - header_size);
+
+	/* append the record to the table */
+	if (H5PTappend(ds->table_id, 1, send_data) < 0) {
+		error("PROFILE: Impossible to add data to the table %d; "
+		      "maybe the table has not been created?", table_id);
+		return SLURM_ERROR;
 	}
-	sprintf(group_sample, "%s_%10.10d", group, ++sample_no);
-	put_hdf5_data(g_sample_grp, type, SUBDATA_SAMPLE,
-		      group_sample, send_profile, 1);
-	H5Gclose(g_sample_grp);
 
 	return SLURM_SUCCESS;
 }
@@ -652,3 +640,12 @@ extern void acct_gather_profile_p_conf_values(List *data)
 	return;
 
 }
+
+extern bool acct_gather_profile_p_is_active(uint32_t type)
+{
+	if (g_profile_running <= ACCT_GATHER_PROFILE_NONE)
+		return false;
+	return (type == ACCT_GATHER_PROFILE_NOT_SET)
+		|| (g_profile_running & type);
+}
+
diff --git a/src/plugins/acct_gather_profile/hdf5/hdf5_api.c b/src/plugins/acct_gather_profile/hdf5/hdf5_api.c
index f9beb1385..67368fd30 100644
--- a/src/plugins/acct_gather_profile/hdf5/hdf5_api.c
+++ b/src/plugins/acct_gather_profile/hdf5/hdf5_api.c
@@ -38,1582 +38,23 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \****************************************************************************/
 
+#include <string.h>
+
 #include "src/common/macros.h"
 #include "src/common/xassert.h"
 #include "src/common/xstring.h"
+#include "src/common/xmalloc.h"
+#include "src/common/slurm_acct_gather_profile.h"
 
 #include "hdf5_api.h"
 
-
-// Static variables ok as add function are inside a lock.
-static time_t seriesStart;
-static hid_t typTOD;
-static int i; // General index used in some macros.
-static int moffset; // General variable used by insert macros
-
-/*
- * Macro to insert a date string type into a compound memory type
- *
- * Parameters
- * 	p	parent (group) memory type
- * 	label	description of item
- * 	type	profile struct type
- * 	item    data item in type
- */
-#define MEM_ADD_DATE_TIME(p, label, type, item)				\
-	if(H5Tinsert(p, label, HOFFSET(type, item), typTOD) < 0) {	\
-		debug3("PROFILE: failed insert into memory datatype");	\
-		H5Tclose(p);						\
-		return -1;						\
-	}
-/*
- * Macro to insert a date string type into a compound file type
- *
- * Parameters
- * 	p	parent (group) file type
- * 	label	description of item
- * 	offset  offset into record
- */
-#define FILE_ADD_DATE_TIME(p, label, offset) 				\
-	if(H5Tinsert(p, label, offset, typTOD) < 0) {			\
-		debug3("PROFILE: failed insert into file datatype");	\
-		H5Tclose(p);						\
-		return -1;						\
-	}
-
-/*
- * Macro to insert an uint64 into a compound memory type
- *
- * Parameters
- * 	p	parent (group) memory type
- * 	label	description of item
- * 	type	profile struct type
- * 	item    data item in type
- */
-#define MEM_ADD_UINT64(p, label, type, item)				\
-	if(H5Tinsert(p, label, HOFFSET(type, item), H5T_NATIVE_UINT64) < 0) { \
-		debug3("PROFILE: failed insert64 into memory datatype"); \
-		H5Tclose(p);						\
-		return -1;						\
-	}
-/*
- * Macro to insert a uint64 into a compound file type
- *
- * Parameters
- * 	p	parent (group) file type
- * 	label	description of item
- */
-#define FILE_ADD_UINT64(p, label)					\
-	if(H5Tinsert(p, label, moffset, H5T_NATIVE_UINT64) < 0) {	\
-		debug3("PROFILE: failed insert64 into file datatype");	\
-		H5Tclose(p);						\
-		return -1;						\
-	}								\
-	moffset += 8;
-
-/*
- * Macro to insert a double into a compound memory type
- *
- * Parameters
- * 	p	parent (group) memory type
- * 	label	description of item
- * 	type	profile struct type
- * 	item    data item in type
- */
-#define MEM_ADD_DBL(p, label, type, item)				\
-	if(H5Tinsert(p, label, HOFFSET(type, item), H5T_NATIVE_DOUBLE) < 0) { \
-		debug3("PROFILE: failed insertdbl into memory datatype"); \
-		H5Tclose(p);						\
-		return -1;						\
-	}
-/*
- * Macro to insert a double into a compound file type
- *
- * Parameters
- * 	p	parent (group) file type
- * 	label	description of item
- */
-#define FILE_ADD_DBL(p, label)						\
-	if(H5Tinsert(p, label, moffset, H5T_NATIVE_DOUBLE) < 0) {	\
-		debug3("PROFILE: failed insertdbl into file datatype");	\
-		H5Tclose(p);						\
-		return -1;						\
-	}								\
-	moffset += 8;
-
-/*
- * Macro to increment a sample in a difference series
- * -- Difference means each sample represents counts for only that interval
- *	(assumes consistent naming convention)
- *
- *
- * Parameters
- * 	tot	total pointer
- * 	smp     sample pointer
- * 	var	variable name in sample
- * 	count	number of items in series
- */
-#define INCR_DIF_SAMPLE(tot, smp, var, count)			\
-	for (i=0; i<count; i++) {				\
-		if (i == 0) {					\
-			total->var.min = smp[i].var;		\
-		}						\
-		tot->var.total += smp[i].var;			\
-		tot->var.min = MIN(smp[i].var, tot->var.min);	\
-		tot->var.max = MAX(smp[i].var, tot->var.max);	\
-	}							\
-	tot->var.ave = tot->var.total / count;
-
-/*
- * Macro to increment a sample in a running total
- * -- Running total means first sample is initial conditions
- *	(assumes consistent naming convention)
- *
- *
- * Parameters
- * 	tot	total pointer
- * 	smp     sample pointer
- * 	var	variable name in sample
- * 	count	number of items in series
- */
-#define INCR_RT_SAMPLE(tot, smp, var, count)			\
-	for (i=1; i<count; i++) {				\
-		if (i == 1) {					\
-			total->var.min = smp[i].var;		\
-		}						\
-		tot->var.total += smp[i].var;			\
-		tot->var.min = MIN(smp[i].var, tot->var.min);	\
-		tot->var.max = MAX(smp[i].var, tot->var.max);	\
-	}							\
-	tot->var.ave = tot->var.total / count;
-
-/* Macro to put an int min,ave,max,total for a variable to extract file
- *
- * Parameters
- * 	fp	file descriptor
- * 	var	variable name
- * 	prf	prefix for series (usually ','
- */
-#define PUT_UINT_SUM(fp, var, prfx)			\
-	fprintf(fp, "%s%ld,%ld,%ld,%ld", prfx,		\
-		var.min, var.ave, var.max, var.total);
-/* Macro to put an int min,ave,max,total for a variable to extract file
- *
- * Parameters
- * 	fp	file descriptor
- * 	var	variable name
- * 	prf	prefix for series (usually ','
- */
-#define PUT_DBL_SUM(fp, var, prfx)			\
-	fprintf(fp, "%s%.3f,%.3f,%.3f,%.3f", prfx,	\
-		var.min, var.ave, var.max, var.total);
-
-
-// ============================================================================
-// Routines supporting Energy Data type
-// ============================================================================
-
-static int _energy_dataset_size(void)
-{
-	return sizeof(profile_energy_t);
-}
-
-static hid_t _energy_create_memory_datatype(void)
-{
-	hid_t   mtyp_energy = H5Tcreate(H5T_COMPOUND, sizeof(profile_energy_t));
-	if (mtyp_energy < 0) {
-		debug3("PROFILE: failed to create Energy memory datatype");
-		return -1;
-	}
-	MEM_ADD_DATE_TIME(mtyp_energy, "Date_Time", profile_energy_t, tod);
-	MEM_ADD_UINT64(mtyp_energy, "Time", profile_energy_t, time);
-	MEM_ADD_UINT64(mtyp_energy, "Power", profile_energy_t, power);
-	MEM_ADD_UINT64(mtyp_energy, "CPU_Frequency",
-		       profile_energy_t, cpu_freq);
-
-	return mtyp_energy;
-}
-
-static hid_t _energy_create_file_datatype(void)
-{
-	hid_t   ftyp_energy = H5Tcreate(H5T_COMPOUND, (TOD_LEN+3*8));
-	if (ftyp_energy < 0) {
-		debug3("PROFILE: failed to create Energy file datatype");
-		return -1;
-	}
-	moffset = TOD_LEN;
-	FILE_ADD_DATE_TIME(ftyp_energy, "Date_Time", 0);
-	FILE_ADD_UINT64(ftyp_energy, "Time");
-	FILE_ADD_UINT64(ftyp_energy, "Power");
-	FILE_ADD_UINT64(ftyp_energy, "CPU_Frequency");
-
-	return ftyp_energy;
-}
-
-static hid_t _energy_s_create_memory_datatype(void)
-{
-	hid_t   mtyp_energy = H5Tcreate(H5T_COMPOUND,
-					sizeof(profile_energy_s_t));
-	if (mtyp_energy < 0) {
-		debug3("PROFILE: failed to create Energy_s memory datatype");
-		return -1;
-	}
-	MEM_ADD_DATE_TIME(mtyp_energy, "Start Time",
-			  profile_energy_s_t, start_time);
-	MEM_ADD_UINT64(mtyp_energy, "Elapsed Time",
-		       profile_energy_s_t, elapsed_time);
-	MEM_ADD_UINT64(mtyp_energy, "Min Power", profile_energy_s_t, power.min);
-	MEM_ADD_UINT64(mtyp_energy, "Ave Power", profile_energy_s_t, power.ave);
-	MEM_ADD_UINT64(mtyp_energy, "Max Power", profile_energy_s_t, power.max);
-	MEM_ADD_UINT64(mtyp_energy, "Total Power",
-		       profile_energy_s_t, power.total);
-	MEM_ADD_UINT64(mtyp_energy, "Min CPU Frequency", profile_energy_s_t,
-		       cpu_freq.min);
-	MEM_ADD_UINT64(mtyp_energy, "Ave CPU Frequency", profile_energy_s_t,
-		       cpu_freq.ave);
-	MEM_ADD_UINT64(mtyp_energy, "Max CPU Frequency", profile_energy_s_t,
-		       cpu_freq.max);
-	MEM_ADD_UINT64(mtyp_energy, "Total CPU Frequency", profile_energy_s_t,
-		       cpu_freq.total);
-
-	return mtyp_energy;
-}
-
-static hid_t _energy_s_create_file_datatype(void)
-{
-	hid_t   ftyp_energy = H5Tcreate(H5T_COMPOUND, (TOD_LEN+9*8));
-	if (ftyp_energy < 0) {
-		debug3("PROFILE: failed to create Energy_s file datatype");
-		return -1;
-	}
-	moffset = TOD_LEN;
-	FILE_ADD_DATE_TIME(ftyp_energy, "Start Time", 0);
-	FILE_ADD_UINT64(ftyp_energy, "Elapsed Time");
-	FILE_ADD_UINT64(ftyp_energy, "Min Power");
-	FILE_ADD_UINT64(ftyp_energy, "Ave Power");
-	FILE_ADD_UINT64(ftyp_energy, "Max Power");
-	FILE_ADD_UINT64(ftyp_energy, "Total Power");
-	FILE_ADD_UINT64(ftyp_energy, "Min CPU Frequency");
-	FILE_ADD_UINT64(ftyp_energy, "Ave CPU Frequency");
-	FILE_ADD_UINT64(ftyp_energy, "Max CPU Frequency");
-	FILE_ADD_UINT64(ftyp_energy, "Total CPU Frequency");
-
-	return ftyp_energy;
-}
-
-static void *_energy_init_job_series(int n_samples)
-{
-	profile_energy_t*  energy_data;
-
-	energy_data = xmalloc(n_samples * sizeof(profile_energy_t));
-	if (energy_data == NULL) {
-		debug3("PROFILE: failed to get memory for energy data");
-		return NULL;
-	}
-	return (void*) energy_data;
-}
-
-static char** _energy_get_series_tod(void* data, int nsmp)
-{
-	int ix;
-	char      **tod_values = NULL;
-	profile_energy_t* energy_series = (profile_energy_t*) data;
-	tod_values = (char**) xmalloc(nsmp*sizeof(char*));
-	if (tod_values == NULL) {
-		info("Failed to get memory for energy tod");
-		return NULL;
-	}
-	for (ix=0; ix < nsmp; ix++) {
-		tod_values[ix] = xstrdup(energy_series[ix].tod);
-	}
-	return tod_values;
-}
-
-static double* _energy_get_series_values(char* data_name, void* data, int nsmp)
-{
-	int ix;
-	profile_energy_t* energy_series = (profile_energy_t*) data;
-	double  *energy_values = NULL;
-	energy_values = xmalloc(nsmp*sizeof(double));
-	if (energy_values == NULL) {
-		info("PROFILE: Failed to get memory for energy data");
-		return NULL;
-	}
-	if (strcasecmp(data_name,"Time") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			energy_values[ix] = (double) energy_series[ix].time;
-
-		}
-		return energy_values;
-	} else if (strcasecmp(data_name,"Power") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			energy_values[ix] = (double) energy_series[ix].power;
-
-		}
-		return energy_values;
-	} else if (strcasecmp(data_name,"CPU_Frequency") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			energy_values[ix] = (double) energy_series[ix].cpu_freq;
-
-		}
-		return energy_values;
-	}
-	xfree(energy_values);
-	info("PROFILE: %s is invalid data item for energy data", data_name);
-	return NULL;
-}
-
-static void _energy_merge_step_series(
-	hid_t group, void *prior, void *cur, void *buf)
-{
-//	This is a difference series
-	profile_energy_t* prf_cur = (profile_energy_t*) cur;
-	profile_energy_t* prf_buf = (profile_energy_t*) buf;
-	struct tm *ts = localtime(&prf_cur->time);
-	strftime(prf_buf->tod, TOD_LEN, TOD_FMT, ts);
-	if (prior == NULL) {
-		// First sample.
-		seriesStart = prf_cur->time;
-		prf_buf->time = 0;
-
-	} else {
-		prf_buf->time = prf_cur->time - seriesStart;
-	}
-	prf_buf->power = prf_cur->power;
-	prf_buf->cpu_freq = prf_cur->cpu_freq;
-	return;
-}
-
-static void *_energy_series_total(int n_samples, void *data)
-{
-	profile_energy_t* energy_data;
-	profile_energy_s_t* total;
-	if (n_samples < 1)
-		return NULL;
-	energy_data = (profile_energy_t*) data;
-	total = xmalloc(sizeof(profile_energy_s_t));
-	if (total == NULL) {
-		error("PROFILE: Out of memory getting energy total");
-		return NULL;
-	}
-	// Assuming energy series are a difference series
-	strcpy(total->start_time, energy_data[0].tod);
-	total->elapsed_time = energy_data[n_samples-1].time;
-	INCR_DIF_SAMPLE(total, energy_data, power, n_samples);
-	INCR_DIF_SAMPLE(total, energy_data, cpu_freq, n_samples);
-	return total;
-}
-
-static void _energy_extract_series(
-	FILE* fp, bool put_header, int job, int step,
-	char *node, char *series, void *data, int size_data)
-{
-
-	int n_items, ix;
-	profile_energy_t* energy_data = (profile_energy_t*) data;
-	if (put_header) {
-		fprintf(fp, "Job,Step,Node,Series,Date_Time,Elapsed_Time,"
-			"Power, CPU_Frequency\n");
-	}
-	n_items = size_data / sizeof(profile_energy_t);
-	for (ix=0; ix < n_items; ix++) {
-		fprintf(fp, "%d,%d,%s,%s,%s,%ld,%ld,%ld\n", job, step, node,
-			series, energy_data[ix].tod, energy_data[ix].time,
-			energy_data[ix].power, energy_data[ix].cpu_freq);
-	}
-	return;
-}
-
-static void _energy_extract_total(
-	FILE* fp, bool put_header, int job, int step,
-	char *node, char *series, void *data, int size_data)
-{
-	profile_energy_s_t* energy_data = (profile_energy_s_t*) data;
-	if (put_header) {
-		fprintf(fp, "Job,Step,Node,Series,Start_Time,Elapsed_Time,"
-			"Min_Power,Ave_Power,Max_Power,Total_Power,"
-			"Min_CPU Frequency,Ave_CPU Frequency,"
-			"Max_CPU Frequency,Total_CPU Frequency\n");
-	}
-	fprintf(fp, "%d,%d,%s,%s,%s,%ld", job, step, node, series,
-		energy_data->start_time, energy_data->elapsed_time);
-	PUT_UINT_SUM(fp, energy_data->power, ",");
-	PUT_UINT_SUM(fp, energy_data->cpu_freq, ",");
-	fprintf(fp, "\n");
-	return;
-}
-
-static hdf5_api_ops_t* _energy_profile_factory(void)
-{
-	hdf5_api_ops_t* ops = xmalloc(sizeof(hdf5_api_ops_t));
-	ops->dataset_size = &_energy_dataset_size;
-	ops->create_memory_datatype = &_energy_create_memory_datatype;
-	ops->create_file_datatype = &_energy_create_file_datatype;
-	ops->create_s_memory_datatype = &_energy_s_create_memory_datatype;
-	ops->create_s_file_datatype = &_energy_s_create_file_datatype;
-	ops->init_job_series = &_energy_init_job_series;
-	ops->get_series_tod = &_energy_get_series_tod;
-	ops->get_series_values = &_energy_get_series_values;
-	ops->merge_step_series = &_energy_merge_step_series;
-	ops->series_total = &_energy_series_total;
-	ops->extract_series = &_energy_extract_series;
-	ops->extract_total = &_energy_extract_total;
-	return ops;
-}
-
-
-// ============================================================================
-// Routines supporting I/O Data type
-// ============================================================================
-
-static int _io_dataset_size(void)
-{
-	return sizeof(profile_io_t);
-}
-
-static hid_t _io_create_memory_datatype(void)
-{
-	hid_t   mtyp_io = -1;
-
-	mtyp_io = H5Tcreate(H5T_COMPOUND, sizeof(profile_io_t));
-	if (mtyp_io < 0) {
-		debug3("PROFILE: failed to create IO memory datatype");
-		return -1;
-	}
-	MEM_ADD_DATE_TIME(mtyp_io, "Date_Time", profile_io_t, tod);
-	MEM_ADD_UINT64(mtyp_io, "Time", profile_io_t, time);
-	MEM_ADD_UINT64(mtyp_io, "Reads", profile_io_t, reads);
-	MEM_ADD_DBL(mtyp_io, "Megabytes_Read", profile_io_t, read_size);
-	MEM_ADD_UINT64(mtyp_io, "Writes", profile_io_t, writes);
-	MEM_ADD_DBL(mtyp_io, "Megabytes_Write", profile_io_t, write_size);
-	return mtyp_io;
-}
-
-static hid_t _io_create_file_datatype(void)
-{
-	hid_t   ftyp_io = -1;
-
-	ftyp_io = H5Tcreate(H5T_COMPOUND, TOD_LEN+5*8);
-	if (ftyp_io < 0) {
-		debug3("PROFILE: failed to create IO file datatype");
-		return -1;
-	}
-	moffset = TOD_LEN;
-	FILE_ADD_DATE_TIME(ftyp_io, "Date_Time", 0);
-	FILE_ADD_UINT64(ftyp_io, "Time");
-	FILE_ADD_UINT64(ftyp_io, "Reads");
-	FILE_ADD_DBL(ftyp_io, "Megabytes_Read");
-	FILE_ADD_UINT64(ftyp_io, "Writes");
-	FILE_ADD_DBL(ftyp_io, "Megabytes_Write");
-
-	return ftyp_io;
-}
-
-static hid_t _io_s_create_memory_datatype(void)
-{
-	hid_t   mtyp_io = -1;
-
-	mtyp_io = H5Tcreate(H5T_COMPOUND, sizeof(profile_io_s_t));
-	if (mtyp_io < 0) {
-		debug3("PROFILE: failed to create IO memory datatype");
-		return -1;
-	}
-	MEM_ADD_DATE_TIME(mtyp_io, "Start Time", profile_io_s_t, start_time);
-	MEM_ADD_UINT64(mtyp_io, "Elapsed Time", profile_io_s_t, elapsed_time);
-	MEM_ADD_UINT64(mtyp_io, "Min Reads", profile_io_s_t, reads.min);
-	MEM_ADD_UINT64(mtyp_io, "Ave Reads", profile_io_s_t, reads.ave);
-	MEM_ADD_UINT64(mtyp_io, "Max Reads", profile_io_s_t, reads.max);
-	MEM_ADD_UINT64(mtyp_io, "Total Reads", profile_io_s_t, reads.total);
-	MEM_ADD_DBL(mtyp_io, "Min Read Megabytes",
-		    profile_io_s_t, read_size.min);
-	MEM_ADD_DBL(mtyp_io, "Ave Read Megabytes",
-		    profile_io_s_t, read_size.ave);
-	MEM_ADD_DBL(mtyp_io, "Max Read Megabytes",
-		    profile_io_s_t, read_size.max);
-	MEM_ADD_DBL(mtyp_io, "Total Read Megabytes", profile_io_s_t,
-		    read_size.total);
-	MEM_ADD_UINT64(mtyp_io, "Min Writes", profile_io_s_t, writes.min);
-	MEM_ADD_UINT64(mtyp_io, "Ave Writes", profile_io_s_t, writes.ave);
-	MEM_ADD_UINT64(mtyp_io, "Max Writes", profile_io_s_t, writes.max);
-	MEM_ADD_UINT64(mtyp_io, "Total Writes", profile_io_s_t, writes.total);
-	MEM_ADD_DBL(mtyp_io, "Min Write Megabytes", profile_io_s_t,
-		    write_size.min);
-	MEM_ADD_DBL(mtyp_io, "Ave Write Megabytes", profile_io_s_t,
-		    write_size.ave);
-	MEM_ADD_DBL(mtyp_io, "Max Write Megabytes", profile_io_s_t,
-		    write_size.max);
-	MEM_ADD_DBL(mtyp_io, "Total Write Megabytes", profile_io_s_t,
-		    write_size.total);
-
-	return mtyp_io;
-}
-
-static hid_t _io_s_create_file_datatype(void)
-{
-	hid_t   ftyp_io = -1;
-
-	ftyp_io = H5Tcreate(H5T_COMPOUND, TOD_LEN+17*8);
-	if (ftyp_io < 0) {
-		debug3("PROFILE: failed to create IO file datatype");
-		return -1;
-	}
-	moffset = TOD_LEN;
-	FILE_ADD_DATE_TIME(ftyp_io, "Start Time", 0);
-	FILE_ADD_UINT64(ftyp_io, "Elapsed Time");
-	FILE_ADD_UINT64(ftyp_io, "Min Reads");
-	FILE_ADD_UINT64(ftyp_io, "Ave Reads");
-	FILE_ADD_UINT64(ftyp_io, "Max Reads");
-	FILE_ADD_UINT64(ftyp_io, "Total Reads");
-	FILE_ADD_DBL(ftyp_io, "Min Read Megabytes");
-	FILE_ADD_DBL(ftyp_io, "Ave Read Megabytes");
-	FILE_ADD_DBL(ftyp_io, "Max Read Megabytes");
-	FILE_ADD_DBL(ftyp_io, "Total Read Megabytes");
-	FILE_ADD_UINT64(ftyp_io, "Min Writes");
-	FILE_ADD_UINT64(ftyp_io, "Ave Writes");
-	FILE_ADD_UINT64(ftyp_io, "Max Writes");
-	FILE_ADD_UINT64(ftyp_io, "Total Writes");
-	FILE_ADD_DBL(ftyp_io, "Min Write Megabytes");
-	FILE_ADD_DBL(ftyp_io, "Ave Write Megabytes");
-	FILE_ADD_DBL(ftyp_io, "Max Write Megabytes");
-	FILE_ADD_DBL(ftyp_io, "Total Write Megabytes");
-
-	return ftyp_io;
-}
-
-static void *_io_init_job_series(int n_samples)
-{
-	profile_io_t*  io_data;
-	io_data = xmalloc(n_samples * sizeof(profile_io_t));
-	if (io_data == NULL) {
-		debug3("PROFILE: failed to get memory for combined io data");
-		return NULL;
-	}
-	return (void*) io_data;
-}
-
-static char** _io_get_series_tod(void* data, int nsmp)
-{
-	int ix;
-	char      **tod_values = NULL;
-	profile_io_t* io_series = (profile_io_t*) data;
-	tod_values = (char**) xmalloc(nsmp*sizeof(char*));
-	if (tod_values == NULL) {
-		info("Failed to get memory for io tod");
-		return NULL;
-	}
-	for (ix=0; ix < nsmp; ix++) {
-		tod_values[ix] = xstrdup(io_series[ix].tod);
-	}
-	return tod_values;
-}
-
-static double* _io_get_series_values(char* data_name, void* data, int nsmp)
-{
-	int ix;
-	profile_io_t* io_series = (profile_io_t*) data;
-	double  *io_values = NULL;
-	io_values = xmalloc(nsmp*sizeof(double));
-	if (io_values == NULL) {
-		info("PROFILE: Failed to get memory for io data");
-		return NULL;
-	}
-	if (strcasecmp(data_name,"Time") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			io_values[ix] = (double) io_series[ix].time;
-
-		}
-		return io_values;
-	} else if (strcasecmp(data_name,"Reads") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			io_values[ix] = (double) io_series[ix].reads;
-
-		}
-		return io_values;
-	} else if (strcasecmp(data_name,"Megabytes_Read") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			io_values[ix] = io_series[ix].read_size;
-
-		}
-		return io_values;
-	} else if (strcasecmp(data_name,"Writes") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			io_values[ix] = (double) io_series[ix].writes;
-
-		}
-		return io_values;
-	} else if (strcasecmp(data_name,"Megabytes_Write") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			io_values[ix] = io_series[ix].write_size;
-
-		}
-		return io_values;
-	}
-	xfree(io_values);
-	info("PROFILE: %s is invalid data item for io data", data_name);
-	return NULL;
-}
-
-static void _io_merge_step_series(
-	hid_t group, void *prior, void *cur, void *buf)
-{
-	// This is a difference series
-	static uint64_t start_reads = 0;
-	static uint64_t start_writes = 0;
-	static double start_read_size = 0;
-	static double start_write_size = 0;
-	profile_io_t* prfCur = (profile_io_t*) cur;
-	profile_io_t* prfBuf = (profile_io_t*) buf;
-	struct tm *ts = localtime(&prfCur->time);
-	strftime(prfBuf->tod, TOD_LEN, TOD_FMT, ts);
-	if (prior == NULL) {
-		// First sample.
-		seriesStart = prfCur->time;
-		prfBuf->time = 0;
-		start_reads = prfCur->reads;
-		prfBuf->reads = 0;
-		start_writes = prfCur->writes;
-		prfBuf->writes = 0;
-		start_read_size = prfCur->read_size;
-		prfBuf->read_size = 0;
-		start_write_size = prfCur->write_size;
-		prfBuf->write_size = 0;
-	} else {
-		prfBuf->time = prfCur->time - seriesStart;
-		prfBuf->reads = prfCur->reads - start_reads;
-		prfBuf->writes = prfCur->writes - start_writes;
-		prfBuf->read_size = prfCur->read_size - start_read_size;
-		prfBuf->write_size = prfCur->write_size - start_write_size;
-	}
-	return;
-}
-
-static void *_io_series_total(int n_samples, void *data)
-{
-	profile_io_t* io_data;
-	profile_io_s_t* total;
-	if (n_samples < 1)
-		return NULL;
-	io_data = (profile_io_t*) data;
-	total = xmalloc(sizeof(profile_io_s_t));
-	if (total == NULL) {
-		error("PROFILE: Out of memory getting I/O total");
-		return NULL;
-	}
-	// Assuming io series are a running total, and the first
-	// sample just sets the initial conditions
-	strcpy(total->start_time, io_data[0].tod);
-	total->elapsed_time = io_data[n_samples-1].time;
-	INCR_DIF_SAMPLE(total, io_data, reads, n_samples);
-	INCR_DIF_SAMPLE(total, io_data, read_size, n_samples);
-	INCR_DIF_SAMPLE(total, io_data, writes, n_samples);
-	INCR_DIF_SAMPLE(total, io_data, write_size, n_samples);
-	return total;
-}
-
-static void _io_extract_series(
-	FILE* fp, bool put_header, int job, int step,
-	char *node, char *series, void *data, int size_data)
-{
-	int n_items, ix;
-	profile_io_t* io_data = (profile_io_t*) data;
-	if (put_header) {
-		fprintf(fp,"Job,Step,Node,Series,Date_Time,Elapsed_time,"
-			"Reads,Read Megabytes,Writes,Write Megabytes\n");
-	}
-	n_items = size_data / sizeof(profile_io_t);
-	for (ix=0; ix < n_items; ix++) {
-		fprintf(fp,"%d,%d,%s,%s,%s,%ld,%ld,%.3f,%ld,%.3f\n",
-			job, step, node, series,
-			io_data[ix].tod, io_data[ix].time,
-			io_data[ix].reads, io_data[ix].read_size,
-			io_data[ix].writes, io_data[ix].write_size);
-	}
-	return;
-}
-
-static void _io_extract_total(
-	FILE* fp, bool put_header, int job, int step,
-	char *node, char *series, void *data, int size_data)
-{
-	profile_io_s_t* io_data = (profile_io_s_t*) data;
-	if (put_header) {
-		fprintf(fp,"Job,Step,Node,Series,Start_Time,Elapsed_time,"
-			"Min_Reads,Ave_Reads,Max_Reads,Total_Reads,"
-			"Min_Read_Megabytes,Ave_Read_Megabytes,"
-			"Max_Read_Megabytes,Total_Read_Megabytes,"
-			"Min_Writes,Ave_Writes,Max_Writes,Total_Writes,"
-			"Min_Write_Megabytes,Ave_Write_Megabytes,"
-			"Max_Write_Megabytes,Total_Write_Megabytes\n");
-	}
-	fprintf(fp, "%d,%d,%s,%s,%s,%ld", job, step, node, series,
-		io_data->start_time, io_data->elapsed_time);
-	PUT_UINT_SUM(fp, io_data->reads, ",");
-	PUT_DBL_SUM(fp, io_data->read_size, ",");
-	PUT_UINT_SUM(fp, io_data->writes, ",");
-	PUT_DBL_SUM(fp, io_data->write_size, ",");
-	fprintf(fp, "\n");
-	return;
-}
-
-static hdf5_api_ops_t* _io_profile_factory(void)
-{
-	hdf5_api_ops_t* ops = xmalloc(sizeof(hdf5_api_ops_t));
-	ops->dataset_size = &_io_dataset_size;
-	ops->create_memory_datatype = &_io_create_memory_datatype;
-	ops->create_file_datatype = &_io_create_file_datatype;
-	ops->create_s_memory_datatype = &_io_s_create_memory_datatype;
-	ops->create_s_file_datatype = &_io_s_create_file_datatype;
-	ops->init_job_series = &_io_init_job_series;
-	ops->get_series_tod = &_io_get_series_tod;
-	ops->get_series_values = &_io_get_series_values;
-	ops->merge_step_series = &_io_merge_step_series;
-	ops->series_total = &_io_series_total;
-	ops->extract_series = &_io_extract_series;
-	ops->extract_total = &_io_extract_total;
-	return ops;
-}
-
-
-// ============================================================================
-// Routines supporting Network Data type
-// ============================================================================
-
-static int _network_dataset_size(void)
-{
-	return sizeof(profile_network_t);
-}
-
-static hid_t _network_create_memory_datatype(void)
-{
-	hid_t   mtyp_network = H5Tcreate(H5T_COMPOUND,
-					 sizeof(profile_network_t));
-	if (mtyp_network < 0) {
-		debug3("PROFILE: failed to create Network memory datatype");
-		return -1;
-	}
-	MEM_ADD_DATE_TIME(mtyp_network, "Date_Time", profile_network_t, tod);
-	MEM_ADD_UINT64(mtyp_network, "Time", profile_network_t, time);
-	MEM_ADD_UINT64(mtyp_network, "Packets_In",
-		       profile_network_t, packets_in);
-	MEM_ADD_DBL(mtyp_network, "Megabytes_In", profile_network_t, size_in);
-	MEM_ADD_UINT64(mtyp_network, "Packets_Out",
-		       profile_network_t, packets_out);
-	MEM_ADD_DBL(mtyp_network, "Megabytes_Out", profile_network_t, size_out);
-
-	return mtyp_network;
-}
-
-static hid_t _network_create_file_datatype(void)
-{
-	hid_t   ftyp_network = H5Tcreate(H5T_COMPOUND, TOD_LEN+5*8);
-	if (ftyp_network < 0) {
-		debug3("PROFILE: failed to create Network file datatype");
-		return -1;
-	}
-	moffset = TOD_LEN;
-	FILE_ADD_DATE_TIME(ftyp_network, "Date_Time", 0);
-	FILE_ADD_UINT64(ftyp_network, "Time");
-	FILE_ADD_UINT64(ftyp_network, "Packets_In");
-	FILE_ADD_DBL(ftyp_network, "Megabytes_In");
-	FILE_ADD_UINT64(ftyp_network, "Packets_Out");
-	FILE_ADD_DBL(ftyp_network, "Megabytes_Out");
-
-	return ftyp_network;
-}
-
-static hid_t _network_s_create_memory_datatype(void)
-{
-	hid_t   mtyp_network = -1;
-
-	mtyp_network = H5Tcreate(H5T_COMPOUND, sizeof(profile_network_s_t));
-	if (mtyp_network < 0) {
-		debug3("PROFILE: failed to create Network memory datatype");
-		return -1;
-	}
-	MEM_ADD_DATE_TIME(mtyp_network, "Start Time", profile_network_s_t,
-			  start_time);
-	MEM_ADD_UINT64(mtyp_network, "Elapsed Time", profile_network_s_t,
-		       elapsed_time);
-	MEM_ADD_UINT64(mtyp_network, "Min Packets In", profile_network_s_t,
-		       packets_in.min);
-	MEM_ADD_UINT64(mtyp_network, "Ave Packets In", profile_network_s_t,
-		       packets_in.ave);
-	MEM_ADD_UINT64(mtyp_network, "Max Packets In", profile_network_s_t,
-		       packets_in.max);
-	MEM_ADD_UINT64(mtyp_network, "Total Packets In", profile_network_s_t,
-		       packets_in.total);
-	MEM_ADD_DBL(mtyp_network, "Min Megabytes In", profile_network_s_t,
-		    size_in.min);
-	MEM_ADD_DBL(mtyp_network, "Ave Megabytes In", profile_network_s_t,
-		    size_in.ave);
-	MEM_ADD_DBL(mtyp_network, "Max Megabytes In", profile_network_s_t,
-		    size_in.max);
-	MEM_ADD_DBL(mtyp_network, "Total Megabytes In", profile_network_s_t,
-		    size_in.total);
-	MEM_ADD_UINT64(mtyp_network, "Min Packets Out", profile_network_s_t,
-		       packets_out.min);
-	MEM_ADD_UINT64(mtyp_network, "Ave Packets Out", profile_network_s_t,
-		       packets_out.ave);
-	MEM_ADD_UINT64(mtyp_network, "Max Packets Out", profile_network_s_t,
-		       packets_out.max);
-	MEM_ADD_UINT64(mtyp_network, "Total Packets Out", profile_network_s_t,
-		       packets_out.total);
-	MEM_ADD_DBL(mtyp_network, "Min Megabytes Out", profile_network_s_t,
-		    size_out.min);
-	MEM_ADD_DBL(mtyp_network, "Ave Megabytes Out", profile_network_s_t,
-		    size_out.ave);
-	MEM_ADD_DBL(mtyp_network, "Max Megabytes Out", profile_network_s_t,
-		    size_out.max);
-	MEM_ADD_DBL(mtyp_network, "Total Megabytes Out", profile_network_s_t,
-		    size_out.total);
-
-	return mtyp_network;
-}
-
-static hid_t _network_s_create_file_datatype(void)
-{
-	hid_t   ftyp_network = H5Tcreate(H5T_COMPOUND, TOD_LEN+17*8);
-	if (ftyp_network < 0) {
-		debug3("PROFILE: failed to create Network file datatype");
-		return -1;
-	}
-	moffset = TOD_LEN;
-	FILE_ADD_DATE_TIME(ftyp_network, "Start Time", 0);
-	FILE_ADD_UINT64(ftyp_network, "Elapsed Time");
-	FILE_ADD_UINT64(ftyp_network, "Min Packets In");
-	FILE_ADD_UINT64(ftyp_network, "Ave Packets In");
-	FILE_ADD_UINT64(ftyp_network, "Max Packets In");
-	FILE_ADD_UINT64(ftyp_network, "Total Packets In");
-	FILE_ADD_DBL(ftyp_network, "Min Megabytes In");
-	FILE_ADD_DBL(ftyp_network, "Ave Megabytes In");
-	FILE_ADD_DBL(ftyp_network, "Max Megabytes In");
-	FILE_ADD_DBL(ftyp_network, "Total Megabytes In");
-	FILE_ADD_UINT64(ftyp_network, "Min Packets Out");
-	FILE_ADD_UINT64(ftyp_network, "Ave Packets Out");
-	FILE_ADD_UINT64(ftyp_network, "Max Packets Out");
-	FILE_ADD_UINT64(ftyp_network, "Total Packets Out");
-	FILE_ADD_DBL(ftyp_network, "Min Megabytes Out");
-	FILE_ADD_DBL(ftyp_network, "Ave Megabytes Out");
-	FILE_ADD_DBL(ftyp_network, "Max Megabytes Out");
-	FILE_ADD_DBL(ftyp_network, "Total Megabytes Out");
-
-	return ftyp_network;
-}
-
-static void *_network_init_job_series(int n_samples)
-{
-	profile_network_t*  network_data;
-
-	network_data = xmalloc(n_samples * sizeof(profile_network_t));
-	if (network_data == NULL) {
-		debug3("PROFILE: failed to get memory for network data");
-		return NULL;
-	}
-	return (void*) network_data;
-}
-
-static char** _network_get_series_tod(void* data, int nsmp)
-{
-	int ix;
-	char      **tod_values = NULL;
-	profile_network_t* network_series = (profile_network_t*) data;
-	tod_values = (char**) xmalloc(nsmp*sizeof(char*));
-	if (tod_values == NULL) {
-		info("Failed to get memory for network tod");
-		return NULL;
-	}
-	for (ix=0; ix < nsmp; ix++) {
-		tod_values[ix] = xstrdup(network_series[ix].tod);
-	}
-	return tod_values;
-}
-
-static double* _network_get_series_values(char* data_name, void* data, int nsmp)
-{
-	int ix;
-	profile_network_t* network_series = (profile_network_t*) data;
-	double  *network_values = NULL;
-	network_values = xmalloc(nsmp*sizeof(double));
-	if (network_values == NULL) {
-		info("PROFILE: Failed to get memory for network data");
-		return NULL;
-	}
-	if (strcasecmp(data_name,"Time") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			network_values[ix] = (double) network_series[ix].time;
-
-		}
-		return network_values;
-	} else if (strcasecmp(data_name,"Packets_In") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			network_values[ix] =
-					(double) network_series[ix].packets_in;
-
-		}
-		return network_values;
-	} else if (strcasecmp(data_name,"Megabytes_In") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			network_values[ix] = network_series[ix].size_in;
-
-		}
-		return network_values;
-	} else if (strcasecmp(data_name,"Packets_Out") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			network_values[ix] =
-					(double) network_series[ix].packets_out;
-
-		}
-		return network_values;
-	} else if (strcasecmp(data_name,"Megabytes_Out") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			network_values[ix] = network_series[ix].size_out;
-
-		}
-		return network_values;
-	}
-	xfree(network_values);
-	info("PROFILE: %s is invalid data item for network data", data_name);
-	return NULL;
-}
-
-static void _network_merge_step_series(
-	hid_t group, void *prior, void *cur, void *buf)
-{
-// This is a difference series
-	profile_network_t* prf_cur = (profile_network_t*) cur;
-	profile_network_t* prf_buf = (profile_network_t*) buf;
-	struct tm *ts = localtime(&prf_cur->time);
-	strftime(prf_buf->tod, TOD_LEN, TOD_FMT, ts);
-	if (prior == NULL) {
-		// First sample.
-		seriesStart = prf_cur->time;
-		prf_buf->time = 0;
-	} else {
-		prf_buf->time = prf_cur->time - seriesStart;
-	}
-	prf_buf->packets_in = prf_cur->packets_in;
-	prf_buf->packets_out = prf_cur->packets_out;
-	prf_buf->size_in = prf_cur->size_in;
-	prf_buf->size_out = prf_cur->size_out;
-	return;
-}
-
-static void *_network_series_total(int n_samples, void *data)
-{
-	profile_network_t* network_data;
-	profile_network_s_t* total;
-	if (n_samples < 1)
-		return NULL;
-	network_data = (profile_network_t*) data;
-	total = xmalloc(sizeof(profile_network_s_t));
-	if (total == NULL) {
-		error("PROFILE: Out of memory getting network total");
-		return NULL;
-	}
-	// Assuming network series are a running total, and the first
-	// sample just sets the initial conditions
-	strcpy(total->start_time, network_data[0].tod);
-	total->elapsed_time = network_data[n_samples-1].time;
-	INCR_DIF_SAMPLE(total, network_data, packets_in, n_samples);
-	INCR_DIF_SAMPLE(total, network_data, size_in, n_samples);
-	INCR_DIF_SAMPLE(total, network_data, packets_out, n_samples);
-	INCR_DIF_SAMPLE(total, network_data, size_out, n_samples);
-	return total;
-}
-
-static void _network_extract_series(
-	FILE* fp, bool put_header, int job, int step,
-	char *node, char *series, void *data, int size_data)
-{
-	int n_items, ix;
-	profile_network_t* network_data = (profile_network_t*) data;
-
-	if (put_header) {
-		fprintf(fp,"Job,Step,Node,Series,Date_Time,Elapsed_time,"
-			"Packets_In,MegaBytes_In,Packets_Out,MegaBytes_Out\n");
-	}
-	n_items = size_data / sizeof(profile_network_t);
-	for (ix=0; ix < n_items; ix++) {
-		fprintf(fp,"%d,%d,%s,%s,%s,%ld,%ld,%.3f,%ld,%.3f\n",
-			job, step, node,series,
-			network_data[ix].tod, network_data[ix].time,
-			network_data[ix].packets_in, network_data[ix].size_in,
-			network_data[ix].packets_out,
-			network_data[ix].size_out);
-	}
-	return;
-}
-
-static void _network_extract_total(
-	FILE* fp, bool put_header, int job, int step,
-	char *node, char *series, void *data, int size_data)
-{
-	profile_network_s_t* network_data = (profile_network_s_t*) data;
-	if (put_header) {
-		fprintf(fp,"Job,Step,Node,Series,Start_Time,Elapsed_time,"
-			"Min_Packets_In,Ave_Packets_In,"
-			"Max_Packets_In,Total_Packets_In,"
-			"Min_Megabytes_In,Ave_Megabytes_In,"
-			"Max_Megabytes_In,Total_Megabytes_In,"
-			"Min_Packets_Out,Ave_Packets_Out,"
-			"Max_Packets_Out,Total_Packets_Out,"
-			"Min_Megabytes_Out,Ave_Megabytes_Out,"
-			"Max_Megabytes_Out,Total_Megabytes_Out\n");
-	}
-	fprintf(fp, "%d,%d,%s,%s,%s,%ld", job, step, node, series,
-		network_data->start_time, network_data->elapsed_time);
-	PUT_UINT_SUM(fp, network_data->packets_in, ",");
-	PUT_DBL_SUM(fp, network_data->size_in, ",");
-	PUT_UINT_SUM(fp, network_data->packets_out, ",");
-	PUT_DBL_SUM(fp, network_data->size_out, ",");
-	fprintf(fp, "\n");
-	return;
-}
-
-static hdf5_api_ops_t *_network_profile_factory(void)
-{
-	hdf5_api_ops_t* ops = xmalloc(sizeof(hdf5_api_ops_t));
-	ops->dataset_size = &_network_dataset_size;
-	ops->create_memory_datatype = &_network_create_memory_datatype;
-	ops->create_file_datatype = &_network_create_file_datatype;
-	ops->create_s_memory_datatype = &_network_s_create_memory_datatype;
-	ops->create_s_file_datatype = &_network_s_create_file_datatype;
-	ops->init_job_series = &_network_init_job_series;
-	ops->get_series_tod = &_network_get_series_tod;
-	ops->get_series_values = &_network_get_series_values;
-	ops->merge_step_series = &_network_merge_step_series;
-	ops->series_total = &_network_series_total;
-	ops->extract_series = &_network_extract_series;
-	ops->extract_total = &_network_extract_total;
-	return ops;
-}
-
-// ============================================================================
-// Routines supporting Task Data type
-// ============================================================================
-
-static int _task_dataset_size(void)
-{
-	return sizeof(profile_task_t);
-}
-
-static hid_t _task_create_memory_datatype(void)
-{
-	hid_t   mtyp_task = H5Tcreate(H5T_COMPOUND, sizeof(profile_task_t));
-	if (mtyp_task < 0) {
-		debug3("PROFILE: failed to create Task memory datatype");
-		return -1;
-	}
-	MEM_ADD_DATE_TIME(mtyp_task, "Date_Time", profile_task_t, tod);
-	MEM_ADD_UINT64(mtyp_task, "Time", profile_task_t, time);
-	MEM_ADD_UINT64(mtyp_task, "CPU_Frequency", profile_task_t, cpu_freq);
-	MEM_ADD_UINT64(mtyp_task, "CPU_Time", profile_task_t, cpu_time);
-	MEM_ADD_DBL(mtyp_task, "CPU_Utilization",
-		    profile_task_t, cpu_utilization);
-	MEM_ADD_UINT64(mtyp_task, "RSS", profile_task_t, rss);
-	MEM_ADD_UINT64(mtyp_task, "VM_Size", profile_task_t, vm_size);
-	MEM_ADD_UINT64(mtyp_task, "Pages", profile_task_t, pages);
-	MEM_ADD_DBL(mtyp_task, "Read_Megabytes", profile_task_t, read_size);
-	MEM_ADD_DBL(mtyp_task, "Write_Megabytes", profile_task_t, write_size);
-
-	return mtyp_task;
-}
-
-static hid_t _task_create_file_datatype(void)
-{
-	hid_t   ftyp_task = H5Tcreate(H5T_COMPOUND, TOD_LEN+9*8);
-	if (ftyp_task < 0) {
-		debug3("PROFILE: failed to create Task file datatype");
-		return -1;
-	}
-	moffset = TOD_LEN;
-	FILE_ADD_DATE_TIME(ftyp_task, "Date_Time", 0);
-	FILE_ADD_UINT64(ftyp_task, "Time");
-	FILE_ADD_UINT64(ftyp_task, "CPU_Frequency");
-	FILE_ADD_UINT64(ftyp_task, "CPU_Time");
-	FILE_ADD_DBL(ftyp_task, "CPU_Utilization");
-	FILE_ADD_UINT64(ftyp_task, "RSS");
-	FILE_ADD_UINT64(ftyp_task, "VM_Size");
-	FILE_ADD_UINT64(ftyp_task, "Pages");
-	FILE_ADD_DBL(ftyp_task, "Read_Megabytes");
-	FILE_ADD_DBL(ftyp_task, "Write_Megabytes");
-
-	return ftyp_task;
-}
-
-static hid_t _task_s_create_memory_datatype(void)
-{
-	hid_t   mtyp_task = H5Tcreate(H5T_COMPOUND, sizeof(profile_task_s_t));
-	if (mtyp_task < 0) {
-		debug3("PROFILE: failed to create Task memory datatype");
-		return -1;
-	}
-	MEM_ADD_DATE_TIME(mtyp_task, "Start Time", profile_task_s_t,
-			  start_time);
-	MEM_ADD_UINT64(mtyp_task, "Elapsed Time", profile_task_s_t,
-		       elapsed_time);
-	MEM_ADD_UINT64(mtyp_task, "Min CPU Frequency", profile_task_s_t,
-		       cpu_freq.min);
-	MEM_ADD_UINT64(mtyp_task, "Ave CPU Frequency", profile_task_s_t,
-		       cpu_freq.ave);
-	MEM_ADD_UINT64(mtyp_task, "Max CPU Frequency", profile_task_s_t,
-		       cpu_freq.max);
-	MEM_ADD_UINT64(mtyp_task, "Total CPU Frequency", profile_task_s_t,
-		       cpu_freq.total);
-	MEM_ADD_UINT64(mtyp_task, "Min CPU Time", profile_task_s_t,
-		       cpu_time.min);
-	MEM_ADD_UINT64(mtyp_task, "Ave CPU Time", profile_task_s_t,
-		       cpu_time.ave);
-	MEM_ADD_UINT64(mtyp_task, "Max CPU Time", profile_task_s_t,
-		       cpu_time.max);
-	MEM_ADD_UINT64(mtyp_task, "Total CPU Time", profile_task_s_t,
-		       cpu_time.total);
-	MEM_ADD_DBL(mtyp_task, "Min CPU Utilization", profile_task_s_t,
-		    cpu_utilization.min);
-	MEM_ADD_DBL(mtyp_task, "Ave CPU Utilization", profile_task_s_t,
-		    cpu_utilization.ave);
-	MEM_ADD_DBL(mtyp_task, "Max CPU Utilization", profile_task_s_t,
-		    cpu_utilization.max);
-	MEM_ADD_DBL(mtyp_task, "Total CPU Utilization", profile_task_s_t,
-		    cpu_utilization.total);
-	MEM_ADD_UINT64(mtyp_task, "Min RSS", profile_task_s_t, rss.min);
-	MEM_ADD_UINT64(mtyp_task, "Ave RSS", profile_task_s_t, rss.ave);
-	MEM_ADD_UINT64(mtyp_task, "Max RSS", profile_task_s_t, rss.max);
-	MEM_ADD_UINT64(mtyp_task, "Total RSS", profile_task_s_t, rss.total);
-	MEM_ADD_UINT64(mtyp_task, "Min VM Size", profile_task_s_t, vm_size.min);
-	MEM_ADD_UINT64(mtyp_task, "Ave VM Size", profile_task_s_t, vm_size.ave);
-	MEM_ADD_UINT64(mtyp_task, "Max VM Size", profile_task_s_t, vm_size.max);
-	MEM_ADD_UINT64(mtyp_task, "Total VM Size",
-		       profile_task_s_t, vm_size.total);
-	MEM_ADD_UINT64(mtyp_task, "Min Pages", profile_task_s_t, pages.min);
-	MEM_ADD_UINT64(mtyp_task, "Ave Pages", profile_task_s_t, pages.ave);
-	MEM_ADD_UINT64(mtyp_task, "Max Pages", profile_task_s_t, pages.max);
-	MEM_ADD_UINT64(mtyp_task, "Total Pages", profile_task_s_t, pages.total);
-	MEM_ADD_DBL(mtyp_task, "Min Read Megabytes", profile_task_s_t,
-		    read_size.min);
-	MEM_ADD_DBL(mtyp_task, "Ave Read Megabytes", profile_task_s_t,
-		    read_size.ave);
-	MEM_ADD_DBL(mtyp_task, "Max Read Megabytes", profile_task_s_t,
-		    read_size.max);
-	MEM_ADD_DBL(mtyp_task, "Total Read Megabytes", profile_task_s_t,
-		    read_size.total);
-	MEM_ADD_DBL(mtyp_task, "Min Write Megabytes", profile_task_s_t,
-		    write_size.min);
-	MEM_ADD_DBL(mtyp_task, "Ave Write Megabytes", profile_task_s_t,
-		    write_size.ave);
-	MEM_ADD_DBL(mtyp_task, "Max Write Megabytes", profile_task_s_t,
-		    write_size.max);
-	MEM_ADD_DBL(mtyp_task, "Total Write Megabytes", profile_task_s_t,
-		    write_size.total);
-
-	return mtyp_task;
-}
-
-static hid_t _task_s_create_file_datatype(void)
-{
-	hid_t   ftyp_task = H5Tcreate(H5T_COMPOUND, TOD_LEN+33*8);
-	if (ftyp_task < 0) {
-		debug3("PROFILE: failed to create Task file datatype");
-		return -1;
-	}
-	moffset = TOD_LEN;
-	FILE_ADD_DATE_TIME(ftyp_task, "Start Time", 0);
-	FILE_ADD_UINT64(ftyp_task, "Elapsed Time");
-	FILE_ADD_UINT64(ftyp_task, "Min CPU Frequency");
-	FILE_ADD_UINT64(ftyp_task, "Ave CPU Frequency");
-	FILE_ADD_UINT64(ftyp_task, "Max CPU Frequency");
-	FILE_ADD_UINT64(ftyp_task, "Total CPU Frequency");
-	FILE_ADD_UINT64(ftyp_task, "Min CPU Time");
-	FILE_ADD_UINT64(ftyp_task, "Ave CPU Time");
-	FILE_ADD_UINT64(ftyp_task, "Max CPU Time");
-	FILE_ADD_UINT64(ftyp_task, "Total CPU Time");
-	FILE_ADD_DBL(ftyp_task, "Min CPU Utilization");
-	FILE_ADD_DBL(ftyp_task, "Ave CPU Utilization");
-	FILE_ADD_DBL(ftyp_task, "Max CPU Utilization");
-	FILE_ADD_DBL(ftyp_task, "Total CPU Utilization");
-	FILE_ADD_UINT64(ftyp_task, "Min RSS");
-	FILE_ADD_UINT64(ftyp_task, "Ave RSS");
-	FILE_ADD_UINT64(ftyp_task, "Max RSS");
-	FILE_ADD_UINT64(ftyp_task, "Total RSS");
-	FILE_ADD_UINT64(ftyp_task, "Min VM Size");
-	FILE_ADD_UINT64(ftyp_task, "Ave VM Size");
-	FILE_ADD_UINT64(ftyp_task, "Max VM Size");
-	FILE_ADD_UINT64(ftyp_task, "Total VM Size");
-	FILE_ADD_UINT64(ftyp_task, "Min Pages");
-	FILE_ADD_UINT64(ftyp_task, "Ave Pages");
-	FILE_ADD_UINT64(ftyp_task, "Max Pages");
-	FILE_ADD_UINT64(ftyp_task, "Total Pages");
-	FILE_ADD_DBL(ftyp_task, "Min Read Megabytes");
-	FILE_ADD_DBL(ftyp_task, "Ave Read Megabytes");
-	FILE_ADD_DBL(ftyp_task, "Max Read Megabytes");
-	FILE_ADD_DBL(ftyp_task, "Total Read Megabytes");
-	FILE_ADD_DBL(ftyp_task, "Min Write Megabytes");
-	FILE_ADD_DBL(ftyp_task, "Ave Write Megabytes");
-	FILE_ADD_DBL(ftyp_task, "Max Write Megabytes");
-	FILE_ADD_DBL(ftyp_task, "Total Write Megabytes");
-
-	return ftyp_task;
-}
-
-static void *_task_init_job_series(int n_samples)
-{
-	profile_task_t*  task_data;
-	task_data = xmalloc(n_samples * sizeof(profile_task_t));
-	if (task_data == NULL) {
-		debug3("PROFILE: failed to get memory for combined task data");
-		return NULL;
-	}
-	return (void*) task_data;
-}
-
-static char** _task_get_series_tod(void* data, int nsmp)
-{
-	int ix;
-	char      **tod_values = NULL;
-	profile_task_t* task_series = (profile_task_t*) data;
-	tod_values = (char**) xmalloc(nsmp*sizeof(char*));
-	if (tod_values == NULL) {
-		info("Failed to get memory for task tod");
-		return NULL;
-	}
-	for (ix=0; ix < nsmp; ix++) {
-		tod_values[ix] = xstrdup(task_series[ix].tod);
-	}
-	return tod_values;
-}
-
-static double* _task_get_series_values(char* data_name, void* data, int nsmp)
-{
-	int ix;
-	profile_task_t* task_series = (profile_task_t*) data;
-	double  *task_values = NULL;
-	task_values = xmalloc(nsmp*sizeof(double));
-	if (task_values == NULL) {
-		info("PROFILE: Failed to get memory for task data");
-		return NULL;
-	}
-	if (strcasecmp(data_name,"Time") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			task_values[ix] = (double) task_series[ix].time;
-
-		}
-		return task_values;
-	} else if (strcasecmp(data_name,"CPU_Frequency") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			task_values[ix] = (double) task_series[ix].cpu_freq;
-
-		}
-		return task_values;
-	} else if (strcasecmp(data_name,"CPU_Time") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			task_values[ix] = (double) task_series[ix].cpu_time;
-
-		}
-		return task_values;
-	} else if (strcasecmp(data_name,"CPU_Utilization") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			task_values[ix] = task_series[ix].cpu_utilization;
-
-		}
-		return task_values;
-	} else if (strcasecmp(data_name,"RSS") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			task_values[ix] = (double) task_series[ix].rss;
-
-		}
-		return task_values;
-	} else if (strcasecmp(data_name,"VM_Size") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			task_values[ix] = (double) task_series[ix].vm_size;
-
-		}
-		return task_values;
-	} else if (strcasecmp(data_name,"Pages") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			task_values[ix] = (double) task_series[ix].pages;
-
-		}
-		return task_values;
-	} else if (strcasecmp(data_name,"Read_Megabytes") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			task_values[ix] = task_series[ix].read_size;
-
-		}
-		return task_values;
-	} else if (strcasecmp(data_name,"Write_Megabytes") == 0) {
-		for (ix=0; ix < nsmp; ix++) {
-			task_values[ix] = task_series[ix].write_size;
-
-		}
-		return task_values;
-	}
-	xfree(task_values);
-	info("PROFILE: %s is invalid data item for task data", data_name);
-	return NULL;
-}
-
-static void _task_merge_step_series(
-	hid_t group, void *prior, void *cur, void *buf)
-{
-// This is a running total series
-	profile_task_t* prf_prior = (profile_task_t*) prior;
-	profile_task_t* prf_cur = (profile_task_t*) cur;
-	profile_task_t* buf_prv = NULL;
-	profile_task_t* buf_cur = (profile_task_t*) buf;
-
-	struct tm *ts;
-	ts = localtime(&prf_cur->time);
-	strftime(buf_cur->tod, TOD_LEN, TOD_FMT, ts);
-	if (prf_prior == NULL) {
-		// First sample.
-		seriesStart = prf_cur->time;
-		buf_cur->time = 0;
-		buf_cur->cpu_time = 0;
-		buf_cur->cpu_utilization = 0;
-		buf_cur->read_size = 0.0;
-		buf_cur->write_size = 0.0;
-	} else {
-		buf_prv = buf_cur - 1;
-		buf_cur->time = prf_cur->time - seriesStart;
-		buf_cur->cpu_time = prf_cur->cpu_time - prf_prior->cpu_time;
-		buf_cur->cpu_utilization = 100.0*((double) buf_cur->cpu_time /
-				(double) (buf_cur->time - buf_prv->time));
-		buf_cur->read_size =
-			prf_cur->read_size - prf_prior->read_size;
-		buf_cur->write_size =
-			prf_cur->write_size - prf_prior->write_size;
-	}
-	buf_cur->cpu_freq = prf_cur->cpu_freq;
-	buf_cur->rss = prf_cur->rss;
-	buf_cur->vm_size = prf_cur->vm_size;
-	buf_cur->pages = prf_cur->pages;
-	return;
-}
-
-static void *_task_series_total(int n_samples, void *data)
-{
-	profile_task_t* task_data;
-	profile_task_s_t* total;
-	task_data = (profile_task_t*) data;
-	total = xmalloc(sizeof(profile_task_s_t));
-	if (total == NULL) {
-		error("PROFILE: Out of memory getting task total");
-		return NULL;
-	}
-	strcpy(total->start_time, task_data[0].tod);
-	total->elapsed_time = task_data[n_samples-1].time;
-	INCR_DIF_SAMPLE(total, task_data, cpu_freq, n_samples);
-	INCR_RT_SAMPLE(total, task_data, cpu_time, n_samples);
-	INCR_DIF_SAMPLE(total, task_data, cpu_utilization, n_samples);
-	INCR_DIF_SAMPLE(total, task_data, rss, n_samples);
-	INCR_DIF_SAMPLE(total, task_data, vm_size , n_samples);
-	INCR_DIF_SAMPLE(total, task_data, pages, n_samples);
-	INCR_RT_SAMPLE(total, task_data, read_size, n_samples);
-	INCR_RT_SAMPLE(total, task_data, write_size, n_samples);
-	return total;
-}
-
-static void _task_extract_series(
-	FILE* fp, bool put_header, int job, int step,
-	char *node, char *series, void *data, int size_data)
-{
-	int n_items, ix;
-	profile_task_t* task_data = (profile_task_t*) data;
-	if (put_header) {
-		fprintf(fp,"Job,Step,Node,Series,Date Time,ElapsedTime,"
-			"CPU Frequency,CPU Time,"
-			"CPU Utilization,rss,VM Size,Pages,"
-			"Read_bytes,Write_bytes\n");
-	}
-	n_items = size_data / sizeof(profile_task_t);
-	for (ix=0; ix < n_items; ix++) {
-		fprintf(fp,"%d,%d,%s,%s,%s,%ld,%ld,%ld,%.3f",
-			job, step, node, series,
-			task_data[ix].tod, task_data[ix].time,
-			task_data[ix].cpu_freq,
-			task_data[ix].cpu_time, task_data[ix].cpu_utilization);
-		fprintf(fp,",%ld,%ld,%ld,%.3f,%.3f\n",	task_data[ix].rss,
-			task_data[ix].vm_size, task_data[ix].pages,
-			task_data[ix].read_size, task_data[ix].write_size);
-	}
-	return;
-}
-
-static void _task_extract_total(
-	FILE* fp, bool put_header, int job, int step,
-	char *node, char *series, void *data, int size_data)
-{
-
-	profile_task_s_t* task_data = (profile_task_s_t*) data;
-	if (put_header) {
-		fprintf(fp,"Job,Step,Node,Series,Start_Time,Elapsed_time,"
-			"Min CPU Frequency,Ave CPU Frequency,"
-			"Ave CPU Frequency,Total CPU Frequency,"
-			"Min_CPU_Time,Ave_CPU_Time,"
-			"Max_CPU_Time,Total_CPU_Time,"
-			"Min_CPU_Utilization,Ave_CPU_Utilization,"
-			"Max_CPU_Utilization,Total_CPU_Utilization,"
-			"Min_RSS,Ave_RSS,Max_RSS,Total_RSS,"
-			"Min_VMSize,Ave_VMSize,Max_VMSize,Total_VMSize,"
-			"Min_Pages,Ave_Pages,Max_Pages,Total_Pages,"
-			"Min_Read_Megabytes,Ave_Read_Megabytes,"
-			"Max_Read_Megabytes,Total_Read_Megabytes,"
-			"Min_Write_Megabytes,Ave_Write_Megabytes,"
-			"Max_Write_Megabytes,Total_Write_Megabytes\n");
-	}
-	fprintf(fp, "%d,%d,%s,%s,%s,%ld", job, step, node, series,
-		task_data->start_time, task_data->elapsed_time);
-	PUT_UINT_SUM(fp, task_data->cpu_freq, ",");
-	PUT_UINT_SUM(fp, task_data->cpu_time, ",");
-	PUT_DBL_SUM(fp, task_data->cpu_utilization, ",");
-	PUT_UINT_SUM(fp, task_data->rss, ",");
-	PUT_UINT_SUM(fp, task_data->vm_size, ",");
-	PUT_UINT_SUM(fp, task_data->pages, ",");
-	PUT_DBL_SUM(fp, task_data->read_size, ",");
-	PUT_DBL_SUM(fp, task_data->write_size, ",");
-	fprintf(fp, "\n");
-	return;
-}
-
-static hdf5_api_ops_t *_task_profile_factory(void)
-{
-	hdf5_api_ops_t* ops = xmalloc(sizeof(hdf5_api_ops_t));
-	ops->dataset_size = &_task_dataset_size;
-	ops->create_memory_datatype = &_task_create_memory_datatype;
-	ops->create_file_datatype = &_task_create_file_datatype;
-	ops->create_s_memory_datatype = &_task_s_create_memory_datatype;
-	ops->create_s_file_datatype = &_task_s_create_file_datatype;
-	ops->init_job_series = &_task_init_job_series;
-	ops->get_series_tod = &_task_get_series_tod;
-	ops->get_series_values = &_task_get_series_values;
-	ops->merge_step_series = &_task_merge_step_series;
-	ops->series_total = &_task_series_total;
-	ops->extract_series = &_task_extract_series;
-	ops->extract_total = &_task_extract_total;
-	return ops;
-}
-
-/* ============================================================================
- * Common support functions
- ===========================================================================*/
-
-extern hdf5_api_ops_t* profile_factory(uint32_t type)
-{
-	switch (type) {
-	case ACCT_GATHER_PROFILE_ENERGY:
-		return _energy_profile_factory();
-		break;
-	case ACCT_GATHER_PROFILE_TASK:
-		return _task_profile_factory();
-		break;
-	case ACCT_GATHER_PROFILE_LUSTRE:
-		return _io_profile_factory();
-		break;
-	case ACCT_GATHER_PROFILE_NETWORK:
-		return _network_profile_factory();
-		break;
-	default:
-		error("profile_factory: Unknown type %d sent", type);
-		return NULL;
-	}
-}
-
-
-extern void profile_init(void)
-{
-	typTOD = H5Tcopy (H5T_C_S1);
-	H5Tset_size (typTOD, TOD_LEN); /* create string of length TOD_LEN */
-
-	return;
-}
-
 extern void profile_fini(void)
 {
-	H5Tclose(typTOD);
 	H5close(); /* make sure all H5 Objects are closed */
 
 	return;
 }
 
-extern char *get_data_set_name(char *type)
-{
-	static char  dset_name[MAX_DATASET_NAME+1];
-	dset_name[0] = '\0';
-	sprintf(dset_name, "%s Data", type);
-
-	return dset_name;
-}
-
-
-static char* _H5O_type_t2str(H5O_type_t type)
-{
-	switch (type)
-	{
-	case H5O_TYPE_UNKNOWN:
-		return "H5O_TYPE_UNKNOWN";
-	case H5O_TYPE_GROUP:
-		return "H5O_TYPE_GROUP";
-	case H5O_TYPE_DATASET:
-		return "H5O_TYPE_DATASET";
-	case H5O_TYPE_NAMED_DATATYPE:
-		return "H5O_TYPE_NAMED_DATATYPE";
-	case H5O_TYPE_NTYPES:
-		return "H5O_TYPE_NTYPES";
-	default:
-		return "Invalid H5O_TYPE";
-	}
-}
-
-
-extern void hdf5_obj_info(hid_t group, char *nam_group)
-{
-	char buf[MAX_GROUP_NAME+1];
-	hsize_t nobj, nattr;
-	hid_t aid;
-	int i, len;
-	H5G_info_t group_info;
-	H5O_info_t object_info;
-
-	if (group < 0) {
-		info("PROFILE: Group is not HDF5 object");
-		return;
-	}
-	H5Gget_info(group, &group_info);
-	nobj = group_info.nlinks;
-	H5Oget_info(group, &object_info);
-	nattr = object_info.num_attrs;
-	info("PROFILE group: %s NumObject=%d NumAttributes=%d",
-	     nam_group, (int) nobj, (int) nattr);
-	for (i = 0; (nobj>0) && (i<nobj); i++) {
-		H5Oget_info_by_idx(group, ".", H5_INDEX_NAME, H5_ITER_INC, i,
-				   &object_info, H5P_DEFAULT);
-		len = H5Lget_name_by_idx(group, ".", H5_INDEX_NAME,
-					 H5_ITER_INC, i, buf, MAX_GROUP_NAME,
-					 H5P_DEFAULT);
-		if ((len > 0) && (len < MAX_GROUP_NAME)) {
-			info("PROFILE: Obj=%d Type=%s Name=%s",
-			     i, _H5O_type_t2str(object_info.type), buf);
-		} else {
-			info("PROFILE: Obj=%d Type=%s Name=%s (is truncated)",
-			     i, _H5O_type_t2str(object_info.type), buf);
-		}
-	}
-	for (i = 0; (nattr>0) && (i<nattr); i++) {
-		aid = H5Aopen_by_idx(group, ".", H5_INDEX_NAME, H5_ITER_INC,
-				     i, H5P_DEFAULT, H5P_DEFAULT);
-		// Get the name of the attribute.
-		len = H5Aget_name(aid, MAX_ATTR_NAME, buf);
-		if (len < MAX_ATTR_NAME) {
-			info("PROFILE: Attr=%d Name=%s", i, buf);
-		} else {
-			info("PROFILE: Attr=%d Name=%s (is truncated)", i, buf);
-		}
-		H5Aclose(aid);
-	}
-
-	return;
-}
-
 extern hid_t get_attribute_handle(hid_t parent, char *name)
 {
 	char buf[MAX_ATTR_NAME+1];
@@ -1645,7 +86,7 @@ extern hid_t get_attribute_handle(hid_t parent, char *name)
 	return -1;
 }
 
-extern hid_t get_group(hid_t parent, char *name)
+extern hid_t get_group(hid_t parent, const char *name)
 {
 	char buf[MAX_GROUP_NAME];
 	hsize_t nobj;
@@ -1678,7 +119,7 @@ extern hid_t get_group(hid_t parent, char *name)
 	return -1;
 }
 
-extern hid_t make_group(hid_t parent, char *name)
+extern hid_t make_group(hid_t parent, const char *name)
 {
 	hid_t gid = -1;
 
@@ -1738,47 +179,6 @@ extern void put_string_attribute(hid_t parent, char *name, char *value)
 	return;
 }
 
-extern char *get_string_attribute(hid_t parent, char *name)
-{
-	char *value = NULL;
-
-	hid_t   attr, type;
-	size_t  size;
-
-	attr = get_attribute_handle(parent, name);
-	if (attr < 0) {
-		debug3("PROFILE: Attribute=%s does not exist", name);
-		return NULL;
-	}
-	type  = H5Aget_type(attr);
-	if (H5Tget_class(type) != H5T_STRING) {
-		H5Aclose(attr);
-		debug3("PROFILE: Attribute=%s is not a string", name);
-		return NULL;
-	}
-	size = H5Tget_size(type);
-	value = xmalloc(size+1);
-	if (value == NULL) {
-		H5Tclose(type);
-		H5Aclose(attr);
-		debug3("PROFILE: failed to malloc %d bytes for attribute=%s",
-		       (int) size,
-		       name);
-		return NULL;
-	}
-	if (H5Aread(attr, type, value) < 0) {
-		xfree(value);
-		H5Tclose(type);
-		H5Aclose(attr);
-		debug3("PROFILE: failed to read attribute=%s", name);
-		return NULL;
-	}
-	H5Tclose(type);
-	H5Aclose(attr);
-
-	return value;
-}
-
 extern void put_int_attribute(hid_t parent, char *name, int value)
 {
 	hid_t   attr, space_attr;
@@ -1806,243 +206,4 @@ extern void put_int_attribute(hid_t parent, char *name, int value)
 	return;
 }
 
-extern int get_int_attribute(hid_t parent, char *name)
-{
-	int value = 0;
-
-	hid_t   attr;
-	attr = get_attribute_handle(parent, name);
-	if (attr < 0) {
-		debug3("PROFILE: Attribute=%s does not exist, returning", name);
-		return value;
-	}
-	if (H5Aread(attr, H5T_NATIVE_INT, &value) < 0) {
-		debug3("PROFILE: failed to read attribute=%s, returning", name);
-	}
-	H5Aclose(attr);
-
-	return value;
-}
-
-
-extern void put_uint32_attribute(hid_t parent, char *name, uint32_t value)
-{
-	hid_t   attr, space_attr;
-	hsize_t dim_attr[1] = {1}; // Single dimension array of values
-
-	space_attr  = H5Screate_simple(1, dim_attr, NULL);
-	if (space_attr < 0) {
-		debug3("PROFILE: failed to create space for attribute %s",
-		       name);
-		return;
-	}
-	attr = H5Acreate(parent, name, H5T_NATIVE_UINT32, space_attr,
-			 H5P_DEFAULT, H5P_DEFAULT);
-	if (attr < 0) {
-		H5Sclose(space_attr);
-		debug3("PROFILE: failed to create attribute %s", name);
-		return;
-	}
-	if (H5Awrite(attr, H5T_NATIVE_UINT32, &value) < 0) {
-		debug3("PROFILE: failed to write attribute %s", name);
-		// Fall through to release resources
-	}
-	H5Sclose(space_attr);
-	H5Aclose(attr);
-
-	return;
-}
-
-extern uint32_t get_uint32_attribute(hid_t parent, char *name)
-{
-	int value = 0;
-	hid_t   attr;
-
-	attr = get_attribute_handle(parent, name);
-	if (attr < 0) {
-		debug3("PROFILE: Attribute=%s does not exist, returning", name);
-		return value;
-	}
-	if (H5Aread(attr, H5T_NATIVE_UINT32, &value) < 0) {
-		debug3("PROFILE: failed to read attribute=%s, returning", name);
-	}
-	H5Aclose(attr);
-
-	return value;
-}
-
-extern void *get_hdf5_data(hid_t parent, uint32_t type,
-			   char *nam_group, int *size_data)
-{
-	void *  data = NULL;
-
-	hid_t   id_data_set, dtyp_memory;
-	hsize_t szDset;
-	herr_t  ec;
-	char *subtype = NULL;
-	hdf5_api_ops_t* ops = profile_factory(type);
-	char *type_name = acct_gather_profile_type_to_string(type);
-
-	if (ops == NULL) {
-		debug3("PROFILE: failed to create %s operations",
-		       type_name);
-		return NULL;
-	}
-	subtype = get_string_attribute(parent, ATTR_SUBDATATYPE);
-	if (subtype < 0) {
-		xfree(ops);
-		debug3("PROFILE: failed to get %s attribute",
-		       ATTR_SUBDATATYPE);
-		return NULL;
-	}
-	id_data_set = H5Dopen(parent, get_data_set_name(nam_group),
-			      H5P_DEFAULT);
-	if (id_data_set < 0) {
-		xfree(subtype);
-		xfree(ops);
-		debug3("PROFILE: failed to open %s Data Set",
-		       type_name);
-		return NULL;
-	}
-	if (strcmp(subtype, SUBDATA_SUMMARY))
-		dtyp_memory = (*(ops->create_memory_datatype))();
-	else
-		dtyp_memory = (*(ops->create_s_memory_datatype))();
-	xfree(subtype);
-	if (dtyp_memory < 0) {
-		H5Dclose(id_data_set);
-		xfree(ops);
-		debug3("PROFILE: failed to create %s memory datatype",
-		       type_name);
-		return NULL;
-	}
-	szDset = H5Dget_storage_size(id_data_set);
-	*size_data = (int) szDset;
-	if (szDset == 0) {
-		H5Tclose(dtyp_memory);
-		H5Dclose(id_data_set);
-		xfree(ops);
-		debug3("PROFILE: %s data set is empty",
-		       type_name);
-		return NULL;
-	}
-	data = xmalloc(szDset);
-	if (data == NULL) {
-		H5Tclose(dtyp_memory);
-		H5Dclose(id_data_set);
-		xfree(ops);
-		debug3("PROFILE: failed to get memory for %s data set",
-		       type_name);
-		return NULL;
-	}
-	ec = H5Dread(id_data_set, dtyp_memory, H5S_ALL, H5S_ALL, H5P_DEFAULT,
-		     data);
-	if (ec < 0) {
-		H5Tclose(dtyp_memory);
-		H5Dclose(id_data_set);
-		xfree(data);
-		xfree(ops);
-		debug3("PROFILE: failed to read %s data",
-		       type_name);
-		return NULL;
-	}
-	H5Tclose(dtyp_memory);
-	H5Dclose(id_data_set);
-	xfree(ops);
-
-	return data;
-}
-
-extern void put_hdf5_data(hid_t parent, uint32_t type, char *subtype,
-			  char *group, void *data, int n_item)
-{
-	hid_t   id_group, dtyp_memory, dtyp_file, id_data_space, id_data_set;
-	hsize_t dims[1];
-	herr_t  ec;
-	hdf5_api_ops_t* ops = profile_factory(type);
-	char *type_name = acct_gather_profile_type_to_string(type);
-
-	if (ops == NULL) {
-		debug3("PROFILE: failed to create %s operations",
-		       type_name);
-		return;
-	}
-	// Create the datatypes.
-	if (strcmp(subtype, SUBDATA_SUMMARY)) {
-		dtyp_memory = (*(ops->create_memory_datatype))();
-		dtyp_file = (*(ops->create_file_datatype))();
-	} else {
-		dtyp_memory = (*(ops->create_s_memory_datatype))();
-		dtyp_file = (*(ops->create_s_file_datatype))();
-	}
-
-	if (dtyp_memory < 0) {
-		xfree(ops);
-		debug3("PROFILE: failed to create %s memory datatype",
-		       type_name);
-		return;
-	}
-
-	if (dtyp_file < 0) {
-		H5Tclose(dtyp_memory);
-		xfree(ops);
-		debug3("PROFILE: failed to create %s file datatype",
-		       type_name);
-		return;
-	}
-
-	dims[0] = n_item;
-	id_data_space = H5Screate_simple(1, dims, NULL);
-	if (id_data_space < 0) {
-		H5Tclose(dtyp_file);
-		H5Tclose(dtyp_memory);
-		xfree(ops);
-		debug3("PROFILE: failed to create %s space descriptor",
-		       type_name);
-		return;
-	}
-
-	id_group = H5Gcreate(parent, group, H5P_DEFAULT,
-			     H5P_DEFAULT, H5P_DEFAULT);
-	if (id_group < 0) {
-		H5Sclose(id_data_space);
-		H5Tclose(dtyp_file);
-		H5Tclose(dtyp_memory);
-		xfree(ops);
-		debug3("PROFILE: failed to create %s group", group);
-		return;
-	}
-
-	put_string_attribute(id_group, ATTR_DATATYPE, type_name);
-	put_string_attribute(id_group, ATTR_SUBDATATYPE, subtype);
-
-	id_data_set = H5Dcreate(id_group, get_data_set_name(group), dtyp_file,
-				id_data_space, H5P_DEFAULT, H5P_DEFAULT,
-				H5P_DEFAULT);
-	if (id_data_set < 0) {
-		H5Gclose(id_group);
-		H5Sclose(id_data_space);
-		H5Tclose(dtyp_file);
-		H5Tclose(dtyp_memory);
-		xfree(ops);
-		debug3("PROFILE: failed to create %s dataset", group);
-		return;
-	}
-
-	ec = H5Dwrite(id_data_set, dtyp_memory, H5S_ALL, H5S_ALL, H5P_DEFAULT,
-		      data);
-	if (ec < 0) {
-		debug3("PROFILE: failed to create write task data");
-		// Fall through to release resources
-	}
-	H5Dclose(id_data_set);
-	H5Gclose(id_group);
-	H5Sclose(id_data_space);
-	H5Tclose(dtyp_file);
-	H5Tclose(dtyp_memory);
-	xfree(ops);
-
-
-	return;
-}
 
diff --git a/src/plugins/acct_gather_profile/hdf5/hdf5_api.h b/src/plugins/acct_gather_profile/hdf5/hdf5_api.h
index 721bb1897..16239875f 100644
--- a/src/plugins/acct_gather_profile/hdf5/hdf5_api.h
+++ b/src/plugins/acct_gather_profile/hdf5/hdf5_api.h
@@ -59,244 +59,31 @@
 #include <stdlib.h>
 
 #include <hdf5.h>
-#include "src/common/slurm_acct_gather_profile.h"
+#include <hdf5_hl.h>
 
 #define MAX_PROFILE_PATH 1024
 #define MAX_ATTR_NAME 64
 #define MAX_GROUP_NAME 64
-#define MAX_DATASET_NAME 64
 
 #define ATTR_NODENAME "Node Name"
-#define ATTR_STARTTIME "Start Time"
 #define ATTR_NSTEPS "Number of Steps"
 #define ATTR_NNODES "Number of Nodes"
 #define ATTR_NTASKS "Number of Tasks"
-#define ATTR_TASKID "Task Id"
 #define ATTR_CPUPERTASK "CPUs per Task"
-#define ATTR_DATATYPE "Data Type"
-#define ATTR_SUBDATATYPE "Subdata Type"
 #define ATTR_STARTTIME "Start Time"
-#define ATTR_STARTSEC "Start Second"
-#define SUBDATA_DATA "Data"
-#define SUBDATA_NODE "Node"
-#define SUBDATA_SAMPLE "Sample"
-#define SUBDATA_SERIES "Series"
-#define SUBDATA_TOTAL "Total"
-#define SUBDATA_SUMMARY "Summary"
 
 #define GRP_ENERGY "Energy"
 #define GRP_LUSTRE "Lustre"
-#define GRP_STEP "Step"
+#define GRP_STEPS "Steps"
 #define GRP_NODES "Nodes"
-#define GRP_NODE "Node"
 #define GRP_NETWORK "Network"
-#define GRP_SAMPLES "Time Series"
-#define GRP_SAMPLE "Sample"
-#define GRP_TASKS "Tasks"
 #define GRP_TASK "Task"
-#define GRP_TOTALS "Totals"
-
-// Data types supported by all HDF5 plugins of this type
-
-#define TOD_LEN 24
-#define TOD_FMT "%F %T"
-
-/*
- * prof_uint_sum is a low level structure intended to hold the
- * minimum, average, maximum, and total values of a data item.
- * It is usually used in a summary data structure for an item
- * that occurs in a time series.
- */
-typedef struct prof_uint_sum {
-	uint64_t min;	// Minumum value
-	uint64_t ave;	// Average value
-	uint64_t max;	// Maximum value
-	uint64_t total;	// Accumlated value
-} prof_uint_sum_t;
-
-// Save as prof_uint_sum, but for double precision items
-typedef struct prof_dbl_sum {
-	double	min;	// Minumum value
-	double	ave;	// Average value
-	double	max;	// Maximum value
-	double	total;	// Accumlated value
-} prof_dbl_sum_t;
-
-#define PROFILE_ENERGY_DATA "Energy"
-// energy data structures
-//	node_step file
-typedef struct profile_energy {
-	char		tod[TOD_LEN];	// Not used in node-step
-	time_t		time;
-	uint64_t	power;
-	uint64_t	cpu_freq;
-} profile_energy_t;
-//	summary data in job-node-totals
-typedef struct profile_energy_s {
-	char		start_time[TOD_LEN];
-	uint64_t	elapsed_time;
-	prof_uint_sum_t	power;
-	prof_uint_sum_t cpu_freq;
-} profile_energy_s_t; // series summary
-
-#define PROFILE_IO_DATA "I/O"
-// io data structure
-//	node_step file
-typedef struct profile_io {
-	char		tod[TOD_LEN];	// Not used in node-step
-	time_t		time;
-	uint64_t	reads;
-	double		read_size;	// currently in megabytes
-	uint64_t	writes;
-	double		write_size;	// currently in megabytes
-} profile_io_t;
-//	summary data in job-node-totals
-typedef struct profile_io_s {
-	char		start_time[TOD_LEN];
-	uint64_t	elapsed_time;
-	prof_uint_sum_t	reads;
-	prof_dbl_sum_t	read_size;	// currently in megabytes
-	prof_uint_sum_t	writes;
-	prof_dbl_sum_t	write_size;	// currently in megabytes
-} profile_io_s_t;
-
-#define PROFILE_NETWORK_DATA "Network"
-// Network data structure
-//	node_step file
-typedef struct profile_network {
-	char		tod[TOD_LEN];	// Not used in node-step
-	time_t		time;
-	uint64_t	packets_in;
-	double		size_in;	// currently in megabytes
-	uint64_t	packets_out;
-	double		size_out;	// currently in megabytes
-} profile_network_t;
-//	summary data in job-node-totals
-typedef struct profile_network_s {
-	char		start_time[TOD_LEN];
-	uint64_t	elapsed_time;
-	prof_uint_sum_t packets_in;
-	prof_dbl_sum_t  size_in;		// currently in megabytes
-	prof_uint_sum_t packets_out;
-	prof_dbl_sum_t  size_out;	// currently in megabytes
-} profile_network_s_t;
-
-#define PROFILE_TASK_DATA "Task"
-// task data structure
-//	node_step file
-typedef struct profile_task {
-	char		tod[TOD_LEN];	// Not used in node-step
-	time_t		time;
-	uint64_t	cpu_freq;
-	uint64_t	cpu_time;
-	double		cpu_utilization;
-	uint64_t	rss;
-	uint64_t	vm_size;
-	uint64_t	pages;
-	double	 	read_size;	// currently in megabytes
-	double	 	write_size;	// currently in megabytes
-} profile_task_t;
-//	summary data in job-node-totals
-typedef struct profile_task_s {
-	char		start_time[TOD_LEN];
-	uint64_t	elapsed_time;
-	prof_uint_sum_t	cpu_freq;
-	prof_uint_sum_t cpu_time;
-	prof_dbl_sum_t 	cpu_utilization;
-	prof_uint_sum_t rss;
-	prof_uint_sum_t vm_size;
-	prof_uint_sum_t pages;
-	prof_dbl_sum_t 	read_size;	// currently in megabytes
-	prof_dbl_sum_t 	write_size;	// currently in megabytes
-} profile_task_s_t;
-
-/*
- * Structure of function pointers of common operations on a profile data type.
- *	dataset_size -- size of one dataset (structure size)
- *	create_memory_datatype -- creates hdf5 memory datatype corresponding
- *		to the datatype structure.
- *	create_file_datatype -- creates hdf5 file datatype corresponding
- *		to the datatype structure.
- *	create_s_memory_datatype -- creates hdf5 memory datatype corresponding
- *		to the summary datatype structure.
- *	create_s_file_datatype -- creates hdf5 file datatype corresponding
- *		to the summary datatype structure.
- *	init_job_series -- allocates a buffer for a complete time series
- *		(in job merge) and initializes each member
- *      get_series_tod -- get the date/time value of each sample in the series
- *      get_series_values -- gets a specific data item from each sample in the
- *		series
- *	merge_step_series -- merges all the individual time samples into a
- *		single data set with one item per sample.
- *		Data items can be scaled (e.g. subtracting beginning time)
- *		differenced (to show counts in interval) or other things
- *		appropriate for the series.
- *	series_total -- accumulate or average members in the entire series to
- *		be added to the file as totals for the node or task.
- *	extract_series -- format members of a structure for putting to
- *		to a file data extracted from a time series to be imported into
- *		another analysis tool. (e.g. format as comma separated value.)
- *	extract_totals -- format members of a structure for putting to
- *		to a file data extracted from a time series total to be
- *		imported into another analysis tool.
- *		(format as comma,separated value, for example.)
- */
-typedef struct hdf5_api_ops {
-	int   (*dataset_size) (void);
-	hid_t (*create_memory_datatype) (void);
-	hid_t (*create_file_datatype) (void);
-	hid_t (*create_s_memory_datatype) (void);
-	hid_t (*create_s_file_datatype) (void);
-	void* (*init_job_series) (int);
-	char** (*get_series_tod) (void*, int);
-	double* (*get_series_values) (char*, void*, int);
-	void  (*merge_step_series) (hid_t, void*, void*, void*);
-	void* (*series_total) (int, void*);
-	void  (*extract_series) (FILE*, bool, int, int, char*, char*, void*,
-				 int);
-	void  (*extract_total) (FILE*, bool, int, int, char*, char*, void*,
-				int);
-} hdf5_api_ops_t;
-
-/* ============================================================================
- * Common support functions
- ==========================================================================*/
-
-/*
- * Create a opts group from type
- */
-hdf5_api_ops_t* profile_factory(uint32_t type);
-
-/*
- * Initialize profile (initialize static memory)
- */
-void profile_init(void);
 
 /*
- * Finialize profile (initialize static memory)
+ * Finalize profile (initialize static memory)
  */
 void profile_fini(void);
 
-/*
- * Make a dataset name
- *
- * Parameters
- *	type	- series name
- *
- * Returns
- *	common data set name based on type in static memory
- */
-char* get_data_set_name(char* type);
-
-/*
- * print info on an object for debugging
- *
- * Parameters
- *	group	 - handle to group.
- *	namGroup - name of the group
- */
-void hdf5_obj_info(hid_t group, char* namGroup);
-
 /*
  * get attribute handle by name.
  *
@@ -317,7 +104,7 @@ hid_t get_attribute_handle(hid_t parent, char* name);
  *
  * Returns - handle for group (or -1 when not found), caller must close
  */
-hid_t get_group(hid_t parent, char* name);
+hid_t get_group(hid_t parent, const char* name);
 
 /*
  * make group by name.
@@ -328,7 +115,7 @@ hid_t get_group(hid_t parent, char* name);
  *
  * Returns - handle for group (or -1 on error), caller must close
  */
-hid_t make_group(hid_t parent, char* name);
+hid_t make_group(hid_t parent, const char* name);
 
 /*
  * Put string attribute
@@ -340,17 +127,6 @@ hid_t make_group(hid_t parent, char* name);
  */
 void put_string_attribute(hid_t parent, char* name, char* value);
 
-/*
- * get string attribute
- *
- * Parameters
- *	parent	- handle to parent group.
- *	name	- name of the attribute
- *
- * Return: pointer to value. Caller responsibility to free!!!
- */
-char* get_string_attribute(hid_t parent, char* name);
-
 /*
  * Put integer attribute
  *
@@ -361,63 +137,4 @@ char* get_string_attribute(hid_t parent, char* name);
  */
 void put_int_attribute(hid_t parent, char* name, int value);
 
-/*
- * get int attribute
- *
- * Parameters
- *	parent	- handle to parent group.
- *	name	- name of the attribute
- *
- * Return: value
- */
-int get_int_attribute(hid_t parent, char* name);
-
-/*
- * Put uint32_t attribute
- *
- * Parameters
- *	parent	- handle to parent group.
- *	name	- name of the attribute
- *	value	- value of the attribute
- */
-void put_uint32_attribute(hid_t parent, char* name, uint32_t value);
-
-/*
- * get uint32_t attribute
- *
- * Parameters
- *	parent	- handle to parent group.
- *	name	- name of the attribute
- *
- * Return: value
- */
-uint32_t get_uint32_attribute(hid_t parent, char* name);
-
-/*
- * Get data from a group of a HDF5 file
- *
- * Parameters
- *	parent   - handle to parent.
- *	type     - type of data (ACCT_GATHER_PROFILE_* in slurm.h)
- *	namGroup - name of group
- *	sizeData - pointer to variable into which to put size of dataset
- *
- * Returns -- data set of type (or null), caller must free.
- */
-void* get_hdf5_data(hid_t parent, uint32_t type, char* namGroup, int* sizeData);
-
-/*
- * Put one data sample into a new group in an HDF5 file
- *
- * Parameters
- *	parent  - handle to parent group.
- *	type    - type of data (ACCT_GATHER_PROFILE_* in slurm.h)
- *	subtype - generally source (node, series, ...) or summary
- *	group   - name of new group
- *	data    - data for the sample
- *      nItems  - number of items of type in the data
- */
-void put_hdf5_data(hid_t parent, uint32_t type, char* subtype, char* group,
-		   void* data, int nItems);
-
 #endif /*__ACCT_GATHER_HDF5_API_H__*/
diff --git a/src/plugins/acct_gather_profile/hdf5/sh5util/Makefile.am b/src/plugins/acct_gather_profile/hdf5/sh5util/Makefile.am
index b3169fb5f..c1be61967 100644
--- a/src/plugins/acct_gather_profile/hdf5/sh5util/Makefile.am
+++ b/src/plugins/acct_gather_profile/hdf5/sh5util/Makefile.am
@@ -7,15 +7,17 @@ AUTOMAKE_OPTIONS = foreign
 # hdf5 could of been installed with a link to the generic mpi.h.
 AM_CPPFLAGS = -I$(top_srcdir) -I../ $(HDF5_CPPFLAGS)
 
-SHDF5_SOURCES = sh5util.c
+SHDF5_SOURCES = sh5util.c sh5util.h
 
 if BUILD_HDF5
 
+SUBDIRS = libsh5util_old
+
 bin_PROGRAMS = sh5util
 
 sh5util_SOURCES = $(SHDF5_SOURCES)
 sh5util_LDADD = $(top_builddir)/src/api/libslurm.o $(DL_LIBS) \
-	../libhdf5_api.la
+	../libhdf5_api.la libsh5util_old/libsh5util_old.la
 
 sh5util_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) \
 	$(HDF5_LDFLAGS) $(HDF5_LIBS)
diff --git a/src/plugins/acct_gather_profile/hdf5/sh5util/Makefile.in b/src/plugins/acct_gather_profile/hdf5/sh5util/Makefile.in
index 83c8672be..513f3672a 100644
--- a/src/plugins/acct_gather_profile/hdf5/sh5util/Makefile.in
+++ b/src/plugins/acct_gather_profile/hdf5/sh5util/Makefile.in
@@ -102,6 +102,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -110,10 +111,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -126,7 +129,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -135,15 +138,16 @@ CONFIG_CLEAN_FILES =
 CONFIG_CLEAN_VPATH_FILES =
 am__installdirs = "$(DESTDIR)$(bindir)"
 PROGRAMS = $(bin_PROGRAMS)
-am__sh5util_SOURCES_DIST = sh5util.c
+am__sh5util_SOURCES_DIST = sh5util.c sh5util.h
 am__objects_1 = sh5util.$(OBJEXT)
 @BUILD_HDF5_TRUE@am_sh5util_OBJECTS = $(am__objects_1)
-am__EXTRA_sh5util_SOURCES_DIST = sh5util.c
+am__EXTRA_sh5util_SOURCES_DIST = sh5util.c sh5util.h
 sh5util_OBJECTS = $(am_sh5util_OBJECTS)
 am__DEPENDENCIES_1 =
 @BUILD_HDF5_TRUE@sh5util_DEPENDENCIES =  \
 @BUILD_HDF5_TRUE@	$(top_builddir)/src/api/libslurm.o \
-@BUILD_HDF5_TRUE@	$(am__DEPENDENCIES_1) ../libhdf5_api.la
+@BUILD_HDF5_TRUE@	$(am__DEPENDENCIES_1) ../libhdf5_api.la \
+@BUILD_HDF5_TRUE@	libsh5util_old/libsh5util_old.la
 AM_V_lt = $(am__v_lt_@AM_V@)
 am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
 am__v_lt_0 = --silent
@@ -188,11 +192,27 @@ am__v_CCLD_1 =
 SOURCES = $(sh5util_SOURCES) $(EXTRA_sh5util_SOURCES)
 DIST_SOURCES = $(am__sh5util_SOURCES_DIST) \
 	$(am__EXTRA_sh5util_SOURCES_DIST)
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+	ctags-recursive dvi-recursive html-recursive info-recursive \
+	install-data-recursive install-dvi-recursive \
+	install-exec-recursive install-html-recursive \
+	install-info-recursive install-pdf-recursive \
+	install-ps-recursive install-recursive installcheck-recursive \
+	installdirs-recursive pdf-recursive ps-recursive \
+	tags-recursive uninstall-recursive
 am__can_run_installinfo = \
   case $$AM_UPDATE_INFO_DIR in \
     n|no|NO) false;; \
     *) (install-info --version) >/dev/null 2>&1;; \
   esac
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive	\
+  distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+  $(RECURSIVE_TARGETS) \
+  $(RECURSIVE_CLEAN_TARGETS) \
+  $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+	distdir
 am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
 # Read a list of newline-separated strings from the standard input,
 # and print each of them once, without duplicates.  Input order is
@@ -212,7 +232,33 @@ am__define_uniq_tagged_files = \
   done | $(am__uniquify_input)`
 ETAGS = etags
 CTAGS = ctags
+DIST_SUBDIRS = libsh5util_old
 DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+  dir0=`pwd`; \
+  sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+  sed_rest='s,^[^/]*/*,,'; \
+  sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+  sed_butlast='s,/*[^/]*$$,,'; \
+  while test -n "$$dir1"; do \
+    first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+    if test "$$first" != "."; then \
+      if test "$$first" = ".."; then \
+        dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+        dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+      else \
+        first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+        if test "$$first2" = "$$first"; then \
+          dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+        else \
+          dir2="../$$dir2"; \
+        fi; \
+        dir0="$$dir0"/"$$first"; \
+      fi; \
+    fi; \
+    dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+  done; \
+  reldir="$$dir2"
 ACLOCAL = @ACLOCAL@
 AMTAR = @AMTAR@
 AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
@@ -254,6 +300,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -303,8 +351,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -323,6 +375,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -366,6 +421,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -389,6 +445,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -455,16 +512,17 @@ AUTOMAKE_OPTIONS = foreign
 # Do not put a link to common here.  src/common contains an mpi.h which
 # hdf5 could of been installed with a link to the generic mpi.h.
 AM_CPPFLAGS = -I$(top_srcdir) -I../ $(HDF5_CPPFLAGS)
-SHDF5_SOURCES = sh5util.c
+SHDF5_SOURCES = sh5util.c sh5util.h
+@BUILD_HDF5_TRUE@SUBDIRS = libsh5util_old
 @BUILD_HDF5_TRUE@sh5util_SOURCES = $(SHDF5_SOURCES)
 @BUILD_HDF5_TRUE@sh5util_LDADD = $(top_builddir)/src/api/libslurm.o $(DL_LIBS) \
-@BUILD_HDF5_TRUE@	../libhdf5_api.la
+@BUILD_HDF5_TRUE@	../libhdf5_api.la libsh5util_old/libsh5util_old.la
 
 @BUILD_HDF5_TRUE@sh5util_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) \
 @BUILD_HDF5_TRUE@	$(HDF5_LDFLAGS) $(HDF5_LIBS)
 
 @BUILD_HDF5_FALSE@EXTRA_sh5util_SOURCES = $(SHDF5_SOURCES)
-all: all-am
+all: all-recursive
 
 .SUFFIXES:
 .SUFFIXES: .c .lo .o .obj
@@ -587,14 +645,61 @@ mostlyclean-libtool:
 clean-libtool:
 	-rm -rf .libs _libs
 
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+#     (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+	@fail=; \
+	if $(am__make_keepgoing); then \
+	  failcom='fail=yes'; \
+	else \
+	  failcom='exit 1'; \
+	fi; \
+	dot_seen=no; \
+	target=`echo $@ | sed s/-recursive//`; \
+	case "$@" in \
+	  distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+	  *) list='$(SUBDIRS)' ;; \
+	esac; \
+	for subdir in $$list; do \
+	  echo "Making $$target in $$subdir"; \
+	  if test "$$subdir" = "."; then \
+	    dot_seen=yes; \
+	    local_target="$$target-am"; \
+	  else \
+	    local_target="$$target"; \
+	  fi; \
+	  ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+	  || eval $$failcom; \
+	done; \
+	if test "$$dot_seen" = "no"; then \
+	  $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+	fi; test -z "$$fail"
+
 ID: $(am__tagged_files)
 	$(am__define_uniq_tagged_files); mkid -fID $$unique
-tags: tags-am
+tags: tags-recursive
 TAGS: tags
 
 tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
 	set x; \
 	here=`pwd`; \
+	if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+	  include_option=--etags-include; \
+	  empty_fix=.; \
+	else \
+	  include_option=--include; \
+	  empty_fix=; \
+	fi; \
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    test ! -f $$subdir/TAGS || \
+	      set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+	  fi; \
+	done; \
 	$(am__define_uniq_tagged_files); \
 	shift; \
 	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
@@ -607,7 +712,7 @@ tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
 	      $$unique; \
 	  fi; \
 	fi
-ctags: ctags-am
+ctags: ctags-recursive
 
 CTAGS: ctags
 ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
@@ -620,7 +725,7 @@ GTAGS:
 	here=`$(am__cd) $(top_builddir) && pwd` \
 	  && $(am__cd) $(top_srcdir) \
 	  && gtags -i $(GTAGS_ARGS) "$$here"
-cscopelist: cscopelist-am
+cscopelist: cscopelist-recursive
 
 cscopelist-am: $(am__tagged_files)
 	list='$(am__tagged_files)'; \
@@ -669,22 +774,48 @@ distdir: $(DISTFILES)
 	    || exit 1; \
 	  fi; \
 	done
+	@list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    $(am__make_dryrun) \
+	      || test -d "$(distdir)/$$subdir" \
+	      || $(MKDIR_P) "$(distdir)/$$subdir" \
+	      || exit 1; \
+	    dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+	    $(am__relativize); \
+	    new_distdir=$$reldir; \
+	    dir1=$$subdir; dir2="$(top_distdir)"; \
+	    $(am__relativize); \
+	    new_top_distdir=$$reldir; \
+	    echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+	    echo "     am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+	    ($(am__cd) $$subdir && \
+	      $(MAKE) $(AM_MAKEFLAGS) \
+	        top_distdir="$$new_top_distdir" \
+	        distdir="$$new_distdir" \
+		am__remove_distdir=: \
+		am__skip_length_check=: \
+		am__skip_mode_fix=: \
+	        distdir) \
+	      || exit 1; \
+	  fi; \
+	done
 check-am: all-am
-check: check-am
+check: check-recursive
 all-am: Makefile $(PROGRAMS)
-installdirs:
+installdirs: installdirs-recursive
+installdirs-am:
 	for dir in "$(DESTDIR)$(bindir)"; do \
 	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
 	done
-install: install-am
-install-exec: install-exec-am
-install-data: install-data-am
-uninstall: uninstall-am
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
 
 install-am: all-am
 	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
 
-installcheck: installcheck-am
+installcheck: installcheck-recursive
 install-strip:
 	if test -z '$(STRIP)'; then \
 	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
@@ -706,91 +837,92 @@ distclean-generic:
 maintainer-clean-generic:
 	@echo "This command is intended for maintainers to use"
 	@echo "it deletes files that may require special tools to rebuild."
-clean: clean-am
+clean: clean-recursive
 
 clean-am: clean-binPROGRAMS clean-generic clean-libtool mostlyclean-am
 
-distclean: distclean-am
+distclean: distclean-recursive
 	-rm -rf ./$(DEPDIR)
 	-rm -f Makefile
 distclean-am: clean-am distclean-compile distclean-generic \
 	distclean-tags
 
-dvi: dvi-am
+dvi: dvi-recursive
 
 dvi-am:
 
-html: html-am
+html: html-recursive
 
 html-am:
 
-info: info-am
+info: info-recursive
 
 info-am:
 
 install-data-am:
 
-install-dvi: install-dvi-am
+install-dvi: install-dvi-recursive
 
 install-dvi-am:
 
 install-exec-am: install-binPROGRAMS
 
-install-html: install-html-am
+install-html: install-html-recursive
 
 install-html-am:
 
-install-info: install-info-am
+install-info: install-info-recursive
 
 install-info-am:
 
 install-man:
 
-install-pdf: install-pdf-am
+install-pdf: install-pdf-recursive
 
 install-pdf-am:
 
-install-ps: install-ps-am
+install-ps: install-ps-recursive
 
 install-ps-am:
 
 installcheck-am:
 
-maintainer-clean: maintainer-clean-am
+maintainer-clean: maintainer-clean-recursive
 	-rm -rf ./$(DEPDIR)
 	-rm -f Makefile
 maintainer-clean-am: distclean-am maintainer-clean-generic
 
-mostlyclean: mostlyclean-am
+mostlyclean: mostlyclean-recursive
 
 mostlyclean-am: mostlyclean-compile mostlyclean-generic \
 	mostlyclean-libtool
 
-pdf: pdf-am
+pdf: pdf-recursive
 
 pdf-am:
 
-ps: ps-am
+ps: ps-recursive
 
 ps-am:
 
 uninstall-am: uninstall-binPROGRAMS
 
-.MAKE: install-am install-strip
-
-.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean \
-	clean-binPROGRAMS clean-generic clean-libtool cscopelist-am \
-	ctags ctags-am distclean distclean-compile distclean-generic \
-	distclean-libtool distclean-tags distdir dvi dvi-am html \
-	html-am info info-am install install-am install-binPROGRAMS \
-	install-data install-data-am install-dvi install-dvi-am \
-	install-exec install-exec-am install-html install-html-am \
-	install-info install-info-am install-man install-pdf \
-	install-pdf-am install-ps install-ps-am install-strip \
-	installcheck installcheck-am installdirs maintainer-clean \
-	maintainer-clean-generic mostlyclean mostlyclean-compile \
-	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
-	tags tags-am uninstall uninstall-am uninstall-binPROGRAMS
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+	check-am clean clean-binPROGRAMS clean-generic clean-libtool \
+	cscopelist-am ctags ctags-am distclean distclean-compile \
+	distclean-generic distclean-libtool distclean-tags distdir dvi \
+	dvi-am html html-am info info-am install install-am \
+	install-binPROGRAMS install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	installdirs-am maintainer-clean maintainer-clean-generic \
+	mostlyclean mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool pdf pdf-am ps ps-am tags tags-am uninstall \
+	uninstall-am uninstall-binPROGRAMS
 
 
 @BUILD_HDF5_TRUE@force:
diff --git a/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/Makefile.am b/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/Makefile.am
new file mode 100644
index 000000000..82f29109d
--- /dev/null
+++ b/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/Makefile.am
@@ -0,0 +1,13 @@
+#
+# Makefile for sh5util_old lib, can be taken out 2 versions past 15.08
+#
+
+AUTOMAKE_OPTIONS = foreign
+
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common -I.
+
+noinst_LTLIBRARIES = libsh5util_old.la
+
+libsh5util_old_la_SOURCES = sh5util.c sh5util_old.h hdf5_api.c hdf5_api.h
+
+libsh5util_old_la_LDFLAGS = $(LIB_LDFLAGS) -module --export-dynamic
diff --git a/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/Makefile.in b/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/Makefile.in
new file mode 100644
index 000000000..233acc380
--- /dev/null
+++ b/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/Makefile.in
@@ -0,0 +1,756 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+#
+# Makefile for sh5util_old lib, can be taken out 2 versions past 15.08
+#
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+	$(top_srcdir)/auxdir/depcomp
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+LTLIBRARIES = $(noinst_LTLIBRARIES)
+libsh5util_old_la_LIBADD =
+am_libsh5util_old_la_OBJECTS = sh5util.lo hdf5_api.lo
+libsh5util_old_la_OBJECTS = $(am_libsh5util_old_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 = 
+libsh5util_old_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \
+	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
+	$(AM_CFLAGS) $(CFLAGS) $(libsh5util_old_la_LDFLAGS) $(LDFLAGS) \
+	-o $@
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo "  CC      " $@;
+am__v_CC_1 = 
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo "  CCLD    " $@;
+am__v_CCLD_1 = 
+SOURCES = $(libsh5util_old_la_SOURCES)
+DIST_SOURCES = $(libsh5util_old_la_SOURCES)
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common -I.
+noinst_LTLIBRARIES = libsh5util_old.la
+libsh5util_old_la_SOURCES = sh5util.c sh5util_old.h hdf5_api.c hdf5_api.h
+libsh5util_old_la_LDFLAGS = $(LIB_LDFLAGS) -module --export-dynamic
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstLTLIBRARIES:
+	-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+	@list='$(noinst_LTLIBRARIES)'; \
+	locs=`for p in $$list; do echo $$p; done | \
+	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+	      sort -u`; \
+	test -z "$$locs" || { \
+	  echo rm -f $${locs}; \
+	  rm -f $${locs}; \
+	}
+
+libsh5util_old.la: $(libsh5util_old_la_OBJECTS) $(libsh5util_old_la_DEPENDENCIES) $(EXTRA_libsh5util_old_la_DEPENDENCIES) 
+	$(AM_V_CCLD)$(libsh5util_old_la_LINK)  $(libsh5util_old_la_OBJECTS) $(libsh5util_old_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/hdf5_api.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sh5util.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-noinstLTLIBRARIES cscopelist-am ctags \
+	ctags-am distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-compile \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	tags tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/hdf5_api.c b/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/hdf5_api.c
new file mode 100644
index 000000000..0772c0692
--- /dev/null
+++ b/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/hdf5_api.c
@@ -0,0 +1,1868 @@
+/****************************************************************************\
+ *  hdf5_api.c
+ *****************************************************************************
+ *  Copyright (C) 2013 Bull S. A. S.
+ *		Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois.
+ *
+ *  Written by Rod Schultz <rod.schultz@bull.com>
+ *
+ *  Provide support for acct_gather_profile plugins based on HDF5 files.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\****************************************************************************/
+
+#include "src/common/macros.h"
+#include "src/common/slurm_time.h"
+#include "src/common/xassert.h"
+#include "src/common/xstring.h"
+
+#include "hdf5_api.h"
+
+
+// Static variables are ok as the add functions are called inside a lock.
+static time_t seriesStart;
+static hid_t typTOD;
+static int i; // General index used in some macros.
+static int moffset; // General variable used by insert macros
+
+/*
+ * Macro to insert a date string type into a compound memory type
+ *
+ * Parameters
+ * 	p	parent (group) memory type
+ * 	label	description of item
+ * 	type	profile struct type
+ * 	item    data item in type
+ */
+#define MEM_ADD_DATE_TIME(p, label, type, item)				\
+	if(H5Tinsert(p, label, HOFFSET(type, item), typTOD) < 0) {	\
+		debug3("PROFILE: failed insert into memory datatype");	\
+		H5Tclose(p);						\
+		return -1;						\
+	}
+/*
+ * Macro to insert a date string type into a compound file type
+ *
+ * Parameters
+ * 	p	parent (group) file type
+ * 	label	description of item
+ * 	offset  offset into record
+ */
+#define FILE_ADD_DATE_TIME(p, label, offset) 				\
+	if(H5Tinsert(p, label, offset, typTOD) < 0) {			\
+		debug3("PROFILE: failed insert into file datatype");	\
+		H5Tclose(p);						\
+		return -1;						\
+	}
+
+/*
+ * Macro to insert a uint64 into a compound memory type
+ *
+ * Parameters
+ * 	p	parent (group) memory type
+ * 	label	description of item
+ * 	type	profile struct type
+ * 	item    data item in type
+ */
+#define MEM_ADD_UINT64(p, label, type, item)				\
+	if(H5Tinsert(p, label, HOFFSET(type, item), H5T_NATIVE_UINT64) < 0) { \
+		debug3("PROFILE: failed insert64 into memory datatype"); \
+		H5Tclose(p);						\
+		return -1;						\
+	}
+/*
+ * Macro to insert a uint64 into a compound file type
+ *
+ * Parameters
+ * 	p	parent (group) file type
+ * 	label	description of item
+ */
+#define FILE_ADD_UINT64(p, label)					\
+	if(H5Tinsert(p, label, moffset, H5T_NATIVE_UINT64) < 0) {	\
+		debug3("PROFILE: failed insert64 into file datatype");	\
+		H5Tclose(p);						\
+		return -1;						\
+	}								\
+	moffset += 8;
+
+/*
+ * Macro to insert a double into a compound memory type
+ *
+ * Parameters
+ * 	p	parent (group) memory type
+ * 	label	description of item
+ * 	type	profile struct type
+ * 	item    data item in type
+ */
+#define MEM_ADD_DBL(p, label, type, item)				\
+	if(H5Tinsert(p, label, HOFFSET(type, item), H5T_NATIVE_DOUBLE) < 0) { \
+		debug3("PROFILE: failed insertdbl into memory datatype"); \
+		H5Tclose(p);						\
+		return -1;						\
+	}
+/*
+ * Macro to insert a double into a compound file type
+ *
+ * Parameters
+ * 	p	parent (group) file type
+ * 	label	description of item
+ */
+#define FILE_ADD_DBL(p, label)						\
+	if(H5Tinsert(p, label, moffset, H5T_NATIVE_DOUBLE) < 0) {	\
+		debug3("PROFILE: failed insertdbl into file datatype");	\
+		H5Tclose(p);						\
+		return -1;						\
+	}								\
+	moffset += 8;
+
+/*
+ * Macro to increment a sample in a difference series
+ * -- Difference means each sample represents counts for only that interval
+ *	(assumes consistent naming convention)
+ *
+ *
+ * Parameters
+ * 	tot	total pointer
+ * 	smp     sample pointer
+ * 	var	variable name in sample
+ * 	count	number of items in series
+ */
+#define INCR_DIF_SAMPLE(tot, smp, var, count)			\
+	for (i=0; i<count; i++) {				\
+		if (i == 0) {					\
+			total->var.min = smp[i].var;		\
+		}						\
+		tot->var.total += smp[i].var;			\
+		tot->var.min = MIN(smp[i].var, tot->var.min);	\
+		tot->var.max = MAX(smp[i].var, tot->var.max);	\
+	}							\
+	tot->var.ave = tot->var.total / count;
+
+/*
+ * Macro to increment a sample in a running total
+ * -- Running total means first sample is initial conditions
+ *	(assumes consistent naming convention)
+ *
+ *
+ * Parameters
+ * 	tot	total pointer
+ * 	smp     sample pointer
+ * 	var	variable name in sample
+ * 	count	number of items in series
+ */
+#define INCR_RT_SAMPLE(tot, smp, var, count)			\
+	for (i=1; i<count; i++) {				\
+		if (i == 1) {					\
+			total->var.min = smp[i].var;		\
+		}						\
+		tot->var.total += smp[i].var;			\
+		tot->var.min = MIN(smp[i].var, tot->var.min);	\
+		tot->var.max = MAX(smp[i].var, tot->var.max);	\
+	}							\
+	tot->var.ave = tot->var.total / count;
+
+/* Macro to put an int min,ave,max,total for a variable to extract file
+ *
+ * Parameters
+ * 	fp	file descriptor
+ * 	var	variable name
+ * 	prfx	prefix for series (usually ",")
+ */
+#define PUT_UINT_SUM(fp, var, prfx)			\
+	fprintf(fp, "%s%ld,%ld,%ld,%ld", prfx,		\
+		var.min, var.ave, var.max, var.total);
+/* Macro to put an int min,ave,max,total for a variable to extract file
+ *
+ * Parameters
+ * 	fp	file descriptor
+ * 	var	variable name
+ * 	prfx	prefix for series (usually ",")
+ */
+#define PUT_DBL_SUM(fp, var, prfx)			\
+	fprintf(fp, "%s%.3f,%.3f,%.3f,%.3f", prfx,	\
+		var.min, var.ave, var.max, var.total);
+
+
+// ============================================================================
+// Routines supporting Energy Data type
+// ============================================================================
+
+static int _energy_dataset_size(void)
+{
+	return sizeof(profile_energy_t);
+}
+
+static hid_t _energy_create_memory_datatype(void)
+{
+	hid_t   mtyp_energy = H5Tcreate(H5T_COMPOUND, sizeof(profile_energy_t));
+	if (mtyp_energy < 0) {
+		debug3("PROFILE: failed to create Energy memory datatype");
+		return -1;
+	}
+	MEM_ADD_DATE_TIME(mtyp_energy, "Date_Time", profile_energy_t, tod);
+	MEM_ADD_UINT64(mtyp_energy, "Time", profile_energy_t, time);
+	MEM_ADD_UINT64(mtyp_energy, "Power", profile_energy_t, power);
+	MEM_ADD_UINT64(mtyp_energy, "CPU_Frequency",
+		       profile_energy_t, cpu_freq);
+
+	return mtyp_energy;
+}
+
+static hid_t _energy_create_file_datatype(void)
+{
+	hid_t   ftyp_energy = H5Tcreate(H5T_COMPOUND, (TOD_LEN+3*8));
+	if (ftyp_energy < 0) {
+		debug3("PROFILE: failed to create Energy file datatype");
+		return -1;
+	}
+	moffset = TOD_LEN;
+	FILE_ADD_DATE_TIME(ftyp_energy, "Date_Time", 0);
+	FILE_ADD_UINT64(ftyp_energy, "Time");
+	FILE_ADD_UINT64(ftyp_energy, "Power");
+	FILE_ADD_UINT64(ftyp_energy, "CPU_Frequency");
+
+	return ftyp_energy;
+}
+
+static hid_t _energy_s_create_memory_datatype(void)
+{
+	hid_t   mtyp_energy = H5Tcreate(H5T_COMPOUND,
+					sizeof(profile_energy_s_t));
+	if (mtyp_energy < 0) {
+		debug3("PROFILE: failed to create Energy_s memory datatype");
+		return -1;
+	}
+	MEM_ADD_DATE_TIME(mtyp_energy, "Start Time",
+			  profile_energy_s_t, start_time);
+	MEM_ADD_UINT64(mtyp_energy, "Elapsed Time",
+		       profile_energy_s_t, elapsed_time);
+	MEM_ADD_UINT64(mtyp_energy, "Min Power", profile_energy_s_t, power.min);
+	MEM_ADD_UINT64(mtyp_energy, "Ave Power", profile_energy_s_t, power.ave);
+	MEM_ADD_UINT64(mtyp_energy, "Max Power", profile_energy_s_t, power.max);
+	MEM_ADD_UINT64(mtyp_energy, "Total Power",
+		       profile_energy_s_t, power.total);
+	MEM_ADD_UINT64(mtyp_energy, "Min CPU Frequency", profile_energy_s_t,
+		       cpu_freq.min);
+	MEM_ADD_UINT64(mtyp_energy, "Ave CPU Frequency", profile_energy_s_t,
+		       cpu_freq.ave);
+	MEM_ADD_UINT64(mtyp_energy, "Max CPU Frequency", profile_energy_s_t,
+		       cpu_freq.max);
+	MEM_ADD_UINT64(mtyp_energy, "Total CPU Frequency", profile_energy_s_t,
+		       cpu_freq.total);
+
+	return mtyp_energy;
+}
+
+static hid_t _energy_s_create_file_datatype(void)
+{
+	hid_t   ftyp_energy = H5Tcreate(H5T_COMPOUND, (TOD_LEN+9*8));
+	if (ftyp_energy < 0) {
+		debug3("PROFILE: failed to create Energy_s file datatype");
+		return -1;
+	}
+	moffset = TOD_LEN;
+	FILE_ADD_DATE_TIME(ftyp_energy, "Start Time", 0);
+	FILE_ADD_UINT64(ftyp_energy, "Elapsed Time");
+	FILE_ADD_UINT64(ftyp_energy, "Min Power");
+	FILE_ADD_UINT64(ftyp_energy, "Ave Power");
+	FILE_ADD_UINT64(ftyp_energy, "Max Power");
+	FILE_ADD_UINT64(ftyp_energy, "Total Power");
+	FILE_ADD_UINT64(ftyp_energy, "Min CPU Frequency");
+	FILE_ADD_UINT64(ftyp_energy, "Ave CPU Frequency");
+	FILE_ADD_UINT64(ftyp_energy, "Max CPU Frequency");
+	FILE_ADD_UINT64(ftyp_energy, "Total CPU Frequency");
+
+	return ftyp_energy;
+}
+
+static void *_energy_init_job_series(int n_samples)
+{
+	profile_energy_t*  energy_data;
+
+	energy_data = xmalloc(n_samples * sizeof(profile_energy_t));
+	if (energy_data == NULL) {
+		debug3("PROFILE: failed to get memory for energy data");
+		return NULL;
+	}
+	return (void*) energy_data;
+}
+
+static char** _energy_get_series_tod(void* data, int nsmp)
+{
+	int ix;
+	char      **tod_values = NULL;
+	profile_energy_t* energy_series = (profile_energy_t*) data;
+	tod_values = (char**) xmalloc(nsmp*sizeof(char*));
+	if (tod_values == NULL) {
+		info("Failed to get memory for energy tod");
+		return NULL;
+	}
+	for (ix=0; ix < nsmp; ix++) {
+		tod_values[ix] = xstrdup(energy_series[ix].tod);
+	}
+	return tod_values;
+}
+
+static double* _energy_get_series_values(char* data_name, void* data, int nsmp)
+{
+	int ix;
+	profile_energy_t* energy_series = (profile_energy_t*) data;
+	double  *energy_values = NULL;
+	energy_values = xmalloc(nsmp*sizeof(double));
+	if (energy_values == NULL) {
+		info("PROFILE: Failed to get memory for energy data");
+		return NULL;
+	}
+	if (strcasecmp(data_name,"Time") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			energy_values[ix] = (double) energy_series[ix].time;
+
+		}
+		return energy_values;
+	} else if (strcasecmp(data_name,"Power") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			energy_values[ix] = (double) energy_series[ix].power;
+
+		}
+		return energy_values;
+	} else if (strcasecmp(data_name,"CPU_Frequency") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			energy_values[ix] = (double) energy_series[ix].cpu_freq;
+
+		}
+		return energy_values;
+	}
+	xfree(energy_values);
+	info("PROFILE: %s is invalid data item for energy data", data_name);
+	return NULL;
+}
+
+static void _energy_merge_step_series(
+	hid_t group, void *prior, void *cur, void *buf)
+{
+//	This is a difference series
+	profile_energy_t* prf_cur = (profile_energy_t*) cur;
+	profile_energy_t* prf_buf = (profile_energy_t*) buf;
+	struct tm *ts = slurm_localtime(&prf_cur->time);
+	strftime(prf_buf->tod, TOD_LEN, TOD_FMT, ts);
+	if (prior == NULL) {
+		// First sample.
+		seriesStart = prf_cur->time;
+		prf_buf->time = 0;
+
+	} else {
+		prf_buf->time = prf_cur->time - seriesStart;
+	}
+	prf_buf->power = prf_cur->power;
+	prf_buf->cpu_freq = prf_cur->cpu_freq;
+	return;
+}
+
+static void *_energy_series_total(int n_samples, void *data)
+{
+	profile_energy_t* energy_data;
+	profile_energy_s_t* total;
+	if (n_samples < 1)
+		return NULL;
+	energy_data = (profile_energy_t*) data;
+	total = xmalloc(sizeof(profile_energy_s_t));
+	if (total == NULL) {
+		error("PROFILE: Out of memory getting energy total");
+		return NULL;
+	}
+	// Assuming energy series are a difference series
+	strcpy(total->start_time, energy_data[0].tod);
+	total->elapsed_time = energy_data[n_samples-1].time;
+	INCR_DIF_SAMPLE(total, energy_data, power, n_samples);
+	INCR_DIF_SAMPLE(total, energy_data, cpu_freq, n_samples);
+	return total;
+}
+
+static void _energy_extract_series(
+	FILE* fp, bool put_header, int job, int step,
+	char *node, char *series, void *data, int size_data)
+{
+
+	int n_items, ix;
+	profile_energy_t* energy_data = (profile_energy_t*) data;
+	if (put_header) {
+		fprintf(fp, "Job,Step,Node,Series,Date_Time,Elapsed_Time,"
+			"Power, CPU_Frequency\n");
+	}
+	n_items = size_data / sizeof(profile_energy_t);
+	for (ix=0; ix < n_items; ix++) {
+		fprintf(fp, "%d,%d,%s,%s,%s,%ld,%ld,%ld\n", job, step, node,
+			series, energy_data[ix].tod, energy_data[ix].time,
+			energy_data[ix].power, energy_data[ix].cpu_freq);
+	}
+	return;
+}
+
+static void _energy_extract_total(
+	FILE* fp, bool put_header, int job, int step,
+	char *node, char *series, void *data, int size_data)
+{
+	profile_energy_s_t* energy_data = (profile_energy_s_t*) data;
+	if (put_header) {
+		fprintf(fp, "Job,Step,Node,Series,Start_Time,Elapsed_Time,"
+			"Min_Power,Ave_Power,Max_Power,Total_Power,"
+			"Min_CPU Frequency,Ave_CPU Frequency,"
+			"Max_CPU Frequency,Total_CPU Frequency\n");
+	}
+	fprintf(fp, "%d,%d,%s,%s,%s,%ld", job, step, node, series,
+		energy_data->start_time, energy_data->elapsed_time);
+	PUT_UINT_SUM(fp, energy_data->power, ",");
+	PUT_UINT_SUM(fp, energy_data->cpu_freq, ",");
+	fprintf(fp, "\n");
+	return;
+}
+
+static hdf5_api_ops_t* _energy_profile_factory(void)
+{
+	hdf5_api_ops_t* ops = xmalloc(sizeof(hdf5_api_ops_t));
+	ops->dataset_size = &_energy_dataset_size;
+	ops->create_memory_datatype = &_energy_create_memory_datatype;
+	ops->create_file_datatype = &_energy_create_file_datatype;
+	ops->create_s_memory_datatype = &_energy_s_create_memory_datatype;
+	ops->create_s_file_datatype = &_energy_s_create_file_datatype;
+	ops->init_job_series = &_energy_init_job_series;
+	ops->get_series_tod = &_energy_get_series_tod;
+	ops->get_series_values = &_energy_get_series_values;
+	ops->merge_step_series = &_energy_merge_step_series;
+	ops->series_total = &_energy_series_total;
+	ops->extract_series = &_energy_extract_series;
+	ops->extract_total = &_energy_extract_total;
+	return ops;
+}
+
+
+// ============================================================================
+// Routines supporting I/O Data type
+// ============================================================================
+
+static int _io_dataset_size(void)
+{
+	return sizeof(profile_io_t);
+}
+
+// Build the in-memory HDF5 compound datatype describing one profile_io_t
+// sample.  Returns the datatype id, or -1 on failure.
+static hid_t _io_create_memory_datatype(void)
+{
+	hid_t   mtyp_io = -1;
+
+	mtyp_io = H5Tcreate(H5T_COMPOUND, sizeof(profile_io_t));
+	if (mtyp_io < 0) {
+		debug3("PROFILE: failed to create IO memory datatype");
+		return -1;
+	}
+	// Member names/order must match _io_create_file_datatype below.
+	MEM_ADD_DATE_TIME(mtyp_io, "Date_Time", profile_io_t, tod);
+	MEM_ADD_UINT64(mtyp_io, "Time", profile_io_t, time);
+	MEM_ADD_UINT64(mtyp_io, "Reads", profile_io_t, reads);
+	MEM_ADD_DBL(mtyp_io, "Megabytes_Read", profile_io_t, read_size);
+	MEM_ADD_UINT64(mtyp_io, "Writes", profile_io_t, writes);
+	MEM_ADD_DBL(mtyp_io, "Megabytes_Write", profile_io_t, write_size);
+	return mtyp_io;
+}
+
+// Build the on-file HDF5 compound datatype for one I/O sample: a TOD_LEN
+// date string followed by five 8-byte members (hence TOD_LEN+5*8).
+// Returns the datatype id, or -1 on failure.
+static hid_t _io_create_file_datatype(void)
+{
+	hid_t   ftyp_io = -1;
+
+	ftyp_io = H5Tcreate(H5T_COMPOUND, TOD_LEN+5*8);
+	if (ftyp_io < 0) {
+		debug3("PROFILE: failed to create IO file datatype");
+		return -1;
+	}
+	// moffset is a file-scope offset cursor; presumably each FILE_ADD_*
+	// macro inserts a member at moffset and advances it -- confirm in
+	// the macro definitions.
+	moffset = TOD_LEN;
+	FILE_ADD_DATE_TIME(ftyp_io, "Date_Time", 0);
+	FILE_ADD_UINT64(ftyp_io, "Time");
+	FILE_ADD_UINT64(ftyp_io, "Reads");
+	FILE_ADD_DBL(ftyp_io, "Megabytes_Read");
+	FILE_ADD_UINT64(ftyp_io, "Writes");
+	FILE_ADD_DBL(ftyp_io, "Megabytes_Write");
+
+	return ftyp_io;
+}
+
+// Build the in-memory HDF5 compound datatype for the I/O series summary
+// (profile_io_s_t: min/ave/max/total per field).  Returns the datatype id,
+// or -1 on failure.
+static hid_t _io_s_create_memory_datatype(void)
+{
+	hid_t   mtyp_io = -1;
+
+	mtyp_io = H5Tcreate(H5T_COMPOUND, sizeof(profile_io_s_t));
+	if (mtyp_io < 0) {
+		debug3("PROFILE: failed to create IO memory datatype");
+		return -1;
+	}
+	// Member names/order must match _io_s_create_file_datatype below.
+	MEM_ADD_DATE_TIME(mtyp_io, "Start Time", profile_io_s_t, start_time);
+	MEM_ADD_UINT64(mtyp_io, "Elapsed Time", profile_io_s_t, elapsed_time);
+	MEM_ADD_UINT64(mtyp_io, "Min Reads", profile_io_s_t, reads.min);
+	MEM_ADD_UINT64(mtyp_io, "Ave Reads", profile_io_s_t, reads.ave);
+	MEM_ADD_UINT64(mtyp_io, "Max Reads", profile_io_s_t, reads.max);
+	MEM_ADD_UINT64(mtyp_io, "Total Reads", profile_io_s_t, reads.total);
+	MEM_ADD_DBL(mtyp_io, "Min Read Megabytes",
+		    profile_io_s_t, read_size.min);
+	MEM_ADD_DBL(mtyp_io, "Ave Read Megabytes",
+		    profile_io_s_t, read_size.ave);
+	MEM_ADD_DBL(mtyp_io, "Max Read Megabytes",
+		    profile_io_s_t, read_size.max);
+	MEM_ADD_DBL(mtyp_io, "Total Read Megabytes", profile_io_s_t,
+		    read_size.total);
+	MEM_ADD_UINT64(mtyp_io, "Min Writes", profile_io_s_t, writes.min);
+	MEM_ADD_UINT64(mtyp_io, "Ave Writes", profile_io_s_t, writes.ave);
+	MEM_ADD_UINT64(mtyp_io, "Max Writes", profile_io_s_t, writes.max);
+	MEM_ADD_UINT64(mtyp_io, "Total Writes", profile_io_s_t, writes.total);
+	MEM_ADD_DBL(mtyp_io, "Min Write Megabytes", profile_io_s_t,
+		    write_size.min);
+	MEM_ADD_DBL(mtyp_io, "Ave Write Megabytes", profile_io_s_t,
+		    write_size.ave);
+	MEM_ADD_DBL(mtyp_io, "Max Write Megabytes", profile_io_s_t,
+		    write_size.max);
+	MEM_ADD_DBL(mtyp_io, "Total Write Megabytes", profile_io_s_t,
+		    write_size.total);
+
+	return mtyp_io;
+}
+
+// Build the on-file HDF5 compound datatype for the I/O series summary:
+// a TOD_LEN start-time string plus 17 8-byte members (TOD_LEN+17*8).
+// Returns the datatype id, or -1 on failure.
+static hid_t _io_s_create_file_datatype(void)
+{
+	hid_t   ftyp_io = -1;
+
+	ftyp_io = H5Tcreate(H5T_COMPOUND, TOD_LEN+17*8);
+	if (ftyp_io < 0) {
+		debug3("PROFILE: failed to create IO file datatype");
+		return -1;
+	}
+	// moffset: file-layout offset cursor consumed by the FILE_ADD_* macros.
+	moffset = TOD_LEN;
+	FILE_ADD_DATE_TIME(ftyp_io, "Start Time", 0);
+	FILE_ADD_UINT64(ftyp_io, "Elapsed Time");
+	FILE_ADD_UINT64(ftyp_io, "Min Reads");
+	FILE_ADD_UINT64(ftyp_io, "Ave Reads");
+	FILE_ADD_UINT64(ftyp_io, "Max Reads");
+	FILE_ADD_UINT64(ftyp_io, "Total Reads");
+	FILE_ADD_DBL(ftyp_io, "Min Read Megabytes");
+	FILE_ADD_DBL(ftyp_io, "Ave Read Megabytes");
+	FILE_ADD_DBL(ftyp_io, "Max Read Megabytes");
+	FILE_ADD_DBL(ftyp_io, "Total Read Megabytes");
+	FILE_ADD_UINT64(ftyp_io, "Min Writes");
+	FILE_ADD_UINT64(ftyp_io, "Ave Writes");
+	FILE_ADD_UINT64(ftyp_io, "Max Writes");
+	FILE_ADD_UINT64(ftyp_io, "Total Writes");
+	FILE_ADD_DBL(ftyp_io, "Min Write Megabytes");
+	FILE_ADD_DBL(ftyp_io, "Ave Write Megabytes");
+	FILE_ADD_DBL(ftyp_io, "Max Write Megabytes");
+	FILE_ADD_DBL(ftyp_io, "Total Write Megabytes");
+
+	return ftyp_io;
+}
+
+// Allocate a buffer large enough for n_samples combined I/O samples.
+static void *_io_init_job_series(int n_samples)
+{
+	profile_io_t *series_buf =
+		xmalloc(sizeof(profile_io_t) * n_samples);
+
+	if (series_buf == NULL) {
+		debug3("PROFILE: failed to get memory for combined io data");
+		return NULL;
+	}
+	return (void *) series_buf;
+}
+
+// Copy the Date_Time strings out of an I/O sample array.
+// Returns an array of nsmp duplicated strings, or NULL on failure.
+static char** _io_get_series_tod(void* data, int nsmp)
+{
+	profile_io_t *samples = (profile_io_t *) data;
+	char **tods = (char **) xmalloc(nsmp * sizeof(char *));
+	int i;
+
+	if (tods == NULL) {
+		info("Failed to get memory for io tod");
+		return NULL;
+	}
+	for (i = 0; i < nsmp; i++)
+		tods[i] = xstrdup(samples[i].tod);
+	return tods;
+}
+
+// Extract the named data item from an I/O sample array as doubles.
+// Returns an array of nsmp values, or NULL if data_name is not one of the
+// known I/O items (name match is case-insensitive).
+static double* _io_get_series_values(char* data_name, void* data, int nsmp)
+{
+	int ix;
+	profile_io_t* io_series = (profile_io_t*) data;
+	double  *io_values = NULL;
+	io_values = xmalloc(nsmp*sizeof(double));
+	if (io_values == NULL) {
+		info("PROFILE: Failed to get memory for io data");
+		return NULL;
+	}
+	if (strcasecmp(data_name,"Time") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			io_values[ix] = (double) io_series[ix].time;
+
+		}
+		return io_values;
+	} else if (strcasecmp(data_name,"Reads") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			io_values[ix] = (double) io_series[ix].reads;
+
+		}
+		return io_values;
+	} else if (strcasecmp(data_name,"Megabytes_Read") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			io_values[ix] = io_series[ix].read_size;
+
+		}
+		return io_values;
+	} else if (strcasecmp(data_name,"Writes") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			io_values[ix] = (double) io_series[ix].writes;
+
+		}
+		return io_values;
+	} else if (strcasecmp(data_name,"Megabytes_Write") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			io_values[ix] = io_series[ix].write_size;
+
+		}
+		return io_values;
+	}
+	// Unknown item: free the buffer and report.
+	xfree(io_values);
+	info("PROFILE: %s is invalid data item for io data", data_name);
+	return NULL;
+}
+
+// Merge one I/O sample (cur) into the combined buffer (buf), rebasing the
+// running-total counters so each emitted sample is a delta from the first
+// sample of the series.  prior==NULL marks the first sample.
+// NOTE(review): the static locals (and the file-scope seriesStart) mean
+// this is not reentrant across interleaved series -- same as the sibling
+// *_merge_step_series routines.
+static void _io_merge_step_series(
+	hid_t group, void *prior, void *cur, void *buf)
+{
+	// This is a difference series
+	static uint64_t start_reads = 0;
+	static uint64_t start_writes = 0;
+	static double start_read_size = 0;
+	static double start_write_size = 0;
+	// Locals renamed prfCur/prfBuf -> prf_cur/prf_buf for consistency
+	// with _network_merge_step_series / _task_merge_step_series.
+	profile_io_t* prf_cur = (profile_io_t*) cur;
+	profile_io_t* prf_buf = (profile_io_t*) buf;
+	struct tm *ts = slurm_localtime(&prf_cur->time);
+	strftime(prf_buf->tod, TOD_LEN, TOD_FMT, ts);
+	if (prior == NULL) {
+		// First sample establishes the baseline; emit zeros.
+		seriesStart = prf_cur->time;
+		prf_buf->time = 0;
+		start_reads = prf_cur->reads;
+		prf_buf->reads = 0;
+		start_writes = prf_cur->writes;
+		prf_buf->writes = 0;
+		start_read_size = prf_cur->read_size;
+		prf_buf->read_size = 0;
+		start_write_size = prf_cur->write_size;
+		prf_buf->write_size = 0;
+	} else {
+		prf_buf->time = prf_cur->time - seriesStart;
+		prf_buf->reads = prf_cur->reads - start_reads;
+		prf_buf->writes = prf_cur->writes - start_writes;
+		prf_buf->read_size = prf_cur->read_size - start_read_size;
+		prf_buf->write_size = prf_cur->write_size - start_write_size;
+	}
+	return;
+}
+
+// Reduce an I/O sample series to a single min/ave/max/total summary.
+// Returns an allocated profile_io_s_t, or NULL if there are no samples.
+static void *_io_series_total(int n_samples, void *data)
+{
+	profile_io_t* io_data;
+	profile_io_s_t* total;
+	if (n_samples < 1)
+		return NULL;
+	io_data = (profile_io_t*) data;
+	total = xmalloc(sizeof(profile_io_s_t));
+	if (total == NULL) {
+		error("PROFILE: Out of memory getting I/O total");
+		return NULL;
+	}
+	// Assuming io series are a running total, and the first
+	// sample just sets the initial conditions
+	// NOTE(review): strcpy assumes start_time can hold a full tod
+	// string (both appear to be TOD_LEN buffers) -- confirm in the
+	// struct definitions.
+	strcpy(total->start_time, io_data[0].tod);
+	total->elapsed_time = io_data[n_samples-1].time;
+	INCR_DIF_SAMPLE(total, io_data, reads, n_samples);
+	INCR_DIF_SAMPLE(total, io_data, read_size, n_samples);
+	INCR_DIF_SAMPLE(total, io_data, writes, n_samples);
+	INCR_DIF_SAMPLE(total, io_data, write_size, n_samples);
+	return total;
+}
+
+// Write an I/O sample series to fp as CSV, one row per sample; emits the
+// header row first when put_header is set.
+static void _io_extract_series(
+	FILE* fp, bool put_header, int job, int step,
+	char *node, char *series, void *data, int size_data)
+{
+	int n_items, ix;
+	profile_io_t* io_data = (profile_io_t*) data;
+	if (put_header) {
+		fprintf(fp,"Job,Step,Node,Series,Date_Time,Elapsed_time,"
+			"Reads,Read Megabytes,Writes,Write Megabytes\n");
+	}
+	n_items = size_data / sizeof(profile_io_t);
+	for (ix=0; ix < n_items; ix++) {
+		// NOTE(review): %ld for the counter fields assumes a 64-bit
+		// long; PRIu64 would be portable -- confirm field types.
+		fprintf(fp,"%d,%d,%s,%s,%s,%ld,%ld,%.3f,%ld,%.3f\n",
+			job, step, node, series,
+			io_data[ix].tod, io_data[ix].time,
+			io_data[ix].reads, io_data[ix].read_size,
+			io_data[ix].writes, io_data[ix].write_size);
+	}
+	return;
+}
+
+// Write an I/O series summary (profile_io_s_t) to fp as one CSV row;
+// emits the header row first when put_header is set.
+static void _io_extract_total(
+	FILE* fp, bool put_header, int job, int step,
+	char *node, char *series, void *data, int size_data)
+{
+	profile_io_s_t* io_data = (profile_io_s_t*) data;
+	if (put_header) {
+		fprintf(fp,"Job,Step,Node,Series,Start_Time,Elapsed_time,"
+			"Min_Reads,Ave_Reads,Max_Reads,Total_Reads,"
+			"Min_Read_Megabytes,Ave_Read_Megabytes,"
+			"Max_Read_Megabytes,Total_Read_Megabytes,"
+			"Min_Writes,Ave_Writes,Max_Writes,Total_Writes,"
+			"Min_Write_Megabytes,Ave_Write_Megabytes,"
+			"Max_Write_Megabytes,Total_Write_Megabytes\n");
+	}
+	fprintf(fp, "%d,%d,%s,%s,%s,%ld", job, step, node, series,
+		io_data->start_time, io_data->elapsed_time);
+	// PUT_*_SUM emit the min/ave/max/total quadruple for each field.
+	PUT_UINT_SUM(fp, io_data->reads, ",");
+	PUT_DBL_SUM(fp, io_data->read_size, ",");
+	PUT_UINT_SUM(fp, io_data->writes, ",");
+	PUT_DBL_SUM(fp, io_data->write_size, ",");
+	fprintf(fp, "\n");
+	return;
+}
+
+// Allocate and populate the vtable of operations for the I/O series.
+static hdf5_api_ops_t* _io_profile_factory(void)
+{
+	hdf5_api_ops_t *io_ops = xmalloc(sizeof(*io_ops));
+
+	io_ops->dataset_size = _io_dataset_size;
+	io_ops->create_memory_datatype = _io_create_memory_datatype;
+	io_ops->create_file_datatype = _io_create_file_datatype;
+	io_ops->create_s_memory_datatype = _io_s_create_memory_datatype;
+	io_ops->create_s_file_datatype = _io_s_create_file_datatype;
+	io_ops->init_job_series = _io_init_job_series;
+	io_ops->get_series_tod = _io_get_series_tod;
+	io_ops->get_series_values = _io_get_series_values;
+	io_ops->merge_step_series = _io_merge_step_series;
+	io_ops->series_total = _io_series_total;
+	io_ops->extract_series = _io_extract_series;
+	io_ops->extract_total = _io_extract_total;
+	return io_ops;
+}
+
+
+// ============================================================================
+// Routines supporting Network Data type
+// ============================================================================
+
+// Size in bytes of one network series sample record.
+static int _network_dataset_size(void)
+{
+	return (int) sizeof(profile_network_t);
+}
+
+// Build the in-memory HDF5 compound datatype describing one
+// profile_network_t sample.  Returns the datatype id, or -1 on failure.
+static hid_t _network_create_memory_datatype(void)
+{
+	hid_t   mtyp_network = H5Tcreate(H5T_COMPOUND,
+					 sizeof(profile_network_t));
+	if (mtyp_network < 0) {
+		debug3("PROFILE: failed to create Network memory datatype");
+		return -1;
+	}
+	// Member names/order must match _network_create_file_datatype below.
+	MEM_ADD_DATE_TIME(mtyp_network, "Date_Time", profile_network_t, tod);
+	MEM_ADD_UINT64(mtyp_network, "Time", profile_network_t, time);
+	MEM_ADD_UINT64(mtyp_network, "Packets_In",
+		       profile_network_t, packets_in);
+	MEM_ADD_DBL(mtyp_network, "Megabytes_In", profile_network_t, size_in);
+	MEM_ADD_UINT64(mtyp_network, "Packets_Out",
+		       profile_network_t, packets_out);
+	MEM_ADD_DBL(mtyp_network, "Megabytes_Out", profile_network_t, size_out);
+
+	return mtyp_network;
+}
+
+// Build the on-file HDF5 compound datatype for one network sample: a
+// TOD_LEN date string plus five 8-byte members (TOD_LEN+5*8).
+// Returns the datatype id, or -1 on failure.
+static hid_t _network_create_file_datatype(void)
+{
+	hid_t   ftyp_network = H5Tcreate(H5T_COMPOUND, TOD_LEN+5*8);
+	if (ftyp_network < 0) {
+		debug3("PROFILE: failed to create Network file datatype");
+		return -1;
+	}
+	// moffset: file-layout offset cursor consumed by the FILE_ADD_* macros.
+	moffset = TOD_LEN;
+	FILE_ADD_DATE_TIME(ftyp_network, "Date_Time", 0);
+	FILE_ADD_UINT64(ftyp_network, "Time");
+	FILE_ADD_UINT64(ftyp_network, "Packets_In");
+	FILE_ADD_DBL(ftyp_network, "Megabytes_In");
+	FILE_ADD_UINT64(ftyp_network, "Packets_Out");
+	FILE_ADD_DBL(ftyp_network, "Megabytes_Out");
+
+	return ftyp_network;
+}
+
+// Build the in-memory HDF5 compound datatype for the network series
+// summary (profile_network_s_t: min/ave/max/total per field).
+// Returns the datatype id, or -1 on failure.
+static hid_t _network_s_create_memory_datatype(void)
+{
+	hid_t   mtyp_network = -1;
+
+	mtyp_network = H5Tcreate(H5T_COMPOUND, sizeof(profile_network_s_t));
+	if (mtyp_network < 0) {
+		debug3("PROFILE: failed to create Network memory datatype");
+		return -1;
+	}
+	// Member names/order must match _network_s_create_file_datatype below.
+	MEM_ADD_DATE_TIME(mtyp_network, "Start Time", profile_network_s_t,
+			  start_time);
+	MEM_ADD_UINT64(mtyp_network, "Elapsed Time", profile_network_s_t,
+		       elapsed_time);
+	MEM_ADD_UINT64(mtyp_network, "Min Packets In", profile_network_s_t,
+		       packets_in.min);
+	MEM_ADD_UINT64(mtyp_network, "Ave Packets In", profile_network_s_t,
+		       packets_in.ave);
+	MEM_ADD_UINT64(mtyp_network, "Max Packets In", profile_network_s_t,
+		       packets_in.max);
+	MEM_ADD_UINT64(mtyp_network, "Total Packets In", profile_network_s_t,
+		       packets_in.total);
+	MEM_ADD_DBL(mtyp_network, "Min Megabytes In", profile_network_s_t,
+		    size_in.min);
+	MEM_ADD_DBL(mtyp_network, "Ave Megabytes In", profile_network_s_t,
+		    size_in.ave);
+	MEM_ADD_DBL(mtyp_network, "Max Megabytes In", profile_network_s_t,
+		    size_in.max);
+	MEM_ADD_DBL(mtyp_network, "Total Megabytes In", profile_network_s_t,
+		    size_in.total);
+	MEM_ADD_UINT64(mtyp_network, "Min Packets Out", profile_network_s_t,
+		       packets_out.min);
+	MEM_ADD_UINT64(mtyp_network, "Ave Packets Out", profile_network_s_t,
+		       packets_out.ave);
+	MEM_ADD_UINT64(mtyp_network, "Max Packets Out", profile_network_s_t,
+		       packets_out.max);
+	MEM_ADD_UINT64(mtyp_network, "Total Packets Out", profile_network_s_t,
+		       packets_out.total);
+	MEM_ADD_DBL(mtyp_network, "Min Megabytes Out", profile_network_s_t,
+		    size_out.min);
+	MEM_ADD_DBL(mtyp_network, "Ave Megabytes Out", profile_network_s_t,
+		    size_out.ave);
+	MEM_ADD_DBL(mtyp_network, "Max Megabytes Out", profile_network_s_t,
+		    size_out.max);
+	MEM_ADD_DBL(mtyp_network, "Total Megabytes Out", profile_network_s_t,
+		    size_out.total);
+
+	return mtyp_network;
+}
+
+// Build the on-file HDF5 compound datatype for the network series summary:
+// a TOD_LEN start-time string plus 17 8-byte members (TOD_LEN+17*8).
+// Returns the datatype id, or -1 on failure.
+static hid_t _network_s_create_file_datatype(void)
+{
+	hid_t   ftyp_network = H5Tcreate(H5T_COMPOUND, TOD_LEN+17*8);
+	if (ftyp_network < 0) {
+		debug3("PROFILE: failed to create Network file datatype");
+		return -1;
+	}
+	// moffset: file-layout offset cursor consumed by the FILE_ADD_* macros.
+	moffset = TOD_LEN;
+	FILE_ADD_DATE_TIME(ftyp_network, "Start Time", 0);
+	FILE_ADD_UINT64(ftyp_network, "Elapsed Time");
+	FILE_ADD_UINT64(ftyp_network, "Min Packets In");
+	FILE_ADD_UINT64(ftyp_network, "Ave Packets In");
+	FILE_ADD_UINT64(ftyp_network, "Max Packets In");
+	FILE_ADD_UINT64(ftyp_network, "Total Packets In");
+	FILE_ADD_DBL(ftyp_network, "Min Megabytes In");
+	FILE_ADD_DBL(ftyp_network, "Ave Megabytes In");
+	FILE_ADD_DBL(ftyp_network, "Max Megabytes In");
+	FILE_ADD_DBL(ftyp_network, "Total Megabytes In");
+	FILE_ADD_UINT64(ftyp_network, "Min Packets Out");
+	FILE_ADD_UINT64(ftyp_network, "Ave Packets Out");
+	FILE_ADD_UINT64(ftyp_network, "Max Packets Out");
+	FILE_ADD_UINT64(ftyp_network, "Total Packets Out");
+	FILE_ADD_DBL(ftyp_network, "Min Megabytes Out");
+	FILE_ADD_DBL(ftyp_network, "Ave Megabytes Out");
+	FILE_ADD_DBL(ftyp_network, "Max Megabytes Out");
+	FILE_ADD_DBL(ftyp_network, "Total Megabytes Out");
+
+	return ftyp_network;
+}
+
+// Allocate a buffer large enough for n_samples combined network samples.
+static void *_network_init_job_series(int n_samples)
+{
+	profile_network_t *series_buf =
+		xmalloc(sizeof(profile_network_t) * n_samples);
+
+	if (series_buf == NULL) {
+		debug3("PROFILE: failed to get memory for network data");
+		return NULL;
+	}
+	return (void *) series_buf;
+}
+
+// Copy the Date_Time strings out of a network sample array.
+// Returns an array of nsmp duplicated strings, or NULL on failure.
+static char** _network_get_series_tod(void* data, int nsmp)
+{
+	profile_network_t *samples = (profile_network_t *) data;
+	char **tods = (char **) xmalloc(nsmp * sizeof(char *));
+	int i;
+
+	if (tods == NULL) {
+		info("Failed to get memory for network tod");
+		return NULL;
+	}
+	for (i = 0; i < nsmp; i++)
+		tods[i] = xstrdup(samples[i].tod);
+	return tods;
+}
+
+// Extract the named data item from a network sample array as doubles.
+// Returns an array of nsmp values, or NULL if data_name is not one of the
+// known network items (name match is case-insensitive).
+static double* _network_get_series_values(char* data_name, void* data, int nsmp)
+{
+	int ix;
+	profile_network_t* network_series = (profile_network_t*) data;
+	double  *network_values = NULL;
+	network_values = xmalloc(nsmp*sizeof(double));
+	if (network_values == NULL) {
+		info("PROFILE: Failed to get memory for network data");
+		return NULL;
+	}
+	if (strcasecmp(data_name,"Time") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			network_values[ix] = (double) network_series[ix].time;
+
+		}
+		return network_values;
+	} else if (strcasecmp(data_name,"Packets_In") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			network_values[ix] =
+					(double) network_series[ix].packets_in;
+
+		}
+		return network_values;
+	} else if (strcasecmp(data_name,"Megabytes_In") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			network_values[ix] = network_series[ix].size_in;
+
+		}
+		return network_values;
+	} else if (strcasecmp(data_name,"Packets_Out") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			network_values[ix] =
+					(double) network_series[ix].packets_out;
+
+		}
+		return network_values;
+	} else if (strcasecmp(data_name,"Megabytes_Out") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			network_values[ix] = network_series[ix].size_out;
+
+		}
+		return network_values;
+	}
+	// Unknown item: free the buffer and report.
+	xfree(network_values);
+	info("PROFILE: %s is invalid data item for network data", data_name);
+	return NULL;
+}
+
+// Merge one network sample (cur) into the combined buffer (buf).
+// Only the time field is rebased against the first sample of the series
+// (via the file-scope seriesStart); the packet/byte fields are copied
+// through unchanged.  prior==NULL marks the first sample.
+static void _network_merge_step_series(
+	hid_t group, void *prior, void *cur, void *buf)
+{
+// This is a difference series
+	profile_network_t* prf_cur = (profile_network_t*) cur;
+	profile_network_t* prf_buf = (profile_network_t*) buf;
+	struct tm *ts = slurm_localtime(&prf_cur->time);
+	strftime(prf_buf->tod, TOD_LEN, TOD_FMT, ts);
+	if (prior == NULL) {
+		// First sample.
+		seriesStart = prf_cur->time;
+		prf_buf->time = 0;
+	} else {
+		prf_buf->time = prf_cur->time - seriesStart;
+	}
+	prf_buf->packets_in = prf_cur->packets_in;
+	prf_buf->packets_out = prf_cur->packets_out;
+	prf_buf->size_in = prf_cur->size_in;
+	prf_buf->size_out = prf_cur->size_out;
+	return;
+}
+
+// Reduce a network sample series to a single min/ave/max/total summary.
+// Returns an allocated profile_network_s_t, or NULL if there are no samples.
+static void *_network_series_total(int n_samples, void *data)
+{
+	profile_network_t* network_data;
+	profile_network_s_t* total;
+	if (n_samples < 1)
+		return NULL;
+	network_data = (profile_network_t*) data;
+	total = xmalloc(sizeof(profile_network_s_t));
+	if (total == NULL) {
+		error("PROFILE: Out of memory getting network total");
+		return NULL;
+	}
+	// Assuming network series are a running total, and the first
+	// sample just sets the initial conditions
+	strcpy(total->start_time, network_data[0].tod);
+	total->elapsed_time = network_data[n_samples-1].time;
+	INCR_DIF_SAMPLE(total, network_data, packets_in, n_samples);
+	INCR_DIF_SAMPLE(total, network_data, size_in, n_samples);
+	INCR_DIF_SAMPLE(total, network_data, packets_out, n_samples);
+	INCR_DIF_SAMPLE(total, network_data, size_out, n_samples);
+	return total;
+}
+
+// Write a network sample series to fp as CSV, one row per sample; emits
+// the header row first when put_header is set.
+static void _network_extract_series(
+	FILE* fp, bool put_header, int job, int step,
+	char *node, char *series, void *data, int size_data)
+{
+	int n_items, ix;
+	profile_network_t* network_data = (profile_network_t*) data;
+
+	if (put_header) {
+		fprintf(fp,"Job,Step,Node,Series,Date_Time,Elapsed_time,"
+			"Packets_In,MegaBytes_In,Packets_Out,MegaBytes_Out\n");
+	}
+	n_items = size_data / sizeof(profile_network_t);
+	for (ix=0; ix < n_items; ix++) {
+		// NOTE(review): %ld for the counter fields assumes a 64-bit
+		// long; PRIu64 would be portable -- confirm field types.
+		fprintf(fp,"%d,%d,%s,%s,%s,%ld,%ld,%.3f,%ld,%.3f\n",
+			job, step, node,series,
+			network_data[ix].tod, network_data[ix].time,
+			network_data[ix].packets_in, network_data[ix].size_in,
+			network_data[ix].packets_out,
+			network_data[ix].size_out);
+	}
+	return;
+}
+
+// Write a network series summary (profile_network_s_t) to fp as one CSV
+// row; emits the header row first when put_header is set.
+static void _network_extract_total(
+	FILE* fp, bool put_header, int job, int step,
+	char *node, char *series, void *data, int size_data)
+{
+	profile_network_s_t* network_data = (profile_network_s_t*) data;
+	if (put_header) {
+		fprintf(fp,"Job,Step,Node,Series,Start_Time,Elapsed_time,"
+			"Min_Packets_In,Ave_Packets_In,"
+			"Max_Packets_In,Total_Packets_In,"
+			"Min_Megabytes_In,Ave_Megabytes_In,"
+			"Max_Megabytes_In,Total_Megabytes_In,"
+			"Min_Packets_Out,Ave_Packets_Out,"
+			"Max_Packets_Out,Total_Packets_Out,"
+			"Min_Megabytes_Out,Ave_Megabytes_Out,"
+			"Max_Megabytes_Out,Total_Megabytes_Out\n");
+	}
+	fprintf(fp, "%d,%d,%s,%s,%s,%ld", job, step, node, series,
+		network_data->start_time, network_data->elapsed_time);
+	// PUT_*_SUM emit the min/ave/max/total quadruple for each field.
+	PUT_UINT_SUM(fp, network_data->packets_in, ",");
+	PUT_DBL_SUM(fp, network_data->size_in, ",");
+	PUT_UINT_SUM(fp, network_data->packets_out, ",");
+	PUT_DBL_SUM(fp, network_data->size_out, ",");
+	fprintf(fp, "\n");
+	return;
+}
+
+// Allocate and populate the vtable of operations for the network series.
+static hdf5_api_ops_t *_network_profile_factory(void)
+{
+	hdf5_api_ops_t *net_ops = xmalloc(sizeof(*net_ops));
+
+	net_ops->dataset_size = _network_dataset_size;
+	net_ops->create_memory_datatype = _network_create_memory_datatype;
+	net_ops->create_file_datatype = _network_create_file_datatype;
+	net_ops->create_s_memory_datatype = _network_s_create_memory_datatype;
+	net_ops->create_s_file_datatype = _network_s_create_file_datatype;
+	net_ops->init_job_series = _network_init_job_series;
+	net_ops->get_series_tod = _network_get_series_tod;
+	net_ops->get_series_values = _network_get_series_values;
+	net_ops->merge_step_series = _network_merge_step_series;
+	net_ops->series_total = _network_series_total;
+	net_ops->extract_series = _network_extract_series;
+	net_ops->extract_total = _network_extract_total;
+	return net_ops;
+}
+
+// ============================================================================
+// Routines supporting Task Data type
+// ============================================================================
+
+// Size in bytes of one task series sample record.
+static int _task_dataset_size(void)
+{
+	return (int) sizeof(profile_task_t);
+}
+
+// Build the in-memory HDF5 compound datatype describing one profile_task_t
+// sample.  Returns the datatype id, or -1 on failure.
+static hid_t _task_create_memory_datatype(void)
+{
+	hid_t   mtyp_task = H5Tcreate(H5T_COMPOUND, sizeof(profile_task_t));
+	if (mtyp_task < 0) {
+		debug3("PROFILE: failed to create Task memory datatype");
+		return -1;
+	}
+	// Member names/order must match _task_create_file_datatype below.
+	MEM_ADD_DATE_TIME(mtyp_task, "Date_Time", profile_task_t, tod);
+	MEM_ADD_UINT64(mtyp_task, "Time", profile_task_t, time);
+	MEM_ADD_UINT64(mtyp_task, "CPU_Frequency", profile_task_t, cpu_freq);
+	MEM_ADD_UINT64(mtyp_task, "CPU_Time", profile_task_t, cpu_time);
+	MEM_ADD_DBL(mtyp_task, "CPU_Utilization",
+		    profile_task_t, cpu_utilization);
+	MEM_ADD_UINT64(mtyp_task, "RSS", profile_task_t, rss);
+	MEM_ADD_UINT64(mtyp_task, "VM_Size", profile_task_t, vm_size);
+	MEM_ADD_UINT64(mtyp_task, "Pages", profile_task_t, pages);
+	MEM_ADD_DBL(mtyp_task, "Read_Megabytes", profile_task_t, read_size);
+	MEM_ADD_DBL(mtyp_task, "Write_Megabytes", profile_task_t, write_size);
+
+	return mtyp_task;
+}
+
+// Build the on-file HDF5 compound datatype for one task sample: a TOD_LEN
+// date string plus nine 8-byte members (TOD_LEN+9*8).
+// Returns the datatype id, or -1 on failure.
+static hid_t _task_create_file_datatype(void)
+{
+	hid_t   ftyp_task = H5Tcreate(H5T_COMPOUND, TOD_LEN+9*8);
+	if (ftyp_task < 0) {
+		debug3("PROFILE: failed to create Task file datatype");
+		return -1;
+	}
+	// moffset: file-layout offset cursor consumed by the FILE_ADD_* macros.
+	moffset = TOD_LEN;
+	FILE_ADD_DATE_TIME(ftyp_task, "Date_Time", 0);
+	FILE_ADD_UINT64(ftyp_task, "Time");
+	FILE_ADD_UINT64(ftyp_task, "CPU_Frequency");
+	FILE_ADD_UINT64(ftyp_task, "CPU_Time");
+	FILE_ADD_DBL(ftyp_task, "CPU_Utilization");
+	FILE_ADD_UINT64(ftyp_task, "RSS");
+	FILE_ADD_UINT64(ftyp_task, "VM_Size");
+	FILE_ADD_UINT64(ftyp_task, "Pages");
+	FILE_ADD_DBL(ftyp_task, "Read_Megabytes");
+	FILE_ADD_DBL(ftyp_task, "Write_Megabytes");
+
+	return ftyp_task;
+}
+
+// Build the in-memory HDF5 compound datatype for the task series summary
+// (profile_task_s_t: min/ave/max/total per field).  Returns the datatype
+// id, or -1 on failure.
+static hid_t _task_s_create_memory_datatype(void)
+{
+	hid_t   mtyp_task = H5Tcreate(H5T_COMPOUND, sizeof(profile_task_s_t));
+	if (mtyp_task < 0) {
+		debug3("PROFILE: failed to create Task memory datatype");
+		return -1;
+	}
+	// Member names/order must match _task_s_create_file_datatype below.
+	MEM_ADD_DATE_TIME(mtyp_task, "Start Time", profile_task_s_t,
+			  start_time);
+	MEM_ADD_UINT64(mtyp_task, "Elapsed Time", profile_task_s_t,
+		       elapsed_time);
+	MEM_ADD_UINT64(mtyp_task, "Min CPU Frequency", profile_task_s_t,
+		       cpu_freq.min);
+	MEM_ADD_UINT64(mtyp_task, "Ave CPU Frequency", profile_task_s_t,
+		       cpu_freq.ave);
+	MEM_ADD_UINT64(mtyp_task, "Max CPU Frequency", profile_task_s_t,
+		       cpu_freq.max);
+	MEM_ADD_UINT64(mtyp_task, "Total CPU Frequency", profile_task_s_t,
+		       cpu_freq.total);
+	MEM_ADD_UINT64(mtyp_task, "Min CPU Time", profile_task_s_t,
+		       cpu_time.min);
+	MEM_ADD_UINT64(mtyp_task, "Ave CPU Time", profile_task_s_t,
+		       cpu_time.ave);
+	MEM_ADD_UINT64(mtyp_task, "Max CPU Time", profile_task_s_t,
+		       cpu_time.max);
+	MEM_ADD_UINT64(mtyp_task, "Total CPU Time", profile_task_s_t,
+		       cpu_time.total);
+	MEM_ADD_DBL(mtyp_task, "Min CPU Utilization", profile_task_s_t,
+		    cpu_utilization.min);
+	MEM_ADD_DBL(mtyp_task, "Ave CPU Utilization", profile_task_s_t,
+		    cpu_utilization.ave);
+	MEM_ADD_DBL(mtyp_task, "Max CPU Utilization", profile_task_s_t,
+		    cpu_utilization.max);
+	MEM_ADD_DBL(mtyp_task, "Total CPU Utilization", profile_task_s_t,
+		    cpu_utilization.total);
+	MEM_ADD_UINT64(mtyp_task, "Min RSS", profile_task_s_t, rss.min);
+	MEM_ADD_UINT64(mtyp_task, "Ave RSS", profile_task_s_t, rss.ave);
+	MEM_ADD_UINT64(mtyp_task, "Max RSS", profile_task_s_t, rss.max);
+	MEM_ADD_UINT64(mtyp_task, "Total RSS", profile_task_s_t, rss.total);
+	MEM_ADD_UINT64(mtyp_task, "Min VM Size", profile_task_s_t, vm_size.min);
+	MEM_ADD_UINT64(mtyp_task, "Ave VM Size", profile_task_s_t, vm_size.ave);
+	MEM_ADD_UINT64(mtyp_task, "Max VM Size", profile_task_s_t, vm_size.max);
+	MEM_ADD_UINT64(mtyp_task, "Total VM Size",
+		       profile_task_s_t, vm_size.total);
+	MEM_ADD_UINT64(mtyp_task, "Min Pages", profile_task_s_t, pages.min);
+	MEM_ADD_UINT64(mtyp_task, "Ave Pages", profile_task_s_t, pages.ave);
+	MEM_ADD_UINT64(mtyp_task, "Max Pages", profile_task_s_t, pages.max);
+	MEM_ADD_UINT64(mtyp_task, "Total Pages", profile_task_s_t, pages.total);
+	MEM_ADD_DBL(mtyp_task, "Min Read Megabytes", profile_task_s_t,
+		    read_size.min);
+	MEM_ADD_DBL(mtyp_task, "Ave Read Megabytes", profile_task_s_t,
+		    read_size.ave);
+	MEM_ADD_DBL(mtyp_task, "Max Read Megabytes", profile_task_s_t,
+		    read_size.max);
+	MEM_ADD_DBL(mtyp_task, "Total Read Megabytes", profile_task_s_t,
+		    read_size.total);
+	MEM_ADD_DBL(mtyp_task, "Min Write Megabytes", profile_task_s_t,
+		    write_size.min);
+	MEM_ADD_DBL(mtyp_task, "Ave Write Megabytes", profile_task_s_t,
+		    write_size.ave);
+	MEM_ADD_DBL(mtyp_task, "Max Write Megabytes", profile_task_s_t,
+		    write_size.max);
+	MEM_ADD_DBL(mtyp_task, "Total Write Megabytes", profile_task_s_t,
+		    write_size.total);
+
+	return mtyp_task;
+}
+
+// Build the on-file HDF5 compound datatype for the task series summary:
+// a TOD_LEN start-time string plus 33 8-byte members (TOD_LEN+33*8).
+// Returns the datatype id, or -1 on failure.
+static hid_t _task_s_create_file_datatype(void)
+{
+	hid_t   ftyp_task = H5Tcreate(H5T_COMPOUND, TOD_LEN+33*8);
+	if (ftyp_task < 0) {
+		debug3("PROFILE: failed to create Task file datatype");
+		return -1;
+	}
+	// moffset: file-layout offset cursor consumed by the FILE_ADD_* macros.
+	moffset = TOD_LEN;
+	FILE_ADD_DATE_TIME(ftyp_task, "Start Time", 0);
+	FILE_ADD_UINT64(ftyp_task, "Elapsed Time");
+	FILE_ADD_UINT64(ftyp_task, "Min CPU Frequency");
+	FILE_ADD_UINT64(ftyp_task, "Ave CPU Frequency");
+	FILE_ADD_UINT64(ftyp_task, "Max CPU Frequency");
+	FILE_ADD_UINT64(ftyp_task, "Total CPU Frequency");
+	FILE_ADD_UINT64(ftyp_task, "Min CPU Time");
+	FILE_ADD_UINT64(ftyp_task, "Ave CPU Time");
+	FILE_ADD_UINT64(ftyp_task, "Max CPU Time");
+	FILE_ADD_UINT64(ftyp_task, "Total CPU Time");
+	FILE_ADD_DBL(ftyp_task, "Min CPU Utilization");
+	FILE_ADD_DBL(ftyp_task, "Ave CPU Utilization");
+	FILE_ADD_DBL(ftyp_task, "Max CPU Utilization");
+	FILE_ADD_DBL(ftyp_task, "Total CPU Utilization");
+	FILE_ADD_UINT64(ftyp_task, "Min RSS");
+	FILE_ADD_UINT64(ftyp_task, "Ave RSS");
+	FILE_ADD_UINT64(ftyp_task, "Max RSS");
+	FILE_ADD_UINT64(ftyp_task, "Total RSS");
+	FILE_ADD_UINT64(ftyp_task, "Min VM Size");
+	FILE_ADD_UINT64(ftyp_task, "Ave VM Size");
+	FILE_ADD_UINT64(ftyp_task, "Max VM Size");
+	FILE_ADD_UINT64(ftyp_task, "Total VM Size");
+	FILE_ADD_UINT64(ftyp_task, "Min Pages");
+	FILE_ADD_UINT64(ftyp_task, "Ave Pages");
+	FILE_ADD_UINT64(ftyp_task, "Max Pages");
+	FILE_ADD_UINT64(ftyp_task, "Total Pages");
+	FILE_ADD_DBL(ftyp_task, "Min Read Megabytes");
+	FILE_ADD_DBL(ftyp_task, "Ave Read Megabytes");
+	FILE_ADD_DBL(ftyp_task, "Max Read Megabytes");
+	FILE_ADD_DBL(ftyp_task, "Total Read Megabytes");
+	FILE_ADD_DBL(ftyp_task, "Min Write Megabytes");
+	FILE_ADD_DBL(ftyp_task, "Ave Write Megabytes");
+	FILE_ADD_DBL(ftyp_task, "Max Write Megabytes");
+	FILE_ADD_DBL(ftyp_task, "Total Write Megabytes");
+
+	return ftyp_task;
+}
+
+// Allocate a buffer large enough for n_samples combined task samples.
+static void *_task_init_job_series(int n_samples)
+{
+	profile_task_t *series_buf =
+		xmalloc(sizeof(profile_task_t) * n_samples);
+
+	if (series_buf == NULL) {
+		debug3("PROFILE: failed to get memory for combined task data");
+		return NULL;
+	}
+	return (void *) series_buf;
+}
+
+// Copy the Date_Time strings out of a task sample array.
+// Returns an array of nsmp duplicated strings, or NULL on failure.
+static char** _task_get_series_tod(void* data, int nsmp)
+{
+	profile_task_t *samples = (profile_task_t *) data;
+	char **tods = (char **) xmalloc(nsmp * sizeof(char *));
+	int i;
+
+	if (tods == NULL) {
+		info("Failed to get memory for task tod");
+		return NULL;
+	}
+	for (i = 0; i < nsmp; i++)
+		tods[i] = xstrdup(samples[i].tod);
+	return tods;
+}
+
+// Extract the named data item from a task sample array as doubles.
+// Returns an array of nsmp values, or NULL if data_name is not one of the
+// known task items (name match is case-insensitive).
+static double* _task_get_series_values(char* data_name, void* data, int nsmp)
+{
+	int ix;
+	profile_task_t* task_series = (profile_task_t*) data;
+	double  *task_values = NULL;
+	task_values = xmalloc(nsmp*sizeof(double));
+	if (task_values == NULL) {
+		info("PROFILE: Failed to get memory for task data");
+		return NULL;
+	}
+	if (strcasecmp(data_name,"Time") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			task_values[ix] = (double) task_series[ix].time;
+
+		}
+		return task_values;
+	} else if (strcasecmp(data_name,"CPU_Frequency") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			task_values[ix] = (double) task_series[ix].cpu_freq;
+
+		}
+		return task_values;
+	} else if (strcasecmp(data_name,"CPU_Time") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			task_values[ix] = (double) task_series[ix].cpu_time;
+
+		}
+		return task_values;
+	} else if (strcasecmp(data_name,"CPU_Utilization") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			task_values[ix] = task_series[ix].cpu_utilization;
+
+		}
+		return task_values;
+	} else if (strcasecmp(data_name,"RSS") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			task_values[ix] = (double) task_series[ix].rss;
+
+		}
+		return task_values;
+	} else if (strcasecmp(data_name,"VM_Size") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			task_values[ix] = (double) task_series[ix].vm_size;
+
+		}
+		return task_values;
+	} else if (strcasecmp(data_name,"Pages") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			task_values[ix] = (double) task_series[ix].pages;
+
+		}
+		return task_values;
+	} else if (strcasecmp(data_name,"Read_Megabytes") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			task_values[ix] = task_series[ix].read_size;
+
+		}
+		return task_values;
+	} else if (strcasecmp(data_name,"Write_Megabytes") == 0) {
+		for (ix=0; ix < nsmp; ix++) {
+			task_values[ix] = task_series[ix].write_size;
+
+		}
+		return task_values;
+	}
+	// Unknown item: free the buffer and report.
+	xfree(task_values);
+	info("PROFILE: %s is invalid data item for task data", data_name);
+	return NULL;
+}
+
+// Merge one task sample (cur) into the combined buffer (buf).  cpu_time,
+// read_size and write_size are running totals in the raw samples, so they
+// are converted to per-interval deltas against the prior raw sample;
+// cpu_utilization is recomputed over the interval.  cpu_freq/rss/vm_size/
+// pages are instantaneous values and copied through.
+static void _task_merge_step_series(
+	hid_t group, void *prior, void *cur, void *buf)
+{
+// This is a running total series
+	profile_task_t* prf_prior = (profile_task_t*) prior;
+	profile_task_t* prf_cur = (profile_task_t*) cur;
+	profile_task_t* buf_prv = NULL;
+	profile_task_t* buf_cur = (profile_task_t*) buf;
+	struct tm *ts;
+
+	ts = slurm_localtime(&prf_cur->time);
+	strftime(buf_cur->tod, TOD_LEN, TOD_FMT, ts);
+	if (prf_prior == NULL) {
+		// First sample.
+		seriesStart = prf_cur->time;
+		buf_cur->time = 0;
+		buf_cur->cpu_time = 0;
+		buf_cur->cpu_utilization = 0;
+		buf_cur->read_size = 0.0;
+		buf_cur->write_size = 0.0;
+	} else {
+		// buf_cur - 1 assumes buf entries are contiguous and filled
+		// in sample order, so the previous merged sample precedes it.
+		buf_prv = buf_cur - 1;
+		buf_cur->time = prf_cur->time - seriesStart;
+		buf_cur->cpu_time = prf_cur->cpu_time - prf_prior->cpu_time;
+		// NOTE(review): divides by the interval length; if two
+		// samples share a timestamp this divides by zero -- confirm
+		// sampling guarantees distinct times.
+		buf_cur->cpu_utilization = 100.0*((double) buf_cur->cpu_time /
+				(double) (buf_cur->time - buf_prv->time));
+		buf_cur->read_size =
+			prf_cur->read_size - prf_prior->read_size;
+		buf_cur->write_size =
+			prf_cur->write_size - prf_prior->write_size;
+	}
+	buf_cur->cpu_freq = prf_cur->cpu_freq;
+	buf_cur->rss = prf_cur->rss;
+	buf_cur->vm_size = prf_cur->vm_size;
+	buf_cur->pages = prf_cur->pages;
+	return;
+}
+
+// Reduce a task sample series to a single min/ave/max/total summary.
+// Returns an allocated profile_task_s_t, or NULL if there are no samples.
+static void *_task_series_total(int n_samples, void *data)
+{
+	profile_task_t* task_data;
+	profile_task_s_t* total;
+	// Guard added: without it task_data[0] / task_data[n_samples-1]
+	// read out of bounds on an empty series.  Matches the checks in
+	// _io_series_total and _network_series_total.
+	if (n_samples < 1)
+		return NULL;
+	task_data = (profile_task_t*) data;
+	total = xmalloc(sizeof(profile_task_s_t));
+	if (total == NULL) {
+		error("PROFILE: Out of memory getting task total");
+		return NULL;
+	}
+	strcpy(total->start_time, task_data[0].tod);
+	total->elapsed_time = task_data[n_samples-1].time;
+	INCR_DIF_SAMPLE(total, task_data, cpu_freq, n_samples);
+	INCR_RT_SAMPLE(total, task_data, cpu_time, n_samples);
+	INCR_DIF_SAMPLE(total, task_data, cpu_utilization, n_samples);
+	INCR_DIF_SAMPLE(total, task_data, rss, n_samples);
+	INCR_DIF_SAMPLE(total, task_data, vm_size , n_samples);
+	INCR_DIF_SAMPLE(total, task_data, pages, n_samples);
+	INCR_RT_SAMPLE(total, task_data, read_size, n_samples);
+	INCR_RT_SAMPLE(total, task_data, write_size, n_samples);
+	return total;
+}
+
+static void _task_extract_series(
+	FILE* fp, bool put_header, int job, int step,
+	char *node, char *series, void *data, int size_data)
+{
+	int n_items, ix;
+	profile_task_t* task_data = (profile_task_t*) data;
+	if (put_header) {
+		fprintf(fp,"Job,Step,Node,Series,Date Time,ElapsedTime,"
+			"CPU Frequency,CPU Time,"
+			"CPU Utilization,rss,VM Size,Pages,"
+			"Read_bytes,Write_bytes\n");
+	}
+	n_items = size_data / sizeof(profile_task_t);
+	for (ix=0; ix < n_items; ix++) {
+		fprintf(fp,"%d,%d,%s,%s,%s,%ld,%ld,%ld,%.3f",
+			job, step, node, series,
+			task_data[ix].tod, task_data[ix].time,
+			task_data[ix].cpu_freq,
+			task_data[ix].cpu_time, task_data[ix].cpu_utilization);
+		fprintf(fp,",%ld,%ld,%ld,%.3f,%.3f\n",	task_data[ix].rss,
+			task_data[ix].vm_size, task_data[ix].pages,
+			task_data[ix].read_size, task_data[ix].write_size);
+	}
+	return;
+}
+
+static void _task_extract_total(
+	FILE* fp, bool put_header, int job, int step,
+	char *node, char *series, void *data, int size_data)
+{
+
+	profile_task_s_t* task_data = (profile_task_s_t*) data;
+	if (put_header) {
+		fprintf(fp,"Job,Step,Node,Series,Start_Time,Elapsed_time,"
+			"Min CPU Frequency,Ave CPU Frequency,"
+			"Ave CPU Frequency,Total CPU Frequency,"
+			"Min_CPU_Time,Ave_CPU_Time,"
+			"Max_CPU_Time,Total_CPU_Time,"
+			"Min_CPU_Utilization,Ave_CPU_Utilization,"
+			"Max_CPU_Utilization,Total_CPU_Utilization,"
+			"Min_RSS,Ave_RSS,Max_RSS,Total_RSS,"
+			"Min_VMSize,Ave_VMSize,Max_VMSize,Total_VMSize,"
+			"Min_Pages,Ave_Pages,Max_Pages,Total_Pages,"
+			"Min_Read_Megabytes,Ave_Read_Megabytes,"
+			"Max_Read_Megabytes,Total_Read_Megabytes,"
+			"Min_Write_Megabytes,Ave_Write_Megabytes,"
+			"Max_Write_Megabytes,Total_Write_Megabytes\n");
+	}
+	fprintf(fp, "%d,%d,%s,%s,%s,%ld", job, step, node, series,
+		task_data->start_time, task_data->elapsed_time);
+	PUT_UINT_SUM(fp, task_data->cpu_freq, ",");
+	PUT_UINT_SUM(fp, task_data->cpu_time, ",");
+	PUT_DBL_SUM(fp, task_data->cpu_utilization, ",");
+	PUT_UINT_SUM(fp, task_data->rss, ",");
+	PUT_UINT_SUM(fp, task_data->vm_size, ",");
+	PUT_UINT_SUM(fp, task_data->pages, ",");
+	PUT_DBL_SUM(fp, task_data->read_size, ",");
+	PUT_DBL_SUM(fp, task_data->write_size, ",");
+	fprintf(fp, "\n");
+	return;
+}
+
+static hdf5_api_ops_t *_task_profile_factory(void)
+{
+	hdf5_api_ops_t* ops = xmalloc(sizeof(hdf5_api_ops_t));
+	ops->dataset_size = &_task_dataset_size;
+	ops->create_memory_datatype = &_task_create_memory_datatype;
+	ops->create_file_datatype = &_task_create_file_datatype;
+	ops->create_s_memory_datatype = &_task_s_create_memory_datatype;
+	ops->create_s_file_datatype = &_task_s_create_file_datatype;
+	ops->init_job_series = &_task_init_job_series;
+	ops->get_series_tod = &_task_get_series_tod;
+	ops->get_series_values = &_task_get_series_values;
+	ops->merge_step_series = &_task_merge_step_series;
+	ops->series_total = &_task_series_total;
+	ops->extract_series = &_task_extract_series;
+	ops->extract_total = &_task_extract_total;
+	return ops;
+}
+
+/* ============================================================================
+ * Common support functions
+ ===========================================================================*/
+
+extern hdf5_api_ops_t* profile_factory(uint32_t type)
+{
+	switch (type) {
+	case ACCT_GATHER_PROFILE_ENERGY:
+		return _energy_profile_factory();
+		break;
+	case ACCT_GATHER_PROFILE_TASK:
+		return _task_profile_factory();
+		break;
+	case ACCT_GATHER_PROFILE_LUSTRE:
+		return _io_profile_factory();
+		break;
+	case ACCT_GATHER_PROFILE_NETWORK:
+		return _network_profile_factory();
+		break;
+	default:
+		error("profile_factory: Unknown type %d sent", type);
+		return NULL;
+	}
+}
+
+
+extern void profile_init_old(void)
+{
+	typTOD = H5Tcopy (H5T_C_S1);
+	H5Tset_size (typTOD, TOD_LEN); /* create string of length TOD_LEN */
+
+	return;
+}
+
+extern void profile_fini_old(void)
+{
+	H5Tclose(typTOD);
+
+	return;
+}
+
+extern char *get_data_set_name(char *type)
+{
+	static char  dset_name[MAX_DATASET_NAME+1];
+	dset_name[0] = '\0';
+	sprintf(dset_name, "%s Data", type);
+
+	return dset_name;
+}
+
+
+static char* _H5O_type_t2str(H5O_type_t type)
+{
+	switch (type)
+	{
+	case H5O_TYPE_UNKNOWN:
+		return "H5O_TYPE_UNKNOWN";
+	case H5O_TYPE_GROUP:
+		return "H5O_TYPE_GROUP";
+	case H5O_TYPE_DATASET:
+		return "H5O_TYPE_DATASET";
+	case H5O_TYPE_NAMED_DATATYPE:
+		return "H5O_TYPE_NAMED_DATATYPE";
+	case H5O_TYPE_NTYPES:
+		return "H5O_TYPE_NTYPES";
+	default:
+		return "Invalid H5O_TYPE";
+	}
+}
+
+
+extern void hdf5_obj_info(hid_t group, char *nam_group)
+{
+	char buf[MAX_GROUP_NAME+1];
+	hsize_t nobj, nattr;
+	hid_t aid;
+	int i, len;
+	H5G_info_t group_info;
+	H5O_info_t object_info;
+
+	if (group < 0) {
+		info("PROFILE: Group is not HDF5 object");
+		return;
+	}
+	H5Gget_info(group, &group_info);
+	nobj = group_info.nlinks;
+	H5Oget_info(group, &object_info);
+	nattr = object_info.num_attrs;
+	info("PROFILE group: %s NumObject=%d NumAttributes=%d",
+	     nam_group, (int) nobj, (int) nattr);
+	for (i = 0; (nobj>0) && (i<nobj); i++) {
+		H5Oget_info_by_idx(group, ".", H5_INDEX_NAME, H5_ITER_INC, i,
+				   &object_info, H5P_DEFAULT);
+		len = H5Lget_name_by_idx(group, ".", H5_INDEX_NAME,
+					 H5_ITER_INC, i, buf, MAX_GROUP_NAME,
+					 H5P_DEFAULT);
+		if ((len > 0) && (len < MAX_GROUP_NAME)) {
+			info("PROFILE: Obj=%d Type=%s Name=%s",
+			     i, _H5O_type_t2str(object_info.type), buf);
+		} else {
+			info("PROFILE: Obj=%d Type=%s Name=%s (is truncated)",
+			     i, _H5O_type_t2str(object_info.type), buf);
+		}
+	}
+	for (i = 0; (nattr>0) && (i<nattr); i++) {
+		aid = H5Aopen_by_idx(group, ".", H5_INDEX_NAME, H5_ITER_INC,
+				     i, H5P_DEFAULT, H5P_DEFAULT);
+		// Get the name of the attribute.
+		len = H5Aget_name(aid, MAX_ATTR_NAME, buf);
+		if (len < MAX_ATTR_NAME) {
+			info("PROFILE: Attr=%d Name=%s", i, buf);
+		} else {
+			info("PROFILE: Attr=%d Name=%s (is truncated)", i, buf);
+		}
+		H5Aclose(aid);
+	}
+
+	return;
+}
+
+extern char *get_string_attribute(hid_t parent, char *name)
+{
+	char *value = NULL;
+
+	hid_t   attr, type;
+	size_t  size;
+
+	attr = get_attribute_handle(parent, name);
+	if (attr < 0) {
+		debug3("PROFILE: Attribute=%s does not exist", name);
+		return NULL;
+	}
+	type  = H5Aget_type(attr);
+	if (H5Tget_class(type) != H5T_STRING) {
+		H5Aclose(attr);
+		debug3("PROFILE: Attribute=%s is not a string", name);
+		return NULL;
+	}
+	size = H5Tget_size(type);
+	value = xmalloc(size+1);
+	if (value == NULL) {
+		H5Tclose(type);
+		H5Aclose(attr);
+		debug3("PROFILE: failed to malloc %d bytes for attribute=%s",
+		       (int) size,
+		       name);
+		return NULL;
+	}
+	if (H5Aread(attr, type, value) < 0) {
+		xfree(value);
+		H5Tclose(type);
+		H5Aclose(attr);
+		debug3("PROFILE: failed to read attribute=%s", name);
+		return NULL;
+	}
+	H5Tclose(type);
+	H5Aclose(attr);
+
+	return value;
+}
+
+extern int get_int_attribute(hid_t parent, char *name)
+{
+	int value = 0;
+
+	hid_t   attr;
+	attr = get_attribute_handle(parent, name);
+	if (attr < 0) {
+		debug3("PROFILE: Attribute=%s does not exist, returning", name);
+		return value;
+	}
+	if (H5Aread(attr, H5T_NATIVE_INT, &value) < 0) {
+		debug3("PROFILE: failed to read attribute=%s, returning", name);
+	}
+	H5Aclose(attr);
+
+	return value;
+}
+
+extern uint32_t get_uint32_attribute(hid_t parent, char *name)
+{
+	int value = 0;
+	hid_t   attr;
+
+	attr = get_attribute_handle(parent, name);
+	if (attr < 0) {
+		debug3("PROFILE: Attribute=%s does not exist, returning", name);
+		return value;
+	}
+	if (H5Aread(attr, H5T_NATIVE_UINT32, &value) < 0) {
+		debug3("PROFILE: failed to read attribute=%s, returning", name);
+	}
+	H5Aclose(attr);
+
+	return value;
+}
+
+extern void *get_hdf5_data(hid_t parent, uint32_t type,
+			   char *nam_group, int *size_data)
+{
+	void *  data = NULL;
+
+	hid_t   id_data_set, dtyp_memory;
+	hsize_t szDset;
+	herr_t  ec;
+	char *subtype = NULL;
+	hdf5_api_ops_t* ops = profile_factory(type);
+	char *type_name = acct_gather_profile_type_to_string(type);
+
+	if (ops == NULL) {
+		debug3("PROFILE: failed to create %s operations",
+		       type_name);
+		return NULL;
+	}
+	subtype = get_string_attribute(parent, ATTR_SUBDATATYPE);
+	if (subtype < 0) {
+		xfree(ops);
+		debug3("PROFILE: failed to get %s attribute",
+		       ATTR_SUBDATATYPE);
+		return NULL;
+	}
+	id_data_set = H5Dopen(parent, get_data_set_name(nam_group),
+			      H5P_DEFAULT);
+	if (id_data_set < 0) {
+		xfree(subtype);
+		xfree(ops);
+		debug3("PROFILE: failed to open %s Data Set",
+		       type_name);
+		return NULL;
+	}
+	if (strcmp(subtype, SUBDATA_SUMMARY))
+		dtyp_memory = (*(ops->create_memory_datatype))();
+	else
+		dtyp_memory = (*(ops->create_s_memory_datatype))();
+	xfree(subtype);
+	if (dtyp_memory < 0) {
+		H5Dclose(id_data_set);
+		xfree(ops);
+		debug3("PROFILE: failed to create %s memory datatype",
+		       type_name);
+		return NULL;
+	}
+	szDset = H5Dget_storage_size(id_data_set);
+	*size_data = (int) szDset;
+	if (szDset == 0) {
+		H5Tclose(dtyp_memory);
+		H5Dclose(id_data_set);
+		xfree(ops);
+		debug3("PROFILE: %s data set is empty",
+		       type_name);
+		return NULL;
+	}
+	data = xmalloc(szDset);
+	if (data == NULL) {
+		H5Tclose(dtyp_memory);
+		H5Dclose(id_data_set);
+		xfree(ops);
+		debug3("PROFILE: failed to get memory for %s data set",
+		       type_name);
+		return NULL;
+	}
+	ec = H5Dread(id_data_set, dtyp_memory, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+		     data);
+	if (ec < 0) {
+		H5Tclose(dtyp_memory);
+		H5Dclose(id_data_set);
+		xfree(data);
+		xfree(ops);
+		debug3("PROFILE: failed to read %s data",
+		       type_name);
+		return NULL;
+	}
+	H5Tclose(dtyp_memory);
+	H5Dclose(id_data_set);
+	xfree(ops);
+
+	return data;
+}
+
+extern void put_hdf5_data(hid_t parent, uint32_t type, char *subtype,
+			  char *group, void *data, int n_item)
+{
+	hid_t   id_group, dtyp_memory, dtyp_file, id_data_space, id_data_set;
+	hsize_t dims[1];
+	herr_t  ec;
+	hdf5_api_ops_t* ops = profile_factory(type);
+	char *type_name = acct_gather_profile_type_to_string(type);
+
+	if (ops == NULL) {
+		debug3("PROFILE: failed to create %s operations",
+		       type_name);
+		return;
+	}
+	// Create the datatypes.
+	if (strcmp(subtype, SUBDATA_SUMMARY)) {
+		dtyp_memory = (*(ops->create_memory_datatype))();
+		dtyp_file = (*(ops->create_file_datatype))();
+	} else {
+		dtyp_memory = (*(ops->create_s_memory_datatype))();
+		dtyp_file = (*(ops->create_s_file_datatype))();
+	}
+
+	if (dtyp_memory < 0) {
+		xfree(ops);
+		debug3("PROFILE: failed to create %s memory datatype",
+		       type_name);
+		return;
+	}
+
+	if (dtyp_file < 0) {
+		H5Tclose(dtyp_memory);
+		xfree(ops);
+		debug3("PROFILE: failed to create %s file datatype",
+		       type_name);
+		return;
+	}
+
+	dims[0] = n_item;
+	id_data_space = H5Screate_simple(1, dims, NULL);
+	if (id_data_space < 0) {
+		H5Tclose(dtyp_file);
+		H5Tclose(dtyp_memory);
+		xfree(ops);
+		debug3("PROFILE: failed to create %s space descriptor",
+		       type_name);
+		return;
+	}
+
+	id_group = H5Gcreate(parent, group, H5P_DEFAULT,
+			     H5P_DEFAULT, H5P_DEFAULT);
+	if (id_group < 0) {
+		H5Sclose(id_data_space);
+		H5Tclose(dtyp_file);
+		H5Tclose(dtyp_memory);
+		xfree(ops);
+		debug3("PROFILE: failed to create %s group", group);
+		return;
+	}
+
+	put_string_attribute(id_group, ATTR_DATATYPE, type_name);
+	put_string_attribute(id_group, ATTR_SUBDATATYPE, subtype);
+
+	id_data_set = H5Dcreate(id_group, get_data_set_name(group), dtyp_file,
+				id_data_space, H5P_DEFAULT, H5P_DEFAULT,
+				H5P_DEFAULT);
+	if (id_data_set < 0) {
+		H5Gclose(id_group);
+		H5Sclose(id_data_space);
+		H5Tclose(dtyp_file);
+		H5Tclose(dtyp_memory);
+		xfree(ops);
+		debug3("PROFILE: failed to create %s dataset", group);
+		return;
+	}
+
+	ec = H5Dwrite(id_data_set, dtyp_memory, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+		      data);
+	if (ec < 0) {
+		debug3("PROFILE: failed to create write task data");
+		// Fall through to release resources
+	}
+	H5Dclose(id_data_set);
+	H5Gclose(id_group);
+	H5Sclose(id_data_space);
+	H5Tclose(dtyp_file);
+	H5Tclose(dtyp_memory);
+	xfree(ops);
+
+
+	return;
+}
+
diff --git a/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/hdf5_api.h b/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/hdf5_api.h
new file mode 100644
index 000000000..2066eadcd
--- /dev/null
+++ b/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/hdf5_api.h
@@ -0,0 +1,361 @@
+/****************************************************************************\
+ *  hdf5_api.h
+ *****************************************************************************
+ *  Copyright (C) 2013 Bull S. A. S.
+ *		Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois.
+ *
+ *  Written by Rod Schultz <rod.schultz@bull.com>
+ *
+ *  Portions Copyright (C) 2013 SchedMD LLC.
+ *  Written by Danny Auble <da@schedmd.com>
+ *
+ *  Provide support for acct_gather_profile plugins based on HDF5 files.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\****************************************************************************/
+#ifndef __ACCT_GATHER_HDF5_OLD_API_H__
+#define __ACCT_GATHER_HDF5_OLD_API_H__
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#  if HAVE_INTTYPES_H
+#    include <inttypes.h>
+#  else
+#    if HAVE_STDINT_H
+#      include <stdint.h>
+#    endif
+#  endif			/* HAVE_INTTYPES_H */
+#else				/* !HAVE_CONFIG_H */
+#  include <inttypes.h>
+#endif				/*  HAVE_CONFIG_H */
+
+#include <stdlib.h>
+
+#include <hdf5.h>
+#include "src/common/slurm_acct_gather_profile.h"
+#include "../../hdf5_api.h"
+
+#define MAX_PROFILE_PATH 1024
+#define MAX_ATTR_NAME 64
+#define MAX_GROUP_NAME 64
+#define MAX_DATASET_NAME 64
+
+#define ATTR_NODENAME "Node Name"
+#define ATTR_STARTTIME "Start Time"
+#define ATTR_NSTEPS "Number of Steps"
+#define ATTR_NNODES "Number of Nodes"
+#define ATTR_NTASKS "Number of Tasks"
+#define ATTR_TASKID "Task Id"
+#define ATTR_CPUPERTASK "CPUs per Task"
+#define ATTR_DATATYPE "Data Type"
+#define ATTR_SUBDATATYPE "Subdata Type"
+#define ATTR_STARTTIME "Start Time"
+#define ATTR_STARTSEC "Start Second"
+#define SUBDATA_DATA "Data"
+#define SUBDATA_NODE "Node"
+#define SUBDATA_SAMPLE "Sample"
+#define SUBDATA_SERIES "Series"
+#define SUBDATA_TOTAL "Total"
+#define SUBDATA_SUMMARY "Summary"
+
+#define GRP_ENERGY "Energy"
+#define GRP_LUSTRE "Lustre"
+#define GRP_STEP "Step"
+#define GRP_NODES "Nodes"
+#define GRP_NODE "Node"
+#define GRP_NETWORK "Network"
+#define GRP_SAMPLES "Time Series"
+#define GRP_SAMPLE "Sample"
+#define GRP_TASKS "Tasks"
+#define GRP_TASK "Task"
+#define GRP_TOTALS "Totals"
+
+// Data types supported by all HDF5 plugins of this type
+
+#define TOD_LEN 24
+#define TOD_FMT "%F %T"
+
+/*
+ * prof_uint_sum is a low level structure intended to hold the
+ * minimum, average, maximum, and total values of a data item.
+ * It is usually used in a summary data structure for an item
+ * that occurs in a time series.
+ */
+typedef struct prof_uint_sum {
+	uint64_t min;	// Minumum value
+	uint64_t ave;	// Average value
+	uint64_t max;	// Maximum value
+	uint64_t total;	// Accumlated value
+} prof_uint_sum_t;
+
+// Save as prof_uint_sum, but for double precision items
+typedef struct prof_dbl_sum {
+	double	min;	// Minumum value
+	double	ave;	// Average value
+	double	max;	// Maximum value
+	double	total;	// Accumlated value
+} prof_dbl_sum_t;
+
+#define PROFILE_ENERGY_DATA "Energy"
+// energy data structures
+//	node_step file
+typedef struct profile_energy {
+	char		tod[TOD_LEN];	// Not used in node-step
+	time_t		time;
+	uint64_t	power;
+	uint64_t	cpu_freq;
+} profile_energy_t;
+//	summary data in job-node-totals
+typedef struct profile_energy_s {
+	char		start_time[TOD_LEN];
+	uint64_t	elapsed_time;
+	prof_uint_sum_t	power;
+	prof_uint_sum_t cpu_freq;
+} profile_energy_s_t; // series summary
+
+#define PROFILE_IO_DATA "I/O"
+// io data structure
+//	node_step file
+typedef struct profile_io {
+	char		tod[TOD_LEN];	// Not used in node-step
+	time_t		time;
+	uint64_t	reads;
+	double		read_size;	// currently in megabytes
+	uint64_t	writes;
+	double		write_size;	// currently in megabytes
+} profile_io_t;
+//	summary data in job-node-totals
+typedef struct profile_io_s {
+	char		start_time[TOD_LEN];
+	uint64_t	elapsed_time;
+	prof_uint_sum_t	reads;
+	prof_dbl_sum_t	read_size;	// currently in megabytes
+	prof_uint_sum_t	writes;
+	prof_dbl_sum_t	write_size;	// currently in megabytes
+} profile_io_s_t;
+
+#define PROFILE_NETWORK_DATA "Network"
+// Network data structure
+//	node_step file
+typedef struct profile_network {
+	char		tod[TOD_LEN];	// Not used in node-step
+	time_t		time;
+	uint64_t	packets_in;
+	double		size_in;	// currently in megabytes
+	uint64_t	packets_out;
+	double		size_out;	// currently in megabytes
+} profile_network_t;
+//	summary data in job-node-totals
+typedef struct profile_network_s {
+	char		start_time[TOD_LEN];
+	uint64_t	elapsed_time;
+	prof_uint_sum_t packets_in;
+	prof_dbl_sum_t  size_in;		// currently in megabytes
+	prof_uint_sum_t packets_out;
+	prof_dbl_sum_t  size_out;	// currently in megabytes
+} profile_network_s_t;
+
+#define PROFILE_TASK_DATA "Task"
+// task data structure
+//	node_step file
+typedef struct profile_task {
+	char		tod[TOD_LEN];	// Not used in node-step
+	time_t		time;
+	uint64_t	cpu_freq;
+	uint64_t	cpu_time;
+	double		cpu_utilization;
+	uint64_t	rss;
+	uint64_t	vm_size;
+	uint64_t	pages;
+	double	 	read_size;	// currently in megabytes
+	double	 	write_size;	// currently in megabytes
+} profile_task_t;
+//	summary data in job-node-totals
+typedef struct profile_task_s {
+	char		start_time[TOD_LEN];
+	uint64_t	elapsed_time;
+	prof_uint_sum_t	cpu_freq;
+	prof_uint_sum_t cpu_time;
+	prof_dbl_sum_t 	cpu_utilization;
+	prof_uint_sum_t rss;
+	prof_uint_sum_t vm_size;
+	prof_uint_sum_t pages;
+	prof_dbl_sum_t 	read_size;	// currently in megabytes
+	prof_dbl_sum_t 	write_size;	// currently in megabytes
+} profile_task_s_t;
+
+/*
+ * Structure of function pointers of common operations on a profile data type.
+ *	dataset_size -- size of one dataset (structure size)
+ *	create_memory_datatype -- creates hdf5 memory datatype corresponding
+ *		to the datatype structure.
+ *	create_file_datatype -- creates hdf5 file datatype corresponding
+ *		to the datatype structure.
+ *	create_s_memory_datatype -- creates hdf5 memory datatype corresponding
+ *		to the summary datatype structure.
+ *	create_s_file_datatype -- creates hdf5 file datatype corresponding
+ *		to the summary datatype structure.
+ *	init_job_series -- allocates a buffer for a complete time series
+ *		(in job merge) and initializes each member
+ *      get_series_tod -- get the date/time value of each sample in the series
+ *      get_series_values -- gets a specific data item from each sample in the
+ *		series
+ *	merge_step_series -- merges all the individual time samples into a
+ *		single data set with one item per sample.
+ *		Data items can be scaled (e.g. subtracting beginning time)
+ *		differenced (to show counts in interval) or other things
+ *		appropriate for the series.
+ *	series_total -- accumulate or average members in the entire series to
+ *		be added to the file as totals for the node or task.
+ *	extract_series -- format members of a structure for putting to
+ *		to a file data extracted from a time series to be imported into
+ *		another analysis tool. (e.g. format as comma separated value.)
+ *	extract_totals -- format members of a structure for putting to
+ *		to a file data extracted from a time series total to be
+ *		imported into another analysis tool.
+ *		(format as comma,separated value, for example.)
+ */
+typedef struct hdf5_api_ops {
+	int   (*dataset_size) (void);
+	hid_t (*create_memory_datatype) (void);
+	hid_t (*create_file_datatype) (void);
+	hid_t (*create_s_memory_datatype) (void);
+	hid_t (*create_s_file_datatype) (void);
+	void* (*init_job_series) (int);
+	char** (*get_series_tod) (void*, int);
+	double* (*get_series_values) (char*, void*, int);
+	void  (*merge_step_series) (hid_t, void*, void*, void*);
+	void* (*series_total) (int, void*);
+	void  (*extract_series) (FILE*, bool, int, int, char*, char*, void*,
+				 int);
+	void  (*extract_total) (FILE*, bool, int, int, char*, char*, void*,
+				int);
+} hdf5_api_ops_t;
+
+/* ============================================================================
+ * Common support functions
+ ==========================================================================*/
+
+/*
+ * Create a opts group from type
+ */
+hdf5_api_ops_t* profile_factory(uint32_t type);
+
+/*
+ * Initialize profile (initialize static memory)
+ */
+void profile_init_old(void);
+
+/*
+ * Finialize profile (initialize static memory)
+ */
+void profile_fini_old(void);
+
+/*
+ * Make a dataset name
+ *
+ * Parameters
+ *	type	- series name
+ *
+ * Returns
+ *	common data set name based on type in static memory
+ */
+char* get_data_set_name(char* type);
+
+/*
+ * print info on an object for debugging
+ *
+ * Parameters
+ *	group	 - handle to group.
+ *	namGroup - name of the group
+ */
+void hdf5_obj_info(hid_t group, char* namGroup);
+
+/*
+ * get string attribute
+ *
+ * Parameters
+ *	parent	- handle to parent group.
+ *	name	- name of the attribute
+ *
+ * Return: pointer to value. Caller responsibility to free!!!
+ */
+char* get_string_attribute(hid_t parent, char* name);
+
+/*
+ * get int attribute
+ *
+ * Parameters
+ *	parent	- handle to parent group.
+ *	name	- name of the attribute
+ *
+ * Return: value
+ */
+int get_int_attribute(hid_t parent, char* name);
+
+/*
+ * get uint32_t attribute
+ *
+ * Parameters
+ *	parent	- handle to parent group.
+ *	name	- name of the attribute
+ *
+ * Return: value
+ */
+uint32_t get_uint32_attribute(hid_t parent, char* name);
+
+/*
+ * Get data from a group of a HDF5 file
+ *
+ * Parameters
+ *	parent   - handle to parent.
+ *	type     - type of data (ACCT_GATHER_PROFILE_* in slurm.h)
+ *	namGroup - name of group
+ *	sizeData - pointer to variable into which to put size of dataset
+ *
+ * Returns -- data set of type (or null), caller must free.
+ */
+void* get_hdf5_data(hid_t parent, uint32_t type, char* namGroup, int* sizeData);
+
+/*
+ * Put one data sample into a new group in an HDF5 file
+ *
+ * Parameters
+ *	parent  - handle to parent group.
+ *	type    - type of data (ACCT_GATHER_PROFILE_* in slurm.h)
+ *	subtype - generally source (node, series, ...) or summary
+ *	group   - name of new group
+ *	data    - data for the sample
+ *      nItems  - number of items of type in the data
+ */
+void put_hdf5_data(hid_t parent, uint32_t type, char* subtype, char* group,
+		   void* data, int nItems);
+
+#endif /*__ACCT_GATHER_HDF5_OLD_API_H__*/
diff --git a/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/sh5util.c b/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/sh5util.c
new file mode 100644
index 000000000..5ebdbcf92
--- /dev/null
+++ b/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/sh5util.c
@@ -0,0 +1,1571 @@
+/*****************************************************************************\
+ *  sh5util.c - slurm profile accounting plugin for io and energy using hdf5.
+ *            - Utility to merge node-step files into a job file
+ *            - or extract data from an job file
+ *****************************************************************************
+ *  Copyright (C) 2013 Bull S. A. S.
+ *		Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois.
+ *
+ *  Written by Rod Schultz <rod.schultz@bull.com>
+ *
+ *  Copyright (C) 2013 SchedMD LLC
+ *
+ *  Written by Danny Auble <da@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+ *
+\*****************************************************************************/
+
+#ifndef _GNU_SOURCE
+#  define _GNU_SOURCE
+#endif
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#if HAVE_GETOPT_H
+#  include <getopt.h>
+#else
+#  include "src/common/getopt.h"
+#endif
+
+#include <dirent.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/time.h>
+
+#include "src/common/uid.h"
+#include "src/common/read_config.h"
+#include "src/common/proc_args.h"
+#include "src/common/xstring.h"
+#include "hdf5_api.h"
+#include "../sh5util.h"
+
+static char **series_names;
+static int num_series;
+
+static int _merge_step_files(void);
+static int _extract_data(void);
+static int _series_data(void);
+
+extern int run_old(int argc, char **argv)
+{
+	int cc;
+
+	profile_init_old();
+
+	switch (params.mode) {
+		case SH5UTIL_MODE_MERGE:
+			cc = _merge_step_files();
+			break;
+		case SH5UTIL_MODE_EXTRACT:
+			cc = _extract_data();
+			break;
+		case SH5UTIL_MODE_ITEM_EXTRACT:
+			cc = _series_data();
+			break;
+		case SH5UTIL_MODE_ITEM_LIST:
+			cc = SLURM_ERROR;
+			break;
+		default:
+			error("Unknown type %d", params.mode);
+			break;
+	}
+
+	profile_fini_old();
+
+	return cc;
+}
+
+/*
+ * delete list of strings
+ *
+ * Parameters
+ *	list	- xmalloc'd list of pointers of xmalloc'd strings.
+ *	listlen - number of strings in the list
+ */
+static void _delete_string_list(char **list, int listLen)
+{
+	int ix;
+
+	if (list == NULL)
+		return;
+
+	for (ix = 0; ix < listLen; ix++) {
+		xfree(list[ix]);
+	}
+
+	xfree(list);
+
+}
+
+/* ============================================================================
+ * ============================================================================
+ * Functions for merging samples from node step files into a job file
+ * ============================================================================
+ * ========================================================================= */
+
+static void* _get_all_samples(hid_t gid_series, char* nam_series, uint32_t type,
+                              int nsamples)
+{
+	void*   data = NULL;
+
+	hid_t   id_data_set, dtyp_memory, g_sample, sz_dest;
+	herr_t  ec;
+	int     smpx ,len;
+	void    *data_prior = NULL, *data_cur = NULL;
+	char 	name_sample[MAX_GROUP_NAME+1];
+	hdf5_api_ops_t* ops;
+
+	ops = profile_factory(type);
+	if (ops == NULL) {
+		error("Failed to create operations for %s",
+		      acct_gather_profile_type_to_string(type));
+		return NULL;
+	}
+	data = (*(ops->init_job_series))(nsamples);
+	if (data == NULL) {
+		xfree(ops);
+		error("Failed to get memory for combined data");
+		return NULL;
+	}
+	dtyp_memory = (*(ops->create_memory_datatype))();
+	if (dtyp_memory < 0) {
+		xfree(ops);
+		xfree(data);
+		error("Failed to create %s memory datatype",
+		      acct_gather_profile_type_to_string(type));
+		return NULL;
+	}
+	for (smpx=0; smpx<nsamples; smpx++) {
+		len = H5Lget_name_by_idx(gid_series, ".", H5_INDEX_NAME,
+		                         H5_ITER_INC, smpx, name_sample,
+		                         MAX_GROUP_NAME, H5P_DEFAULT);
+		if (len<1 || len>MAX_GROUP_NAME) {
+			error("Invalid group name %s", name_sample);
+			continue;
+		}
+		g_sample = H5Gopen(gid_series, name_sample, H5P_DEFAULT);
+		if (g_sample < 0) {
+			info("Failed to open %s", name_sample);
+		}
+		id_data_set = H5Dopen(g_sample, get_data_set_name(name_sample),
+		                      H5P_DEFAULT);
+		if (id_data_set < 0) {
+			H5Gclose(g_sample);
+			error("Failed to open %s dataset",
+			      acct_gather_profile_type_to_string(type));
+			continue;
+		}
+		sz_dest = (*(ops->dataset_size))();
+		data_cur = xmalloc(sz_dest);
+		if (data_cur == NULL) {
+			H5Dclose(id_data_set);
+			H5Gclose(g_sample);
+			error("Failed to get memory for prior data");
+			continue;
+		}
+		ec = H5Dread(id_data_set, dtyp_memory, H5S_ALL, H5S_ALL,
+		             H5P_DEFAULT, data_cur);
+		if (ec < 0) {
+			xfree(data_cur);
+			H5Dclose(id_data_set);
+			H5Gclose(g_sample);
+			error("Failed to read %s data",
+			      acct_gather_profile_type_to_string(type));
+			continue;
+		}
+		(*(ops->merge_step_series))(g_sample, data_prior, data_cur,
+		                            data+(smpx)*sz_dest);
+
+		xfree(data_prior);
+		data_prior = data_cur;
+		H5Dclose(id_data_set);
+		H5Gclose(g_sample);
+	}
+	xfree(data_cur);
+	H5Tclose(dtyp_memory);
+	xfree(ops);
+
+	return data;
+}
+
+/*
+ * _merge_series_data() -- copy the time-series samples recorded for one
+ * node from a node-step file into the merged job file.
+ *
+ * jgid_tasks - job-file Tasks group (not referenced in this function)
+ * jg_node    - job-file group for this node (destination); a Samples group
+ *              is created under it and per-series totals may be attached
+ *              to a Totals group under it
+ * nsg_node   - node-step file group for this node (read-only source)
+ *
+ * Each child group of nsg_node/Samples is one data series.  The samples
+ * are gathered with _get_all_samples(), written under jg_node/Samples and,
+ * when the profile type supplies a series_total operation, a summary
+ * record is attached under jg_node/Totals.  Failures are logged and the
+ * offending series is skipped.
+ */
+static void _merge_series_data(hid_t jgid_tasks, hid_t jg_node, hid_t nsg_node)
+{
+	hid_t   jg_samples, nsg_samples;
+	hid_t   g_series, g_series_total = -1;
+	hsize_t num_samples, n_series;
+	int     idsx, len;
+	void    *data = NULL, *series_total = NULL;
+	uint32_t type;
+	char *data_type;
+	char    nam_series[MAX_GROUP_NAME+1];
+	hdf5_api_ops_t* ops = NULL;
+	H5G_info_t group_info;
+	H5O_info_t object_info;
+
+	if (jg_node < 0) {
+		info("Job Node is not HDF5 object");
+		return;
+	}
+	if (nsg_node < 0) {
+		info("Node-Step is not HDF5 object");
+		return;
+	}
+
+	// Destination Samples group in the job file.
+	jg_samples = H5Gcreate(jg_node, GRP_SAMPLES,
+	                       H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+	if (jg_samples < 0) {
+		info("Failed to create job node Samples");
+		return;
+	}
+	// Source Samples group in the node-step file.
+	nsg_samples = get_group(nsg_node, GRP_SAMPLES);
+	if (nsg_samples < 0) {
+		H5Gclose(jg_samples);
+		debug("Failed to get node-step Samples");
+		return;
+	}
+	H5Gget_info(nsg_samples, &group_info);
+	n_series = group_info.nlinks;
+	if (n_series < 1) {
+		// No series?
+		H5Gclose(jg_samples);
+		H5Gclose(nsg_samples);
+		info("No Samples");
+		return;
+	}
+	for (idsx = 0; idsx < n_series; idsx++) {
+		// Only links that are groups can be data series.
+		H5Oget_info_by_idx(nsg_samples, ".", H5_INDEX_NAME, H5_ITER_INC,
+		                   idsx, &object_info, H5P_DEFAULT);
+		if (object_info.type != H5O_TYPE_GROUP)
+			continue;
+
+		len = H5Lget_name_by_idx(nsg_samples, ".", H5_INDEX_NAME,
+		                         H5_ITER_INC, idsx, nam_series,
+		                         MAX_GROUP_NAME, H5P_DEFAULT);
+		if (len<1 || len>MAX_GROUP_NAME) {
+			info("Invalid group name %s", nam_series);
+			continue;
+		}
+		g_series = H5Gopen(nsg_samples, nam_series, H5P_DEFAULT);
+		if (g_series < 0) {
+			info("Failed to open %s", nam_series);
+			continue;
+		}
+		H5Gget_info(g_series, &group_info);
+		num_samples = group_info.nlinks;
+		if (num_samples <= 0) {
+			H5Gclose(g_series);
+			info("_series %s has no samples", nam_series);
+			continue;
+		}
+		// Get first sample in series to find out how big the data is.
+		data_type = get_string_attribute(g_series, ATTR_DATATYPE);
+		if (!data_type) {
+			H5Gclose(g_series);
+			info("Failed to get datatype for Time Series Dataset");
+			continue;
+		}
+		type = acct_gather_profile_type_from_string(data_type);
+		xfree(data_type);
+		data = _get_all_samples(g_series, nam_series, type,
+		                        num_samples);
+		if (data == NULL) {
+			H5Gclose(g_series);
+			info("Failed to get memory for Time Series Dataset");
+			continue;
+		}
+		put_hdf5_data(jg_samples, type, SUBDATA_SERIES, nam_series,
+		              data, num_samples);
+		ops = profile_factory(type);
+		if (ops == NULL) {
+			xfree(data);
+			H5Gclose(g_series);
+			info("Failed to create operations for %s",
+			     acct_gather_profile_type_to_string(type));
+			continue;
+		}
+		series_total = (*(ops->series_total))(num_samples, data);
+		if (series_total != NULL) {
+			// Totals for series attaches to node
+			g_series_total = make_group(jg_node, GRP_TOTALS);
+			if (g_series_total < 0) {
+				H5Gclose(g_series);
+				xfree(series_total);
+				xfree(data);
+				xfree(ops);
+				info("Failed to make Totals for Node");
+				continue;
+			}
+			put_hdf5_data(g_series_total, type,
+			              SUBDATA_SUMMARY,
+			              nam_series, series_total, 1);
+			H5Gclose(g_series_total);
+		}
+		xfree(series_total);
+		xfree(ops);
+		xfree(data);
+		H5Gclose(g_series);
+	}
+	/* Fix: both group handles were previously leaked on this path. */
+	H5Gclose(nsg_samples);
+	H5Gclose(jg_samples);
+
+	return;
+}
+
+/* ============================================================================
+ * Functions for merging tasks data into a job file
+ ==========================================================================*/
+
+/*
+ * _merge_task_totals() -- copy per-task totals for one node from a
+ * node-step file into the merged job file.
+ *
+ * jg_tasks  - job-file Tasks group (destination)
+ * nsg_node  - node-step file group for this node (read-only source)
+ * node_name - name of the node; recorded on each task group created
+ *
+ * For every Task_<n> group under nsg_node/Tasks a matching group is
+ * created under jg_tasks, the task attributes (node name, task id, CPUs
+ * per task) are copied, and each item under the task's Totals group is
+ * copied across.  Failures are logged and the offending task/item skipped.
+ */
+static void _merge_task_totals(hid_t jg_tasks, hid_t nsg_node, char* node_name)
+{
+	hid_t   jg_task, jg_totals, nsg_totals,
+		g_total, nsg_tasks, nsg_task = -1;
+	/* NOTE(review): -1 wraps to a huge value in an unsigned hsize_t;
+	 * harmless because ntasks is reassigned before use, but confirm. */
+	hsize_t nobj, ntasks = -1;
+	int	i, len, taskx, taskid, taskcpus, size_data;
+	void    *data;
+	uint32_t type;
+	char    buf[MAX_GROUP_NAME+1];
+	char    group_name[MAX_GROUP_NAME+1];
+	H5G_info_t group_info;
+
+	if (jg_tasks < 0) {
+		info("Job Tasks is not HDF5 object");
+		return;
+	}
+	if (nsg_node < 0) {
+		info("Node-Step is not HDF5 object");
+		return;
+	}
+
+	nsg_tasks = get_group(nsg_node, GRP_TASKS);
+	if (nsg_tasks < 0) {
+		debug("No Tasks group in node-step file");
+		return;
+	}
+
+	H5Gget_info(nsg_tasks, &group_info);
+	ntasks = group_info.nlinks;
+	for (taskx = 0; ((int)ntasks>0) && (taskx<((int)ntasks)); taskx++) {
+		// Get the name of the group.
+		len = H5Lget_name_by_idx(nsg_tasks, ".", H5_INDEX_NAME,
+		                         H5_ITER_INC, taskx, buf,
+		                         MAX_GROUP_NAME, H5P_DEFAULT);
+		if (len<1 || len>MAX_GROUP_NAME) {
+			info("Invalid group name %s", buf);
+			continue;
+		}
+		nsg_task = H5Gopen(nsg_tasks, buf, H5P_DEFAULT);
+		if (nsg_task < 0) {
+			debug("Failed to open %s", buf);
+			continue;
+		}
+		// Name the job-file group after the task id, not the
+		// node-step link name.
+		taskid = get_int_attribute(nsg_task, ATTR_TASKID);
+		sprintf(group_name, "%s_%d", GRP_TASK, taskid);
+		jg_task = H5Gcreate(jg_tasks, group_name,
+		                    H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+		if (jg_task < 0) {
+			H5Gclose(nsg_task);
+			info("Failed to create job task group");
+			continue;
+		}
+		put_string_attribute(jg_task, ATTR_NODENAME, node_name);
+		put_int_attribute(jg_task, ATTR_TASKID, taskid);
+		taskcpus = get_int_attribute(nsg_task, ATTR_CPUPERTASK);
+		put_int_attribute(jg_task, ATTR_CPUPERTASK, taskcpus);
+		nsg_totals = get_group(nsg_task, GRP_TOTALS);
+		if (nsg_totals < 0) {
+			H5Gclose(jg_task);
+			H5Gclose(nsg_task);
+			continue;
+		}
+		jg_totals = H5Gcreate(jg_task, GRP_TOTALS,
+		                      H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+		if (jg_totals < 0) {
+			H5Gclose(nsg_totals);	/* fix: handle was leaked */
+			H5Gclose(jg_task);
+			H5Gclose(nsg_task);
+			info("Failed to create job task totals");
+			continue;
+		}
+		H5Gget_info(nsg_totals, &group_info);
+		nobj = group_info.nlinks;
+		for (i = 0; (nobj>0) && (i<nobj); i++) {
+			// Get the name of the group.
+			len = H5Lget_name_by_idx(nsg_totals, ".", H5_INDEX_NAME,
+			                         H5_ITER_INC, i, buf,
+			                         MAX_GROUP_NAME, H5P_DEFAULT);
+
+			if (len<1 || len>MAX_GROUP_NAME) {
+				info("Invalid group name %s", buf);
+				continue;
+			}
+			g_total = H5Gopen(nsg_totals, buf, H5P_DEFAULT);
+			if (g_total < 0) {
+				info("Failed to open %s", buf);
+				continue;
+			}
+			type = get_uint32_attribute(g_total, ATTR_DATATYPE);
+			if (!type) {
+				H5Gclose(g_total);
+				info("No %s attribute", ATTR_DATATYPE);
+				continue;
+			}
+			data = get_hdf5_data(g_total, type, buf, &size_data);
+			if (data == NULL) {
+				H5Gclose(g_total);
+				info("Failed to get group %s type %s data", buf,
+				     acct_gather_profile_type_to_string(type));
+				continue;
+			}
+			put_hdf5_data(jg_totals, type, SUBDATA_DATA,
+			              buf, data, 1);
+			xfree(data);
+			H5Gclose(g_total);
+		}
+		H5Gclose(nsg_totals);
+		H5Gclose(nsg_task);
+		H5Gclose(jg_totals);
+		H5Gclose(jg_task);
+	}
+	H5Gclose(nsg_tasks);
+}
+
+/* ============================================================================
+ * Functions for merging node totals into a job file
+ ==========================================================================*/
+
+/*
+ * _merge_node_totals() -- copy every item found under the node-step
+ * Totals group into a freshly created Totals group on the job-file node.
+ * Both handles must be valid HDF5 objects; otherwise nothing is done.
+ */
+static void _merge_node_totals(hid_t jg_node, hid_t nsg_node)
+{
+	hid_t      dest_totals, src_totals, gid_item;
+	hsize_t    n_items;
+	int        itemx, name_len, item_size;
+	void       *item_data;
+	uint32_t   prof_type;
+	char       item_name[MAX_GROUP_NAME+1];
+	H5G_info_t grp_info;
+
+	if (jg_node < 0) {
+		info("Job Node is not HDF5 object");
+		return;
+	}
+	if (nsg_node < 0) {
+		info("Node-Step is not HDF5 object");
+		return;
+	}
+
+	/* Create the destination first; bail out if either side fails. */
+	dest_totals = H5Gcreate(jg_node, GRP_TOTALS,
+	                        H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+	if (dest_totals < 0) {
+		info("Failed to create job node totals");
+		return;
+	}
+	src_totals = get_group(nsg_node, GRP_TOTALS);
+	if (src_totals < 0) {
+		H5Gclose(dest_totals);
+		return;
+	}
+
+	H5Gget_info(src_totals, &grp_info);
+	n_items = grp_info.nlinks;
+	for (itemx = 0; (n_items > 0) && (itemx < n_items); itemx++) {
+		/* Resolve the link name, then open that child group. */
+		name_len = H5Lget_name_by_idx(src_totals, ".", H5_INDEX_NAME,
+		                              H5_ITER_INC, itemx, item_name,
+		                              MAX_GROUP_NAME, H5P_DEFAULT);
+		if (name_len < 1 || name_len > MAX_GROUP_NAME) {
+			info("invalid group name %s", item_name);
+			continue;
+		}
+		gid_item = H5Gopen(src_totals, item_name, H5P_DEFAULT);
+		if (gid_item < 0) {
+			info("Failed to open %s", item_name);
+			continue;
+		}
+		prof_type = get_uint32_attribute(gid_item, ATTR_DATATYPE);
+		if (!prof_type) {
+			H5Gclose(gid_item);
+			info("No %s attribute", ATTR_DATATYPE);
+			continue;
+		}
+		item_data = get_hdf5_data(gid_item, prof_type, item_name,
+		                          &item_size);
+		if (item_data == NULL) {
+			H5Gclose(gid_item);
+			info("Failed to get group %s type %s data",
+			     item_name,
+			     acct_gather_profile_type_to_string(prof_type));
+			continue;
+		}
+		put_hdf5_data(dest_totals, prof_type, SUBDATA_DATA,
+		              item_name, item_data, 1);
+		xfree(item_data);
+		H5Gclose(gid_item);
+	}
+	H5Gclose(src_totals);
+	H5Gclose(dest_totals);
+}
+
+/* ============================================================================
+ * Functions for merging step data into a job file
+ ==========================================================================*/
+
+/*
+ * _merge_node_step_data() -- merge one node-step file into the job file.
+ *
+ * fid_job    - job file id (unused here; kept for interface compatibility)
+ * file_name  - path of the node-step HDF5 file to merge
+ * nodeIndex  - index of this node (unused here)
+ * node_name  - node name; used both as the new group name and to locate
+ *              the source group inside the node-step file
+ * jgid_nodes - job-file Nodes group the new node group is created under
+ * jgid_tasks - job-file Tasks group, passed through to the task merge
+ *
+ * On success the node-step file is removed unless --savefiles was given.
+ */
+static void _merge_node_step_data(hid_t fid_job, char* file_name, int nodeIndex,
+                                  char* node_name, hid_t jgid_nodes,
+                                  hid_t jgid_tasks)
+{
+	hid_t	fid_nodestep, jgid_node, nsgid_root, nsgid_node;
+	char	*start_time;
+	char	group_name[MAX_GROUP_NAME+1];
+
+	jgid_node = H5Gcreate(jgid_nodes, node_name,
+	                      H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+	if (jgid_node < 0) {
+		error("Failed to create group %s",node_name);
+		return;
+	}
+	put_string_attribute(jgid_node, ATTR_NODENAME, node_name);
+	// Process node step file
+	// Open the file and the node group.
+	fid_nodestep = H5Fopen(file_name, H5F_ACC_RDONLY, H5P_DEFAULT);
+	if (fid_nodestep < 0) {
+		H5Gclose(jgid_node);
+		error("Failed to open %s",file_name);
+		return;
+	}
+	nsgid_root = H5Gopen(fid_nodestep,"/", H5P_DEFAULT);
+	if (nsgid_root < 0) {
+		/* Fix: this handle was previously used unchecked. */
+		H5Fclose(fid_nodestep);
+		H5Gclose(jgid_node);
+		error("Failed to open root group in %s", file_name);
+		return;
+	}
+	sprintf(group_name, "/%s_%s", GRP_NODE, node_name);
+	nsgid_node = H5Gopen(nsgid_root, group_name, H5P_DEFAULT);
+	if (nsgid_node < 0) {
+		H5Gclose(nsgid_root);
+		/* Fix: a file id must be released with H5Fclose, the
+		 * original code called H5Gclose on it. */
+		H5Fclose(fid_nodestep);
+		H5Gclose(jgid_node);
+		error("Failed to open node group");
+		return;
+	}
+	start_time = get_string_attribute(nsgid_node,ATTR_STARTTIME);
+	if (start_time == NULL) {
+		info("No %s attribute", ATTR_STARTTIME);
+	} else {
+		put_string_attribute(jgid_node, ATTR_STARTTIME, start_time);
+		xfree(start_time);
+	}
+	_merge_node_totals(jgid_node, nsgid_node);
+	_merge_task_totals(jgid_tasks, nsgid_node, node_name);
+	_merge_series_data(jgid_tasks, jgid_node, nsgid_node);
+	H5Gclose(nsgid_node);
+	H5Gclose(nsgid_root);	/* fix: root group handle was leaked */
+	H5Fclose(fid_nodestep);
+	H5Gclose(jgid_node);
+
+	if (!params.keepfiles)
+		remove(file_name);
+
+	return;
+}
+
+/*
+ * _merge_step_files() -- scan the per-user profile directory for node-step
+ * files belonging to params.job_id and merge them into a single job file
+ * (params.output).
+ *
+ * File names are expected as <jobid>_<stepid>_<node>.h5.  The directory is
+ * rescanned once per step index; max_step grows as higher step ids are
+ * seen, so the outer loop terminates after the highest step is processed.
+ *
+ * Returns 0 on success, -1 on a fatal error (directory or job file could
+ * not be opened/created).
+ */
+static int _merge_step_files(void)
+{
+	hid_t fid_job = -1;
+	hid_t jgid_step = -1;
+	hid_t jgid_nodes = -1;
+	hid_t jgid_tasks = -1;
+	DIR *dir;
+	struct  dirent *de;
+	char file_name[MAX_PROFILE_PATH+1];
+	char step_dir[MAX_PROFILE_PATH+1];
+	char step_path[MAX_PROFILE_PATH+1];
+	char jgrp_step_name[MAX_GROUP_NAME+1];
+	char jgrp_nodes_name[MAX_GROUP_NAME+1];
+	char jgrp_tasks_name[MAX_GROUP_NAME+1];
+	char *step_node;
+	char *pos_char;
+	char *stepno;
+	int	stepx = 0;
+	int num_steps = 0;
+	int nodex = -1;
+	int max_step = -1;
+	int	jobid, stepid;
+	bool found_files = false;
+
+	sprintf(step_dir, "%s/%s", params.dir, params.user);
+
+	while (max_step == -1 || stepx <= max_step) {
+
+		if (!(dir = opendir(step_dir))) {
+			error("Cannot open %s job profile directory: %m", step_dir);
+			return -1;
+		}
+
+		nodex = 0;
+		while ((de = readdir(dir))) {
+
+			/* NOTE(review): assumes d_name fits in
+			 * MAX_PROFILE_PATH; d_name is at most NAME_MAX. */
+			strcpy(file_name, de->d_name);
+			if (file_name[0] == '.')
+				continue;
+
+			// Strip the ".h5" suffix; skip non-HDF5 files.
+			pos_char = strstr(file_name,".h5");
+			if (!pos_char)
+				continue;
+			*pos_char = 0;
+
+			// Split off the job id (text before first '_').
+			pos_char = strchr(file_name,'_');
+			if (!pos_char)
+				continue;
+			*pos_char = 0;
+
+			jobid = strtol(file_name, NULL, 10);
+			if (jobid != params.job_id)
+				continue;
+
+			// Split off the step id (text before second '_').
+			stepno = pos_char + 1;
+			pos_char = strchr(stepno,'_');
+			if (!pos_char) {
+				continue;
+			}
+			*pos_char = 0;
+
+			stepid = strtol(stepno, NULL, 10);
+			if (stepid > max_step)
+				max_step = stepid;
+			if (stepid != stepx)
+				continue;
+
+			step_node = pos_char + 1;
+
+			// Create the job file lazily, on the first match.
+			if (!found_files) {
+				fid_job = H5Fcreate(params.output,
+				                    H5F_ACC_TRUNC,
+				                    H5P_DEFAULT,
+				                    H5P_DEFAULT);
+				if (fid_job < 0) {
+					error("Failed create HDF5 file %s", params.output);
+					closedir(dir);	/* fix: dir leaked */
+					return -1;
+				}
+				found_files = true;
+			}
+
+			// First node of this step: create the step groups.
+			if (nodex == 0) {
+
+				num_steps++;
+				sprintf(jgrp_step_name, "/%s_%d", GRP_STEP,
+				        stepx);
+
+				jgid_step = make_group(fid_job, jgrp_step_name);
+				if (jgid_step < 0) {
+					error("Failed to create %s", jgrp_step_name);
+					continue;
+				}
+
+				sprintf(jgrp_nodes_name,"%s/%s",
+				        jgrp_step_name,
+				        GRP_NODES);
+				jgid_nodes = make_group(jgid_step,
+				                        jgrp_nodes_name);
+				if (jgid_nodes < 0) {
+					error("Failed to create %s", jgrp_nodes_name);
+					continue;
+				}
+
+				sprintf(jgrp_tasks_name,"%s/%s",
+				        jgrp_step_name,
+				        GRP_TASKS);
+				jgid_tasks = make_group(jgid_step,
+				                        jgrp_tasks_name);
+				if (jgid_tasks < 0) {
+					error("Failed to create %s", jgrp_tasks_name);
+					continue;
+				}
+			}
+
+			sprintf(step_path, "%s/%s", step_dir, de->d_name);
+			debug("Adding %s to the job file", step_path);
+			_merge_node_step_data(fid_job, step_path,
+			                      nodex, step_node,
+			                      jgid_nodes, jgid_tasks);
+			nodex++;
+		}
+
+		closedir(dir);
+
+		if (nodex > 0) {
+			put_int_attribute(jgid_step, ATTR_NNODES, nodex);
+			H5Gclose(jgid_tasks);
+			H5Gclose(jgid_nodes);
+			H5Gclose(jgid_step);
+		}
+
+		/* If we did not find the step 0
+		 * bail out.
+		 */
+		if (stepx == 0
+			&& !found_files)
+			break;
+
+		stepx++;
+	}
+
+	if (!found_files)
+		info("No node-step files found for jobid %d", params.job_id);
+	else
+		put_int_attribute(fid_job, ATTR_NSTEPS, num_steps);
+
+	if (fid_job != -1)
+		H5Fclose(fid_job);
+
+	return 0;
+}
+
+/* ============================================================================
+ * ============================================================================
+ * Functions for data extraction
+ * ============================================================================
+ * ========================================================================= */
+
+/*
+ * _get_series_parent() -- open the group that holds the requested series
+ * for a node, chosen by params.level ("Node:Totals" or "Node:TimeSeries").
+ * Returns the open group id, or -1 on failure / unrecognized level.
+ */
+static hid_t _get_series_parent(hid_t group)
+{
+	hid_t gid_level;
+
+	if (strcasecmp(params.level, "Node:Totals") == 0) {
+		gid_level = get_group(group, GRP_TOTALS);
+		if (gid_level < 0)
+			info("Failed to open  group %s", GRP_TOTALS);
+		return gid_level;
+	}
+
+	if (strcasecmp(params.level, "Node:TimeSeries") == 0) {
+		gid_level = get_group(group, GRP_SAMPLES);
+		if (gid_level < 0)
+			info("Failed to open group %s", GRP_SAMPLES);
+		return gid_level;
+	}
+
+	info("%s is an illegal level", params.level);
+	return -1;
+}
+
+
+/*
+ * _get_series_names() -- fill the module globals series_names/num_series
+ * with the names of every link directly under the given group.
+ *
+ * The caller is expected to release the list afterwards (the rest of this
+ * file uses _delete_string_list and resets both globals).
+ */
+static void _get_series_names(hid_t group)
+{
+	int i, len;
+	char buf[MAX_GROUP_NAME+1];
+	H5G_info_t group_info;
+
+	H5Gget_info(group, &group_info);
+	num_series = (int)group_info.nlinks;
+	// NOTE(review): nlinks is unsigned, so this only trips on int
+	// overflow; with zero links the loop below simply does not run.
+	if (num_series < 0) {
+		debug("No Data Series in group");
+		return;
+	}
+	series_names = xmalloc(sizeof(char*)*num_series);
+	for (i = 0; (num_series>0) && (i<num_series); i++) {
+		len = H5Lget_name_by_idx(group, ".", H5_INDEX_NAME,
+		                         H5_ITER_INC, i, buf,
+		                         MAX_GROUP_NAME, H5P_DEFAULT);
+		// NOTE(review): other loops in this file reject len < 1;
+		// with len < 0 buf may hold stale data — confirm intent.
+		if ((len < 0) || (len > MAX_GROUP_NAME)) {
+			info("Invalid series name=%s", buf);
+			// put into list anyway so list doesn't have a null.
+		}
+		series_names[i] = xstrdup(buf);
+	}
+
+}
+
+/*
+ * _extract_series() -- write one data series for one node as CSV rows to
+ * fp.  Summary data uses the profile type's extract_total operation, time
+ * series use extract_series; a column-header row is emitted when header
+ * is true.  Missing series are silently ignored (a short-lived step may
+ * simply never have produced a sample).
+ */
+static void _extract_series(FILE* fp, int stepx, bool header, hid_t gid_level,
+			    char* node_name, char* data_set_name) {
+	hid_t	gid_series;
+	int 	size_data;
+	void	*data;
+	uint32_t type;
+	char	*data_type, *subtype;
+	hdf5_api_ops_t* ops;
+	gid_series = get_group(gid_level, data_set_name);
+	if (gid_series < 0) {
+		// This is okay, may not have ran long enough for
+		// a sample (hostname????)
+		// OR trying to get all tasks
+		return;
+	}
+	data_type = get_string_attribute(gid_series, ATTR_DATATYPE);
+	if (!data_type) {
+		H5Gclose(gid_series);
+		info("No datatype in %s", data_set_name);
+		return;
+	}
+	type = acct_gather_profile_type_from_string(data_type);
+	xfree(data_type);
+	subtype = get_string_attribute(gid_series, ATTR_SUBDATATYPE);
+	if (subtype == NULL) {
+		H5Gclose(gid_series);
+		info("No %s attribute", ATTR_SUBDATATYPE);
+		return;
+	}
+	ops = profile_factory(type);
+	if (ops == NULL) {
+		xfree(subtype);
+		H5Gclose(gid_series);
+		info("Failed to create operations for %s",
+		     acct_gather_profile_type_to_string(type));
+		return;
+	}
+	data = get_hdf5_data(
+		gid_series, type, data_set_name, &size_data);
+	if (data) {
+		// Summary records get one row, time series one row/sample.
+		if (strcmp(subtype,SUBDATA_SUMMARY) != 0)
+			(*(ops->extract_series)) (fp, header, params.job_id,
+				 stepx, node_name, data_set_name,
+				 data, size_data);
+		else
+			(*(ops->extract_total)) (fp, header, params.job_id,
+				 stepx, node_name, data_set_name,
+				 data, size_data);
+		xfree(data);
+	} else {
+		fprintf(fp, "%d,%d,%s,No %s Data\n",
+		        params.job_id, stepx, node_name,
+		        data_set_name);
+	}
+	xfree(subtype);	/* fix: was leaked on every successful call */
+	xfree(ops);
+	H5Gclose(gid_series);
+
+}
+/*
+ * _extract_node_level() -- write the given data series for every node of
+ * a step to fp, honoring the --node filter (a specific name, or "*" /
+ * unset for all nodes).  The CSV header is emitted only for the first
+ * node extracted.
+ */
+static void _extract_node_level(FILE* fp, int stepx, hid_t jgid_nodes,
+                                int nnodes, char* data_set_name)
+{
+
+	hid_t	jgid_node, gid_level;
+	int 	nodex, len;
+	char    jgrp_node_name[MAX_GROUP_NAME+1];
+	bool header = true;
+	for (nodex=0; nodex<nnodes; nodex++) {
+		len = H5Lget_name_by_idx(jgid_nodes, ".", H5_INDEX_NAME,
+		                         H5_ITER_INC, nodex, jgrp_node_name,
+		                         MAX_GROUP_NAME, H5P_DEFAULT);
+		if ((len < 0) || (len > MAX_GROUP_NAME)) {
+			info("Invalid node name=%s", jgrp_node_name);
+			continue;
+		}
+		jgid_node = get_group(jgid_nodes, jgrp_node_name);
+		if (jgid_node < 0) {
+			info("Failed to open group %s", jgrp_node_name);
+			continue;
+		}
+		// Skip nodes that do not match the --node filter.
+		if (params.node
+		    && strcmp(params.node, "*")
+		    && strcmp(params.node, jgrp_node_name)) {
+			H5Gclose(jgid_node);	/* fix: handle was leaked */
+			continue;
+		}
+		gid_level = _get_series_parent(jgid_node);
+		if (gid_level == -1) {
+			H5Gclose(jgid_node);
+			continue;
+		}
+		_extract_series(fp, stepx, header, gid_level, jgrp_node_name,
+				data_set_name);
+		header = false;
+		H5Gclose(gid_level);
+		H5Gclose(jgid_node);
+	}
+}
+
+/*
+ * _extract_all_tasks() -- write the Task_<n> series of every task in a
+ * step to fp, locating each task's samples via the node it ran on.
+ *
+ * All failure paths call fatal(), which terminates the program, so no
+ * cleanup is performed on error.  The CSV header is emitted only for the
+ * first task.
+ */
+static void _extract_all_tasks(FILE *fp, hid_t gid_step, hid_t gid_nodes,
+		int nnodes, int stepx)
+{
+
+	hid_t	gid_tasks, gid_task = 0, gid_node = -1, gid_level = -1;
+	H5G_info_t group_info;
+	int	ntasks, itx, len, task_id;
+	char	task_name[MAX_GROUP_NAME+1];
+	char*   node_name;
+	char	buf[MAX_GROUP_NAME+1];
+	bool hd = true;
+
+	gid_tasks = get_group(gid_step, GRP_TASKS);
+	if (gid_tasks < 0)
+		fatal("No tasks in step %d", stepx);
+	H5Gget_info(gid_tasks, &group_info);
+	ntasks = (int) group_info.nlinks;
+	if (ntasks <= 0)
+		fatal("No tasks in step %d", stepx);
+
+	for (itx = 0; itx<ntasks; itx++) {
+		// Get the name of the group.
+		len = H5Lget_name_by_idx(gid_tasks, ".", H5_INDEX_NAME,
+		                         H5_ITER_INC, itx, buf, MAX_GROUP_NAME,
+		                         H5P_DEFAULT);
+		if ((len > 0) && (len < MAX_GROUP_NAME)) {
+			gid_task = H5Gopen(gid_tasks, buf, H5P_DEFAULT);
+			if (gid_task < 0)
+				fatal("Failed to open %s", buf);
+		} else
+			fatal("Illegal task name %s",buf);
+		// The task's samples live under the node it ran on;
+		// the series name is reconstructed from the task id.
+		task_id = get_int_attribute(gid_task, ATTR_TASKID);
+		node_name = get_string_attribute(gid_task, ATTR_NODENAME);
+		sprintf(task_name,"%s_%d", GRP_TASK, task_id);
+		gid_node = H5Gopen(gid_nodes, node_name, H5P_DEFAULT);
+		if (gid_node < 0)
+			fatal("Failed to open %s for Task_%d",
+					node_name, task_id);
+		gid_level = get_group(gid_node, GRP_SAMPLES);
+		if (gid_level < 0)
+			fatal("Failed to open group %s for node=%s task=%d",
+					GRP_SAMPLES,node_name, task_id);
+		_extract_series(fp, stepx, hd, gid_level, node_name, task_name);
+
+		hd = false;
+		xfree(node_name);
+		H5Gclose(gid_level);
+		H5Gclose(gid_node);
+		H5Gclose(gid_task);
+	}
+	H5Gclose(gid_tasks);
+}
+
+/* _extract_data()
+ */
+static int _extract_data(void)
+{
+	hid_t fid_job;
+	hid_t jgid_root;
+	hid_t jgid_step;
+	hid_t jgid_nodes;
+	hid_t jgid_node;
+	hid_t jgid_level;
+	int	nsteps;
+	int nnodes;
+	int stepx;
+	int isx;
+	int len;
+	char jgrp_step_name[MAX_GROUP_NAME+1];
+	char jgrp_node_name[MAX_GROUP_NAME+1];
+	FILE *fp;
+
+	fp = fopen(params.output, "w");
+	if (fp == NULL) {
+		error("Failed to create output file %s -- %m",
+		      params.output);
+	}
+
+	fid_job = H5Fopen(params.input, H5F_ACC_RDONLY, H5P_DEFAULT);
+	if (fid_job < 0) {
+		error("Failed to open %s", params.input);
+		return -1;
+	}
+
+	jgid_root = H5Gopen(fid_job, "/", H5P_DEFAULT);
+	if (jgid_root < 0) {
+		H5Fclose(fid_job);
+		error("Failed to open  root");
+		return -1;
+	}
+
+	nsteps = get_int_attribute(jgid_root, ATTR_NSTEPS);
+	for (stepx = 0; stepx < nsteps; stepx++) {
+
+		if ((params.step_id != -1) && (stepx != params.step_id))
+			continue;
+
+		sprintf(jgrp_step_name, "%s_%d", GRP_STEP, stepx);
+		jgid_step = get_group(jgid_root, jgrp_step_name);
+		if (jgid_step < 0) {
+			error("Failed to open group %s", jgrp_step_name);
+			continue;
+		}
+
+		if (params.level && !strncasecmp(params.level, "Node:", 5)) {
+
+			nnodes = get_int_attribute(jgid_step, ATTR_NNODES);
+
+			jgid_nodes = get_group(jgid_step, GRP_NODES);
+			if (jgid_nodes < 0) {
+				H5Gclose(jgid_step);
+				error("Failed to open  group %s", GRP_NODES);
+				continue;
+			}
+
+			len = H5Lget_name_by_idx(jgid_nodes, ".", H5_INDEX_NAME,
+			                         H5_ITER_INC, 0, jgrp_node_name,
+			                         MAX_GROUP_NAME, H5P_DEFAULT);
+			if ((len < 0) || (len > MAX_GROUP_NAME)) {
+				H5Gclose(jgid_nodes);
+				H5Gclose(jgid_step);
+				error("Invalid node name %s", jgrp_node_name);
+				continue;
+			}
+
+			jgid_node = get_group(jgid_nodes, jgrp_node_name);
+			if (jgid_node < 0) {
+				H5Gclose(jgid_nodes);
+				H5Gclose(jgid_step);
+				info("Failed to open group %s", jgrp_node_name);
+				continue;
+			}
+
+			jgid_level = _get_series_parent(jgid_node);
+			if (jgid_level == -1) {
+				H5Gclose(jgid_node);
+				H5Gclose(jgid_nodes);
+				H5Gclose(jgid_step);
+				continue;
+			}
+
+			_get_series_names(jgid_level);
+			H5Gclose(jgid_level);
+			H5Gclose(jgid_node);
+
+			if (!params.series || !strcmp(params.series, "*")) {
+				for (isx = 0; isx < num_series; isx++) {
+					if (strncasecmp(series_names[isx],
+							GRP_TASK,
+							strlen(GRP_TASK)) == 0)
+						continue;
+					_extract_node_level(fp, stepx, jgid_nodes,
+					                    nnodes,
+					                    series_names[isx]);
+					// Now handle all tasks.
+				}
+			} else if (strcasecmp(params.series, GRP_TASKS) == 0
+			           || strcasecmp(params.series, GRP_TASK) == 0) {
+				for (isx = 0; isx < num_series; isx++) {
+					if (strstr(series_names[isx],
+					           GRP_TASK)) {
+						_extract_node_level(fp, stepx, jgid_nodes,
+						                    nnodes,
+						                    series_names[isx]);
+					}
+				}
+			} else {
+				_extract_node_level(fp, stepx, jgid_nodes,
+				                    nnodes,
+				                    params.series);
+			}
+
+			_delete_string_list(series_names, num_series);
+			series_names = NULL;
+			num_series = 0;
+			if (!params.series || !strcmp(params.series, "*"))
+				_extract_all_tasks(fp, jgid_step, jgid_nodes,
+						nnodes, stepx);
+
+			H5Gclose(jgid_nodes);
+		} else {
+			error("%s is an illegal level", params.level);
+		}
+		H5Gclose(jgid_step);
+	}
+
+	H5Gclose(jgid_root);
+	H5Fclose(fid_job);
+	fclose(fp);
+
+	return 0;
+}
+
+
+/* ============================================================================
+ * ============================================================================
+ * Functions for data item extraction
+ * ============================================================================
+ * ========================================================================= */
+
+// Get the data_set for a node
+static void *_get_series_data(hid_t jgid_node, char* series,
+                              hdf5_api_ops_t **ops_p, int *nsmp)
+{
+
+	hid_t	gid_level, gid_series;
+	int 	size_data;
+	void	*data;
+	uint32_t type;
+	char	*data_type;
+	hdf5_api_ops_t* ops;
+
+	*nsmp = 0;	// Initialize return arguments.
+	*ops_p = NULL;
+
+	// Navigate from the node group to the data set
+	gid_level = get_group(jgid_node, GRP_SAMPLES);
+	if (gid_level == -1) {
+		return NULL;
+	}
+	gid_series = get_group(gid_level, series);
+	if (gid_series < 0) {
+		// This is okay, may not have ran long enough for
+		// a sample (srun hostname)
+		H5Gclose(gid_level);
+		return NULL;
+	}
+	data_type = get_string_attribute(gid_series, ATTR_DATATYPE);
+	if (!data_type) {
+		H5Gclose(gid_series);
+		H5Gclose(gid_level);
+		debug("No datatype in %s", series);
+		return NULL;
+	}
+	// Invoke the data type operator to get the data set
+	type = acct_gather_profile_type_from_string(data_type);
+	xfree(data_type);
+	ops = profile_factory(type);
+	if (ops == NULL) {
+		H5Gclose(gid_series);
+		H5Gclose(gid_level);
+		debug("Failed to create operations for %s",
+		      acct_gather_profile_type_to_string(type));
+		return NULL;
+	}
+	data = get_hdf5_data(gid_series, type, series, &size_data);
+	if (data) {
+		*nsmp = (size_data / ops->dataset_size());
+		*ops_p = ops;
+	} else {
+		xfree(ops);
+	}
+	H5Gclose(gid_series);
+	H5Gclose(gid_level);
+	return data;
+}
+
+/*
+ * _series_analysis() -- compute and emit per-sample min/max/average/total
+ * statistics across nseries parallel series (one per node or task).
+ *
+ * fp          - CSV output stream
+ * hd          - true if a header row was already written (suppresses it)
+ * stepx       - step id, echoed into each row
+ * nseries     - number of series in all_series/series_name/series_smp
+ * nsmp        - number of samples in the longest series
+ * series_name - label for each series
+ * tod/et      - wall-clock string and elapsed time for each sample
+ * all_series  - per-series arrays of sample values (may be shorter than
+ *               nsmp; series_smp[] holds each actual length)
+ * series_smp  - number of samples present in each series
+ *
+ * Also prints a one-line summary of the peak accumulated value to stdout.
+ * Relies on xmalloc returning zeroed memory for the running min/max/sum
+ * arrays (Slurm's xmalloc does zero its allocations).
+ */
+static void _series_analysis(FILE *fp, bool hd, int stepx, int nseries,
+                             int nsmp, char **series_name, char **tod, double *et,
+                             double **all_series, uint64_t *series_smp)
+{
+	double *mn_series;	// Min Value, each sample
+	double *mx_series;	// Max value, each sample
+	double *sum_series;	// Total of all series, each sample
+	double *smp_series;	// all samples for one node
+	uint64_t *mn_sx;	// Index of series with minimum value
+	uint64_t *mx_sx;   	// Index of series with maximum value
+	uint64_t *series_in_smp; // Number of series in the sample
+	int max_smpx = 0;
+	double max_smp_series = 0;
+	double ave_series;
+	int ix, isx;
+
+	mn_series = xmalloc(nsmp * sizeof(double));
+	mx_series = xmalloc(nsmp * sizeof(double));
+	sum_series =xmalloc(nsmp * sizeof(double));
+	mn_sx = xmalloc(nsmp * sizeof(uint64_t));
+	mx_sx = xmalloc(nsmp * sizeof(uint64_t));
+	series_in_smp = xmalloc(nsmp * sizeof(uint64_t));
+
+	// Pass 1: per-sample min/max/sum across the series that actually
+	// have a value at that sample index.
+	for (ix = 0; ix < nsmp; ix++) {
+		for (isx=0; isx<nseries; isx++) {
+			if (series_smp[isx]<nsmp && ix>=series_smp[isx])
+				continue;
+			series_in_smp[ix]++;
+			smp_series = all_series[isx];
+			if (smp_series) {
+				sum_series[ix] += smp_series[ix];
+				if (mn_series[ix] == 0
+				    || smp_series[ix] < mn_series[ix]) {
+					mn_series[ix] = smp_series[ix];
+					mn_sx[ix] = isx;
+				}
+				if (mx_series[ix] == 0
+				    || smp_series[ix] > mx_series[ix]) {
+					mx_series[ix] = smp_series[ix];
+					mx_sx[ix] = isx;
+				}
+			}
+		}
+	}
+
+	// Pass 2: find the sample with the largest accumulated value.
+	for (ix = 0; ix < nsmp; ix++) {
+		if (sum_series[ix] > max_smp_series) {
+			max_smpx = ix;
+			max_smp_series = sum_series[ix];
+		}
+	}
+
+	ave_series = sum_series[max_smpx] / series_in_smp[max_smpx];
+	printf("    Step %d Maximum accumulated %s Value (%f) occurred "
+	       "at %s (Elapsed Time=%d) Ave Node %f\n",
+	       stepx, params.data_item, max_smp_series,
+	       tod[max_smpx], (int) et[max_smpx], ave_series);
+
+	// Put data for step
+	if (!hd) {
+		fprintf(fp,"TOD,Et,JobId,StepId,Min Node,Min %s,"
+		        "Ave %s,Max Node,Max %s,Total %s,"
+		        "Num Nodes",params.data_item,params.data_item,
+		        params.data_item,params.data_item);
+		for (isx = 0; isx < nseries; isx++) {
+			fprintf(fp,",%s",series_name[isx]);
+		}
+		fprintf(fp,"\n");
+	}
+
+	for (ix = 0; ix < nsmp; ix++) {
+		fprintf(fp,"%s, %d",tod[ix], (int) et[ix]);
+		fprintf(fp,",%d,%d",params.job_id,stepx);
+		fprintf(fp,",%s,%f",series_name[mn_sx[ix]],
+		        mn_series[ix]);
+		ave_series = sum_series[ix] / series_in_smp[ix];
+		fprintf(fp,",%f",ave_series);
+		fprintf(fp,",%s,%f",series_name[mx_sx[ix]],
+		        mx_series[ix]);
+		fprintf(fp,",%f",sum_series[ix]);
+		fprintf(fp,",%"PRIu64"",series_in_smp[ix]);
+		for (isx = 0; isx < nseries; isx++) {
+			if (series_smp[isx]<nsmp && ix>=series_smp[isx]) {
+				fprintf(fp,",0.0");
+			} else {
+				smp_series = all_series[isx];
+				fprintf(fp,",%f",smp_series[ix]);
+			}
+		}
+		fprintf(fp,"\n");
+	}
+
+	xfree(mn_series);
+	xfree(mx_series);
+	xfree(sum_series);
+	xfree(mn_sx);
+	xfree(mx_sx);
+	xfree(series_in_smp);	/* fix: this buffer was leaked */
+}
+
+/*
+ * _get_all_node_series() -- collect the params.data_item values of the
+ * params.series series from every node of a step, then hand the per-node
+ * arrays to _series_analysis() for min/max/ave/total reporting.
+ *
+ * The time-of-day and elapsed-time axes are taken from whichever node has
+ * the most samples seen so far.  Nodes whose series cannot be read are
+ * skipped.  All working storage is released before returning.
+ */
+static void _get_all_node_series(FILE *fp, bool hd, hid_t jgid_step, int stepx)
+{
+	char     **tod = NULL;  // Date time at each sample
+	char     **node_name;	// Node Names
+	double **all_series;	// Pointers to all sampled for each node
+	double *et = NULL;	// Elapsed time at each sample
+	uint64_t *series_smp;   // Number of samples in this series
+
+	hid_t	jgid_nodes, jgid_node;
+	int	nnodes, ndx, len, nsmp = 0, nitem = -1;
+	char	jgrp_node_name[MAX_GROUP_NAME+1];
+	void*   series_data = NULL;
+	hdf5_api_ops_t* ops;
+
+	nnodes = get_int_attribute(jgid_step, ATTR_NNODES);
+	// allocate node arrays
+
+	series_smp = xmalloc(nnodes * (sizeof(uint64_t)));
+	if (series_smp == NULL) {
+		fatal("Failed to get memory for node_samples");
+		return;		/* fix for CLANG false positive */
+	}
+
+	node_name = xmalloc(nnodes * (sizeof(char*)));
+	if (node_name == NULL) {
+		fatal("Failed to get memory for node_name");
+		return;		/* fix for CLANG false positive */
+	}
+
+	all_series = xmalloc(nnodes * (sizeof(double*)));
+	if (all_series == NULL) {
+		fatal("Failed to get memory for all_series");
+		return;		/* fix for CLANG false positive */
+	}
+
+	jgid_nodes = get_group(jgid_step, GRP_NODES);
+	if (jgid_nodes < 0)
+		fatal("Failed to open  group %s", GRP_NODES);
+
+	for (ndx=0; ndx<nnodes; ndx++) {
+		len = H5Lget_name_by_idx(jgid_nodes, ".", H5_INDEX_NAME,
+		                         H5_ITER_INC, ndx, jgrp_node_name,
+		                         MAX_GROUP_NAME, H5P_DEFAULT);
+		if ((len < 0) || (len > MAX_GROUP_NAME)) {
+			debug("Invalid node name=%s", jgrp_node_name);
+			continue;
+		}
+		node_name[ndx] = xstrdup(jgrp_node_name);
+		jgid_node = get_group(jgid_nodes, jgrp_node_name);
+		if (jgid_node < 0) {
+			debug("Failed to open group %s", jgrp_node_name);
+			continue;
+		}
+		ops = NULL;
+		nitem = 0;
+		series_data = _get_series_data(jgid_node, params.series,
+		                               &ops, &nitem);
+		if (series_data==NULL || nitem==0 || ops==NULL) {
+			if (ops != NULL)
+				xfree(ops);
+			if (series_data != NULL)
+				xfree(series_data); /* fix: possible leak */
+			H5Gclose(jgid_node);	/* fix: handle was leaked */
+			continue;
+		}
+		all_series[ndx] = ops->get_series_values(
+			params.data_item, series_data, nitem);
+		if (!all_series[ndx])
+			fatal("No data item %s",params.data_item);
+		series_smp[ndx] = nitem;
+		// Keep the time axes from the node with the most samples.
+		if (ndx == 0) {
+			nsmp = nitem;
+			tod = ops->get_series_tod(series_data, nitem);
+			et = ops->get_series_values("time",
+			                            series_data, nitem);
+		} else {
+			if (nitem > nsmp) {
+				// new largest number of samples
+				_delete_string_list(tod, nsmp);
+				xfree(et);
+				nsmp = nitem;
+				tod = ops->get_series_tod(series_data,
+				                          nitem);
+				et = ops->get_series_values("time",
+				                            series_data, nitem);
+			}
+		}
+		xfree(ops);
+		xfree(series_data);
+		H5Gclose(jgid_node);
+	}
+	if (nsmp == 0) {
+		// May be bad series name
+		info("No values %s for series %s found in step %d",
+		     params.data_item,params.series,
+		     stepx);
+	} else {
+		_series_analysis(fp, hd, stepx, nnodes, nsmp,
+		                 node_name, tod, et, all_series, series_smp);
+	}
+	for (ndx=0; ndx<nnodes; ndx++) {
+		xfree(node_name[ndx]);
+		xfree(all_series[ndx]);
+	}
+	xfree(node_name);
+	xfree(all_series);
+	xfree(series_smp);
+	_delete_string_list(tod, nsmp);
+	xfree(et);
+
+	H5Gclose(jgid_nodes);
+
+}
+
+static void _get_all_task_series(FILE *fp, bool hd, hid_t jgid_step, int stepx)
+{
+
+	hid_t	jgid_tasks, jgid_task = 0, jgid_nodes, jgid_node;
+	H5G_info_t group_info;
+	int	ntasks,itx, tid;
+	uint64_t *task_id;
+	char     **task_node_name;	/* Node Name for each task */
+	char     **tod = NULL;  /* Date time at each sample */
+	char     **series_name;	/* Node Names */
+	double **all_series;	/* Pointers to all sampled for each node */
+	double *et = NULL;	/* Elapsed time at each sample */
+	uint64_t *series_smp;   /* Number of samples in this series */
+	int	nnodes, ndx, len, nsmp = 0, nitem = -1;
+	char	jgrp_node_name[MAX_GROUP_NAME+1];
+	char	jgrp_task_name[MAX_GROUP_NAME+1];
+	char	buf[MAX_GROUP_NAME+1];
+	void*   series_data = NULL;
+	hdf5_api_ops_t* ops;
+
+	jgid_nodes = get_group(jgid_step, GRP_NODES);
+	if (jgid_nodes < 0)
+		fatal("Failed to open  group %s", GRP_NODES);
+	jgid_tasks = get_group(jgid_step, GRP_TASKS);
+	if (jgid_tasks < 0)
+		fatal("No tasks in step %d", stepx);
+	H5Gget_info(jgid_tasks, &group_info);
+	ntasks = (int) group_info.nlinks;
+	if (ntasks <= 0)
+		fatal("No tasks in step %d", stepx);
+	task_id = xmalloc(ntasks*sizeof(uint64_t));
+	if (task_id == NULL)
+		fatal("Failed to get memory for task_ids");
+	task_node_name = xmalloc(ntasks*sizeof(char*));
+	if (task_node_name == NULL)
+		fatal("Failed to get memory for task_node_names");
+
+	for (itx = 0; itx<ntasks; itx++) {
+		// Get the name of the group.
+		len = H5Lget_name_by_idx(jgid_tasks, ".", H5_INDEX_NAME,
+		                         H5_ITER_INC, itx, buf, MAX_GROUP_NAME,
+		                         H5P_DEFAULT);
+		if ((len > 0) && (len < MAX_GROUP_NAME)) {
+			jgid_task = H5Gopen(jgid_tasks, buf, H5P_DEFAULT);
+			if (jgid_task < 0)
+				fatal("Failed to open %s", buf);
+		} else
+			fatal("Illegal task name %s",buf);
+		task_id[itx] = get_int_attribute(jgid_task, ATTR_TASKID);
+		task_node_name[itx] = get_string_attribute(jgid_task,
+		                                           ATTR_NODENAME);
+		H5Gclose(jgid_task);
+	}
+	H5Gclose(jgid_tasks);
+
+	nnodes = get_int_attribute(jgid_step, ATTR_NNODES);
+	// allocate node arrays
+	series_smp = (uint64_t*) xmalloc(ntasks*(sizeof(uint64_t)));
+	if (series_smp == NULL) {
+		fatal("Failed to get memory for node_samples");
+		return; /* Fix for CLANG false positive */
+	}
+	series_name = (char**) xmalloc(ntasks*(sizeof(char*)));
+	if (series_name == NULL) {
+		fatal("Failed to get memory for series_name");
+		return; /* Fix for CLANG false positive */
+	}
+	all_series = (double**) xmalloc(ntasks*(sizeof(double*)));
+	if (all_series == NULL) {
+		fatal("Failed to get memory for all_series");
+		return; /* Fix for CLANG false positive */
+	}
+
+	for (ndx=0; ndx<nnodes; ndx++) {
+
+		len = H5Lget_name_by_idx(jgid_nodes, ".", H5_INDEX_NAME,
+		                         H5_ITER_INC, ndx, jgrp_node_name,
+		                         MAX_GROUP_NAME, H5P_DEFAULT);
+		if ((len < 0) || (len > MAX_GROUP_NAME))
+			fatal("Invalid node name=%s", jgrp_node_name);
+		jgid_node = get_group(jgid_nodes, jgrp_node_name);
+
+		if (jgid_node < 0)
+			fatal("Failed to open group %s", jgrp_node_name);
+		for (itx = 0; itx<ntasks; itx++) {
+			if (strcmp(jgrp_node_name, task_node_name[itx]) != 0)
+				continue;
+			tid = task_id[itx];
+			series_name[itx] = xstrdup_printf("%s_%d %s",
+			                                  GRP_TASK,tid,jgrp_node_name);
+			sprintf(jgrp_task_name,"%s_%d",GRP_TASK, tid);
+
+			ops = NULL;
+			nitem = 0;
+			series_data = _get_series_data(jgid_node,
+			                               jgrp_task_name, &ops, &nitem);
+			if (series_data==NULL || nitem==0 || ops==NULL) {
+				if (ops != NULL)
+					xfree(ops);
+				continue;
+			}
+			all_series[itx] = ops->get_series_values(
+				params.data_item, series_data, nitem);
+			if (!all_series[itx])
+				fatal("No data item %s",params.data_item);
+			series_smp[itx] = nitem;
+			if (nsmp == 0) {
+				nsmp = nitem;
+				tod = ops->get_series_tod(series_data, nitem);
+				et = ops->get_series_values("time",
+				                            series_data, nitem);
+			} else {
+				if (nitem > nsmp) {
+					// new largest number of samples
+					_delete_string_list(tod, nsmp);
+					xfree(et);
+					nsmp = nitem;
+					tod = ops->get_series_tod(series_data,
+					                          nitem);
+					et = ops->get_series_values("time",
+					                            series_data, nitem);
+				}
+			}
+			xfree(ops);
+			xfree(series_data);
+		}
+		H5Gclose(jgid_node);
+	}
+	if (nsmp == 0) {
+		// May be bad series name
+		info("No values %s for series %s found in step %d",
+		     params.data_item,params.series,
+		     stepx);
+	} else {
+		_series_analysis(fp, hd, stepx, ntasks, nsmp,
+		                 series_name, tod, et, all_series, series_smp);
+	}
+	for (itx=0; itx<ntasks; itx++) {
+		xfree(all_series[itx]);
+	}
+	_delete_string_list(series_name, ntasks);
+	xfree(all_series);
+	xfree(series_smp);
+	_delete_string_list(tod, nsmp);
+	xfree(et);
+	_delete_string_list(task_node_name, ntasks);
+	xfree(task_id);
+
+	H5Gclose(jgid_nodes);
+}
+
+static int _series_data(void)
+{
+	FILE *fp;
+	bool hd = false;
+	hid_t fid_job;
+	hid_t jgid_root;
+	hid_t jgid_step;
+	int	nsteps;
+	int stepx;
+	char jgrp_step_name[MAX_GROUP_NAME + 1];
+
+	fp = fopen(params.output, "w");
+	if (fp == NULL) {
+		error("Failed open file %s -- %m", params.output);
+		return -1;
+	}
+
+	fid_job = H5Fopen(params.input, H5F_ACC_RDONLY, H5P_DEFAULT);
+	if (fid_job < 0) {
+		fclose(fp);
+		error("Failed to open %s", params.input);
+		return -1;
+	}
+
+	jgid_root = H5Gopen(fid_job, "/", H5P_DEFAULT);
+	if (jgid_root < 0) {
+		fclose(fp);
+		H5Fclose(fid_job);
+		error("Failed to open root");
+		return -1;
+	}
+
+	nsteps = get_int_attribute(jgid_root, ATTR_NSTEPS);
+	for (stepx = 0; stepx < nsteps; stepx++) {
+
+		if ((params.step_id != -1) && (stepx != params.step_id))
+			continue;
+
+		sprintf(jgrp_step_name, "%s_%d", GRP_STEP, stepx);
+		jgid_step = get_group(jgid_root, jgrp_step_name);
+		if (jgid_step < 0) {
+			error("Failed to open  group %s", jgrp_step_name);
+			H5Gclose(jgid_root); H5Fclose(fid_job); fclose(fp); return -1;
+		}
+
+		if (strncmp(params.series,GRP_TASK,strlen(GRP_TASK)) == 0)
+			_get_all_task_series(fp,hd,jgid_step, stepx);
+		else
+			_get_all_node_series(fp,hd,jgid_step, stepx);
+
+		hd = true;
+		H5Gclose(jgid_step);
+	}
+
+	H5Gclose(jgid_root);
+	H5Fclose(fid_job);
+	fclose(fp);
+
+	return 0;
+}
diff --git a/src/plugins/slurmctld/dynalloc/constants.h b/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/sh5util_old.h
similarity index 78%
rename from src/plugins/slurmctld/dynalloc/constants.h
rename to src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/sh5util_old.h
index 2a9da1a0e..575ec2583 100644
--- a/src/plugins/slurmctld/dynalloc/constants.h
+++ b/src/plugins/acct_gather_profile/hdf5/sh5util/libsh5util_old/sh5util_old.h
@@ -1,12 +1,13 @@
-/*****************************************************************************\
- *  constants.h -
+/****************************************************************************\
+ *  sh5util_old.h
  *****************************************************************************
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Written by Jimmy Cao <Jimmy.Cao@emc.com>, Ralph Castain <rhc@open-mpi.org>
- *  All rights reserved.
+ *  Copyright (C) 2015 SchedMD LLC.
+ *  Written by Danny Auble <da@schedmd.com>
+ *
+ *  Provide support for the old version of sh5util.
  *
  *  This file is part of SLURM, a resource management program.
- *  For details, see <http://slurm.schedmd.com/>.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
  *  Please also read the included file: DISCLAIMER.
  *
  *  SLURM is free software; you can redistribute it and/or modify it under
@@ -33,19 +34,12 @@
  *  You should have received a copy of the GNU General Public License along
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#ifndef DYNALLOC_CONSTANTS_H_
-#define DYNALLOC_CONSTANTS_H_
-
-#if HAVE_STDBOOL_H
-#  include <stdbool.h>
-#else
-typedef enum {false, true} bool;
-#endif /* !HAVE_STDBOOL_H */
-
+\****************************************************************************/
+#ifndef __ACCT_SH5UTIL_OLD_H__
+#define __ACCT_SH5UTIL_OLD_H__
 
-#define SIZE 8192
+#include <stdlib.h>
 
+extern int run_old(int argc, char **argv);
 
-#endif /* DYNALLOC_CONSTANTS_H_ */
+#endif
diff --git a/src/plugins/acct_gather_profile/hdf5/sh5util/sh5util.c b/src/plugins/acct_gather_profile/hdf5/sh5util/sh5util.c
index e6e7cc021..7f38a77e7 100644
--- a/src/plugins/acct_gather_profile/hdf5/sh5util/sh5util.c
+++ b/src/plugins/acct_gather_profile/hdf5/sh5util/sh5util.c
@@ -6,11 +6,11 @@
  *  Copyright (C) 2013 Bull S. A. S.
  *		Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois.
  *
- *  Written by Rod Schultz <rod.schultz@bull.com>
- *
  *  Copyright (C) 2013 SchedMD LLC
  *
- *  Written by Danny Auble <da@schedmd.com>
+ *  Initially written by Rod Schultz <rod.schultz@bull.com> @ Bull
+ *  and Danny Auble <da@schedmd.com> @ SchedMD.
+ *  Adapted by Yoann Blein <yoann.blein@bull.net> @ Bull.
  *
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://www.schedmd.com/slurmdocs/>.
@@ -69,43 +69,75 @@
 #include "src/common/read_config.h"
 #include "src/common/proc_args.h"
 #include "src/common/xstring.h"
+#include "src/common/slurm_acct_gather_profile.h"
 #include "../hdf5_api.h"
-
-typedef enum {
-	SH5UTIL_MODE_MERGE,
-	SH5UTIL_MODE_EXTRACT,
-	SH5UTIL_MODE_ITEM_EXTRACT,
-} sh5util_mode_t;
-
-typedef struct {
-	char *dir;
-	int help;
-	char *input;
-	int job_id;
-	bool keepfiles;
-	char *level;
-	sh5util_mode_t mode;
-	char *node;
-	char *output;
-	char *series;
-	char *data_item;
-	int step_id;
-	char *user;
-	int verbose;
-} sh5util_opts_t;
-
-
-static sh5util_opts_t params;
-static char **series_names;
-static int num_series;
-
+#include "sh5util.h"
+
+#include "libsh5util_old/sh5util_old.h"
+
+#define MAX_PROFILE_PATH 1024
+// #define MAX_ATTR_NAME 64
+#define MAX_GROUP_NAME 64
+// #define MAX_DATASET_NAME 64
+
+// #define ATTR_NODENAME "Node Name"
+// #define ATTR_STARTTIME "Start Time"
+#define ATTR_NSTEPS "Number of Steps"
+#define ATTR_NNODES "Number of Nodes"
+// #define ATTR_NTASKS "Number of Tasks"
+// #define ATTR_TASKID "Task Id"
+// #define ATTR_CPUPERTASK "CPUs per Task"
+// #define ATTR_DATATYPE "Data Type"
+// #define ATTR_SUBDATATYPE "Subdata Type"
+// #define ATTR_STARTTIME "Start Time"
+// #define ATTR_STARTSEC "Start Second"
+// #define SUBDATA_DATA "Data"
+// #define SUBDATA_NODE "Node"
+// #define SUBDATA_SAMPLE "Sample"
+// #define SUBDATA_SERIES "Series"
+// #define SUBDATA_TOTAL "Total"
+// #define SUBDATA_SUMMARY "Summary"
+
+#define GRP_ENERGY "Energy"
+#define GRP_LUSTRE "Lustre"
+// #define GRP_STEP "Step"
+#define GRP_STEPS "Steps"
+#define GRP_NODES "Nodes"
+// #define GRP_NODE "Node"
+#define GRP_NETWORK "Network"
+// #define GRP_SAMPLES "Time Series"
+// #define GRP_SAMPLE "Sample"
+// #define GRP_TASKS "Tasks"
+#define GRP_TASK "Task"
+// #define GRP_TOTALS "Totals"
+
+// Data types supported by all HDF5 plugins of this type
+
+sh5util_opts_t params;
+
+typedef struct table {
+	const char *step;
+	const char *node;
+	const char *group;
+	const char *name;
+} table_t;
+
+static FILE* output_file;
+static bool group_mode = false;
+static const char *current_step;
+static const char *current_node;
+
+static void _cleanup(void);
 static int _set_options(const int argc, char **argv);
 static int _merge_step_files(void);
-static int _extract_data(void);
-static int _series_data(void);
+static int _extract_series(void);
+static int _extract_item(void);
 static int  _check_params(void);
 static void _free_options(void);
 static void _remove_empty_output(void);
+static int _list_items(void);
+static int _fields_intersection(hid_t fid_job, List tables, List fields);
+
 
 static void _help_msg(void)
 {
@@ -113,6 +145,10 @@ static void _help_msg(void)
 Usage sh5util [<OPTION>] -j <job[.stepid]>\n"
 "\n"
 "Valid <OPTION> values are:\n"
+" -L, --list           Print the items of a series contained in a job file.\n"
+"     -i, --input      merged file to extract from (default ./job_$jobid.h5)\n"
+"     -s, --series     Name of series:\n"
+"                      Energy | Lustre | Network | Tasks\n"
 " -E, --extract        Extract data series from job file.\n"
 "     -i, --input      merged file to extract from (default ./job_$jobid.h5)\n"
 "     -N, --node       Node name to extract (default is all)\n"
@@ -143,6 +179,8 @@ Usage sh5util [<OPTION>] -j <job[.stepid]>\n"
 " --usage              Display brief usage message\n");
 }
 
+
+
 int
 main(int argc, char **argv)
 {
@@ -156,54 +194,49 @@ main(int argc, char **argv)
 	if (cc < 0)
 		goto ouch;
 
-	profile_init();
-
 	switch (params.mode) {
-
 		case SH5UTIL_MODE_MERGE:
-
 			info("Merging node-step files into %s",
 			     params.output);
 			cc = _merge_step_files();
-			if (cc < 0)
-				goto ouch;
 			break;
-
 		case SH5UTIL_MODE_EXTRACT:
-
 			info("Extracting job data from %s into %s",
 			     params.input, params.output);
-			cc = _extract_data();
-			if (cc < 0)
-				goto ouch;
+			cc = _extract_series();
 			break;
-
 		case SH5UTIL_MODE_ITEM_EXTRACT:
-
 			info("Extracting '%s' from '%s' data from %s into %s",
 			     params.data_item, params.series,
 			     params.input, params.output);
-			cc = _series_data();
-			if (cc < 0)
-				goto ouch;
+			cc = _extract_item();
+			break;
+		case SH5UTIL_MODE_ITEM_LIST:
+			info("Listing items from %s", params.input);
+			cc = _list_items();
 			break;
-
 		default:
 			error("Unknown type %d", params.mode);
 			break;
 	}
 
-	_remove_empty_output();
-	profile_fini();
-	_free_options();
+	if (cc == SLURM_PROTOCOL_VERSION_ERROR)
+		cc = run_old(argc, argv);
+ouch:
+	_cleanup();
 
-	return 0;
+	return cc;
+}
 
-ouch:
+static void _cleanup(void)
+{
 	_remove_empty_output();
 	_free_options();
-
-	return -1;
+	log_fini();
+	slurm_conf_destroy();
+	jobacct_gather_fini();
+	acct_gather_profile_fini();
+	acct_gather_conf_destroy();
 }
 
 /* _free_options()
@@ -219,26 +252,15 @@ _free_options(void)
 	xfree(params.data_item);
 	xfree(params.user);
 }
-/*
- * delete list of strings
- *
- * Parameters
- *	list	- xmalloc'd list of pointers of xmalloc'd strings.
- *	listlen - number of strings in the list
- */
-static void _delete_string_list(char **list, int listLen)
-{
-	int ix;
-
-	if (list == NULL)
-		return;
 
-	for (ix = 0; ix < listLen; ix++) {
-		xfree(list[ix]);
-	}
-
-	xfree(list);
+static void _void_free(void *str)
+{
+	xfree(str);
+}
 
+static int _str_cmp(void *str1, void *str2)
+{
+	return !xstrcmp((const char *)str1, (const char *)str2);
 }
 
 static void _remove_empty_output(void)
@@ -284,6 +306,7 @@ static int _set_options(const int argc, char **argv)
 		{"jobs", required_argument, 0, 'j'},
 		{"input", required_argument, 0, 'i'},
 		{"level", required_argument, 0, 'l'},
+		{"list", no_argument, 0, 'L'},
 		{"node", required_argument, 0, 'N'},
 		{"output", required_argument, 0, 'o'},
 		{"profiledir", required_argument, 0, 'p'},
@@ -297,14 +320,24 @@ static int _set_options(const int argc, char **argv)
 
 	log_init(xbasename(argv[0]), logopt, 0, NULL);
 
+#if DEBUG
+	/* Move HDF5 trace printing to log file instead of stderr */
+	H5Eset_auto(H5E_DEFAULT, (herr_t (*)(hid_t, void *))H5Eprint,
+	            log_fp());
+#else
+	/* Silent HDF5 errors */
+	H5Eset_auto(H5E_DEFAULT, NULL, NULL);
+#endif
+
 	_init_opts();
 
-	while ((cc = getopt_long(argc, argv, "d:Ehi:Ij:l:N:o:p:s:S:u:UvV",
+	while ((cc = getopt_long(argc, argv, "d:Ehi:Ij:l:LN:o:p:s:Su:UvV",
 	                         long_options, &option_index)) != EOF) {
 		switch (cc) {
 			case 'd':
 				params.data_item = xstrdup(optarg);
-				params.data_item = xstrtolower(params.data_item);
+				/* params.data_item =
+				   xstrtolower(params.data_item); */
 				break;
 			case 'E':
 				params.mode = SH5UTIL_MODE_EXTRACT;
@@ -312,6 +345,9 @@ static int _set_options(const int argc, char **argv)
 			case 'I':
 				params.mode = SH5UTIL_MODE_ITEM_EXTRACT;
 				break;
+			case 'L':
+				params.mode = SH5UTIL_MODE_ITEM_LIST;
+				break;
 			case 'h':
 				_help_msg();
 				return -1;
@@ -341,8 +377,10 @@ static int _set_options(const int argc, char **argv)
 				if (strcmp(optarg, GRP_ENERGY)
 				    && strcmp(optarg, GRP_LUSTRE)
 				    && strcmp(optarg, GRP_NETWORK)
-				    && strncmp(optarg,GRP_TASK,strlen(GRP_TASK))) {
-					error("Bad value for --series=\"%s\"", optarg);
+				    && strncmp(optarg,GRP_TASK,
+					       strlen(GRP_TASK))) {
+					error("Bad value for --series=\"%s\"",
+					      optarg);
 					return -1;
 				}
 				params.series = xstrdup(optarg);
@@ -352,7 +390,8 @@ static int _set_options(const int argc, char **argv)
 				break;
 			case 'u':
 				if (uid_from_string(optarg, &u) < 0) {
-					error("No such user --uid=\"%s\"", optarg);
+					error("No such user --uid=\"%s\"",
+					      optarg);
 					return -1;
 				}
 				params.user = uid_to_string(u);
@@ -412,6 +451,8 @@ _check_params(void)
 		if (!params.output)
 			params.output = xstrdup_printf(
 				"./extract_%d.csv", params.job_id);
+		if (!params.series)
+			fatal("Must specify series option --series");
 
 	}
 	if (params.mode == SH5UTIL_MODE_ITEM_EXTRACT) {
@@ -430,6 +471,13 @@ _check_params(void)
 			                               params.data_item,
 			                               params.job_id);
 	}
+	if (params.mode == SH5UTIL_MODE_ITEM_LIST) {
+		if (!params.input)
+			params.input = xstrdup_printf(
+				"./job_%d.h5", params.job_id);
+		if (!params.series)
+			fatal("Must specify series option --series");
+	}
 
 	if (!params.output)
 		params.output = xstrdup_printf("./job_%d.h5", params.job_id);
@@ -437,466 +485,44 @@ _check_params(void)
 	return 0;
 }
 
-/* ============================================================================
- * ============================================================================
- * Functions for merging samples from node step files into a job file
- * ============================================================================
- * ========================================================================= */
-
-static void* _get_all_samples(hid_t gid_series, char* nam_series, uint32_t type,
-                              int nsamples)
-{
-	void*   data = NULL;
-
-	hid_t   id_data_set, dtyp_memory, g_sample, sz_dest;
-	herr_t  ec;
-	int     smpx ,len;
-	void    *data_prior = NULL, *data_cur = NULL;
-	char 	name_sample[MAX_GROUP_NAME+1];
-	hdf5_api_ops_t* ops;
-
-	ops = profile_factory(type);
-	if (ops == NULL) {
-		error("Failed to create operations for %s",
-		      acct_gather_profile_type_to_string(type));
-		return NULL;
-	}
-	data = (*(ops->init_job_series))(nsamples);
-	if (data == NULL) {
-		xfree(ops);
-		error("Failed to get memory for combined data");
-		return NULL;
-	}
-	dtyp_memory = (*(ops->create_memory_datatype))();
-	if (dtyp_memory < 0) {
-		xfree(ops);
-		xfree(data);
-		error("Failed to create %s memory datatype",
-		      acct_gather_profile_type_to_string(type));
-		return NULL;
-	}
-	for (smpx=0; smpx<nsamples; smpx++) {
-		len = H5Lget_name_by_idx(gid_series, ".", H5_INDEX_NAME,
-		                         H5_ITER_INC, smpx, name_sample,
-		                         MAX_GROUP_NAME, H5P_DEFAULT);
-		if (len<1 || len>MAX_GROUP_NAME) {
-			error("Invalid group name %s", name_sample);
-			continue;
-		}
-		g_sample = H5Gopen(gid_series, name_sample, H5P_DEFAULT);
-		if (g_sample < 0) {
-			info("Failed to open %s", name_sample);
-		}
-		id_data_set = H5Dopen(g_sample, get_data_set_name(name_sample),
-		                      H5P_DEFAULT);
-		if (id_data_set < 0) {
-			H5Gclose(g_sample);
-			error("Failed to open %s dataset",
-			      acct_gather_profile_type_to_string(type));
-			continue;
-		}
-		sz_dest = (*(ops->dataset_size))();
-		data_cur = xmalloc(sz_dest);
-		if (data_cur == NULL) {
-			H5Dclose(id_data_set);
-			H5Gclose(g_sample);
-			error("Failed to get memory for prior data");
-			continue;
-		}
-		ec = H5Dread(id_data_set, dtyp_memory, H5S_ALL, H5S_ALL,
-		             H5P_DEFAULT, data_cur);
-		if (ec < 0) {
-			xfree(data_cur);
-			H5Dclose(id_data_set);
-			H5Gclose(g_sample);
-			error("Failed to read %s data",
-			      acct_gather_profile_type_to_string(type));
-			continue;
-		}
-		(*(ops->merge_step_series))(g_sample, data_prior, data_cur,
-		                            data+(smpx)*sz_dest);
-
-		xfree(data_prior);
-		data_prior = data_cur;
-		H5Dclose(id_data_set);
-		H5Gclose(g_sample);
-	}
-	xfree(data_cur);
-	H5Tclose(dtyp_memory);
-	xfree(ops);
-
-	return data;
-}
-
-static void _merge_series_data(hid_t jgid_tasks, hid_t jg_node, hid_t nsg_node)
+/* Copy the group "/{NodeName}" of the hdf5 file file_name into the location
+ * jgid_nodes */
+static int _merge_node_step_data(char* file_name, char* node_name,
+				 hid_t jgid_nodes, hid_t jgid_tasks)
 {
-	hid_t   jg_samples, nsg_samples;
-	hid_t   g_series, g_series_total = -1;
-	hsize_t num_samples, n_series;
-	int     idsx, len;
-	void    *data = NULL, *series_total = NULL;
-	uint32_t type;
-	char *data_type;
-	char    nam_series[MAX_GROUP_NAME+1];
-	hdf5_api_ops_t* ops = NULL;
-	H5G_info_t group_info;
-	H5O_info_t object_info;
-
-	if (jg_node < 0) {
-		info("Job Node is not HDF5 object");
-		return;
-	}
-	if (nsg_node < 0) {
-		info("Node-Step is not HDF5 object");
-		return;
-	}
-
-	jg_samples = H5Gcreate(jg_node, GRP_SAMPLES,
-	                       H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
-	if (jg_samples < 0) {
-		info("Failed to create job node Samples");
-		return;
-	}
-	nsg_samples = get_group(nsg_node, GRP_SAMPLES);
-	if (nsg_samples < 0) {
-		H5Gclose(jg_samples);
-		debug("Failed to get node-step Samples");
-		return;
-	}
-	H5Gget_info(nsg_samples, &group_info);
-	n_series = group_info.nlinks;
-	if (n_series < 1) {
-		// No series?
-		H5Gclose(jg_samples);
-		H5Gclose(nsg_samples);
-		info("No Samples");
-		return;
-	}
-	for (idsx = 0; idsx < n_series; idsx++) {
-		H5Oget_info_by_idx(nsg_samples, ".", H5_INDEX_NAME, H5_ITER_INC,
-		                   idsx, &object_info, H5P_DEFAULT);
-		if (object_info.type != H5O_TYPE_GROUP)
-			continue;
+	hid_t fid_nodestep;
+	char group_name[MAX_GROUP_NAME+1];
 
-		len = H5Lget_name_by_idx(nsg_samples, ".", H5_INDEX_NAME,
-		                         H5_ITER_INC, idsx, nam_series,
-		                         MAX_GROUP_NAME, H5P_DEFAULT);
-		if (len<1 || len>MAX_GROUP_NAME) {
-			info("Invalid group name %s", nam_series);
-			continue;
-		}
-		g_series = H5Gopen(nsg_samples, nam_series, H5P_DEFAULT);
-		if (g_series < 0) {
-			info("Failed to open %s", nam_series);
-			continue;
-		}
-		H5Gget_info(g_series, &group_info);
-		num_samples = group_info.nlinks;
-		if (num_samples <= 0) {
-			H5Gclose(g_series);
-			info("_series %s has no samples", nam_series);
-			continue;
-		}
-		// Get first sample in series to find out how big the data is.
-		data_type = get_string_attribute(g_series, ATTR_DATATYPE);
-		if (!data_type) {
-			H5Gclose(g_series);
-			info("Failed to get datatype for Time Series Dataset");
-			continue;
-		}
-		type = acct_gather_profile_type_from_string(data_type);
-		xfree(data_type);
-		data = _get_all_samples(g_series, nam_series, type,
-		                        num_samples);
-		if (data == NULL) {
-			H5Gclose(g_series);
-			info("Failed to get memory for Time Series Dataset");
-			continue;
-		}
-		put_hdf5_data(jg_samples, type, SUBDATA_SERIES, nam_series,
-		              data, num_samples);
-		ops = profile_factory(type);
-		if (ops == NULL) {
-			xfree(data);
-			H5Gclose(g_series);
-			info("Failed to create operations for %s",
-			     acct_gather_profile_type_to_string(type));
-			continue;
-		}
-		series_total = (*(ops->series_total))(num_samples, data);
-		if (series_total != NULL) {
-			// Totals for series attaches to node
-			g_series_total = make_group(jg_node, GRP_TOTALS);
-			if (g_series_total < 0) {
-				H5Gclose(g_series);
-				xfree(series_total);
-				xfree(data);
-				xfree(ops);
-				info("Failed to make Totals for Node");
-				continue;
-			}
-			put_hdf5_data(g_series_total, type,
-			              SUBDATA_SUMMARY,
-			              nam_series, series_total, 1);
-			H5Gclose(g_series_total);
-		}
-		xfree(series_total);
-		xfree(ops);
-		xfree(data);
-		H5Gclose(g_series);
-	}
-
-	return;
-}
-
-/* ============================================================================
- * Functions for merging tasks data into a job file
- ==========================================================================*/
-
-static void _merge_task_totals(hid_t jg_tasks, hid_t nsg_node, char* node_name)
-{
-	hid_t   jg_task, jg_totals, nsg_totals,
-		g_total, nsg_tasks, nsg_task = -1;
-	hsize_t nobj, ntasks = -1;
-	int	i, len, taskx, taskid, taskcpus, size_data;
-	void    *data;
-	uint32_t type;
-	char    buf[MAX_GROUP_NAME+1];
-	char    group_name[MAX_GROUP_NAME+1];
-	H5G_info_t group_info;
-
-	if (jg_tasks < 0) {
-		info("Job Tasks is not HDF5 object");
-		return;
-	}
-	if (nsg_node < 0) {
-		info("Node-Step is not HDF5 object");
-		return;
-	}
-
-	nsg_tasks = get_group(nsg_node, GRP_TASKS);
-	if (nsg_tasks < 0) {
-		debug("No Tasks group in node-step file");
-		return;
-	}
-
-	H5Gget_info(nsg_tasks, &group_info);
-	ntasks = group_info.nlinks;
-	for (taskx = 0; ((int)ntasks>0) && (taskx<((int)ntasks)); taskx++) {
-		// Get the name of the group.
-		len = H5Lget_name_by_idx(nsg_tasks, ".", H5_INDEX_NAME,
-		                         H5_ITER_INC, taskx, buf,
-		                         MAX_GROUP_NAME, H5P_DEFAULT);
-		if (len<1 || len>MAX_GROUP_NAME) {
-			info("Invalid group name %s", buf);
-			continue;
-		}
-		nsg_task = H5Gopen(nsg_tasks, buf, H5P_DEFAULT);
-		if (nsg_task < 0) {
-			debug("Failed to open %s", buf);
-			continue;
-		}
-		taskid = get_int_attribute(nsg_task, ATTR_TASKID);
-		sprintf(group_name, "%s_%d", GRP_TASK, taskid);
-		jg_task = H5Gcreate(jg_tasks, group_name,
-		                    H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
-		if (jg_task < 0) {
-			H5Gclose(nsg_task);
-			info("Failed to create job task group");
-			continue;
-		}
-		put_string_attribute(jg_task, ATTR_NODENAME, node_name);
-		put_int_attribute(jg_task, ATTR_TASKID, taskid);
-		taskcpus = get_int_attribute(nsg_task, ATTR_CPUPERTASK);
-		put_int_attribute(jg_task, ATTR_CPUPERTASK, taskcpus);
-		nsg_totals = get_group(nsg_task, GRP_TOTALS);
-		if (nsg_totals < 0) {
-			H5Gclose(jg_task);
-			H5Gclose(nsg_task);
-			continue;
-		}
-		jg_totals = H5Gcreate(jg_task, GRP_TOTALS,
-		                      H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
-		if (jg_totals < 0) {
-			H5Gclose(jg_task);
-			H5Gclose(nsg_task);
-			info("Failed to create job task totals");
-			continue;
-		}
-		H5Gget_info(nsg_totals, &group_info);
-		nobj = group_info.nlinks;
-		for (i = 0; (nobj>0) && (i<nobj); i++) {
-			// Get the name of the group.
-			len = H5Lget_name_by_idx(nsg_totals, ".", H5_INDEX_NAME,
-			                         H5_ITER_INC, i, buf,
-			                         MAX_GROUP_NAME, H5P_DEFAULT);
-
-			if (len<1 || len>MAX_GROUP_NAME) {
-				info("Invalid group name %s", buf);
-				continue;
-			}
-			g_total = H5Gopen(nsg_totals, buf, H5P_DEFAULT);
-			if (g_total < 0) {
-				info("Failed to open %s", buf);
-				continue;
-			}
-			type = get_uint32_attribute(g_total, ATTR_DATATYPE);
-			if (!type) {
-				H5Gclose(g_total);
-				info("No %s attribute", ATTR_DATATYPE);
-				continue;
-			}
-			data = get_hdf5_data(g_total, type, buf, &size_data);
-			if (data == NULL) {
-				H5Gclose(g_total);
-				info("Failed to get group %s type %s data", buf,
-				     acct_gather_profile_type_to_string(type));
-				continue;
-			}
-			put_hdf5_data(jg_totals, type, SUBDATA_DATA,
-			              buf, data, 1);
-			xfree(data);
-			H5Gclose(g_total);
-		}
-		H5Gclose(nsg_totals);
-		H5Gclose(nsg_task);
-		H5Gclose(jg_totals);
-		H5Gclose(jg_task);
-	}
-	H5Gclose(nsg_tasks);
-}
-
-/* ============================================================================
- * Functions for merging node totals into a job file
- ==========================================================================*/
-
-static void _merge_node_totals(hid_t jg_node, hid_t nsg_node)
-{
-	hid_t	jg_totals, nsg_totals, g_total;
-	hsize_t nobj;
-	int 	i, len, size_data;
-	void  	*data;
-	uint32_t type;
-	char 	buf[MAX_GROUP_NAME+1];
-	H5G_info_t group_info;
-
-	if (jg_node < 0) {
-		info("Job Node is not HDF5 object");
-		return;
-	}
-	if (nsg_node < 0) {
-		info("Node-Step is not HDF5 object");
-		return;
-	}
-	jg_totals = H5Gcreate(jg_node, GRP_TOTALS,
-	                      H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
-	if (jg_totals < 0) {
-		info("Failed to create job node totals");
-		return;
-	}
-	nsg_totals = get_group(nsg_node, GRP_TOTALS);
-	if (nsg_totals < 0) {
-		H5Gclose(jg_totals);
-		return;
-	}
-
-	H5Gget_info(nsg_totals, &group_info);
-	nobj = group_info.nlinks;
-	for (i = 0; (nobj>0) && (i<nobj); i++) {
-		// Get the name of the group.
-		len = H5Lget_name_by_idx(nsg_totals, ".", H5_INDEX_NAME,
-		                         H5_ITER_INC, i, buf,
-		                         MAX_GROUP_NAME, H5P_DEFAULT);
-		if (len<1 || len>MAX_GROUP_NAME) {
-			info("invalid group name %s", buf);
-			continue;
-		}
-		g_total = H5Gopen(nsg_totals, buf, H5P_DEFAULT);
-		if (g_total < 0) {
-			info("Failed to open %s", buf);
-			continue;
-		}
-		type = get_uint32_attribute(g_total, ATTR_DATATYPE);
-		if (!type) {
-			H5Gclose(g_total);
-			info("No %s attribute", ATTR_DATATYPE);
-			continue;
-		}
-		data = get_hdf5_data(g_total, type, buf, &size_data);
-		if (data == NULL) {
-			H5Gclose(g_total);
-			info("Failed to get group %s type %s data",
-			     buf, acct_gather_profile_type_to_string(type));
-			continue;
-		}
-		put_hdf5_data(jg_totals, type, SUBDATA_DATA, buf, data, 1);
-		xfree(data);
-		H5Gclose(g_total);
-	}
-	H5Gclose(nsg_totals);
-	H5Gclose(jg_totals);
-	return;
-}
-
-/* ============================================================================
- * Functions for merging step data into a job file
- ==========================================================================*/
-
-static void _merge_node_step_data(hid_t fid_job, char* file_name, int nodeIndex,
-                                  char* node_name, hid_t jgid_nodes,
-                                  hid_t jgid_tasks)
-{
-	hid_t	fid_nodestep, jgid_node, nsgid_root, nsgid_node;
-	char	*start_time;
-	char	group_name[MAX_GROUP_NAME+1];
-
-	jgid_node = H5Gcreate(jgid_nodes, node_name,
-	                      H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
-	if (jgid_node < 0) {
-		error("Failed to create group %s",node_name);
-		return;
-	}
-	put_string_attribute(jgid_node, ATTR_NODENAME, node_name);
-	// Process node step file
-	// Open the file and the node group.
 	fid_nodestep = H5Fopen(file_name, H5F_ACC_RDONLY, H5P_DEFAULT);
 	if (fid_nodestep < 0) {
-		H5Gclose(jgid_node);
 		error("Failed to open %s",file_name);
-		return;
-	}
-	nsgid_root = H5Gopen(fid_nodestep,"/", H5P_DEFAULT);
-	sprintf(group_name, "/%s_%s", GRP_NODE, node_name);
-	nsgid_node = H5Gopen(nsgid_root, group_name, H5P_DEFAULT);
-	if (nsgid_node < 0) {
-		H5Gclose(fid_nodestep);
-		H5Gclose(jgid_node);
-		error("Failed to open node group");
-		return;;
+		return SLURM_ERROR;
 	}
-	start_time = get_string_attribute(nsgid_node,ATTR_STARTTIME);
-	if (start_time == NULL) {
-		info("No %s attribute", ATTR_STARTTIME);
-	} else {
-		put_string_attribute(jgid_node, ATTR_STARTTIME, start_time);
-		xfree(start_time);
+
+	sprintf(group_name, "/%s", node_name);
+	hid_t ocpypl_id = H5Pcreate(H5P_OBJECT_COPY); /* default copy */
+	hid_t lcpl_id   = H5Pcreate(H5P_LINK_CREATE); /* parameters */
+	if (H5Ocopy(fid_nodestep, group_name, jgid_nodes, node_name,
+	            ocpypl_id, lcpl_id) < 0) {
+		debug("Failed to copy node step data of %s into the job file, "
+		      "trying with old method",
+		      node_name);
+		return SLURM_PROTOCOL_VERSION_ERROR;
 	}
-	_merge_node_totals(jgid_node, nsgid_node);
-	_merge_task_totals(jgid_tasks, nsgid_node, node_name);
-	_merge_series_data(jgid_tasks, jgid_node, nsgid_node);
-	H5Gclose(nsgid_node);
+
 	H5Fclose(fid_nodestep);
-	H5Gclose(jgid_node);
 
 	if (!params.keepfiles)
 		remove(file_name);
 
-	return;
+	return SLURM_SUCCESS;
 }
 
+/* Look for step and node files and merge them together into one job file */
 static int _merge_step_files(void)
 {
 	hid_t fid_job = -1;
+	hid_t jgid_steps = -1;
 	hid_t jgid_step = -1;
 	hid_t jgid_nodes = -1;
 	hid_t jgid_tasks = -1;
@@ -907,7 +533,6 @@ static int _merge_step_files(void)
 	char step_path[MAX_PROFILE_PATH+1];
 	char jgrp_step_name[MAX_GROUP_NAME+1];
 	char jgrp_nodes_name[MAX_GROUP_NAME+1];
-	char jgrp_tasks_name[MAX_GROUP_NAME+1];
 	char *step_node;
 	char *pos_char;
 	char *stepno;
@@ -917,6 +542,7 @@ static int _merge_step_files(void)
 	int max_step = -1;
 	int	jobid, stepid;
 	bool found_files = false;
+	int rc = SLURM_SUCCESS;
 
 	sprintf(step_dir, "%s/%s", params.dir, params.user);
 
@@ -973,12 +599,18 @@ static int _merge_step_files(void)
 					return -1;
 				}
 				found_files = true;
+
+				jgid_steps = make_group(fid_job, GRP_STEPS);
+				if (jgid_steps < 0) {
+					error("Failed to create group %s", GRP_STEPS);
+					continue;
+				}
 			}
 
 			if (nodex == 0) {
 
 				num_steps++;
-				sprintf(jgrp_step_name, "/%s_%d", GRP_STEP,
+				sprintf(jgrp_step_name, "/%s/%d", GRP_STEPS,
 				        stepx);
 
 				jgid_step = make_group(fid_job, jgrp_step_name);
@@ -997,6 +629,7 @@ static int _merge_step_files(void)
 					continue;
 				}
 
+				/*
 				sprintf(jgrp_tasks_name,"%s/%s",
 				        jgrp_step_name,
 				        GRP_TASKS);
@@ -1006,13 +639,13 @@ static int _merge_step_files(void)
 					error("Failed to create %s", jgrp_tasks_name);
 					continue;
 				}
+				*/
 			}
 
 			sprintf(step_path, "%s/%s", step_dir, de->d_name);
 			debug("Adding %s to the job file", step_path);
-			_merge_node_step_data(fid_job, step_path,
-			                      nodex, step_node,
-			                      jgid_nodes, jgid_tasks);
+			rc = _merge_node_step_data(step_path, step_node,
+						   jgid_nodes, jgid_tasks);
 			nodex++;
 		}
 
@@ -1040,10 +673,12 @@ static int _merge_step_files(void)
 	else
 		put_int_attribute(fid_job, ATTR_NSTEPS, num_steps);
 
+	if (jgid_steps != -1)
+		H5Gclose(jgid_steps);
 	if (fid_job != -1)
 		H5Fclose(fid_job);
 
-	return 0;
+	return rc;
 }
 
 /* ============================================================================
@@ -1052,827 +687,1000 @@ static int _merge_step_files(void)
  * ============================================================================
  * ========================================================================= */
 
-static hid_t _get_series_parent(hid_t group)
+static void _table_free(void *table)
 {
-	hid_t gid_level = -1;
+	table_t *t = (table_t *)table;
+	xfree(t->step);
+	xfree(t->node);
+	xfree(t->group);
+	xfree(t->name);
+	xfree(table);
+}
 
-	if (strcasecmp(params.level, "Node:Totals") == 0) {
-		gid_level = get_group(group, GRP_TOTALS);
-		if (gid_level < 0) {
-			info("Failed to open  group %s", GRP_TOTALS);
-		}
-	} else if (strcasecmp(params.level, "Node:TimeSeries") == 0) {
-		gid_level = get_group(group, GRP_SAMPLES);
-		if (gid_level < 0) {
-			info("Failed to open group %s", GRP_SAMPLES);
-		}
-	} else {
-		info("%s is an illegal level", params.level);
-		return -1;
+static void _table_path(table_t *t, char *path)
+{
+	snprintf(path, MAX_PROFILE_PATH,
+	         "/"GRP_STEPS"/%s/"GRP_NODES"/%s/%s/%s",
+	         t->step, t->node, t->group, t->name);
+}
+
+static herr_t _collect_tables_group(hid_t g_id, const char *name,
+                                    const H5L_info_t *link_info, void *op_data)
+{
+	List tables = (List)op_data;
+	hid_t table_id = -1;
 
+	/* open the dataset. */
+	if ((table_id = H5Dopen(g_id, name, H5P_DEFAULT)) < 0) {
+		error("Failed to open the dataset %s", name);
+		return -1;
 	}
+	H5Dclose(table_id);
 
-	return gid_level;
-}
+	group_mode = true;
+
+	table_t *t = xmalloc(sizeof(table_t));
+	t->step  = xstrdup(current_step);
+	t->node  = xstrdup(current_node);
+	t->group = xstrdup(params.series);
+	t->name  = xstrdup(name);
+	list_append(tables, t);
 
+	return 0;
+}
 
-static void _get_series_names(hid_t group)
+static herr_t _collect_tables_node(hid_t g_id, const char *name,
+                                   const H5L_info_t *link_info, void *op_data)
 {
-	int i, len;
-	char buf[MAX_GROUP_NAME+1];
-	H5G_info_t group_info;
-
-	H5Gget_info(group, &group_info);
-	num_series = (int)group_info.nlinks;
-	if (num_series < 0) {
-		debug("No Data Series in group");
-		return;
+	char object_path[MAX_PROFILE_PATH+1];
+	List tables = (List)op_data;
+	hid_t object_id = -1;
+	herr_t err;
+
+	/* node filter */
+	if (params.node
+	    && strcmp(params.node, "*") != 0
+	    && strcmp(params.node, name) != 0)
+		return 0;
+
+	snprintf(object_path, MAX_PROFILE_PATH+1, "%s/%s", name, params.series);
+	current_node = name;
+
+	/* open the dataset. */
+	if ((object_id = H5Oopen(g_id, object_path, H5P_DEFAULT)) < 0) {
+		error("Series %s not found", params.series);
+		return -1;
 	}
-	series_names = xmalloc(sizeof(char*)*num_series);
-	for (i = 0; (num_series>0) && (i<num_series); i++) {
-		len = H5Lget_name_by_idx(group, ".", H5_INDEX_NAME,
-		                         H5_ITER_INC, i, buf,
-		                         MAX_GROUP_NAME, H5P_DEFAULT);
-		if ((len < 0) || (len > MAX_GROUP_NAME)) {
-			info("Invalid series name=%s", buf);
-			// put into list anyway so list doesn't have a null.
+
+	if (H5Iget_type(object_id) == H5I_DATASET) {
+		table_t *t = xmalloc(sizeof(table_t));
+		t->step  = xstrdup(current_step);
+		t->node  = xstrdup(name);
+		t->group = xstrdup("");
+		t->name  = xstrdup(params.series);
+		list_append(tables, t);
+	} else if (H5Iget_type(object_id) == H5I_GROUP) {
+		err = H5Literate(object_id, H5_INDEX_NAME, H5_ITER_INC, NULL,
+		                 _collect_tables_group, op_data);
+		if (err < 0) {
+			debug("2 Failed to iterate through group %s", object_path);
+			return SLURM_PROTOCOL_VERSION_ERROR;
 		}
-		series_names[i] = xstrdup(buf);
+	} else {
+		error("Object of unknown type: %s", object_path);
+		H5Oclose(object_id);
+		return -1;
 	}
 
+	H5Oclose(object_id);
+
+	return 0;
 }
 
-static void _extract_series(FILE* fp, int stepx, bool header, hid_t gid_level,
-			    char* node_name, char* data_set_name) {
-	hid_t	gid_series;
-	int 	size_data;
-	void	*data;
-	uint32_t type;
-	char	*data_type, *subtype;
-	hdf5_api_ops_t* ops;
-	gid_series = get_group(gid_level, data_set_name);
-	if (gid_series < 0) {
-		// This is okay, may not have ran long enough for
-		// a sample (hostname????)
-		// OR trying to get all tasks
-		return;
-	}
-	data_type = get_string_attribute(gid_series, ATTR_DATATYPE);
-	if (!data_type) {
-		H5Gclose(gid_series);
-		info("No datatype in %s", data_set_name);
-		return;
-	}
-	type = acct_gather_profile_type_from_string(data_type);
-	xfree(data_type);
-	subtype = get_string_attribute(gid_series, ATTR_SUBDATATYPE);
-	if (subtype == NULL) {
-		H5Gclose(gid_series);
-		info("No %s attribute", ATTR_SUBDATATYPE);
-		return;
+static herr_t _collect_tables_step(hid_t g_id, const char *name,
+                                   const H5L_info_t *link_info, void *op_data)
+{
+	char nodes_path[MAX_PROFILE_PATH];
+	herr_t err;
+
+	/* step filter */
+	if ((params.step_id != -1) && (atoi(name) != params.step_id))
+		return 0;
+
+	snprintf(nodes_path, MAX_PROFILE_PATH, "%s/"GRP_NODES, name);
+	current_step = name;
+
+	err = H5Literate_by_name(g_id, nodes_path, H5_INDEX_NAME,
+	                         H5_ITER_INC, NULL, _collect_tables_node,
+	                         op_data, H5P_DEFAULT);
+	if (err < 0) {
+		debug("3 Failed to iterate through group /"GRP_STEPS"/%s",
+		      nodes_path);
+		return err;
 	}
-	ops = profile_factory(type);
-	if (ops == NULL) {
-		xfree(subtype);
-		H5Gclose(gid_series);
-		info("Failed to create operations for %s",
-		     acct_gather_profile_type_to_string(type));
-		return;
+
+	return 0;
+}
+
+static int _tables_list(hid_t fid_job, List tables)
+{
+	herr_t err;
+	ListIterator it;
+	table_t *t;
+
+	/* Find the list of tables to be extracted */
+	err = H5Literate_by_name(fid_job, "/"GRP_STEPS, H5_INDEX_NAME,
+	                         H5_ITER_INC, NULL, _collect_tables_step,
+	                         (void *)tables, H5P_DEFAULT);
+	if (err < 0) {
+		debug("4 Failed to iterate through group /" GRP_STEPS);
+		return SLURM_PROTOCOL_VERSION_ERROR;
 	}
-	data = get_hdf5_data(
-		gid_series, type, data_set_name, &size_data);
-	if (data) {
-		if (strcmp(subtype,SUBDATA_SUMMARY) != 0)
-			(*(ops->extract_series)) (fp, header, params.job_id,
-				 stepx, node_name, data_set_name,
-				 data, size_data);
-		else
-			(*(ops->extract_total)) (fp, header, params.job_id,
-				 stepx, node_name, data_set_name,
-				 data, size_data);
-		xfree(data);
-	} else {
-		fprintf(fp, "%d,%d,%s,No %s Data\n",
-		        params.job_id, stepx, node_name,
-		        data_set_name);
+
+	debug("tables found (group mode: %d):", group_mode);
+	it = list_iterator_create(tables);
+	while ((t = list_next(it))) {
+		debug(" /"GRP_STEPS"/%s/"GRP_NODES"/%s/%s/%s",
+		      t->step, t->node, t->group, t->name);
 	}
-	xfree(ops);
-	H5Gclose(gid_series);
+	list_iterator_destroy(it);
 
+	return SLURM_SUCCESS;
 }
-static void _extract_node_level(FILE* fp, int stepx, hid_t jgid_nodes,
-                                int nnodes, char* data_set_name)
-{
 
-	hid_t	jgid_node, gid_level;
-	int 	nodex, len;
-	char    jgrp_node_name[MAX_GROUP_NAME+1];
-	bool header = true;
-	for (nodex=0; nodex<nnodes; nodex++) {
-		len = H5Lget_name_by_idx(jgid_nodes, ".", H5_INDEX_NAME,
-		                         H5_ITER_INC, nodex, jgrp_node_name,
-		                         MAX_GROUP_NAME, H5P_DEFAULT);
-		if ((len < 0) || (len > MAX_GROUP_NAME)) {
-			info("Invalid node name=%s", jgrp_node_name);
-			continue;
+
+/**
+ * Print the total values of a table to the output file
+ *
+ * @param nb_fields Number of fields in the dataset
+ * @param offsets   Offset of each field
+ * @param types     Type of each field
+ * @param type_size Size of of a record in the dataset
+ * @param table_id  ID of the table to extract from
+ * @param state     State of the current extraction
+ * @param node_name Name of the node containing this table
+ * @param output    output file
+ */
+static void _extract_totals(size_t nb_fields, size_t *offsets, hid_t *types,
+                            hsize_t type_size, hid_t table_id,
+                            table_t *table, FILE *output)
+{
+	hsize_t nrecords;
+	size_t i, j;
+	uint8_t *data;
+
+	/* allocate space for aggregate values: 4 values (min, max,
+	 * sum, avg) on 8 bytes (uint64_t/double) for each field */
+	uint64_t *agg_i;
+	double *agg_d;
+
+	data = xmalloc(type_size);
+	agg_i = xmalloc(nb_fields * 4 * sizeof(uint64_t));
+	agg_d = (double *)agg_i;
+	H5PTget_num_packets(table_id, &nrecords);
+
+	/* compute min/max/sum */
+	for (i = 0; i < nrecords; ++i) {
+		H5PTget_next(table_id, 1, data);
+		for (j = 0; j < nb_fields; ++j) {
+			if (H5Tequal(types[j], H5T_NATIVE_UINT64)) {
+				uint64_t v = *(uint64_t *)(data + offsets[j]);
+				uint64_t *a = agg_i + j * 4;
+				if (i == 0 || v < a[0]) /* min */
+					a[0] = v;
+				if (v > a[1]) /* max */
+					a[1] = v;
+				a[2] += v; /* sum */
+			} else if (H5Tequal(types[j], H5T_NATIVE_DOUBLE)) {
+				double v = *(double *)(data + offsets[j]);
+				double *a = agg_d + j * 4;
+				if (i == 0 || v < a[0]) /* min */
+					a[0] = v;
+				if (v > a[1]) /* max */
+					a[1] = v;
+				a[2] += v; /* sum */
+			}
 		}
-		jgid_node = get_group(jgid_nodes, jgrp_node_name);
-		if (jgid_node < 0) {
-			info("Failed to open group %s", jgrp_node_name);
-			continue;
+	}
+
+	/* compute avg */
+	if (nrecords) {
+		for (j = 0; j < nb_fields; ++j) {
+			if (H5Tequal(types[j], H5T_NATIVE_UINT64)) {
+				agg_d[j*4+3] = (double)agg_i[j*4+2] / nrecords;
+			} else if (H5Tequal(types[j], H5T_NATIVE_DOUBLE)) {
+				agg_d[j*4+3] = (double)agg_d[j*4+2] / nrecords;
+			}
 		}
-		if (params.node
-		    && strcmp(params.node, "*")
-		    && strcmp(params.node, jgrp_node_name))
-			continue;
-		gid_level = _get_series_parent(jgid_node);
-		if (gid_level == -1) {
-			H5Gclose(jgid_node);
-			continue;
+	}
+
+	/* step, node */
+	fprintf(output, "%s,%s", table->step, table->node);
+
+	if (group_mode)
+		fprintf(output, ",%s", table->name);
+
+	/* elapsed time (first field in the last record) */
+	fprintf(output, ",%"PRIu64, *(uint64_t *)data);
+
+	/* aggregate values */
+	for (j = 0; j < nb_fields; ++j) {
+		if (H5Tequal(types[j], H5T_NATIVE_UINT64)) {
+			fprintf(output, ",%"PRIu64",%"PRIu64",%"PRIu64",%lf",
+			        agg_i[j * 4 + 0],
+			        agg_i[j * 4 + 1],
+			        agg_i[j * 4 + 2],
+			        agg_d[j * 4 + 3]);
+		} else if (H5Tequal(types[j], H5T_NATIVE_DOUBLE)) {
+			fprintf(output, ",%lf,%lf,%lf,%lf",
+			        agg_d[j * 4 + 0],
+			        agg_d[j * 4 + 1],
+			        agg_d[j * 4 + 2],
+			        agg_d[j * 4 + 3]);
 		}
-		_extract_series(fp, stepx, header, gid_level, jgrp_node_name,
-				data_set_name);
-		header = false;
-		H5Gclose(gid_level);
-		H5Gclose(jgid_node);
 	}
+	fputc('\n', output);
+	xfree(agg_i);
+	xfree(data);
 }
 
-static void _extract_all_tasks(FILE *fp, hid_t gid_step, hid_t gid_nodes,
-		int nnodes, int stepx)
+/**
+ * Extract the content of a table within a node. This function first discovers
+ * the content of the table and then handles both timeseries and totals levels.
+ */
+static int _extract_series_table(hid_t fid_job, table_t *table, List fields,
+				    FILE *output, bool level_total)
 {
+	char path[MAX_PROFILE_PATH];
+
+	size_t i, j;
+
+	size_t max_fields = list_count(fields);
+	size_t nb_fields = 0;
+	size_t offsets[max_fields];
+	hid_t types[max_fields];
+
+	hid_t did = -1;    /* dataset id */
+	hid_t tid = -1;    /* file type ID */
+	hid_t n_tid = -1;  /* native type ID */
+	hid_t m_tid = -1;  /* member type ID */
+	hid_t nm_tid = -1; /* native member ID */
+	hid_t table_id = -1;
+	hsize_t nmembers;
+	hsize_t type_size;
+	hsize_t nrecords;
+	char *m_name;
+
+	_table_path(table, path);
+	debug("Extracting from table %s", path);
+
+	/* open the dataset. */
+	if ((did = H5Dopen(fid_job, path, H5P_DEFAULT)) < 0) {
+		error("Failed to open the table %s", path);
+		goto error;
+	}
+
+	/* get the datatype */
+	if ((tid = H5Dget_type(did)) < 0)
+		goto error;
+	if ((n_tid = H5Tget_native_type(tid, H5T_DIR_DEFAULT)) < 0)
+		goto error;
+
+	type_size = H5Tget_size(n_tid);
+
+	/* get the number of members */
+	if ((nmembers = H5Tget_nmembers(tid)) == 0)
+		goto error;
+
+	/* iterate through the members */
+	for (i = 0; i < nmembers; i++) {
+		m_name = H5Tget_member_name(tid, (unsigned)i);
+		/* continue if the field must not be extracted */
+		if (!list_find_first(fields, _str_cmp, m_name)) {
+			free(m_name);
+			continue;
+		}
+		free(m_name);
 
-	hid_t	gid_tasks, gid_task = 0, gid_node = -1, gid_level = -1;
-	H5G_info_t group_info;
-	int	ntasks, itx, len, task_id;
-	char	task_name[MAX_GROUP_NAME+1];
-	char*   node_name;
-	char	buf[MAX_GROUP_NAME+1];
-	bool hd = true;
-
-	gid_tasks = get_group(gid_step, GRP_TASKS);
-	if (gid_tasks < 0)
-		fatal("No tasks in step %d", stepx);
-	H5Gget_info(gid_tasks, &group_info);
-	ntasks = (int) group_info.nlinks;
-	if (ntasks <= 0)
-		fatal("No tasks in step %d", stepx);
-
-	for (itx = 0; itx<ntasks; itx++) {
-		// Get the name of the group.
-		len = H5Lget_name_by_idx(gid_tasks, ".", H5_INDEX_NAME,
-		                         H5_ITER_INC, itx, buf, MAX_GROUP_NAME,
-		                         H5P_DEFAULT);
-		if ((len > 0) && (len < MAX_GROUP_NAME)) {
-			gid_task = H5Gopen(gid_tasks, buf, H5P_DEFAULT);
-			if (gid_task < 0)
-				fatal("Failed to open %s", buf);
-		} else
-			fatal("Illegal task name %s",buf);
-		task_id = get_int_attribute(gid_task, ATTR_TASKID);
-		node_name = get_string_attribute(gid_task, ATTR_NODENAME);
-		sprintf(task_name,"%s_%d", GRP_TASK, task_id);
-		gid_node = H5Gopen(gid_nodes, node_name, H5P_DEFAULT);
-		if (gid_node < 0)
-			fatal("Failed to open %s for Task_%d",
-					node_name, task_id);
-		gid_level = get_group(gid_node, GRP_SAMPLES);
-		if (gid_level < 0)
-			fatal("Failed to open group %s for node=%s task=%d",
-					GRP_SAMPLES,node_name, task_id);
-		_extract_series(fp, stepx, hd, gid_level, node_name, task_name);
-
-		hd = false;
-		xfree(node_name);
-		H5Gclose(gid_level);
-		H5Gclose(gid_node);
-		H5Gclose(gid_task);
+		/* get the member type */
+		if ((m_tid = H5Tget_member_type(tid, (unsigned)i)) < 0)
+			goto error;
+		if ((nm_tid = H5Tget_native_type(m_tid, H5T_DIR_DEFAULT)) < 0)
+			goto error;
+
+		types[nb_fields] = nm_tid;
+		offsets[nb_fields] = H5Tget_member_offset(n_tid, (unsigned)i);
+		++nb_fields;
+
+		/*H5Tclose(nm_tid);*/
+		H5Tclose(m_tid);
+	}
+
+	H5Tclose(n_tid);
+	H5Tclose(tid);
+	H5Dclose(did);
+
+	/* open the table */
+	if ((table_id = H5PTopen(fid_job, path)) < 0) {
+		error("Failed to open the series %s", params.input);
+		goto error;
+	}
+
+	if (level_total) {
+		_extract_totals(nb_fields, offsets, types, type_size,
+		                table_id, table, output);
+	} else {
+		/* Timeseries level */
+		H5PTget_num_packets(table_id, &nrecords);
+		uint8_t data[type_size];
+
+		/* print the expected fields of all the records */
+		for (i = 0; i < nrecords; ++i) {
+			H5PTget_next(table_id, 1, data);
+			fprintf(output, "%s,%s", table->step, table->node);
+			if (group_mode)
+				fprintf(output, ",%s", table->name);
+
+			for (j = 0; j < nb_fields; ++j) {
+				if (H5Tequal(types[j], H5T_NATIVE_UINT64)) {
+					fprintf(output, ",%"PRIu64,
+					        *(uint64_t *)(data+offsets[j]));
+				} else if (H5Tequal(types[j],
+				                    H5T_NATIVE_DOUBLE)) {
+					fprintf(output, ",%lf",
+					        *(double *)(data + offsets[j]));
+				} else {
+					error("Unknown type");
+					goto error;
+				}
+			}
+			fputc('\n', output);
+		}
 	}
-	H5Gclose(gid_tasks);
+
+	H5PTclose(table_id);
+
+	return SLURM_SUCCESS;
+
+error:
+	if (nm_tid >= 0) H5Dclose(nm_tid);
+	if (m_tid >= 0) H5Dclose(m_tid);
+	if (n_tid >= 0) H5Dclose(n_tid);
+	if (tid >= 0) H5Dclose(tid);
+	if (did >= 0) H5PTclose(did);
+	if (table_id >= 0) H5PTclose(table_id);
+	return SLURM_ERROR;
 }
 
-/* _extract_data()
+/* _extract_series()
  */
-static int _extract_data(void)
+static int _extract_series(void)
 {
-	hid_t fid_job;
-	hid_t jgid_root;
-	hid_t jgid_step;
-	hid_t jgid_nodes;
-	hid_t jgid_node;
-	hid_t jgid_level;
-	int	nsteps;
-	int nnodes;
-	int stepx;
-	int isx;
-	int len;
-	char jgrp_step_name[MAX_GROUP_NAME+1];
-	char jgrp_node_name[MAX_GROUP_NAME+1];
-	FILE *fp;
-
-	fp = fopen(params.output, "w");
-	if (fp == NULL) {
+	hid_t fid_job = -1;
+	bool level_total;
+	const char *field;
+	List tables = NULL;
+	List fields = NULL;
+	ListIterator it;
+	FILE *output = NULL;
+	int rc = SLURM_ERROR;
+	table_t *t;
+
+	level_total = (xstrcasecmp(params.level, "Node:Totals") == 0);
+
+	output = fopen(params.output, "w");
+	if (output == NULL) {
 		error("Failed to create output file %s -- %m",
 		      params.output);
+		goto error;
 	}
 
 	fid_job = H5Fopen(params.input, H5F_ACC_RDONLY, H5P_DEFAULT);
 	if (fid_job < 0) {
 		error("Failed to open %s", params.input);
-		return -1;
+		goto error;
 	}
 
-	jgid_root = H5Gopen(fid_job, "/", H5P_DEFAULT);
-	if (jgid_root < 0) {
-		H5Fclose(fid_job);
-		error("Failed to open  root");
-		return -1;
+	/* Find the list of tables to be extracted */
+	tables = list_create(_table_free);
+	if ((rc = _tables_list(fid_job, tables)) != SLURM_SUCCESS) {
+		debug("Failed to list tables %s", params.series);
+		goto error;
 	}
 
-	nsteps = get_int_attribute(jgid_root, ATTR_NSTEPS);
-	for (stepx = 0; stepx < nsteps; stepx++) {
-
-		if ((params.step_id != -1) && (stepx != params.step_id))
-			continue;
-
-		sprintf(jgrp_step_name, "%s_%d", GRP_STEP, stepx);
-		jgid_step = get_group(jgid_root, jgrp_step_name);
-		if (jgid_step < 0) {
-			error("Failed to open group %s", jgrp_step_name);
-			continue;
-		}
-
-		if (params.level && !strncasecmp(params.level, "Node:", 5)) {
-
-			nnodes = get_int_attribute(jgid_step, ATTR_NNODES);
-
-			jgid_nodes = get_group(jgid_step, GRP_NODES);
-			if (jgid_nodes < 0) {
-				H5Gclose(jgid_step);
-				error("Failed to open  group %s", GRP_NODES);
-				continue;
-			}
-
-			len = H5Lget_name_by_idx(jgid_nodes, ".", H5_INDEX_NAME,
-			                         H5_ITER_INC, 0, jgrp_node_name,
-			                         MAX_GROUP_NAME, H5P_DEFAULT);
-			if ((len < 0) || (len > MAX_GROUP_NAME)) {
-				H5Gclose(jgid_nodes);
-				H5Gclose(jgid_step);
-				error("Invalid node name %s", jgrp_node_name);
-				continue;
-			}
-
-			jgid_node = get_group(jgid_nodes, jgrp_node_name);
-			if (jgid_node < 0) {
-				H5Gclose(jgid_nodes);
-				H5Gclose(jgid_step);
-				info("Failed to open group %s", jgrp_node_name);
-				continue;
-			}
+	/* Find the fields to be extracted */
+	fields = list_create(_void_free);
+	if ((rc = _fields_intersection(fid_job, tables, fields))
+	    != SLURM_SUCCESS) {
+		error("Failed to find data items for series %s", params.series);
+		goto error;
+	}
 
-			jgid_level = _get_series_parent(jgid_node);
-			if (jgid_level == -1) {
-				H5Gclose(jgid_node);
-				H5Gclose(jgid_nodes);
-				H5Gclose(jgid_step);
-				continue;
-			}
+	/* csv header */
+	fprintf(output, "Step,Node");
 
-			_get_series_names(jgid_level);
-			H5Gclose(jgid_level);
-			H5Gclose(jgid_node);
-
-			if (!params.series || !strcmp(params.series, "*")) {
-				for (isx = 0; isx < num_series; isx++) {
-					if (strncasecmp(series_names[isx],
-							GRP_TASK,
-							strlen(GRP_TASK)) == 0)
-						continue;
-					_extract_node_level(fp, stepx, jgid_nodes,
-					                    nnodes,
-					                    series_names[isx]);
-					// Now handle all tasks.
-				}
-			} else if (strcasecmp(params.series, GRP_TASKS) == 0
-			           || strcasecmp(params.series, GRP_TASK) == 0) {
-				for (isx = 0; isx < num_series; isx++) {
-					if (strstr(series_names[isx],
-					           GRP_TASK)) {
-						_extract_node_level(fp, stepx, jgid_nodes,
-						                    nnodes,
-						                    series_names[isx]);
-					}
-				}
-			} else {
-				_extract_node_level(fp, stepx, jgid_nodes,
-				                    nnodes,
-				                    params.series);
-			}
+	if (group_mode) {
+		fprintf(output, ",Series");
+	}
 
-			_delete_string_list(series_names, num_series);
-			series_names = NULL;
-			num_series = 0;
-			if (!params.series || !strcmp(params.series, "*"))
-				_extract_all_tasks(fp, jgid_step, jgid_nodes,
-						nnodes, stepx);
+	if (level_total) {
+		/* do not aggregate time values */
+		list_delete_all(fields, _str_cmp, "ElapsedTime");
+		fputs(",ElapsedTime", output);
+	}
 
-			H5Gclose(jgid_nodes);
+	it = list_iterator_create(fields);
+	while ((field = list_next(it))) {
+		if (level_total) {
+			fprintf(output, ",Min_%s,Max_%s,Sum_%s,Avg_%s",
+			        field, field, field, field);
 		} else {
-			error("%s is an illegal level", params.level);
+			fprintf(output, ",%s", field);
 		}
-		H5Gclose(jgid_step);
 	}
+	fputc('\n', output);
+	list_iterator_destroy(it);
 
-	H5Gclose(jgid_root);
-	H5Fclose(fid_job);
-	fclose(fp);
+	/* Extract from every table */
+	it = list_iterator_create(tables);
+	while ((t = list_next(it))) {
+		if (_extract_series_table(fid_job, t, fields,
+		                          output, level_total) < 0) {
+			error("Failed to extract series");
+			goto error;
+		}
+	}
 
-	return 0;
+	FREE_NULL_LIST(tables);
+	FREE_NULL_LIST(fields);
+	H5Fclose(fid_job);
+	fclose(output);
+	return SLURM_SUCCESS;
+
+error:
+	FREE_NULL_LIST(fields);
+	FREE_NULL_LIST(tables);
+	if (output) fclose(output);
+	if (fid_job >= 0) H5Fclose(fid_job);
+	return rc;
 }
 
-
 /* ============================================================================
  * ============================================================================
  * Functions for data item extraction
  * ============================================================================
  * ========================================================================= */
 
-// Get the data_set for a node
-static void *_get_series_data(hid_t jgid_node, char* series,
-                              hdf5_api_ops_t **ops_p, int *nsmp)
+/**
+ * Perform the analysis on a given item of type uint64_t, present in multiple
+ * tables.
+ *
+ * @param nb_tables  Number of table to analyze
+ * @param tables     IDs of all the tables to analyze
+ * @param nb_records Number of records in each table
+ * @param buf_size   Size of the largest record of the tables
+ * @param offsets    Offset of the item analyzed in each table
+ * @param names      Names of the tables
+ * @param nodes      Name of the node for each table
+ * @param step_name  Name of the current step
+ */
+static void _item_analysis_uint(hsize_t nb_tables, hid_t *tables,
+	hsize_t *nb_records, size_t buf_size, size_t *offsets,
+	const char *names[], const char *nodes[], const char *step_name)
 {
+	size_t   i;
+	uint64_t min_val;
+	size_t   min_idx;
+	uint64_t max_val;
+	size_t   max_idx;
+	uint64_t sum, sum_max = 0;
+	double   avg, avg_max = 0;
+	size_t   nb_series_in_smp;
+	uint64_t v;
+	uint64_t values[nb_tables];
+	uint8_t  *buffer;
+	uint64_t et, et_max = 0;
+
+	buffer = xmalloc(buf_size);
+	for (;;) {
+		min_val = UINT64_MAX;
+		max_val = 0;
+		sum = 0;
+		nb_series_in_smp = 0;
+
+		/* compute aggregate values */
+		for (i = 0; i < nb_tables; ++i) {
+			if (nb_records[i] == 0)
+				continue;
+			--nb_records[i];
+			++nb_series_in_smp;
+			/* read the value of the item in the series i */
+			H5PTget_next(tables[i], 1, (void *)buffer);
+			v = *(uint64_t *)(buffer + offsets[i]);
+			values[i] = v;
+			/* compute the sum, min and max */
+			sum += v;
+			if ((i == 0) || (v < min_val)) {
+				min_val = v;
+				min_idx = i;
+			}
+			if ((i == 0) || (v > max_val)) {
+				max_val = v;
+				max_idx = i;
+			}
+			/* Elapsed time is always at offset 0 */
+			et = *(uint64_t *)buffer;
+		}
+
+		if (nb_series_in_smp == 0) /* stop if no more samples */
+			break;
 
-	hid_t	gid_level, gid_series;
-	int 	size_data;
-	void	*data;
-	uint32_t type;
-	char	*data_type;
-	hdf5_api_ops_t* ops;
+		avg = (double)sum / (double)nb_series_in_smp;
 
-	*nsmp = 0;	// Initialize return arguments.
-	*ops_p = NULL;
+		/* store the greatest sum */
+		if (sum > sum_max) {
+			sum_max = sum;
+			avg_max = avg;
+			et_max = et;
+		}
 
-	// Navigate from the node group to the data set
-	gid_level = get_group(jgid_node, GRP_SAMPLES);
-	if (gid_level == -1) {
-		return NULL;
-	}
-	gid_series = get_group(gid_level, series);
-	if (gid_series < 0) {
-		// This is okay, may not have ran long enough for
-		// a sample (srun hostname)
-		H5Gclose(gid_level);
-		return NULL;
-	}
-	data_type = get_string_attribute(gid_series, ATTR_DATATYPE);
-	if (!data_type) {
-		H5Gclose(gid_series);
-		H5Gclose(gid_level);
-		debug("No datatype in %s", series);
-		return NULL;
-	}
-	// Invoke the data type operator to get the data set
-	type = acct_gather_profile_type_from_string(data_type);
-	xfree(data_type);
-	ops = profile_factory(type);
-	if (ops == NULL) {
-		H5Gclose(gid_series);
-		H5Gclose(gid_level);
-		debug("Failed to create operations for %s",
-		      acct_gather_profile_type_to_string(type));
-		return NULL;
-	}
-	data = get_hdf5_data(gid_series, type, series, &size_data);
-	if (data) {
-		*nsmp = (size_data / ops->dataset_size());
-		*ops_p = ops;
-	} else {
-		xfree(ops);
+		if (group_mode) {
+			fprintf(output_file,
+			        "%s,%"PRIu64",%s %s,%"PRIu64",%s %s,"
+				"%"PRIu64",%"PRIu64",%lf,%"PRIu64,
+			        step_name, et,
+			        names[min_idx], nodes[min_idx], min_val,
+			        names[max_idx], nodes[max_idx], max_val,
+			        sum, avg, nb_series_in_smp);
+		} else {
+			fprintf(output_file,
+			        "%s,%"PRIu64",%s,%"PRIu64",%s,%"PRIu64",%"
+				PRIu64",%lf,%"PRIu64,
+			        step_name, et,
+			        nodes[min_idx], min_val,
+			        nodes[max_idx], max_val,
+			        sum, avg, nb_series_in_smp);
+		}
+
+		/* print value of each series */
+		for (i = 0; i < nb_tables; ++i) {
+			fprintf(output_file, ",%"PRIu64, values[i]);
+			/* and set their values to zero if no more values */
+			if (values[i] && nb_records[i] == 0)
+				values[i] = 0;
+		}
+		fputc('\n', output_file);
 	}
-	H5Gclose(gid_series);
-	H5Gclose(gid_level);
-	return data;
+	xfree(buffer);
+
+	printf("    Step %s Maximum accumulated %s Value (%"PRIu64") occurred "
+	       "at Time=%"PRIu64", Ave Node %lf\n",
+	       step_name, params.data_item, sum_max, et_max, avg_max);
 }
 
-static void _series_analysis(FILE *fp, bool hd, int stepx, int nseries,
-                             int nsmp, char **series_name, char **tod, double *et,
-                             double **all_series, uint64_t *series_smp)
+/**
+ * Perform the analysis on a given item of type double, present in multiple
+ * tables.
+ * See _item_analysis_uint for parameters description.
+ */
+static void _item_analysis_double(hsize_t nb_tables, hid_t *tables,
+	hsize_t *nb_records, size_t buf_size, size_t *offsets,
+	const char *names[], const char *nodes[], const char *step_name)
 {
-	double *mn_series;	// Min Value, each sample
-	double *mx_series;	// Max value, each sample
-	double *sum_series;	// Total of all series, each sample
-	double *smp_series;	// all samples for one node
-	uint64_t *mn_sx;	// Index of series with minimum value
-	uint64_t *mx_sx;   	// Index of series with maximum value
-	uint64_t *series_in_smp; // Number of series in the sample
-	int max_smpx = 0;
-	double max_smp_series = 0;
-	double ave_series;
-	int ix, isx;
-
-	mn_series = xmalloc(nsmp * sizeof(double));
-	mx_series = xmalloc(nsmp * sizeof(double));
-	sum_series =xmalloc(nsmp * sizeof(double));
-	mn_sx = xmalloc(nsmp * sizeof(uint64_t));
-	mx_sx = xmalloc(nsmp * sizeof(uint64_t));
-	series_in_smp = xmalloc(nsmp * sizeof(uint64_t));
-
-	for (ix = 0; ix < nsmp; ix++) {
-		for (isx=0; isx<nseries; isx++) {
-			if (series_smp[isx]<nsmp && ix>=series_smp[isx])
+	size_t   i;
+	double   min_val;
+	size_t   min_idx;
+	double   max_val;
+	size_t   max_idx;
+	double   sum, sum_max = 0;
+	double   avg, avg_max = 0;
+	size_t   nb_series_in_smp;
+	double   v;
+	double   values[nb_tables];
+	uint8_t  *buffer;
+	uint64_t et, et_max = 0;
+
+	buffer = xmalloc(buf_size);
+	for (;;) {
+		min_val = UINT64_MAX;
+		max_val = 0;
+		sum = 0;
+		nb_series_in_smp = 0;
+
+		/* compute aggregate values */
+		for (i = 0; i < nb_tables; ++i) {
+			if (nb_records[i] == 0)
 				continue;
-			series_in_smp[ix]++;
-			smp_series = all_series[isx];
-			if (smp_series) {
-				sum_series[ix] += smp_series[ix];
-				if (mn_series[ix] == 0
-				    || smp_series[ix] < mn_series[ix]) {
-					mn_series[ix] = smp_series[ix];
-					mn_sx[ix] = isx;
-				}
-				if (mx_series[ix] == 0
-				    || smp_series[ix] > mx_series[ix]) {
-					mx_series[ix] = smp_series[ix];
-					mx_sx[ix] = isx;
-				}
+			--nb_records[i];
+			++nb_series_in_smp;
+			/* read the value of the item in the series i */
+			H5PTget_next(tables[i], 1, (void *)buffer);
+			v = *(double *)(buffer + offsets[i]);
+			values[i] = v;
+			/* compute the sum, min and max */
+			sum += v;
+			if ((i == 0) || (v < min_val)) {
+				min_val = v;
+				min_idx = i;
 			}
+			if ((i == 0) || (v > max_val)) {
+				max_val = v;
+				max_idx = i;
+			}
+			/* Elapsed time is always at offset 0 */
+			et = *(double *)buffer;
 		}
-	}
 
-	for (ix = 0; ix < nsmp; ix++) {
-		if (sum_series[ix] > max_smp_series) {
-			max_smpx = ix;
-			max_smp_series = sum_series[ix];
-		}
-	}
+		if (nb_series_in_smp == 0) /* stop if no more samples */
+			break;
+
+		avg = (double)sum / (double)nb_series_in_smp;
 
-	ave_series = sum_series[max_smpx] / series_in_smp[max_smpx];
-	printf("    Step %d Maximum accumulated %s Value (%f) occurred "
-	       "at %s (Elapsed Time=%d) Ave Node %f\n",
-	       stepx, params.data_item, max_smp_series,
-	       tod[max_smpx], (int) et[max_smpx], ave_series);
-
-	// Put data for step
-	if (!hd) {
-		fprintf(fp,"TOD,Et,JobId,StepId,Min Node,Min %s,"
-		        "Ave %s,Max Node,Max %s,Total %s,"
-		        "Num Nodes",params.data_item,params.data_item,
-		        params.data_item,params.data_item);
-		for (isx = 0; isx < nseries; isx++) {
-			fprintf(fp,",%s",series_name[isx]);
+		/* store the greatest sum */
+		if (sum > sum_max) {
+			sum_max = sum;
+			avg_max = avg;
+			et_max = et;
 		}
-		fprintf(fp,"\n");
-	}
 
-	for (ix = 0; ix < nsmp; ix++) {
-		fprintf(fp,"%s, %d",tod[ix], (int) et[ix]);
-		fprintf(fp,",%d,%d",params.job_id,stepx);
-		fprintf(fp,",%s,%f",series_name[mn_sx[ix]],
-		        mn_series[ix]);
-		ave_series = sum_series[ix] / series_in_smp[ix];
-		fprintf(fp,",%f",ave_series);
-		fprintf(fp,",%s,%f",series_name[mx_sx[ix]],
-		        mx_series[ix]);
-		fprintf(fp,",%f",sum_series[ix]);
-		fprintf(fp,",%"PRIu64"",series_in_smp[ix]);
-		for (isx = 0; isx < nseries; isx++) {
-			if (series_smp[isx]<nsmp && ix>=series_smp[isx]) {
-				fprintf(fp,",0.0");
-			} else {
-				smp_series = all_series[isx];
-				fprintf(fp,",%f",smp_series[ix]);
-			}
+		fprintf(output_file,
+			"%s,%"PRIu64",%s,%lf,%s,%lf,%lf,%lf,%"PRIu64,
+		        step_name, et,
+		        names[min_idx], min_val, names[max_idx], max_val,
+		        sum, avg, nb_series_in_smp);
+
+		/* print value of each series */
+		for (i = 0; i < nb_tables; ++i) {
+			fprintf(output_file, ",%lf", values[i]);
+			/* and set their values to zero if no more values */
+			if (values[i] && nb_records[i] == 0)
+				values[i] = 0;
 		}
-		fprintf(fp,"\n");
+		fputc('\n', output_file);
 	}
+	xfree(buffer);
 
-	xfree(mn_series);
-	xfree(mx_series);
-	xfree(sum_series);
-	xfree(mn_sx);
-	xfree(mx_sx);
+	printf("    Step %s Maximum accumulated %s Value (%lf) occurred "
+	       "at Time=%"PRIu64", Ave Node %lf\n",
+	       step_name, params.data_item, sum_max, et_max, avg_max);
 }
 
-static void _get_all_node_series(FILE *fp, bool hd, hid_t jgid_step, int stepx)
+static herr_t _extract_item_step(hid_t g_id, const char *step_name,
+                                 const H5L_info_t *link_info, void *op_data)
 {
-	char     **tod = NULL;  // Date time at each sample
-	char     **node_name;	// Node Names
-	double **all_series;	// Pointers to all sampled for each node
-	double *et = NULL;	// Elapsed time at each sample
-	uint64_t *series_smp;   // Number of samples in this series
-
-	hid_t	jgid_nodes, jgid_node;
-	int	nnodes, ndx, len, nsmp = 0, nitem = -1;
-	char	jgrp_node_name[MAX_GROUP_NAME+1];
-	void*   series_data = NULL;
-	hdf5_api_ops_t* ops;
-
-	nnodes = get_int_attribute(jgid_step, ATTR_NNODES);
-	// allocate node arrays
-
-	series_smp = xmalloc(nnodes * (sizeof(uint64_t)));
-	if (series_smp == NULL) {
-		fatal("Failed to get memory for node_samples");
-		return;		/* fix for CLANG false positive */
+	static bool first = true;
+
+	char nodes_path[MAX_PROFILE_PATH];
+	char path[MAX_PROFILE_PATH];
+
+	size_t i, j;
+	size_t buf_size = 0;
+	char *m_name;
+
+	hid_t fid_job = *((hid_t *)op_data);
+	hid_t did = -1;    /* dataset id */
+	hid_t tid = -1;    /* file type ID */
+	hid_t n_tid = -1;  /* native type ID */
+	hid_t m_tid = -1;  /* member type ID */
+	hid_t nm_tid = -1; /* native member ID */
+	hsize_t nmembers;
+	hid_t item_type = -1;
+	herr_t err;
+
+	List tables = NULL;
+	ListIterator it = NULL;
+	table_t *t;
+
+	/* step filter */
+	if ((params.step_id != -1) && (atoi(step_name) != params.step_id))
+		return 0;
+
+	current_step = step_name;
+
+	snprintf(nodes_path, MAX_PROFILE_PATH, "%s/"GRP_NODES, step_name);
+
+	tables = list_create(_table_free);
+	err = H5Literate_by_name(g_id, nodes_path, H5_INDEX_NAME,
+	                         H5_ITER_INC, NULL, _collect_tables_node,
+	                         (void *)tables, H5P_DEFAULT);
+	if (err < 0) {
+		debug("1 Failed to iterate through group /"GRP_STEPS"/%s",
+		      nodes_path);
+		FREE_NULL_LIST(tables);
+		return -1;
 	}
 
-	node_name = xmalloc(nnodes * (sizeof(char*)));
-	if (node_name == NULL) {
-		fatal("Failed to get memory for node_name");
-		return;		/* fix for CLANG false positive */
-	}
+	size_t nb_tables = list_count(tables);
+	hid_t tables_id[nb_tables];
+	size_t offsets[nb_tables];
+	hsize_t nb_records[nb_tables];
+	const char *names[nb_tables];
+	const char *nodes[nb_tables];
 
-	all_series = xmalloc(nnodes * (sizeof(double*)));
-	if (all_series == NULL) {
-		fatal("Failed to get memory for all_series");
-		return;		/* fix for CLANG false positive */
+	for (i = 0; i < nb_tables; ++i) {
+		tables_id[i] = -1;
+		nb_records[i] = 0;
 	}
 
-	jgid_nodes = get_group(jgid_step, GRP_NODES);
-	if (jgid_nodes < 0)
-		fatal("Failed to open  group %s", GRP_NODES);
+	it = list_iterator_create(tables);
+	i = 0;
+	while ((t = (table_t *)list_next(it))) {
+		names[i] = t->name;
+		nodes[i] = t->node;
 
-	for (ndx=0; ndx<nnodes; ndx++) {
-		len = H5Lget_name_by_idx(jgid_nodes, ".", H5_INDEX_NAME,
-		                         H5_ITER_INC, ndx, jgrp_node_name,
-		                         MAX_GROUP_NAME, H5P_DEFAULT);
-		if ((len < 0) || (len > MAX_GROUP_NAME)) {
-			debug("Invalid node name=%s", jgrp_node_name);
-			continue;
+		/* open the dataset. */
+		_table_path(t, path);
+		if ((did = H5Dopen(fid_job, path, H5P_DEFAULT)) < 0) {
+			error("Failed to open the series %s", path);
+			goto error;
 		}
-		node_name[ndx] = xstrdup(jgrp_node_name);
-		jgid_node = get_group(jgid_nodes, jgrp_node_name);
-		if (jgid_node < 0) {
-			debug("Failed to open group %s", jgrp_node_name);
-			continue;
+
+		/* get the datatype */
+		if ((tid = H5Dget_type(did)) < 0)
+			goto error;
+		if ((n_tid = H5Tget_native_type(tid, H5T_DIR_DEFAULT)) < 0)
+			goto error;
+
+		buf_size = MAX(buf_size, H5Tget_size(n_tid));
+
+		/* get the number of members */
+		if ((nmembers = H5Tget_nmembers(tid)) == 0)
+			goto error;
+
+		/* iterate through the members and stop when params.data_item
+		 * is found */
+		for (j = 0; j < nmembers; j++) {
+			m_name = H5Tget_member_name(tid, (unsigned)j);
+			if (xstrcasecmp(params.data_item, m_name) == 0) {
+				free(m_name);
+				break;
+			}
+			free(m_name);
 		}
-		ops = NULL;
-		nitem = 0;
-		series_data = _get_series_data(jgid_node, params.series,
-		                               &ops, &nitem);
-		if (series_data==NULL || nitem==0 || ops==NULL) {
-			if (ops != NULL)
-				xfree(ops);
-			continue;
+
+		if (j == nmembers) {
+			error("Item %s not found in series %s",
+			      params.data_item, path);
+			goto error;
 		}
-		all_series[ndx] = ops->get_series_values(
-			params.data_item, series_data, nitem);
-		if (!all_series[ndx])
-			fatal("No data item %s",params.data_item);
-		series_smp[ndx] = nitem;
-		if (ndx == 0) {
-			nsmp = nitem;
-			tod = ops->get_series_tod(series_data, nitem);
-			et = ops->get_series_values("time",
-			                            series_data, nitem);
-		} else {
-			if (nitem > nsmp) {
-				// new largest number of samples
-				_delete_string_list(tod, nsmp);
-				xfree(et);
-				nsmp = nitem;
-				tod = ops->get_series_tod(series_data,
-				                          nitem);
-				et = ops->get_series_values("time",
-				                            series_data, nitem);
-			}
+
+		offsets[i] = H5Tget_member_offset(n_tid, (unsigned)j);
+
+		/* get the member type */
+		if ((m_tid = H5Tget_member_type(tid, (unsigned)j)) < 0)
+			goto error;
+		if ((nm_tid = H5Tget_native_type(m_tid, H5T_DIR_DEFAULT)) < 0)
+			goto error;
+
+		if (item_type == -1) {
+			item_type = nm_tid;
+		} else if (nm_tid != item_type) {
+			error("Malformed file: fields with the same name in "
+			      "tables with the same name must have the same "
+			      "types");
+			goto error;
+		}
+
+		H5Tclose(nm_tid);
+		H5Tclose(m_tid);
+
+		H5Tclose(n_tid);
+		H5Tclose(tid);
+		H5Dclose(did);
+
+		/* open the table */
+		if ((tables_id[i] = H5PTopen(fid_job, path)) < 0) {
+			error("Failed to open the series %s", path);
+			goto error;
 		}
-		xfree(ops);
-		xfree(series_data);
-		H5Gclose(jgid_node);
+		H5PTget_num_packets(tables_id[i], &nb_records[i]);
+
+		++i;
+	}
+
+	if (first) {
+		/* complete header */
+		first = false;
+		list_iterator_reset(it);
+		while ((t = (table_t *)list_next(it))) {
+			if (group_mode)
+				fprintf(output_file, ",%s", t->node);
+			else
+				fprintf(output_file, ",%s %s", t->name, t->node);
+		}
+		fputc('\n', output_file);
 	}
-	if (nsmp == 0) {
-		// May be bad series name
-		info("No values %s for series %s found in step %d",
-		     params.data_item,params.series,
-		     stepx);
+
+	list_iterator_destroy(it);
+
+	if (H5Tequal(item_type, H5T_NATIVE_UINT64)) {
+		_item_analysis_uint(nb_tables, tables_id, nb_records, buf_size,
+		                    offsets, names, nodes, step_name);
+	} else if (H5Tequal(item_type, H5T_NATIVE_DOUBLE)) {
+		_item_analysis_double(nb_tables, tables_id, nb_records, buf_size,
+		                      offsets, names, nodes, step_name);
 	} else {
-		_series_analysis(fp, hd, stepx, nnodes, nsmp,
-		                 node_name, tod, et, all_series, series_smp);
+		error("Unknown type");
+		goto error;
 	}
-	for (ndx=0; ndx<nnodes; ndx++) {
-		xfree(node_name[ndx]);
-		xfree(all_series[ndx]);
+
+	/* clean up */
+	for (i = 0; i < nb_tables; ++i) {
+		H5PTclose(tables_id[i]);
 	}
-	xfree(node_name);
-	xfree(all_series);
-	xfree(series_smp);
-	_delete_string_list(tod, nsmp);
-	xfree(et);
+	FREE_NULL_LIST(tables);
 
-	H5Gclose(jgid_nodes);
+	return 0;
 
+error:
+	if (did >= 0) H5Dclose(did);
+	if (tid >= 0) H5Tclose(tid);
+	if (n_tid >= 0) H5Tclose(n_tid);
+	if (m_tid >= 0) H5Tclose(m_tid);
+	if (nm_tid >= 0) H5Tclose(nm_tid);
+	FREE_NULL_LIST(tables);
+	for (i = 0; i < nb_tables; ++i) {
+		if (tables_id[i] >= 0)
+			H5PTclose(tables_id[i]);
+	}
+	return -1;
 }
 
-static void _get_all_task_series(FILE *fp, bool hd, hid_t jgid_step, int stepx)
+static int _extract_item(void)
 {
+	hid_t fid_job;
+	herr_t err;
 
-	hid_t	jgid_tasks, jgid_task = 0, jgid_nodes, jgid_node;
-	H5G_info_t group_info;
-	int	ntasks,itx, tid;
-	uint64_t *task_id;
-	char     **task_node_name;	/* Node Name for each task */
-	char     **tod = NULL;  /* Date time at each sample */
-	char     **series_name;	/* Node Names */
-	double **all_series;	/* Pointers to all sampled for each node */
-	double *et = NULL;	/* Elapsed time at each sample */
-	uint64_t *series_smp;   /* Number of samples in this series */
-	int	nnodes, ndx, len, nsmp = 0, nitem = -1;
-	char	jgrp_node_name[MAX_GROUP_NAME+1];
-	char	jgrp_task_name[MAX_GROUP_NAME+1];
-	char	buf[MAX_GROUP_NAME+1];
-	void*   series_data = NULL;
-	hdf5_api_ops_t* ops;
-
-	jgid_nodes = get_group(jgid_step, GRP_NODES);
-	if (jgid_nodes < 0)
-		fatal("Failed to open  group %s", GRP_NODES);
-	jgid_tasks = get_group(jgid_step, GRP_TASKS);
-	if (jgid_tasks < 0)
-		fatal("No tasks in step %d", stepx);
-	H5Gget_info(jgid_tasks, &group_info);
-	ntasks = (int) group_info.nlinks;
-	if (ntasks <= 0)
-		fatal("No tasks in step %d", stepx);
-	task_id = xmalloc(ntasks*sizeof(uint64_t));
-	if (task_id == NULL)
-		fatal("Failed to get memory for task_ids");
-	task_node_name = xmalloc(ntasks*sizeof(char*));
-	if (task_node_name == NULL)
-		fatal("Failed to get memory for task_node_names");
-
-	for (itx = 0; itx<ntasks; itx++) {
-		// Get the name of the group.
-		len = H5Lget_name_by_idx(jgid_tasks, ".", H5_INDEX_NAME,
-		                         H5_ITER_INC, itx, buf, MAX_GROUP_NAME,
-		                         H5P_DEFAULT);
-		if ((len > 0) && (len < MAX_GROUP_NAME)) {
-			jgid_task = H5Gopen(jgid_tasks, buf, H5P_DEFAULT);
-			if (jgid_task < 0)
-				fatal("Failed to open %s", buf);
-		} else
-			fatal("Illegal task name %s",buf);
-		task_id[itx] = get_int_attribute(jgid_task, ATTR_TASKID);
-		task_node_name[itx] = get_string_attribute(jgid_task,
-		                                           ATTR_NODENAME);
-		H5Gclose(jgid_task);
-	}
-	H5Gclose(jgid_tasks);
-
-	nnodes = get_int_attribute(jgid_step, ATTR_NNODES);
-	// allocate node arrays
-	series_smp = (uint64_t*) xmalloc(ntasks*(sizeof(uint64_t)));
-	if (series_smp == NULL) {
-		fatal("Failed to get memory for node_samples");
-		return; /* Fix for CLANG false positive */
+	output_file = fopen(params.output, "w");
+	if (output_file == NULL) {
+		error("Failed to create output file %s -- %m",
+		      params.output);
 	}
-	series_name = (char**) xmalloc(ntasks*(sizeof(char*)));
-	if (series_name == NULL) {
-		fatal("Failed to get memory for series_name");
-		return; /* Fix for CLANG false positive */
+
+	fid_job = H5Fopen(params.input, H5F_ACC_RDONLY, H5P_DEFAULT);
+	if (fid_job < 0) {
+		fclose(output_file);
+		error("Failed to open %s", params.input);
+		return SLURM_ERROR;
 	}
-	all_series = (double**) xmalloc(ntasks*(sizeof(double*)));
-	if (all_series == NULL) {
-		fatal("Failed to get memory for all_series");
-		return; /* Fix for CLANG false positive */
+
+	/* csv header */
+	fputs("Step,ElaspedTime,MinNode,MinValue,MaxNode,MaxValue,Sum,Avg,"
+	      "NumNodes", output_file);
+
+	err = H5Literate_by_name(fid_job, "/" GRP_STEPS, H5_INDEX_NAME,
+	                         H5_ITER_INC, NULL, _extract_item_step,
+	                         (void *)(&fid_job), H5P_DEFAULT);
+	if (err < 0) {
+		debug("hnere Failed to iterate through group /" GRP_STEPS);
+		H5Fclose(fid_job);
+		fclose(output_file);
+		return SLURM_PROTOCOL_VERSION_ERROR;
 	}
 
-	for (ndx=0; ndx<nnodes; ndx++) {
+	H5Fclose(fid_job);
+	fclose(output_file);
 
-		len = H5Lget_name_by_idx(jgid_nodes, ".", H5_INDEX_NAME,
-		                         H5_ITER_INC, ndx, jgrp_node_name,
-		                         MAX_GROUP_NAME, H5P_DEFAULT);
-		if ((len < 0) || (len > MAX_GROUP_NAME))
-			fatal("Invalid node name=%s", jgrp_node_name);
-		jgid_node = get_group(jgid_nodes, jgrp_node_name);
+	return SLURM_SUCCESS;
+}
 
-		if (jgid_node < 0)
-			fatal("Failed to open group %s", jgrp_node_name);
-		for (itx = 0; itx<ntasks; itx++) {
-			if (strcmp(jgrp_node_name, task_node_name[itx]) != 0)
-				continue;
-			tid = task_id[itx];
-			series_name[itx] = xstrdup_printf("%s_%d %s",
-			                                  GRP_TASK,tid,jgrp_node_name);
-			sprintf(jgrp_task_name,"%s_%d",GRP_TASK, tid);
-
-			ops = NULL;
-			nitem = 0;
-			series_data = _get_series_data(jgid_node,
-			                               jgrp_task_name, &ops, &nitem);
-			if (series_data==NULL || nitem==0 || ops==NULL) {
-				if (ops != NULL)
-					xfree(ops);
-				continue;
+static int _fields_intersection(hid_t fid_job, List tables, List fields)
+{
+	hid_t jgid_table = -1;
+	hid_t tid = -1;
+	hssize_t nb_fields;
+	size_t i;
+	char *field;
+	ListIterator it1, it2;
+	bool found;
+	char path[MAX_PROFILE_PATH];
+	table_t *t;
+	bool first = true;
+
+	if (fields == NULL || tables == NULL)
+		return SLURM_ERROR;
+
+	it1 = list_iterator_create(tables);
+	while ((t = (table_t *)list_next(it1))) {
+		_table_path(t, path);
+		jgid_table = H5Dopen(fid_job, path, H5P_DEFAULT);
+		if (jgid_table < 0) {
+			error("Failed to open table %s", path);
+			return SLURM_ERROR;
+		}
+
+		tid = H5Dget_type(jgid_table);
+		nb_fields = H5Tget_nmembers(tid);
+
+		if (first) {
+			first = false;
+			/* nothing to intersect yet, copy all the fields */
+			for (i = 0; i < nb_fields; i++) {
+				field = H5Tget_member_name(tid, i);
+				list_append(fields, xstrdup(field));
+				free(field);
+			}
+		} else {
+			/* gather fields */
+			char *l_fields[nb_fields];
+			for (i = 0; i < nb_fields; i++) {
+				l_fields[i] = H5Tget_member_name(tid, i);
 			}
-			all_series[itx] = ops->get_series_values(
-				params.data_item, series_data, nitem);
-			if (!all_series[ndx])
-				fatal("No data item %s",params.data_item);
-			series_smp[itx] = nitem;
-			if (nsmp == 0) {
-				nsmp = nitem;
-				tod = ops->get_series_tod(series_data, nitem);
-				et = ops->get_series_values("time",
-				                            series_data, nitem);
-			} else {
-				if (nitem > nsmp) {
-					// new largest number of samples
-					_delete_string_list(tod, nsmp);
-					xfree(et);
-					nsmp = nitem;
-					tod = ops->get_series_tod(series_data,
-					                          nitem);
-					et = ops->get_series_values("time",
-					                            series_data, nitem);
+			/* remove fields that are not in current table */
+			it2 = list_iterator_create(fields);
+			while ((field = list_next(it2))) {
+				found = false;
+				for (i = 0; i < nb_fields; i++) {
+					if (xstrcmp(field, l_fields[i]) == 0) {
+						found = true;
+						break;
+					}
+				}
+				if (!found) {
+					list_delete_item(it2);
 				}
 			}
-			xfree(ops);
-			xfree(series_data);
+			list_iterator_destroy(it2);
+			/* clean up fields */
+			for (i = 0; i < nb_fields; i++)
+				free(l_fields[i]);
 		}
-		H5Gclose(jgid_node);
-	}
-	if (nsmp == 0) {
-		// May be bad series name
-		info("No values %s for series %s found in step %d",
-		     params.data_item,params.series,
-		     stepx);
-	} else {
-		_series_analysis(fp, hd, stepx, ntasks, nsmp,
-		                 series_name, tod, et, all_series, series_smp);
-	}
-	for (itx=0; itx<ntasks; itx++) {
-		xfree(all_series[itx]);
+
+		H5Tclose(tid);
+		H5Dclose(jgid_table);
 	}
-	xfree(series_name);
-	xfree(all_series);
-	xfree(series_smp);
-	_delete_string_list(tod, nsmp);
-	xfree(et);
-	_delete_string_list(task_node_name, ntasks);
-	xfree(task_id);
-
-	H5Gclose(jgid_nodes);
+	list_iterator_destroy(it1);
+
+	return SLURM_SUCCESS;
 }
 
-static int _series_data(void)
+/* List the intersection of the items of all tables with the same name, for all
+ * table names. The list is printed on the standard output */
+static int _list_items(void)
 {
-	FILE *fp;
-	bool hd = false;
-	hid_t fid_job;
-	hid_t jgid_root;
-	hid_t jgid_step;
-	int	nsteps;
-	int stepx;
-	char jgrp_step_name[MAX_GROUP_NAME + 1];
-
-	fp = fopen(params.output, "w");
-	if (fp == NULL) {
-		error("Failed open file %s -- %m", params.output);
-		return -1;
-	}
+	hid_t fid_job = -1;
+	List fields;
+	ListIterator it;
+	const char *field;
+	int rc = SLURM_ERROR;
+	List tables;
 
+	/* get series names */
 	fid_job = H5Fopen(params.input, H5F_ACC_RDONLY, H5P_DEFAULT);
 	if (fid_job < 0) {
-		fclose(fp);
 		error("Failed to open %s", params.input);
-		return -1;
+		return SLURM_ERROR;
 	}
 
-	jgid_root = H5Gopen(fid_job, "/", H5P_DEFAULT);
-	if (jgid_root < 0) {
-		fclose(fp);
+	/* Find the list of tables to be extracted */
+	tables = list_create(_table_free);
+	if ((rc = _tables_list(fid_job, tables)) != SLURM_SUCCESS) {
+		debug("Failed to list tables %s", params.series);
 		H5Fclose(fid_job);
-		error("Failed to open root");
-		return -1;
+		FREE_NULL_LIST(tables);
+		return rc;
 	}
 
-	nsteps = get_int_attribute(jgid_root, ATTR_NSTEPS);
-	for (stepx = 0; stepx < nsteps; stepx++) {
-
-		if ((params.step_id != -1) && (stepx != params.step_id))
-			continue;
-
-		sprintf(jgrp_step_name, "%s_%d", GRP_STEP, stepx);
-		jgid_step = get_group(jgid_root, jgrp_step_name);
-		if (jgid_step < 0) {
-			error("Failed to open  group %s", jgrp_step_name);
-			return -1;
-		}
-
-		if (strncmp(params.series,GRP_TASK,strlen(GRP_TASK)) == 0)
-			_get_all_task_series(fp,hd,jgid_step, stepx);
-		else
-			_get_all_node_series(fp,hd,jgid_step, stepx);
+	fields = list_create(_void_free);
+	if ((rc = _fields_intersection(fid_job, tables, fields))
+	    != SLURM_SUCCESS) {
+		error("Failed to intersect fields");
+		H5Fclose(fid_job);
+		FREE_NULL_LIST(tables);
+		FREE_NULL_LIST(fields);
+		return rc;
+	}
 
-		hd = true;
-		H5Gclose(jgid_step);
+	it = list_iterator_create(fields);
+	while ((field = list_next(it))) {
+		printf("%s\n", field);
 	}
+	list_iterator_destroy(it);
+
+	FREE_NULL_LIST(tables);
+	FREE_NULL_LIST(fields);
 
-	H5Gclose(jgid_root);
 	H5Fclose(fid_job);
-	fclose(fp);
 
-	return 0;
+	return SLURM_SUCCESS;
 }
diff --git a/src/plugins/acct_gather_profile/hdf5/sh5util/sh5util.h b/src/plugins/acct_gather_profile/hdf5/sh5util/sh5util.h
new file mode 100644
index 000000000..8f111df85
--- /dev/null
+++ b/src/plugins/acct_gather_profile/hdf5/sh5util/sh5util.h
@@ -0,0 +1,70 @@
+/*****************************************************************************\
+ *  sh5util.h - slurm profile accounting plugin for io and energy using hdf5.
+ *            - Utility to merge node-step files into a job file
+ *            - or extract data from an job file
+ *****************************************************************************
+ *  Copyright (C) 2015 SchedMD LLC
+ *
+ *  Written by Danny Auble <da@schedmd.com> @ SchedMD.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://www.schedmd.com/slurmdocs/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+ *
+\*****************************************************************************/
+
+#ifndef __ACCT_SH5UTIL_H__
+#define __ACCT_SH5UTIL_H__
+
+typedef enum {
+	SH5UTIL_MODE_MERGE,
+	SH5UTIL_MODE_EXTRACT,
+	SH5UTIL_MODE_ITEM_EXTRACT,
+	SH5UTIL_MODE_ITEM_LIST,
+} sh5util_mode_t;
+
+typedef struct {
+	char *dir;
+	int help;
+	char *input;
+	int job_id;
+	bool keepfiles;
+	char *level;
+	sh5util_mode_t mode;
+	char *node;
+	char *output;
+	char *series;
+	char *data_item;
+	int step_id;
+	char *user;
+	int verbose;
+} sh5util_opts_t;
+
+extern sh5util_opts_t params;
+
+#endif // __ACCT_SH5UTIL_H__
diff --git a/src/plugins/acct_gather_profile/none/Makefile.in b/src/plugins/acct_gather_profile/none/Makefile.in
index 7fb07f1ed..8aa5fa909 100644
--- a/src/plugins/acct_gather_profile/none/Makefile.in
+++ b/src/plugins/acct_gather_profile/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/acct_gather_profile/none/acct_gather_profile_none.c b/src/plugins/acct_gather_profile/none/acct_gather_profile_none.c
index fc1161337..6a093cef2 100644
--- a/src/plugins/acct_gather_profile/none/acct_gather_profile_none.c
+++ b/src/plugins/acct_gather_profile/none/acct_gather_profile_none.c
@@ -47,6 +47,7 @@
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_protocol_defs.h"
 #include "src/slurmd/common/proctrack.h"
+#include "src/common/slurm_acct_gather_profile.h"
 
 #include <fcntl.h>
 #include <signal.h>
@@ -76,16 +77,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "AcctGatherProfile NONE plugin";
 const char plugin_type[] = "acct_gather_Profile/none";
-const uint32_t plugin_version = 100;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -155,7 +152,19 @@ extern int acct_gather_profile_p_task_end(pid_t taskpid)
 	return SLURM_SUCCESS;
 }
 
-extern int acct_gather_profile_p_add_sample_data(uint32_t type, void* data)
+extern int acct_gather_profile_p_create_group(const char* name)
+{
+	return SLURM_SUCCESS;
+}
+
+extern int acct_gather_profile_p_create_dataset(
+	const char* name, int parent, acct_gather_profile_dataset_t *dataset)
+{
+	return SLURM_SUCCESS;
+}
+
+extern int acct_gather_profile_p_add_sample_data(int dataset_id, void* data,
+						 time_t sample_time)
 {
 	return SLURM_SUCCESS;
 }
@@ -164,3 +173,9 @@ extern void acct_gather_profile_p_conf_values(List *data)
 {
 	return;
 }
+
+extern bool acct_gather_profile_p_is_active(uint32_t type)
+{
+	return false;
+}
+
diff --git a/src/plugins/auth/Makefile.in b/src/plugins/auth/Makefile.in
index c0f2f3714..1439845a4 100644
--- a/src/plugins/auth/Makefile.in
+++ b/src/plugins/auth/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/auth/authd/Makefile.in b/src/plugins/auth/authd/Makefile.in
index 0d80470cd..90a5d5334 100644
--- a/src/plugins/auth/authd/Makefile.in
+++ b/src/plugins/auth/authd/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/auth/authd/auth_authd.c b/src/plugins/auth/authd/auth_authd.c
index 89fb74aa9..5040a575b 100644
--- a/src/plugins/auth/authd/auth_authd.c
+++ b/src/plugins/auth/authd/auth_authd.c
@@ -63,6 +63,7 @@
 
 #include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
+#include "src/common/slurm_time.h"
 
 /*
  * These variables are required by the generic plugin interface.  If they
@@ -86,14 +87,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incoming
- *                    messages that this plugin can accept
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "Brent Chun's authd authentication plugin";
 const char plugin_type[]        = "auth/authd";
-const uint32_t plugin_version   = 100;
-const uint32_t min_plug_version = 90;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 /* Default timeout. */
 static const int AUTHD_TTL = 2;
@@ -291,10 +290,6 @@ slurm_auth_unpack( Buf buf )
 	}
 
 	safe_unpack32( &version, buf );
-	if ( version < min_plug_version ) {
-		plugin_errno = SLURM_AUTH_VERSION;
-		return NULL;
-	}
 
 	/* Allocate a credential. */
 	cred = (slurm_auth_credential_t *)
@@ -330,8 +325,8 @@ slurm_auth_print( slurm_auth_credential_t *cred, FILE *fp )
 	verbose( "BEGIN AUTHD CREDENTIAL" );
 	verbose( "   UID: %u", cred->cred.uid );
 	verbose( "   GID: %u", cred->cred.gid );
-	verbose( "   Valid from: %s", slurm_ctime( &cred->cred.valid_from ) );
-	verbose( "   Valid to: %s", slurm_ctime( &cred->cred.valid_to ) );
+	verbose( "   Valid from: %s", slurm_ctime2( &cred->cred.valid_from ) );
+	verbose( "   Valid to: %s", slurm_ctime2( &cred->cred.valid_to ) );
 	verbose( "   Signature: 0x%02x%02x%02x%02x ...",
 			 cred->sig.data[ 0 ], cred->sig.data[ 1 ],
 			 cred->sig.data[ 2 ], cred->sig.data[ 3 ] );
diff --git a/src/plugins/auth/munge/Makefile.in b/src/plugins/auth/munge/Makefile.in
index 4cedc4e9c..4192cd7c6 100644
--- a/src/plugins/auth/munge/Makefile.in
+++ b/src/plugins/auth/munge/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/auth/munge/auth_munge.c b/src/plugins/auth/munge/auth_munge.c
index fe3550708..01623a3fc 100644
--- a/src/plugins/auth/munge/auth_munge.c
+++ b/src/plugins/auth/munge/auth_munge.c
@@ -71,6 +71,7 @@
 
 #include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
+#include "src/common/slurm_time.h"
 
 #define MUNGE_ERRNO_OFFSET	1000
 #define RETRY_COUNT		20
@@ -98,15 +99,13 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incoming
- *                    messages that this plugin can accept
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "auth plugin for Munge "
 				  "(http://code.google.com/p/munge/)";
 const char plugin_type[]       	= "auth/munge";
-const uint32_t plugin_version   = 10;
-const uint32_t min_plug_version = 10; /* minimum version accepted */
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 static int plugin_errno = SLURM_SUCCESS;
 static int bad_cred_test = -1;
@@ -437,10 +436,6 @@ slurm_auth_unpack( Buf buf )
 		return NULL;
 	}
 	safe_unpack32( &version, buf );
-	if ( version < min_plug_version ) {
-		plugin_errno = SLURM_AUTH_VERSION;
-		return NULL;
-	}
 
 	/* Allocate and initialize credential. */
 	cred = xmalloc(sizeof(*cred));
@@ -670,10 +665,10 @@ _print_cred_info(munge_info_t *mi)
 	xassert(mi != NULL);
 
 	if (mi->encoded > 0)
-		info ("ENCODED: %s", slurm_ctime_r(&mi->encoded, buf));
+		info ("ENCODED: %s", slurm_ctime2_r(&mi->encoded, buf));
 
 	if (mi->decoded > 0)
-		info ("DECODED: %s", slurm_ctime_r(&mi->decoded, buf));
+		info ("DECODED: %s", slurm_ctime2_r(&mi->decoded, buf));
 }
 
 
@@ -688,28 +683,21 @@ _print_cred(munge_ctx_t ctx)
 	cred_info_destroy(mi);
 }
 
-/* Convert AuthInfo to a socket path. Accepts two input formats:
- * 1) <path>		(Old format)
- * 2) socket=<path>[,]	(New format)
+/* Convert AuthInfo to a socket path. Accepts "socket=<path>[,]"
  * NOTE: Caller must xfree return value
  */
 static char *_auth_opts_to_socket(char *opts)
 {
 	char *socket = NULL, *sep, *tmp;
 
-	if (!opts)
-		return NULL;
-
-	tmp = strstr(opts, "socket=");
-	if (tmp) {	/* New format */
-		socket = xstrdup(tmp + 7);
-		sep = strchr(socket, ',');
-		if (sep)
-			sep[0] = '\0';
-	} else if (strchr(opts, '=')) {
-		;	/* New format, but socket not specified */
-	} else {
-		socket = xstrdup(opts);	/* Old format */
+	if (opts) {
+		tmp = strstr(opts, "socket=");
+		if (tmp) {	/* New format */
+			socket = xstrdup(tmp + 7);
+			sep = strchr(socket, ',');
+			if (sep)
+				sep[0] = '\0';
+		}
 	}
 
 	return socket;
diff --git a/src/plugins/auth/none/Makefile.in b/src/plugins/auth/none/Makefile.in
index 867cde6cb..13c76ec80 100644
--- a/src/plugins/auth/none/Makefile.in
+++ b/src/plugins/auth/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -272,6 +275,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -321,8 +326,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -341,6 +350,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -384,6 +396,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -407,6 +420,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/auth/none/auth_none.c b/src/plugins/auth/none/auth_none.c
index 499e93c70..304629394 100644
--- a/src/plugins/auth/none/auth_none.c
+++ b/src/plugins/auth/none/auth_none.c
@@ -89,14 +89,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incoming
- *                    messages that this plugin can accept
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Null authentication plugin";
 const char plugin_type[]       	= "auth/none";
-const uint32_t plugin_version   = 100;
-const uint32_t min_plug_version = 90;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 /*
  * An opaque type representing authentication credentials.  This type can be
@@ -297,10 +295,6 @@ slurm_auth_unpack( Buf buf )
 		return NULL;
 	}
 	safe_unpack32( &version, buf );
-	if ( version < min_plug_version ) {
-		plugin_errno = SLURM_AUTH_VERSION;
-		return NULL;
-	}
 
 	/* Allocate a new credential. */
 	cred = ((slurm_auth_credential_t *)
diff --git a/src/plugins/burst_buffer/Makefile.am b/src/plugins/burst_buffer/Makefile.am
new file mode 100644
index 000000000..2edd03649
--- /dev/null
+++ b/src/plugins/burst_buffer/Makefile.am
@@ -0,0 +1,3 @@
+# Makefile for burst buffer plugins
+
+SUBDIRS = cray generic
diff --git a/src/plugins/burst_buffer/Makefile.in b/src/plugins/burst_buffer/Makefile.in
new file mode 100644
index 000000000..6b29c33ca
--- /dev/null
+++ b/src/plugins/burst_buffer/Makefile.in
@@ -0,0 +1,778 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for burst buffer plugins
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/burst_buffer
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+	ctags-recursive dvi-recursive html-recursive info-recursive \
+	install-data-recursive install-dvi-recursive \
+	install-exec-recursive install-html-recursive \
+	install-info-recursive install-pdf-recursive \
+	install-ps-recursive install-recursive installcheck-recursive \
+	installdirs-recursive pdf-recursive ps-recursive \
+	tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive	\
+  distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+  $(RECURSIVE_TARGETS) \
+  $(RECURSIVE_CLEAN_TARGETS) \
+  $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+	distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+  dir0=`pwd`; \
+  sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+  sed_rest='s,^[^/]*/*,,'; \
+  sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+  sed_butlast='s,/*[^/]*$$,,'; \
+  while test -n "$$dir1"; do \
+    first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+    if test "$$first" != "."; then \
+      if test "$$first" = ".."; then \
+        dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+        dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+      else \
+        first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+        if test "$$first2" = "$$first"; then \
+          dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+        else \
+          dir2="../$$dir2"; \
+        fi; \
+        dir0="$$dir0"/"$$first"; \
+      fi; \
+    fi; \
+    dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+  done; \
+  reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+SUBDIRS = cray generic
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/plugins/burst_buffer/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu src/plugins/burst_buffer/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+#     (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+	@fail=; \
+	if $(am__make_keepgoing); then \
+	  failcom='fail=yes'; \
+	else \
+	  failcom='exit 1'; \
+	fi; \
+	dot_seen=no; \
+	target=`echo $@ | sed s/-recursive//`; \
+	case "$@" in \
+	  distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+	  *) list='$(SUBDIRS)' ;; \
+	esac; \
+	for subdir in $$list; do \
+	  echo "Making $$target in $$subdir"; \
+	  if test "$$subdir" = "."; then \
+	    dot_seen=yes; \
+	    local_target="$$target-am"; \
+	  else \
+	    local_target="$$target"; \
+	  fi; \
+	  ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+	  || eval $$failcom; \
+	done; \
+	if test "$$dot_seen" = "no"; then \
+	  $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+	fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+	  include_option=--etags-include; \
+	  empty_fix=.; \
+	else \
+	  include_option=--include; \
+	  empty_fix=; \
+	fi; \
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    test ! -f $$subdir/TAGS || \
+	      set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+	  fi; \
+	done; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+	@list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    $(am__make_dryrun) \
+	      || test -d "$(distdir)/$$subdir" \
+	      || $(MKDIR_P) "$(distdir)/$$subdir" \
+	      || exit 1; \
+	    dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+	    $(am__relativize); \
+	    new_distdir=$$reldir; \
+	    dir1=$$subdir; dir2="$(top_distdir)"; \
+	    $(am__relativize); \
+	    new_top_distdir=$$reldir; \
+	    echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+	    echo "     am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+	    ($(am__cd) $$subdir && \
+	      $(MAKE) $(AM_MAKEFLAGS) \
+	        top_distdir="$$new_top_distdir" \
+	        distdir="$$new_distdir" \
+		am__remove_distdir=: \
+		am__skip_length_check=: \
+		am__skip_mode_fix=: \
+	        distdir) \
+	      || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-recursive
+all-am: Makefile
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-recursive
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-recursive
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+	check-am clean clean-generic clean-libtool cscopelist-am ctags \
+	ctags-am distclean distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	installdirs-am maintainer-clean maintainer-clean-generic \
+	mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \
+	ps ps-am tags tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/burst_buffer/common/Makefile.am b/src/plugins/burst_buffer/common/Makefile.am
new file mode 100644
index 000000000..167d84db9
--- /dev/null
+++ b/src/plugins/burst_buffer/common/Makefile.am
@@ -0,0 +1,13 @@
+# Makefile.am for burst_buffer/common
+
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+
+AM_CPPFLAGS = -I$(top_srcdir)
+
+# making a .la
+
+noinst_LTLIBRARIES = libburst_buffer_common.la
+libburst_buffer_common_la_SOURCES =	\
+	burst_buffer_common.c		\
+	burst_buffer_common.h
diff --git a/src/plugins/burst_buffer/common/Makefile.in b/src/plugins/burst_buffer/common/Makefile.in
new file mode 100644
index 000000000..3e23570d3
--- /dev/null
+++ b/src/plugins/burst_buffer/common/Makefile.in
@@ -0,0 +1,756 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile.am for burst_buffer/common
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/burst_buffer/common
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+	$(top_srcdir)/auxdir/depcomp
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+LTLIBRARIES = $(noinst_LTLIBRARIES)
+libburst_buffer_common_la_LIBADD =
+am_libburst_buffer_common_la_OBJECTS = burst_buffer_common.lo
+libburst_buffer_common_la_OBJECTS =  \
+	$(am_libburst_buffer_common_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 = 
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo "  CC      " $@;
+am__v_CC_1 = 
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo "  CCLD    " $@;
+am__v_CCLD_1 = 
+SOURCES = $(libburst_buffer_common_la_SOURCES)
+DIST_SOURCES = $(libburst_buffer_common_la_SOURCES)
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+AM_CPPFLAGS = -I$(top_srcdir)
+
+# making a .la
+noinst_LTLIBRARIES = libburst_buffer_common.la
+libburst_buffer_common_la_SOURCES = \
+	burst_buffer_common.c		\
+	burst_buffer_common.h
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/burst_buffer/common/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/plugins/burst_buffer/common/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstLTLIBRARIES:
+	-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+	@list='$(noinst_LTLIBRARIES)'; \
+	locs=`for p in $$list; do echo $$p; done | \
+	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+	      sort -u`; \
+	test -z "$$locs" || { \
+	  echo rm -f $${locs}; \
+	  rm -f $${locs}; \
+	}
+
+libburst_buffer_common.la: $(libburst_buffer_common_la_OBJECTS) $(libburst_buffer_common_la_DEPENDENCIES) $(EXTRA_libburst_buffer_common_la_DEPENDENCIES) 
+	$(AM_V_CCLD)$(LINK)  $(libburst_buffer_common_la_OBJECTS) $(libburst_buffer_common_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/burst_buffer_common.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+	-test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-noinstLTLIBRARIES cscopelist-am ctags \
+	ctags-am distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-compile \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	tags tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/burst_buffer/common/burst_buffer_common.c b/src/plugins/burst_buffer/common/burst_buffer_common.c
new file mode 100644
index 000000000..1dbf6e47d
--- /dev/null
+++ b/src/plugins/burst_buffer/common/burst_buffer_common.c
@@ -0,0 +1,1527 @@
+/*****************************************************************************\
+ *  burst_buffer_common.c - Common logic for managing burst_buffers
+ *
+ *  NOTE: These functions are designed so they can be used by multiple burst
+ *  buffer plugins at the same time (e.g. you might provide users access to
+ *  both burst_buffer/cray and burst_buffer/generic on the same system), so
+ *  the state information is largely in the individual plugin and passed as
+ *  a pointer argument to these functions.
+ *****************************************************************************
+ *  Copyright (C) 2014-2015 SchedMD LLC.
+ *  Written by Morris Jette <jette@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if     HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#define _GNU_SOURCE	/* For POLLRDHUP */
+#include <fcntl.h>
+#include <poll.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurmdb.h"
+
+#include "src/common/assoc_mgr.h"
+#include "src/common/list.h"
+#include "src/common/pack.h"
+#include "src/common/parse_config.h"
+#include "src/common/slurm_accounting_storage.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/timers.h"
+#include "src/common/uid.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/slurmctld/locks.h"
+#include "src/slurmctld/slurmctld.h"
+
+#include "burst_buffer_common.h"
+
+/* For possible future use by burst_buffer/generic */
+#define _SUPPORT_GRES 0
+
+static void	_bb_job_del2(bb_job_t *bb_job);
+static uid_t *	_parse_users(char *buf);
+static int	_persist_match(void *x, void *key);
+static void	_persist_purge(void *x);
+static char *	_print_users(uid_t *buf);
+
+/* Translate comma-delimited list of users into a UID array,
+ * Return value must be xfreed */
+static uid_t *_parse_users(char *buf)
+{
+	char *tmp, *tok, *save_ptr = NULL;
+	int inx = 0, array_size;
+	uid_t *user_array = NULL;
+
+	if (!buf)
+		return user_array;
+	tmp = xstrdup(buf);
+	array_size = 1;
+	user_array = xmalloc(sizeof(uid_t) * array_size);
+	tok = strtok_r(tmp, ",", &save_ptr);
+	while (tok) {
+		if ((uid_from_string(tok, user_array + inx) == -1) ||
+		    (user_array[inx] == 0)) {
+			error("%s: ignoring invalid user: %s", __func__, tok);
+		} else {
+			if (++inx >= array_size) {
+				array_size *= 2;
+				user_array = xrealloc(user_array,
+						      sizeof(uid_t)*array_size);
+			}
+		}
+		tok = strtok_r(NULL, ",", &save_ptr);
+	}
+	xfree(tmp);
+	return user_array;
+}
+
+/* Translate an array of (zero terminated) UIDs into a string with comma
+ * delimited UIDs
+ * Return value must be xfreed */
+static char *_print_users(uid_t *buf)
+{
+	char *user_elem, *user_str = NULL;
+	int i;
+
+	if (!buf)
+		return user_str;
+	for (i = 0; buf[i]; i++) {
+		user_elem = uid_to_string(buf[i]);
+		if (!user_elem)
+			continue;
+		if (user_str)
+			xstrcat(user_str, ",");
+		xstrcat(user_str, user_elem);
+		xfree(user_elem);
+	}
+	return user_str;
+}
+
+/* Allocate burst buffer hash tables */
+extern void bb_alloc_cache(bb_state_t *state_ptr)
+{
+	state_ptr->bb_ahash = xmalloc(sizeof(bb_alloc_t *) * BB_HASH_SIZE);
+	state_ptr->bb_jhash = xmalloc(sizeof(bb_job_t *)   * BB_HASH_SIZE);
+	state_ptr->bb_uhash = xmalloc(sizeof(bb_user_t *)  * BB_HASH_SIZE);
+}
+
+/* Clear all cached burst buffer records, freeing all memory. */
+extern void bb_clear_cache(bb_state_t *state_ptr)
+{
+	bb_alloc_t *bb_current,   *bb_next;
+	bb_job_t   *job_current,  *job_next;
+	bb_user_t  *user_current, *user_next;
+	int i;
+
+	if (state_ptr->bb_ahash) {
+		for (i = 0; i < BB_HASH_SIZE; i++) {
+			bb_current = state_ptr->bb_ahash[i];
+			while (bb_current) {
+				xassert(bb_current->magic == BB_ALLOC_MAGIC);
+				bb_next = bb_current->next;
+				bb_free_alloc_buf(bb_current);
+				bb_current = bb_next;
+			}
+		}
+		xfree(state_ptr->bb_ahash);
+	}
+
+	if (state_ptr->bb_jhash) {
+		for (i = 0; i < BB_HASH_SIZE; i++) {
+			job_current = state_ptr->bb_jhash[i];
+			while (job_current) {
+				xassert(job_current->magic == BB_JOB_MAGIC);
+				job_next = job_current->next;
+				_bb_job_del2(job_current);
+				job_current = job_next;
+			}
+		}
+		xfree(state_ptr->bb_jhash);
+	}
+
+	if (state_ptr->bb_uhash) {
+		for (i = 0; i < BB_HASH_SIZE; i++) {
+			user_current = state_ptr->bb_uhash[i];
+			while (user_current) {
+				xassert(user_current->magic == BB_USER_MAGIC);
+				user_next = user_current->next;
+				xfree(user_current);
+				user_current = user_next;
+			}
+		}
+		xfree(state_ptr->bb_uhash);
+	}
+
+	xfree(state_ptr->name);
+	FREE_NULL_LIST(state_ptr->persist_resv_rec);
+}
+
+/* Clear configuration parameters, free memory
+ * config_ptr IN - Initial configuration to be cleared
+ * fini IN - True if shutting down, do more complete clean-up */
+extern void bb_clear_config(bb_config_t *config_ptr, bool fini)
+{
+	int i;
+
+	xassert(config_ptr);
+	xfree(config_ptr->allow_users);
+	xfree(config_ptr->allow_users_str);
+	xfree(config_ptr->create_buffer);
+	config_ptr->debug_flag = false;
+	xfree(config_ptr->default_pool);
+	xfree(config_ptr->deny_users);
+	xfree(config_ptr->deny_users_str);
+	xfree(config_ptr->destroy_buffer);
+	xfree(config_ptr->get_sys_state);
+	config_ptr->granularity = 1;
+	if (fini) {
+		for (i = 0; i < config_ptr->gres_cnt; i++)
+			xfree(config_ptr->gres_ptr[i].name);
+		xfree(config_ptr->gres_ptr);
+		config_ptr->gres_cnt = 0;
+	} else {
+		for (i = 0; i < config_ptr->gres_cnt; i++)
+			config_ptr->gres_ptr[i].avail_cnt = 0;
+	}
+	config_ptr->stage_in_timeout = 0;
+	config_ptr->stage_out_timeout = 0;
+	xfree(config_ptr->start_stage_in);
+	xfree(config_ptr->start_stage_out);
+	xfree(config_ptr->stop_stage_in);
+	xfree(config_ptr->stop_stage_out);
+}
+
+/* Find a per-job burst buffer record for a specific job.
+ * If not found, return NULL. */
+extern bb_alloc_t *bb_find_alloc_rec(bb_state_t *state_ptr,
+				     struct job_record *job_ptr)
+{
+	bb_alloc_t *bb_alloc = NULL;
+	char jobid_buf[32];
+
+	xassert(job_ptr);
+	xassert(state_ptr);
+	bb_alloc = state_ptr->bb_ahash[job_ptr->user_id % BB_HASH_SIZE];
+	while (bb_alloc) {
+		if (bb_alloc->job_id == job_ptr->job_id) {
+			if (bb_alloc->user_id == job_ptr->user_id) {
+				xassert(bb_alloc->magic == BB_ALLOC_MAGIC);
+				return bb_alloc;
+			}
+			error("%s: Slurm state inconsistent with burst "
+			      "buffer. %s has UserID mismatch (%u != %u)",
+			      __func__,
+			      jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)),
+			      bb_alloc->user_id, job_ptr->user_id);
+			/* This has been observed when slurmctld crashed and
+			 * the job state recovered was missing some jobs
+			 * which already had burst buffers configured. */
+		}
+		bb_alloc = bb_alloc->next;
+	}
+	return bb_alloc;
+}
+
+/* Find a burst buffer record by name
+ * bb_name IN - Buffer's name
+ * user_id IN - Possible user ID, advisory use only
+ * RET the buffer or NULL if not found */
+extern bb_alloc_t *bb_find_name_rec(char *bb_name, uint32_t user_id,
+				    bb_state_t *state_ptr)
+{
+	bb_alloc_t *bb_alloc = NULL;
+	int i, hash_inx = user_id % BB_HASH_SIZE;
+
+	/* Try this user ID first */
+	bb_alloc = state_ptr->bb_ahash[hash_inx];
+	while (bb_alloc) {
+		if (!xstrcmp(bb_alloc->name, bb_name))
+			return bb_alloc;
+		bb_alloc = bb_alloc->next;
+	}
+
+	/* Now search all other records */
+	for (i = 0; i < BB_HASH_SIZE; i++) {
+		if (i == hash_inx)
+			continue;
+		bb_alloc = state_ptr->bb_ahash[i];
+		while (bb_alloc) {
+			if (!xstrcmp(bb_alloc->name, bb_name)) {
+				xassert(bb_alloc->magic == BB_ALLOC_MAGIC);
+				return bb_alloc;
+			}
+			bb_alloc = bb_alloc->next;
+		}
+	}
+
+	return bb_alloc;
+}
+
+/* Find a per-user burst buffer record for a specific user ID */
+extern bb_user_t *bb_find_user_rec(uint32_t user_id, bb_state_t *state_ptr)
+{
+	int inx = user_id % BB_HASH_SIZE;
+	bb_user_t *user_ptr;
+
+	xassert(state_ptr);
+	xassert(state_ptr->bb_uhash);
+	user_ptr = state_ptr->bb_uhash[inx];
+	while (user_ptr) {
+		if (user_ptr->user_id == user_id)
+			return user_ptr;
+		user_ptr = user_ptr->next;
+	}
+	user_ptr = xmalloc(sizeof(bb_user_t));
+	xassert((user_ptr->magic = BB_USER_MAGIC));	/* Sets value */
+	user_ptr->next = state_ptr->bb_uhash[inx];
+	/* user_ptr->size = 0;	initialized by xmalloc */
+	user_ptr->user_id = user_id;
+	state_ptr->bb_uhash[inx] = user_ptr;
+	return user_ptr;
+}
+
+#if _SUPPORT_GRES
+static uint64_t _atoi(char *tok)
+{
+	char *end_ptr = NULL;
+	int64_t size_i;
+	uint64_t size_u = 0;
+
+	size_i = (int64_t) strtoll(tok, &end_ptr, 10);
+	if (size_i > 0) {
+		size_u = (uint64_t) size_i;
+		if ((end_ptr[0] == 'k') || (end_ptr[0] == 'K')) {
+			size_u = size_u * 1024;
+		} else if ((end_ptr[0] == 'm') || (end_ptr[0] == 'M')) {
+			size_u = size_u * 1024 * 1024;
+		} else if ((end_ptr[0] == 'g') || (end_ptr[0] == 'G')) {
+			size_u = size_u * 1024 * 1024 * 1024;
+		} else if ((end_ptr[0] == 't') || (end_ptr[0] == 'T')) {
+			size_u = size_u * 1024 * 1024 * 1024 * 1024;
+		} else if ((end_ptr[0] == 'p') || (end_ptr[0] == 'P')) {
+			size_u = size_u * 1024 * 1024 * 1024 * 1024 * 1024;
+		}
+	}
+	return size_u;
+}
+#endif
+
+/* Set the bb_state's tres_id and tres_pos for limit enforcement.
+ * Value is set to -1 if not found. */
+extern void bb_set_tres_pos(bb_state_t *state_ptr)
+{
+	slurmdb_tres_rec_t tres_rec;
+	int inx;
+
+	xassert(state_ptr);
+	memset(&tres_rec, 0, sizeof(slurmdb_tres_rec_t));
+	tres_rec.type = "bb";
+	tres_rec.name = state_ptr->name;
+	inx = assoc_mgr_find_tres_pos(&tres_rec, false);
+	if (inx == -1) {
+		debug("%s: Tres %s not found by assoc_mgr",
+		       __func__, state_ptr->name);
+	} else {
+		state_ptr->tres_id  = assoc_mgr_tres_array[inx]->id;
+		state_ptr->tres_pos = inx;
+	}
+}
+
+/* Load and process configuration parameters */
+extern void bb_load_config(bb_state_t *state_ptr, char *plugin_type)
+{
+	s_p_hashtbl_t *bb_hashtbl = NULL;
+	char *bb_conf, *tmp = NULL, *value;
+#if _SUPPORT_GRES
+	char *colon, *save_ptr = NULL, *tok;
+	uint32_t gres_cnt;
+#endif
+	int fd, i;
+	static s_p_options_t bb_options[] = {
+		{"AllowUsers", S_P_STRING},
+		{"CreateBuffer", S_P_STRING},
+		{"DefaultPool", S_P_STRING},
+		{"DenyUsers", S_P_STRING},
+		{"DestroyBuffer", S_P_STRING},
+		{"Flags", S_P_STRING},
+		{"GetSysState", S_P_STRING},
+		{"Granularity", S_P_STRING},
+/*		{"Gres", S_P_STRING},	*/
+		{"StageInTimeout", S_P_UINT32},
+		{"StageOutTimeout", S_P_UINT32},
+		{"StartStageIn", S_P_STRING},
+		{"StartStageOut", S_P_STRING},
+		{"StopStageIn", S_P_STRING},
+		{"StopStageOut", S_P_STRING},
+		{NULL}
+	};
+
+	xfree(state_ptr->name);
+	if (plugin_type) {
+		tmp = strchr(plugin_type, '/');
+		if (tmp)
+			tmp++;
+		else
+			tmp = plugin_type;
+		state_ptr->name = xstrdup(tmp);
+	}
+
+	bb_clear_config(&state_ptr->bb_config, false);
+	if (slurm_get_debug_flags() & DEBUG_FLAG_BURST_BUF)
+		state_ptr->bb_config.debug_flag = true;
+
+	/* First look for "burst_buffer.conf" then with "type" field,
+	 * for example "burst_buffer_cray.conf" */
+	bb_conf = get_extra_conf_path("burst_buffer.conf");
+	fd = open(bb_conf, 0);
+	if (fd >= 0) {
+		close(fd);
+	} else {
+		char *new_path = NULL;
+		xfree(bb_conf);
+		xstrfmtcat(new_path, "burst_buffer_%s.conf", state_ptr->name);
+		bb_conf = get_extra_conf_path(new_path);
+		fd = open(bb_conf, 0);
+		if (fd < 0) {
+			fatal("%s: Unable to find configuration file %s or "
+			      "burst_buffer.conf", __func__, new_path);
+		}
+		xfree(new_path);
+	}
+
+	bb_hashtbl = s_p_hashtbl_create(bb_options);
+	if (s_p_parse_file(bb_hashtbl, NULL, bb_conf, false) == SLURM_ERROR) {
+		fatal("%s: something wrong with opening/reading %s: %m",
+		      __func__, bb_conf);
+	}
+	if (s_p_get_string(&state_ptr->bb_config.allow_users_str, "AllowUsers",
+			   bb_hashtbl)) {
+		state_ptr->bb_config.allow_users = _parse_users(
+					state_ptr->bb_config.allow_users_str);
+	}
+	s_p_get_string(&state_ptr->bb_config.create_buffer, "CreateBuffer",
+		       bb_hashtbl);
+	s_p_get_string(&state_ptr->bb_config.default_pool, "DefaultPool",
+		       bb_hashtbl);
+	if (s_p_get_string(&state_ptr->bb_config.deny_users_str, "DenyUsers",
+			   bb_hashtbl)) {
+		state_ptr->bb_config.deny_users = _parse_users(
+					state_ptr->bb_config.deny_users_str);
+	}
+	s_p_get_string(&state_ptr->bb_config.destroy_buffer, "DestroyBuffer",
+		       bb_hashtbl);
+
+	if (s_p_get_string(&tmp, "Flags", bb_hashtbl)) {
+		state_ptr->bb_config.flags = slurm_bb_str2flags(tmp);
+		xfree(tmp);
+	}
+	/* By default, disable persistent buffer creation by normal users */
+	if ((state_ptr->bb_config.flags & BB_FLAG_ENABLE_PERSISTENT) == 0)
+		state_ptr->bb_config.flags |= BB_FLAG_DISABLE_PERSISTENT;
+
+	s_p_get_string(&state_ptr->bb_config.get_sys_state, "GetSysState",
+		       bb_hashtbl);
+	if (s_p_get_string(&tmp, "Granularity", bb_hashtbl)) {
+		state_ptr->bb_config.granularity = bb_get_size_num(tmp, 1);
+		xfree(tmp);
+		if (state_ptr->bb_config.granularity == 0) {
+			error("%s: Granularity=0 is invalid", __func__);
+			state_ptr->bb_config.granularity = 1;
+		}
+	}
+#if _SUPPORT_GRES
+	if (s_p_get_string(&tmp, "Gres", bb_hashtbl)) {
+		tok = strtok_r(tmp, ",", &save_ptr);
+		while (tok) {
+			colon = strchr(tok, ':');
+			if (colon) {
+				colon[0] = '\0';
+				gres_cnt = _atoi(colon+1);
+			} else
+				gres_cnt = 1;
+			state_ptr->bb_config.gres_ptr = xrealloc(
+				state_ptr->bb_config.gres_ptr,
+				sizeof(burst_buffer_gres_t) *
+				(state_ptr->bb_config.gres_cnt + 1));
+			state_ptr->bb_config.
+				gres_ptr[state_ptr->bb_config.gres_cnt].name =
+				xstrdup(tok);
+			state_ptr->bb_config.
+				gres_ptr[state_ptr->bb_config.gres_cnt].
+				avail_cnt = gres_cnt;
+			state_ptr->bb_config.gres_cnt++;
+			tok = strtok_r(NULL, ",", &save_ptr);
+		}
+		xfree(tmp);
+	}
+#endif
+	s_p_get_uint32(&state_ptr->bb_config.stage_in_timeout, "StageInTimeout",
+		       bb_hashtbl);
+	s_p_get_uint32(&state_ptr->bb_config.stage_out_timeout,
+		       "StageOutTimeout", bb_hashtbl);
+	s_p_get_string(&state_ptr->bb_config.start_stage_in, "StartStageIn",
+		       bb_hashtbl);
+	s_p_get_string(&state_ptr->bb_config.start_stage_out, "StartStageOut",
+			    bb_hashtbl);
+	s_p_get_string(&state_ptr->bb_config.stop_stage_in, "StopStageIn",
+		       bb_hashtbl);
+	s_p_get_string(&state_ptr->bb_config.stop_stage_out, "StopStageOut",
+		       bb_hashtbl);
+
+	s_p_hashtbl_destroy(bb_hashtbl);
+	xfree(bb_conf);
+
+	if (state_ptr->bb_config.debug_flag) {
+		value = _print_users(state_ptr->bb_config.allow_users);
+		info("%s: AllowUsers:%s",  __func__, value);
+		xfree(value);
+		info("%s: CreateBuffer:%s",  __func__,
+		     state_ptr->bb_config.create_buffer);
+		info("%s: DefaultPool:%s",  __func__,
+		     state_ptr->bb_config.default_pool);
+		value = _print_users(state_ptr->bb_config.deny_users);
+		info("%s: DenyUsers:%s",  __func__, value);
+		xfree(value);
+		info("%s: DestroyBuffer:%s",  __func__,
+		     state_ptr->bb_config.destroy_buffer);
+		info("%s: GetSysState:%s",  __func__,
+		     state_ptr->bb_config.get_sys_state);
+		info("%s: Granularity:%"PRIu64"",  __func__,
+		     state_ptr->bb_config.granularity);
+		for (i = 0; i < state_ptr->bb_config.gres_cnt; i++) {
+			info("%s: Gres[%d]:%s:%"PRIu64"", __func__, i,
+			     state_ptr->bb_config.gres_ptr[i].name,
+			     state_ptr->bb_config.gres_ptr[i].avail_cnt);
+		}
+		info("%s: StageInTimeout:%u", __func__,
+		     state_ptr->bb_config.stage_in_timeout);
+		info("%s: StageOutTimeout:%u", __func__,
+		     state_ptr->bb_config.stage_out_timeout);
+		info("%s: StartStageIn:%s",  __func__,
+		     state_ptr->bb_config.start_stage_in);
+		info("%s: StartStageOut:%s",  __func__,
+		     state_ptr->bb_config.start_stage_out);
+		info("%s: StopStageIn:%s",  __func__,
+		     state_ptr->bb_config.stop_stage_in);
+		info("%s: StopStageOut:%s",  __func__,
+		     state_ptr->bb_config.stop_stage_out);
+	}
+}
+
+/* Pack individual burst buffer records into a buffer */
+extern int bb_pack_bufs(uid_t uid, bb_state_t *state_ptr, Buf buffer,
+			uint16_t protocol_version)
+{
+	int i, j, rec_count = 0;
+	struct bb_alloc *bb_alloc;
+	int eof, offset;
+
+	xassert(state_ptr);
+	offset = get_buf_offset(buffer);
+	pack32(rec_count,  buffer);
+	if (!state_ptr->bb_ahash)
+		return rec_count;
+
+	for (i = 0; i < BB_HASH_SIZE; i++) {
+		bb_alloc = state_ptr->bb_ahash[i];
+		while (bb_alloc) {
+			if ((uid == 0) || (uid == bb_alloc->user_id)) {
+				packstr(bb_alloc->account,      buffer);
+				pack32(bb_alloc->array_job_id,  buffer);
+				pack32(bb_alloc->array_task_id, buffer);
+				pack_time(bb_alloc->create_time, buffer);
+				pack32(bb_alloc->gres_cnt, buffer);
+				for (j = 0; j < bb_alloc->gres_cnt; j++) {
+					packstr(bb_alloc->gres_ptr[j].name,
+						buffer);
+					pack64(bb_alloc->gres_ptr[j].used_cnt,
+					       buffer);
+				}
+				pack32(bb_alloc->job_id,        buffer);
+				packstr(bb_alloc->name,         buffer);
+				packstr(bb_alloc->partition,    buffer);
+				packstr(bb_alloc->qos,          buffer);
+				pack64(bb_alloc->size,          buffer);
+				pack16(bb_alloc->state,         buffer);
+				pack32(bb_alloc->user_id,       buffer);
+				rec_count++;
+			}
+			bb_alloc = bb_alloc->next;
+		}
+	}
+	if (rec_count != 0) {
+		eof = get_buf_offset(buffer);
+		set_buf_offset(buffer, offset);
+		pack32(rec_count, buffer);
+		set_buf_offset(buffer, eof);
+	}
+
+	return rec_count;
+}
+
+/* Pack state and configuration parameters into a buffer */
+extern void bb_pack_state(bb_state_t *state_ptr, Buf buffer,
+			  uint16_t protocol_version)
+{
+	bb_config_t *config_ptr = &state_ptr->bb_config;
+	int i;
+
+	packstr(config_ptr->allow_users_str, buffer);
+	packstr(config_ptr->create_buffer,   buffer);
+	packstr(config_ptr->default_pool,    buffer);
+	packstr(config_ptr->deny_users_str,  buffer);
+	packstr(config_ptr->destroy_buffer,  buffer);
+	pack32(config_ptr->flags,            buffer);
+	packstr(config_ptr->get_sys_state,   buffer);
+	pack64(config_ptr->granularity,      buffer);
+	pack32(config_ptr->gres_cnt,         buffer);
+	for (i = 0; i < config_ptr->gres_cnt; i++) {
+		packstr(config_ptr->gres_ptr[i].name, buffer);
+		pack64(config_ptr->gres_ptr[i].avail_cnt, buffer);
+		pack64(config_ptr->gres_ptr[i].used_cnt, buffer);
+	}
+	packstr(config_ptr->start_stage_in,  buffer);
+	packstr(config_ptr->start_stage_out, buffer);
+	packstr(config_ptr->stop_stage_in,   buffer);
+	packstr(config_ptr->stop_stage_out,  buffer);
+	pack32(config_ptr->stage_in_timeout, buffer);
+	pack32(config_ptr->stage_out_timeout,buffer);
+	pack64(state_ptr->total_space,       buffer);
+	pack64(state_ptr->used_space,        buffer);
+}
+
+/* Pack individual burst buffer usage records into a buffer (used for limits) */
+extern int bb_pack_usage(uid_t uid, bb_state_t *state_ptr, Buf buffer,
+			 uint16_t protocol_version)
+{
+	int i, rec_count = 0;
+	bb_user_t *bb_usage;
+	int eof, offset;
+
+	xassert(state_ptr);
+	offset = get_buf_offset(buffer);
+	pack32(rec_count,  buffer);
+	if (!state_ptr->bb_uhash)
+		return rec_count;
+
+	for (i = 0; i < BB_HASH_SIZE; i++) {
+		bb_usage = state_ptr->bb_uhash[i];
+		while (bb_usage) {
+			if (((uid == 0) || (uid == bb_usage->user_id)) &&
+			    (bb_usage->size != 0)) {
+				pack64(bb_usage->size,          buffer);
+				pack32(bb_usage->user_id,       buffer);
+				rec_count++;
+			}
+			bb_usage = bb_usage->next;
+		}
+	}
+	if (rec_count != 0) {
+		eof = get_buf_offset(buffer);
+		set_buf_offset(buffer, offset);
+		pack32(rec_count, buffer);
+		set_buf_offset(buffer, eof);
+	}
+
+	return rec_count;
+}
+
+/* Translate a burst buffer size specification in string form to numeric form,
+ * recognizing various suffixes (KB, MB, GB, TB, PB, and Nodes). Default units
+ * are bytes. */
+extern uint64_t bb_get_size_num(char *tok, uint64_t granularity)
+{
+	char *end_ptr = NULL;
+	int64_t bb_size_i;
+	uint64_t bb_size_u = 0;
+
+	bb_size_i = (int64_t) strtoll(tok, &end_ptr, 10);
+	if (bb_size_i > 0) {
+		bb_size_u = (uint64_t) bb_size_i;
+		if ((end_ptr[0] == 'k') || (end_ptr[0] == 'K')) {
+			bb_size_u *= 1024;
+		} else if ((end_ptr[0] == 'm') || (end_ptr[0] == 'M')) {
+			bb_size_u *= ((uint64_t)1024 * 1024);
+		} else if ((end_ptr[0] == 'g') || (end_ptr[0] == 'G')) {
+			bb_size_u *= ((uint64_t)1024 * 1024 * 1024);
+		} else if ((end_ptr[0] == 't') || (end_ptr[0] == 'T')) {
+			bb_size_u *= ((uint64_t)1024 * 1024 * 1024 * 1024);
+		} else if ((end_ptr[0] == 'p') || (end_ptr[0] == 'P')) {
+			bb_size_u *= ((uint64_t)1024 * 1024 * 1024 * 1024
+				      * 1024);
+		} else if ((end_ptr[0] == 'n') || (end_ptr[0] == 'N')) {
+			bb_size_u |= BB_SIZE_IN_NODES;
+			granularity = 1;
+		}
+	}
+
+	if (granularity > 1) {
+		bb_size_u = ((bb_size_u + granularity - 1) / granularity) *
+			    granularity;
+	}
+
+	return bb_size_u;
+}
+
+/* Translate a burst buffer size specification in numeric form to string form,
+ * recognizing various suffixes (KB, MB, GB, TB, PB, and Nodes). Default units
+ * are bytes. */
+extern char *bb_get_size_str(uint64_t size)
+{
+	static char size_str[64];
+
+	if (size == 0) {
+		snprintf(size_str, sizeof(size_str), "%"PRIu64, size);
+	} else if (size & BB_SIZE_IN_NODES) {
+		size &= (~BB_SIZE_IN_NODES);
+		snprintf(size_str, sizeof(size_str), "%"PRIu64"N", size);
+	} else if ((size % ((uint64_t)1024 * 1024 * 1024 * 1024 * 1024)) == 0) {
+		size /= ((uint64_t)1024 * 1024 * 1024 * 1024 * 1024);
+		snprintf(size_str, sizeof(size_str), "%"PRIu64"PB", size);
+	} else if ((size % ((uint64_t)1024 * 1024 * 1024 * 1024)) == 0) {
+		size /= ((uint64_t)1024 * 1024 * 1024 * 1024);
+		snprintf(size_str, sizeof(size_str), "%"PRIu64"TB", size);
+	} else if ((size % ((uint64_t)1024 * 1024 * 1024)) == 0) {
+		size /= ((uint64_t)1024 * 1024 * 1024);
+		snprintf(size_str, sizeof(size_str), "%"PRIu64"GB", size);
+	} else if ((size % ((uint64_t)1024 * 1024)) == 0) {
+		size /= ((uint64_t)1024 * 1024);
+		snprintf(size_str, sizeof(size_str), "%"PRIu64"MB", size);
+	} else if ((size % ((uint64_t)1024)) == 0) {
+		size /= ((uint64_t)1024);
+		snprintf(size_str, sizeof(size_str), "%"PRIu64"KB", size);
+	} else {
+		snprintf(size_str, sizeof(size_str), "%"PRIu64, size);
+	}
+
+	return size_str;
+}
+
+/* Round up a number based upon some granularity */
+extern uint64_t bb_granularity(uint64_t start_size, uint64_t granularity)
+{
+	if (start_size) {
+		start_size = start_size + granularity - 1;
+		start_size /= granularity;
+		start_size *= granularity;
+	}
+	return start_size;
+}
+
+extern void bb_job_queue_del(void *x)
+{
+	xfree(x);
+}
+
+/* Sort job queue by expected start time */
+extern int bb_job_queue_sort(void *x, void *y)
+{
+	bb_job_queue_rec_t *job_rec1 = *(bb_job_queue_rec_t **) x;
+	bb_job_queue_rec_t *job_rec2 = *(bb_job_queue_rec_t **) y;
+	struct job_record *job_ptr1 = job_rec1->job_ptr;
+	struct job_record *job_ptr2 = job_rec2->job_ptr;
+
+	if (job_ptr1->start_time > job_ptr2->start_time)
+		return 1;
+	if (job_ptr1->start_time < job_ptr2->start_time)
+		return -1;
+	return 0;
+}
+
+/* Sort preempt_bb_recs in order of DECREASING use_time.
+ * list_sort() comparator: x and y are pointers to list-element pointers. */
+extern int bb_preempt_queue_sort(void *x, void *y)
+{
+	struct preempt_bb_recs *bb_ptr1 = *(struct preempt_bb_recs **) x;
+	struct preempt_bb_recs *bb_ptr2 = *(struct preempt_bb_recs **) y;
+
+	if (bb_ptr1->use_time > bb_ptr2->use_time)
+		return -1;
+	if (bb_ptr1->use_time < bb_ptr2->use_time)
+		return 1;
+	return 0;
+}
+
+/* For each burst buffer record, set the use_time to the time at which its
+ * use is expected to begin (i.e. each job's expected start time) */
+extern void bb_set_use_time(bb_state_t *state_ptr)
+{
+	struct job_record *job_ptr;
+	bb_alloc_t *bb_alloc = NULL;
+	time_t now = time(NULL);
+	int i;
+
+	state_ptr->next_end_time = now + 60 * 60; /* Start estimate now+1hour */
+	for (i = 0; i < BB_HASH_SIZE; i++) {
+		bb_alloc = state_ptr->bb_ahash[i];
+		while (bb_alloc) {
+			if (bb_alloc->job_id &&
+			    ((bb_alloc->state == BB_STATE_STAGING_IN) ||
+			     (bb_alloc->state == BB_STATE_STAGED_IN))) {
+				job_ptr = find_job_record(bb_alloc->job_id);
+				if (!job_ptr) {
+					error("%s: job %u with allocated burst "
+					      "buffers not found",
+					      __func__, bb_alloc->job_id);
+					bb_alloc->use_time = now + 24 * 60 * 60;
+				} else if (job_ptr->start_time) {
+					bb_alloc->end_time = job_ptr->end_time;
+					bb_alloc->use_time = job_ptr->start_time;
+				} else {
+					/* Unknown start time */
+					bb_alloc->use_time = now + 60 * 60;
+				}
+			} else if (bb_alloc->job_id) {
+				job_ptr = find_job_record(bb_alloc->job_id);
+				if (job_ptr)
+					bb_alloc->end_time = job_ptr->end_time;
+			} else {
+				bb_alloc->use_time = now;
+			}
+			if (bb_alloc->end_time && bb_alloc->size) {
+				if (bb_alloc->end_time <= now)
+					state_ptr->next_end_time = now;
+				else if (state_ptr->next_end_time >
+					 bb_alloc->end_time) {
+					state_ptr->next_end_time =
+						bb_alloc->end_time;
+				}
+			}
+			bb_alloc = bb_alloc->next;
+		}
+	}
+}
+
+/* Sleep function, also handles termination signal */
+extern void bb_sleep(bb_state_t *state_ptr, int add_secs)
+{
+	struct timespec ts = {0, 0};
+	struct timeval  tv = {0, 0};
+
+	if (gettimeofday(&tv, NULL)) {		/* Some error */
+		sleep(1);
+		return;
+	}
+
+	ts.tv_sec  = tv.tv_sec + add_secs;
+	ts.tv_nsec = tv.tv_usec * 1000;
+	pthread_mutex_lock(&state_ptr->term_mutex);
+	if (!state_ptr->term_flag) {
+		pthread_cond_timedwait(&state_ptr->term_cond,
+				       &state_ptr->term_mutex, &ts);
+	}
+	pthread_mutex_unlock(&state_ptr->term_mutex);
+}
+
+
+/* Allocate a named burst buffer record for a specific user.
+ * Return a pointer to that record.
+ * Use bb_free_name_rec() to purge the returned record. */
+extern bb_alloc_t *bb_alloc_name_rec(bb_state_t *state_ptr, char *name,
+				     uint32_t user_id)
+{
+	bb_alloc_t *bb_alloc = NULL;
+	int i;
+
+	xassert(state_ptr->bb_ahash);
+	state_ptr->last_update_time = time(NULL);
+	bb_alloc = xmalloc(sizeof(bb_alloc_t));
+	i = user_id % BB_HASH_SIZE;
+	xassert((bb_alloc->magic = BB_ALLOC_MAGIC));	/* Sets value */
+	bb_alloc->next = state_ptr->bb_ahash[i];
+	state_ptr->bb_ahash[i] = bb_alloc;
+	bb_alloc->name = xstrdup(name);
+	bb_alloc->state = BB_STATE_ALLOCATED;
+	bb_alloc->state_time = time(NULL);
+	bb_alloc->seen_time = time(NULL);
+	bb_alloc->user_id = user_id;
+
+	return bb_alloc;
+}
+
+/* Allocate a per-job burst buffer record for a specific job.
+ * Return a pointer to that record.
+ * Use bb_free_alloc_rec() to purge the returned record. */
+extern bb_alloc_t *bb_alloc_job_rec(bb_state_t *state_ptr,
+				    struct job_record *job_ptr,
+				    bb_job_t *bb_job)
+{
+	bb_alloc_t *bb_alloc = NULL;
+	int i;
+
+	xassert(state_ptr->bb_ahash);
+	xassert(job_ptr);
+	state_ptr->last_update_time = time(NULL);
+	bb_alloc = xmalloc(sizeof(bb_alloc_t));
+	bb_alloc->account = xstrdup(bb_job->account);
+	bb_alloc->array_job_id = job_ptr->array_job_id;
+	bb_alloc->array_task_id = job_ptr->array_task_id;
+	bb_alloc->assoc_ptr = job_ptr->assoc_ptr;
+	bb_alloc->gres_cnt = bb_job->gres_cnt;
+	if (bb_alloc->gres_cnt) {
+		bb_alloc->gres_ptr = xmalloc(sizeof(burst_buffer_gres_t) *
+					     bb_alloc->gres_cnt);
+	}
+	for (i = 0; i < bb_alloc->gres_cnt; i++) {
+		bb_alloc->gres_ptr[i].used_cnt = bb_job->gres_ptr[i].count;
+		bb_alloc->gres_ptr[i].name = xstrdup(bb_job->gres_ptr[i].name);
+	}
+	bb_alloc->job_id = job_ptr->job_id;
+	xassert((bb_alloc->magic = BB_ALLOC_MAGIC));	/* Sets value */
+	i = job_ptr->user_id % BB_HASH_SIZE;
+	bb_alloc->next = state_ptr->bb_ahash[i];
+	bb_alloc->partition = xstrdup(bb_job->partition);
+	bb_alloc->qos = xstrdup(bb_job->qos);
+	state_ptr->bb_ahash[i] = bb_alloc;
+	bb_alloc->size = bb_job->total_size;
+	bb_alloc->state = BB_STATE_ALLOCATED;
+	bb_alloc->state_time = time(NULL);
+	bb_alloc->seen_time = time(NULL);
+	bb_alloc->user_id = job_ptr->user_id;
+
+	return bb_alloc;
+}
+
+/* Allocate a burst buffer record for a job and increase the job priority
+ * if so configured.
+ * Use bb_free_alloc_rec() to purge the returned record. */
+extern bb_alloc_t *bb_alloc_job(bb_state_t *state_ptr,
+				struct job_record *job_ptr, bb_job_t *bb_job)
+{
+	bb_alloc_t *bb_alloc;
+
+	bb_alloc = bb_alloc_job_rec(state_ptr, job_ptr, bb_job);
+	bb_limit_add(bb_alloc->user_id, bb_alloc->size, state_ptr);
+
+	return bb_alloc;
+}
+
+/* Free memory associated with allocated bb record, caller is responsible for
+ * maintaining linked list */
+extern void bb_free_alloc_buf(bb_alloc_t *bb_alloc)
+{
+	int i;
+
+	if (bb_alloc) {
+		xassert(bb_alloc->magic == BB_ALLOC_MAGIC);
+		bb_alloc->magic = 0;
+		xfree(bb_alloc->account);
+		xfree(bb_alloc->assocs);
+		for (i = 0; i < bb_alloc->gres_cnt; i++)
+			xfree(bb_alloc->gres_ptr[i].name);
+		xfree(bb_alloc->gres_ptr);
+		xfree(bb_alloc->name);
+		xfree(bb_alloc->partition);
+		xfree(bb_alloc->qos);
+		xfree(bb_alloc);
+	}
+}
+
+
+/* Remove a specific bb_alloc_t from global records.
+ * RET true if found, false otherwise */
+extern bool bb_free_alloc_rec(bb_state_t *state_ptr, bb_alloc_t *bb_alloc)
+{
+	bb_alloc_t *bb_link, **bb_plink;
+	int i;
+
+	xassert(state_ptr);
+	xassert(state_ptr->bb_ahash);
+	xassert(bb_alloc);
+
+	i = bb_alloc->user_id % BB_HASH_SIZE;
+	bb_plink = &state_ptr->bb_ahash[i];
+	bb_link = state_ptr->bb_ahash[i];
+	while (bb_link) {
+		if (bb_link == bb_alloc) {
+			xassert(bb_link->magic == BB_ALLOC_MAGIC);
+			*bb_plink = bb_alloc->next;
+			bb_free_alloc_buf(bb_alloc);
+			state_ptr->last_update_time = time(NULL);
+			return true;
+		}
+		bb_plink = &bb_link->next;
+		bb_link = bb_link->next;
+	}
+	return false;
+}
+
+/*
+ * Return time in msec since "start time"
+ */
+static int _tot_wait (struct timeval *start_time)
+{
+	struct timeval end_time;
+	int msec_delay;
+
+	gettimeofday(&end_time, NULL);
+	msec_delay =   (end_time.tv_sec  - start_time->tv_sec ) * 1000;
+	msec_delay += ((end_time.tv_usec - start_time->tv_usec + 500) / 1000);
+	return msec_delay;
+}
+
+/* Execute a script, wait for termination and return its stdout.
+ * script_type IN - Type of program being run (e.g. "StartStageIn")
+ * script_path IN - Fully qualified pathname of the program to execute
+ * script_argv IN - Arguments to the script
+ * max_wait IN - Maximum time to wait in milliseconds,
+ *		 -1 for no limit (asynchronous)
+ * status OUT - Job exit code
+ * Return stdout+stderr of spawned program, value must be xfreed. */
+extern char *bb_run_script(char *script_type, char *script_path,
+			   char **script_argv, int max_wait, int *status)
+{
+	int i, new_wait, resp_size = 0, resp_offset = 0;
+	pid_t cpid;
+	char *resp = NULL;
+	int pfd[2] = { -1, -1 };
+
+	if ((script_path == NULL) || (script_path[0] == '\0')) {
+		error("%s: no script specified", __func__);
+		*status = 127;	/* same code the shell uses for "not found" */
+		resp = xstrdup("Slurm burst buffer configuration error");
+		return resp;
+	}
+	if (script_path[0] != '/') {
+		error("%s: %s is not fully qualified pathname (%s)",
+		      __func__, script_type, script_path);
+		*status = 127;
+		resp = xstrdup("Slurm burst buffer configuration error");
+		return resp;
+	}
+	if (access(script_path, R_OK | X_OK) < 0) {
+		error("%s: %s can not be executed (%s) %m",
+		      __func__, script_type, script_path);
+		*status = 127;
+		resp = xstrdup("Slurm burst buffer configuration error");
+		return resp;
+	}
+	if (max_wait != -1) {	/* synchronous: pipe captures stdout+stderr */
+		if (pipe(pfd) != 0) {
+			error("%s: pipe(): %m", __func__);
+			*status = 127;
+			resp = xstrdup("System error");
+			return resp;
+		}
+	}
+	if ((cpid = fork()) == 0) {	/* child */
+		int cc;
+
+		cc = sysconf(_SC_OPEN_MAX);	/* upper bound on open fds */
+		if (max_wait != -1) {
+			dup2(pfd[1], STDERR_FILENO);
+			dup2(pfd[1], STDOUT_FILENO);
+			for (i = 0; i < cc; i++) {
+				if ((i != STDERR_FILENO) &&
+				    (i != STDOUT_FILENO))
+					close(i);
+			}
+		} else {	/* asynchronous: daemonize via double fork */
+			for (i = 0; i < cc; i++)
+				close(i);
+			if ((cpid = fork()) < 0)
+				exit(127);
+			else if (cpid > 0)
+				exit(0);	/* middle proc exits; init reaps script */
+		}
+#ifdef SETPGRP_TWO_ARGS
+		setpgrp(0, 0);
+#else
+		setpgrp();	/* own process group so killpg() reaches whole script */
+#endif
+		execv(script_path, script_argv);
+		error("%s: execv(%s): %m", __func__, script_path);
+		exit(127);
+	} else if (cpid < 0) {	/* fork failed */
+		if (max_wait != -1) {
+			close(pfd[0]);
+			close(pfd[1]);
+		}
+		error("%s: fork(): %m", __func__);
+	} else if (max_wait != -1) {	/* parent, synchronous: collect output */
+		struct pollfd fds;
+		struct timeval tstart;
+		resp_size = 1024;
+		resp = xmalloc(resp_size);
+		close(pfd[1]);	/* close write end so EOF is seen when child exits */
+		gettimeofday(&tstart, NULL);
+		while (1) {
+			fds.fd = pfd[0];
+			fds.events = POLLIN | POLLHUP | POLLRDHUP;	/* POLLRDHUP is a GNU/Linux extension */
+			fds.revents = 0;
+			if (max_wait <= 0) {
+				new_wait = -1;	/* poll without timeout */
+			} else {
+				new_wait = max_wait - _tot_wait(&tstart);	/* remaining budget */
+				if (new_wait <= 0)
+					break;
+			}
+			i = poll(&fds, 1, new_wait);
+			if (i == 0) {
+				error("%s: %s poll timeout @ %d msec",
+				      __func__, script_type, max_wait);
+				break;
+			} else if (i < 0) {
+				error("%s: %s poll:%m", __func__, script_type);
+				break;
+			}
+			if ((fds.revents & POLLIN) == 0)	/* hangup/error with no data */
+				break;
+			i = read(pfd[0], resp + resp_offset,
+				 resp_size - resp_offset);
+			if (i == 0) {	/* EOF */
+				break;
+			} else if (i < 0) {
+				if (errno == EAGAIN)
+					continue;
+				error("%s: read(%s): %m", __func__,
+				      script_path);
+				break;
+			} else {
+				resp_offset += i;
+				if (resp_offset + 1024 >= resp_size) {	/* keep 1KB headroom */
+					resp_size *= 2;
+					resp = xrealloc(resp, resp_size);
+				}
+			}
+		}
+		killpg(cpid, SIGKILL);	/* kill stragglers in script's process group */
+		waitpid(cpid, status, 0);
+		close(pfd[0]);
+	} else {	/* asynchronous: reap the short-lived middle process */
+		waitpid(cpid, status, 0);
+	}
+	return resp;
+}
+
+static void _persist_purge(void *x)	/* List destructor for bb_pend_persist_t records */
+{
+	xfree(x);
+}
+
+static int _persist_match(void *x, void *key)	/* List match callback */
+{
+	bb_pend_persist_t *bb_pers_exist = (bb_pend_persist_t *) x;
+	bb_pend_persist_t *bb_pers_test  = (bb_pend_persist_t *) key;
+	if (bb_pers_exist->job_id == bb_pers_test->job_id)	/* match on job_id only */
+		return 1;
+	return 0;
+}
+
+/* Add persistent burst buffer reservation for this job, tests for duplicate */
+extern void bb_add_persist(bb_state_t *state_ptr,
+			   bb_pend_persist_t *bb_persist)
+{
+	bb_pend_persist_t *bb_pers_match;
+
+	xassert(state_ptr);
+	if (!state_ptr->persist_resv_rec) {
+		state_ptr->persist_resv_rec = list_create(_persist_purge);	/* lazy init */
+	} else {
+		bb_pers_match = list_find_first(state_ptr->persist_resv_rec,
+						_persist_match, bb_persist);
+		if (bb_pers_match)	/* already recorded for this job */
+			return;
+	}
+
+	bb_pers_match = xmalloc(sizeof(bb_pend_persist_t));	/* private copy for list */
+	bb_pers_match->job_id = bb_persist->job_id;
+	bb_pers_match->persist_add = bb_persist->persist_add;
+	list_append(state_ptr->persist_resv_rec, bb_pers_match);
+	state_ptr->persist_resv_sz += bb_persist->persist_add;
+}
+
+/* Remove persistent burst buffer reservation for this job.
+ * Call when job starts running or removed from pending state. NOTE(review): only persist_resv_sz is reduced; the matched record stays on persist_resv_rec, so bb_test_persist() will still report it — confirm this is intended. */
+extern void bb_rm_persist(bb_state_t *state_ptr, uint32_t job_id)
+{
+	bb_pend_persist_t  bb_persist;
+	bb_pend_persist_t *bb_pers_match;
+
+	xassert(state_ptr);
+	if (!state_ptr->persist_resv_rec)
+		return;
+	bb_persist.job_id = job_id;	/* only job_id is used by _persist_match */
+	bb_pers_match = list_find_first(state_ptr->persist_resv_rec,
+					_persist_match, &bb_persist);
+	if (!bb_pers_match)
+		return;
+	if (state_ptr->persist_resv_sz >= bb_pers_match->persist_add) {
+		state_ptr->persist_resv_sz -= bb_pers_match->persist_add;
+	} else {
+		state_ptr->persist_resv_sz = 0;	/* clamp on underflow */
+		error("%s: Reserved persistent storage size underflow",
+		      __func__);
+	}
+}
+
+/* Return true if the identified job has burst buffer space already reserved */
+extern bool bb_test_persist(bb_state_t *state_ptr, uint32_t job_id)
+{
+	bb_pend_persist_t bb_pers_match;
+
+	xassert(state_ptr);
+	if (!state_ptr->persist_resv_rec)
+		return false;
+	bb_pers_match.job_id = job_id;	/* only job_id is used by _persist_match */
+	if (list_find_first(state_ptr->persist_resv_rec, _persist_match,
+			    &bb_pers_match))
+		return true;
+	return false;
+}
+
+/* Allocate a bb_job_t record, hashed by job_id, delete with bb_job_del() */
+extern bb_job_t *bb_job_alloc(bb_state_t *state_ptr, uint32_t job_id)
+{
+	int inx = job_id % BB_HASH_SIZE;
+	bb_job_t *bb_job = xmalloc(sizeof(bb_job_t));
+
+	xassert(state_ptr);
+	xassert((bb_job->magic = BB_JOB_MAGIC));	/* Sets value; NOTE(review): verify xassert() is never compiled out, else magic is never set */
+	bb_job->next = state_ptr->bb_jhash[inx];	/* push onto bucket head */
+	bb_job->job_id = job_id;
+	state_ptr->bb_jhash[inx] = bb_job;
+
+	return bb_job;
+}
+
+/* Return a pointer to the existing bb_job_t record for a given job_id or
+ * NULL if not found */
+extern bb_job_t *bb_job_find(bb_state_t *state_ptr, uint32_t job_id)
+{
+	bb_job_t *bb_job;
+
+	xassert(state_ptr);
+
+	if (!state_ptr->bb_jhash)	/* hash table not built yet */
+		return NULL;
+
+	bb_job = state_ptr->bb_jhash[job_id % BB_HASH_SIZE];
+	while (bb_job) {
+		if (bb_job->job_id == job_id) {
+			xassert(bb_job->magic == BB_JOB_MAGIC);
+			return bb_job;
+		}
+		bb_job = bb_job->next;
+	}
+
+	return bb_job;	/* NULL: not found */
+}
+
+/* Delete a bb_job_t record, hashed by job_id */
+extern void bb_job_del(bb_state_t *state_ptr, uint32_t job_id)
+{
+	int inx = job_id % BB_HASH_SIZE;
+	bb_job_t *bb_job, **bb_pjob;
+
+	xassert(state_ptr);
+	bb_pjob = &state_ptr->bb_jhash[inx];	/* link to patch on removal */
+	bb_job  =  state_ptr->bb_jhash[inx];
+	while (bb_job) {
+		if (bb_job->job_id == job_id) {
+			xassert(bb_job->magic == BB_JOB_MAGIC);
+			bb_job->magic = 0;	/* poison magic before free */
+			*bb_pjob = bb_job->next;	/* unlink from bucket */
+			_bb_job_del2(bb_job);
+			return;
+		}
+		bb_pjob = &bb_job->next;
+		bb_job  =  bb_job->next;
+	}
+}
+
+/* Delete a bb_job_t record. DOES NOT UNLINK FROM HASH TABLE */
+static void _bb_job_del2(bb_job_t *bb_job)
+{
+	int i;
+
+	if (bb_job) {
+		xfree(bb_job->account);
+		for (i = 0; i < bb_job->buf_cnt; i++) {	/* per-buffer strings */
+			xfree(bb_job->buf_ptr[i].access);
+			xfree(bb_job->buf_ptr[i].name);
+			xfree(bb_job->buf_ptr[i].type);
+		}
+		xfree(bb_job->buf_ptr);
+		for (i = 0; i < bb_job->gres_cnt; i++)	/* per-gres names */
+			xfree(bb_job->gres_ptr[i].name);
+		xfree(bb_job->gres_ptr);
+		xfree(bb_job->partition);
+		xfree(bb_job->qos);
+		xfree(bb_job);
+	}
+}
+
+/* Log the contents of a bb_job_t record using "info()" */
+extern void bb_job_log(bb_state_t *state_ptr, bb_job_t *bb_job)
+{
+	bb_buf_t *buf_ptr;
+	char *out_buf = NULL;
+	int i;
+
+	if (bb_job) {
+		xstrfmtcat(out_buf, "%s: Job:%u ",
+			   state_ptr->name, bb_job->job_id);
+		for (i = 0; i < bb_job->gres_cnt; i++) {
+			xstrfmtcat(out_buf, "Gres[%d]:%s:%"PRIu64" ",
+				   i, bb_job->gres_ptr[i].name,
+				   bb_job->gres_ptr[i].count);
+		}
+		xstrfmtcat(out_buf, "Swap:%ux%u ", bb_job->swap_size,
+			   bb_job->swap_nodes);
+		xstrfmtcat(out_buf, "TotalSize:%"PRIu64"", bb_job->total_size);
+		info("%s", out_buf);	/* one-line job summary */
+		xfree(out_buf);
+		for (i = 0, buf_ptr = bb_job->buf_ptr; i < bb_job->buf_cnt;
+		     i++, buf_ptr++) {
+			if (buf_ptr->create) {	/* buffer creation request */
+				info("  Create  Name:%s Size:%"PRIu64
+				     " Access:%s Type:%s State:%s",
+				     buf_ptr->name, buf_ptr->size,
+				     buf_ptr->access, buf_ptr->type,
+				     bb_state_string(buf_ptr->state));
+			} else if (buf_ptr->destroy) {	/* buffer destroy request */
+				info("  Destroy Name:%s Hurry:%d",
+				     buf_ptr->name, (int) buf_ptr->hurry);
+			} else {
+				info("  Use  Name:%s", buf_ptr->name);
+			}
+		}
+	}
+}
+
+/* Make claim against resource limit for a user */
+extern void bb_limit_add(
+	uint32_t user_id, uint64_t bb_size, bb_state_t *state_ptr)
+{
+	bb_user_t *bb_user;
+
+	state_ptr->used_space += bb_size;	/* global usage, bytes */
+
+	bb_user = bb_find_user_rec(user_id, state_ptr);
+	xassert(bb_user);	/* presumably bb_find_user_rec() creates a missing record — confirm */
+	bb_user->size += bb_size;
+
+}
+
+/* Release claim against resource limit for a user */
+extern void bb_limit_rem(
+	uint32_t user_id, uint64_t bb_size, bb_state_t *state_ptr)
+{
+	bb_user_t *bb_user;
+
+	if (state_ptr->used_space >= bb_size) {
+		state_ptr->used_space -= bb_size;
+	} else {
+		error("%s: used_space underflow", __func__);
+		state_ptr->used_space = 0;	/* clamp on underflow */
+	}
+
+	bb_user = bb_find_user_rec(user_id, state_ptr);
+	xassert(bb_user);
+	if (bb_user->size >= bb_size)
+		bb_user->size -= bb_size;
+	else {
+		bb_user->size = 0;	/* clamp on underflow */
+		error("%s: user limit underflow for uid %u", __func__, user_id);
+	}
+
+}
+
+/* Log creation of a persistent burst buffer in the database
+ * job_ptr IN - Pointer to job that created it, could be NULL at startup
+ * bb_alloc IN - Pointer to persistent burst buffer state info
+ * state_ptr IN - Pointer to burst_buffer plugin state info
+ * NOTE: assoc_mgr association and qos read lock should be set before this.
+ */
+extern int bb_post_persist_create(struct job_record *job_ptr,
+				  bb_alloc_t *bb_alloc, bb_state_t *state_ptr)
+{
+	int rc = SLURM_SUCCESS;
+	slurmdb_reservation_rec_t resv;
+	uint64_t size_mb;
+
+	if (!state_ptr->tres_id) {	/* TRES accounting disabled */
+		debug2("%s: Not tracking this TRES, "
+		       "not sending to the database.", __func__);
+		return SLURM_SUCCESS;
+	}
+
+	size_mb = (bb_alloc->size / (1024 * 1024));	/* accounting uses MB */
+
+	memset(&resv, 0, sizeof(slurmdb_reservation_rec_t));
+	resv.assocs = bb_alloc->assocs;
+	resv.cluster = slurmctld_cluster_name;
+	resv.name = bb_alloc->name;
+	resv.id = bb_alloc->id;
+	resv.time_start = bb_alloc->create_time;
+	xstrfmtcat(resv.tres_str, "%d=%"PRIu64, state_ptr->tres_id, size_mb);
+	rc = acct_storage_g_add_reservation(acct_db_conn, &resv);
+	xfree(resv.tres_str);	/* only field this function allocated */
+
+	if (state_ptr->tres_pos) {	/* apply usage to assoc/QOS limits */
+		slurmdb_assoc_rec_t *assoc_ptr = bb_alloc->assoc_ptr;
+
+		while (assoc_ptr) {	/* walk up the association tree */
+			assoc_ptr->usage->grp_used_tres[state_ptr->tres_pos] +=
+				size_mb;
+			debug2("%s: after adding persisant bb %s(%u), "
+			       "assoc %u(%s/%s/%s) grp_used_tres(%s) "
+			       "is %"PRIu64,
+			       __func__, bb_alloc->name, bb_alloc->id,
+			       assoc_ptr->id, assoc_ptr->acct,
+			       assoc_ptr->user, assoc_ptr->partition,
+			       assoc_mgr_tres_name_array[state_ptr->tres_pos],
+			       assoc_ptr->usage->
+			       grp_used_tres[state_ptr->tres_pos]);
+
+			/* FIXME: should grp_used_tres_run_secs be
+			 * done some how? Same for QOS below.
+			 */
+			/* debug2("%s: after adding persisant bb %s(%u), " */
+			/*        "assoc %u(%s/%s/%s) grp_used_tres_run_secs(%s) " */
+			/*        "is %"PRIu64, */
+			/*        __func__, bb_alloc->name, bb_alloc->id, */
+			/*        assoc_ptr->id, assoc_ptr->acct, */
+			/*        assoc_ptr->user, assoc_ptr->partition, */
+			/*        assoc_mgr_tres_name_array[state_ptr->tres_pos], */
+			/*        assoc_ptr->usage-> */
+			/*        grp_used_tres_run_secs[state_ptr->tres_pos]); */
+			assoc_ptr = assoc_ptr->usage->parent_assoc_ptr;
+		}
+
+		if (job_ptr && job_ptr->tres_alloc_cnt)
+			job_ptr->tres_alloc_cnt[state_ptr->tres_pos] -= size_mb;	/* NOTE(review): shifts accounting from job to assoc/QOS — confirm */
+
+		if (bb_alloc->qos_ptr) {
+			bb_alloc->qos_ptr->usage->grp_used_tres[
+				state_ptr->tres_pos] += size_mb;
+		}
+	}
+
+	return rc;
+}
+
+/* Log deletion of a persistent burst buffer in the database */
+extern int bb_post_persist_delete(bb_alloc_t *bb_alloc, bb_state_t *state_ptr)
+{
+	int rc = SLURM_SUCCESS;
+	slurmdb_reservation_rec_t resv;
+	uint64_t size_mb;
+
+	if (!state_ptr->tres_id) {	/* TRES accounting disabled */
+		debug2("%s: Not tracking this TRES, "
+		       "not sending to the database.", __func__);
+		return SLURM_SUCCESS;
+	}
+
+	size_mb = (bb_alloc->size / (1024 * 1024));	/* accounting uses MB */
+
+	memset(&resv, 0, sizeof(slurmdb_reservation_rec_t));
+	resv.assocs = bb_alloc->assocs;
+	resv.cluster = slurmctld_cluster_name;
+	resv.name = bb_alloc->name;
+	resv.id = bb_alloc->id;
+	resv.time_end = time(NULL);
+	resv.time_start = bb_alloc->create_time;
+	xstrfmtcat(resv.tres_str, "%d=%"PRIu64, state_ptr->tres_id, size_mb);
+
+	rc = acct_storage_g_remove_reservation(acct_db_conn, &resv);
+	xfree(resv.tres_str);	/* only field this function allocated */
+
+	if (state_ptr->tres_pos) {	/* release usage from assoc/QOS limits */
+		slurmdb_assoc_rec_t *assoc_ptr = bb_alloc->assoc_ptr;
+
+		while (assoc_ptr) {	/* walk up the association tree */
+			if (assoc_ptr->usage->grp_used_tres[state_ptr->tres_pos]
+			    >= size_mb) {
+				assoc_ptr->usage->grp_used_tres[
+					state_ptr->tres_pos] -= size_mb;
+				debug2("%s: after removing persisant "
+				       "bb %s(%u), assoc %u(%s/%s/%s) "
+				       "grp_used_tres(%s) is %"PRIu64,
+				       __func__, bb_alloc->name, bb_alloc->id,
+				       assoc_ptr->id, assoc_ptr->acct,
+				       assoc_ptr->user, assoc_ptr->partition,
+				       assoc_mgr_tres_name_array[
+					       state_ptr->tres_pos],
+				       assoc_ptr->usage->
+				       grp_used_tres[state_ptr->tres_pos]);
+			} else {
+				error("%s: underflow removing persisant "
+				      "bb %s(%u), assoc %u(%s/%s/%s) "
+				      "grp_used_tres(%s) had %"PRIu64
+				      " but we are trying to remove %"PRIu64,
+				      __func__, bb_alloc->name, bb_alloc->id,
+				      assoc_ptr->id, assoc_ptr->acct,
+				      assoc_ptr->user, assoc_ptr->partition,
+				      assoc_mgr_tres_name_array[
+					      state_ptr->tres_pos],
+				      assoc_ptr->usage->
+				      grp_used_tres[state_ptr->tres_pos],
+				      size_mb);
+				assoc_ptr->usage->grp_used_tres[
+					state_ptr->tres_pos] = 0;	/* clamp on underflow */
+			}
+
+			/* FIXME: should grp_used_tres_run_secs be
+			 * done some how? Same for QOS below. */
+			/* debug2("%s: after removing persisant bb %s(%u), " */
+			/*        "assoc %u(%s/%s/%s) grp_used_tres_run_secs(%s) " */
+			/*        "is %"PRIu64, */
+			/*        __func__, bb_alloc->name, bb_alloc->id, */
+			/*        assoc_ptr->id, assoc_ptr->acct, */
+			/*        assoc_ptr->user, assoc_ptr->partition, */
+			/*        assoc_mgr_tres_name_array[state_ptr->tres_pos], */
+			/*        assoc_ptr->usage-> */
+			/*        grp_used_tres_run_secs[state_ptr->tres_pos]); */
+			assoc_ptr = assoc_ptr->usage->parent_assoc_ptr;
+		}
+
+		if (bb_alloc->qos_ptr) {
+			if (bb_alloc->qos_ptr->usage->grp_used_tres[
+				    state_ptr->tres_pos] >= size_mb)
+				bb_alloc->qos_ptr->usage->grp_used_tres[
+					state_ptr->tres_pos] -= size_mb;
+			else
+				bb_alloc->qos_ptr->usage->grp_used_tres[
+					state_ptr->tres_pos] = 0;	/* clamp on underflow */
+		}
+	}
+
+	return rc;
+}
diff --git a/src/plugins/burst_buffer/common/burst_buffer_common.h b/src/plugins/burst_buffer/common/burst_buffer_common.h
new file mode 100644
index 000000000..616d2e27d
--- /dev/null
+++ b/src/plugins/burst_buffer/common/burst_buffer_common.h
@@ -0,0 +1,363 @@
+/*****************************************************************************\
+ *  burst_buffer_common.h - Common header for managing burst_buffers
+ *
+ *  NOTE: These functions are designed so they can be used by multiple burst
+ *  buffer plugins at the same time (e.g. you might provide users access to
+ *  both burst_buffer/cray and burst_buffer/generic on the same system), so
+ *  the state information is largely in the individual plugin and passed as
+ *  a pointer argument to these functions.
+ *****************************************************************************
+ *  Copyright (C) 2014-2015 SchedMD LLC.
+ *  Written by Morris Jette <jette@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef __BURST_BUFFER_COMMON_H__
+#define __BURST_BUFFER_COMMON_H__
+
+#include "src/common/list.h"
+#include "src/common/pack.h"
+#include "slurm/slurm.h"
+#include "slurm/slurmdb.h"
+
+/* Interval, in seconds, for purging orphan bb_alloc_t records and timing out
+ * staging */
+#define AGENT_INTERVAL	30
+
+/* Hash tables are used for both job burst buffer and user limit records */
+#define BB_HASH_SIZE	100
+
+/* Burst buffer configuration parameters */
+typedef struct bb_config {
+	uid_t   *allow_users;
+	char    *allow_users_str;
+	char    *create_buffer;
+	bool	debug_flag;
+	char	*default_pool;
+	uid_t   *deny_users;
+	char    *deny_users_str;
+	char    *destroy_buffer;
+	uint32_t flags;			/* See BB_FLAG_* in slurm.h */
+	char    *get_sys_state;
+	uint64_t granularity;		/* space allocation granularity,
+					 * units are GB */
+	uint32_t gres_cnt;		/* Count of records in gres_ptr */
+	burst_buffer_gres_t *gres_ptr;	/* Type is defined in slurm.h */
+	uint32_t prio_boost_alloc;
+	uint32_t prio_boost_use;
+	uint32_t stage_in_timeout;
+	uint32_t stage_out_timeout;
+	char    *start_stage_in;
+	char    *start_stage_out;
+	char    *stop_stage_in;
+	char    *stop_stage_out;
+} bb_config_t;
+
+/* Current burst buffer allocations (instances). Some of these will be job
+ * specific (job_id != 0) and others persistent */
+#define BB_ALLOC_MAGIC		0xDEAD3448
+typedef struct bb_alloc {
+	char *account;		/* Associated account (for limits) */
+	slurmdb_assoc_rec_t *assoc_ptr;
+	char *assocs;		/* Association string, used for accounting */
+	uint32_t array_job_id;
+	uint32_t array_task_id;
+	bool cancelled;
+	time_t create_time;	/* Time of creation */
+	time_t end_time;	/* Expected time when use will end */
+	uint32_t gres_cnt;	/* Count of records in gres_ptr */
+	burst_buffer_gres_t *gres_ptr;
+	uint32_t id;		/* ID for reservation/accounting */
+	uint32_t job_id;
+	uint32_t magic;
+	char *name;		/* For persistent burst buffers */
+	struct bb_alloc *next;
+	char *partition;	/* Associated partition (for limits) */
+	char *qos;		/* Associated QOS (for limits) */
+	slurmdb_qos_rec_t *qos_ptr;
+	time_t seen_time;	/* Time buffer last seen */
+	uint64_t size;
+	uint16_t state;
+	time_t state_time;	/* Time of last state change */
+	time_t use_time;	/* Expected time when use will begin */
+	uint32_t user_id;
+} bb_alloc_t;
+
+/* User's storage use, needed to enforce per-user limits without TRES */
+#define BB_USER_MAGIC		0xDEAD3493
+typedef struct bb_user {
+	uint32_t magic;
+	struct bb_user *next;
+	uint64_t size;
+	uint32_t user_id;
+} bb_user_t;
+
+/* Burst buffer creation records with state */
+typedef struct {
+	char    *access;	/* Buffer access */
+	bool     create;	/* Set if buffer create requested */
+	bool     destroy;	/* Set if buffer destroy requested */
+	bool     hurry;		/* Fast buffer destroy */
+	char    *name;		/* Buffer name, non-numeric for persistent */
+	uint64_t size;		/* Buffer size in bytes */
+	uint16_t state;		/* Buffer state, see BB_STATE_* in slurm.h.in */
+	char    *type;		/* Buffer type */
+} bb_buf_t;
+
+/* Generic burst buffer resources. Information about this is found in the Cray
+ * documentation, but the logic in Slurm is untested and the functionality may
+ * never be used. */
+typedef struct {
+	char *   name;		/* Generic burst buffer resource, e.g. "nodes" */
+	uint64_t count;		/* Count of required resources */
+} bb_gres_t;
+
+/* Burst buffer resources required for a job, based upon a job record's
+ * burst_buffer string field */
+#define BB_JOB_MAGIC		0xDEAD3412
+typedef struct bb_job {
+	char      *account;	 /* Associated account (for limits) */
+	uint32_t   buf_cnt;	/* Number of records in buf_ptr */
+	bb_buf_t  *buf_ptr;	/* Buffer creation records */
+	uint32_t   gres_cnt;	/* number of records in gres_ptr */
+	bb_gres_t *gres_ptr;
+	uint32_t   job_id;
+	uint32_t   magic;
+	struct bb_job *next;
+	char      *partition;	/* Associated partition (for limits) */
+	uint64_t   persist_add;	/* Persistent buffer space job adds, bytes */
+	char      *qos;	 	/* Associated QOS (for limits) */
+	int        state;	/* job state with respect to burst buffers,
+				 * See BB_STATE_* in slurm.h.in */
+	uint32_t   swap_size;	/* swap space required per node in GB */
+	uint32_t   swap_nodes;	/* Number of nodes needed */
+	uint64_t   total_size;	/* Total bytes required for job (excludes
+				 * persistent buffers) */
+} bb_job_t;
+
+/* Persistent buffer requests which are pending */
+typedef struct {
+	uint32_t   job_id;
+	uint64_t   persist_add;	/* Persistent buffer space job adds, bytes */
+} bb_pend_persist_t;
+
+/* Used for building queue of jobs records for various purposes */
+typedef struct bb_job_queue_rec {
+	uint64_t bb_size;	/* Used by generic plugin only */
+	bb_job_t *bb_job;	/* Used by cray plugin only */
+	struct job_record *job_ptr;
+} bb_job_queue_rec_t;
+
+/* Used for building queue of job preemption candidates */
+struct preempt_bb_recs {
+	bb_alloc_t *bb_ptr;
+	uint32_t job_id;
+	uint64_t size;
+	time_t   use_time;
+	uint32_t user_id;
+};
+
+/* Current plugin state information */
+typedef struct bb_state {
+	bb_config_t	bb_config;
+	bb_alloc_t **	bb_ahash;	/* Allocation buffers, hash by job_id */
+	bb_job_t **	bb_jhash;	/* Job state, hash by job_id */
+	bb_user_t **	bb_uhash;	/* User limit, hash by user_id */
+	pthread_mutex_t	bb_mutex;
+	pthread_t	bb_thread;
+	time_t		last_load_time;
+	char *		name;		/* Plugin name */
+	time_t		next_end_time;
+	time_t		last_update_time;
+	uint64_t	persist_resv_sz; /* Space reserved for persistent buffers */
+	List		persist_resv_rec;/* List of bb_pend_persist_t records */
+	pthread_cond_t	term_cond;
+	bool		term_flag;
+	pthread_mutex_t	term_mutex;
+	uint64_t	total_space;	/* units are bytes */
+	int		tres_id;	/* TRES ID, for limits */
+	int		tres_pos;	/* TRES index, for limits */
+	uint64_t	used_space;	/* units are bytes */
+} bb_state_t;
+
+/* Add persistent burst buffer reservation for this job, tests for duplicate */
+extern void bb_add_persist(bb_state_t *state_ptr,
+			   bb_pend_persist_t *bb_persist);
+
+/* Allocate burst buffer hash tables */
+extern void bb_alloc_cache(bb_state_t *state_ptr);
+
+/* Allocate a per-job burst buffer record for a specific job.
+ * Return a pointer to that record.
+ * Use bb_free_alloc_buf() to purge the returned record. */
+extern bb_alloc_t *bb_alloc_job_rec(bb_state_t *state_ptr,
+				    struct job_record *job_ptr,
+				    bb_job_t *bb_job);
+
+/* Allocate a burst buffer record for a job and increase the job priority
+ * if so configured.
+ * Use bb_free_alloc_buf() to purge the returned record. */
+extern bb_alloc_t *bb_alloc_job(bb_state_t *state_ptr,
+				struct job_record *job_ptr, bb_job_t *bb_job);
+
+/* Allocate a named burst buffer record for a specific user.
+ * Return a pointer to that record.
+ * Use bb_free_alloc_buf() to purge the returned record. */
+extern bb_alloc_t *bb_alloc_name_rec(bb_state_t *state_ptr, char *name,
+				     uint32_t user_id);
+
+/* Clear all cached burst buffer records, freeing all memory. */
+extern void bb_clear_cache(bb_state_t *state_ptr);
+
+/* Clear configuration parameters, free memory
+ * config_ptr IN - Initial configuration to be cleared
+ * fini IN - True if shutting down, do more complete clean-up */
+extern void bb_clear_config(bb_config_t *config_ptr, bool fini);
+
+/* Find a per-job burst buffer record for a specific job.
+ * If not found, return NULL. */
+extern bb_alloc_t *bb_find_alloc_rec(bb_state_t *state_ptr,
+				     struct job_record *job_ptr);
+
+/* Find a burst buffer record by name
+ * bb_name IN - Buffer's name
+ * user_id IN - Possible user ID, advisory use only
+ * RET the buffer or NULL if not found */
+extern bb_alloc_t *bb_find_name_rec(char *bb_name, uint32_t user_id,
+				    bb_state_t *state_ptr);
+
+/* Find a per-user burst buffer record for a specific user ID */
+extern bb_user_t *bb_find_user_rec(uint32_t user_id, bb_state_t *state_ptr);
+
+/* Remove a specific bb_alloc_t from global records.
+ * RET true if found, false otherwise */
+extern bool bb_free_alloc_rec(bb_state_t *state_ptr, bb_alloc_t *bb_ptr);
+
+/* Free memory associated with allocated bb record, caller is responsible for
+ * maintaining linked list */
+extern void bb_free_alloc_buf(bb_alloc_t *bb_alloc);
+
+/* Translate a burst buffer size specification in string form to numeric form,
+ * recognizing various suffixes (MB, GB, TB, PB, and Nodes). Default units
+ * are bytes. */
+extern uint64_t bb_get_size_num(char *tok, uint64_t granularity);
+
+/* Translate a burst buffer size specification in numeric form to string form,
+ * recognizing various suffixes (KB, MB, GB, TB, PB, and Nodes). */
+extern char *bb_get_size_str(uint64_t size);
+
+/* Round up a number based upon some granularity */
+extern uint64_t bb_granularity(uint64_t start_size, uint64_t granularity);
+
+/* Allocate a bb_job_t record, hashed by job_id, delete with bb_job_del() */
+extern bb_job_t *bb_job_alloc(bb_state_t *state_ptr, uint32_t job_id);
+
+/* Delete a bb_job_t record, hashed by job_id */
+extern void bb_job_del(bb_state_t *state_ptr, uint32_t job_id);
+
+/* Return a pointer to the existing bb_job_t record for a given job_id or
+ * NULL if not found */
+extern bb_job_t *bb_job_find(bb_state_t *state_ptr, uint32_t job_id);
+
+/* Log the contents of a bb_job_t record using "info()" */
+extern void bb_job_log(bb_state_t *state_ptr, bb_job_t *bb_job);
+
+extern void bb_job_queue_del(void *x);
+
+/* Sort job queue by expected start time */
+extern int bb_job_queue_sort(void *x, void *y);
+
+/* Load and process configuration parameters */
+extern void bb_load_config(bb_state_t *state_ptr, char *plugin_type);
+
+/* Pack individual burst buffer records into a  buffer */
+extern int bb_pack_bufs(uid_t uid, bb_state_t *state_ptr, Buf buffer,
+			uint16_t protocol_version);
+
+/* Pack state and configuration parameters into a buffer */
+extern void bb_pack_state(bb_state_t *state_ptr, Buf buffer,
+			  uint16_t protocol_version);
+
+/* Pack individual burst buffer usage records into a buffer (used for limits) */
+extern int bb_pack_usage(uid_t uid, bb_state_t *state_ptr, Buf buffer,
+			 uint16_t protocol_version);
+
+/* Sort preempt_bb_recs in order of DECREASING use_time */
+extern int bb_preempt_queue_sort(void *x, void *y);
+
+/* Remove persistent burst buffer reservation for this job.
+ * Call when job starts running or removed from pending state. */
+extern void bb_rm_persist(bb_state_t *state_ptr, uint32_t job_id);
+
+/* Set the bb_state's tres_pos for limit enforcement.
+ * Value is set to -1 if not found. */
+extern void bb_set_tres_pos(bb_state_t *state_ptr);
+
+/* For each burst buffer record, set the use_time to the time at which its
+ * use is expected to begin (i.e. each job's expected start time) */
+extern void bb_set_use_time(bb_state_t *state_ptr);
+
+/* Sleep function, also handles termination signal */
+extern void bb_sleep(bb_state_t *state_ptr, int add_secs);
+
+/* Return true if the identified job has burst buffer space already reserved */
+extern bool bb_test_persist(bb_state_t *state_ptr, uint32_t job_id);
+
+/* Execute a script, wait for termination and return its stdout.
+ * script_type IN - Type of program being run (e.g. "StartStageIn")
+ * script_path IN - Fully qualified pathname of the program to execute
+ * script_argv IN - Arguments to the script
+ * max_wait IN - Maximum time to wait in milliseconds,
+ *		 -1 for no limit (asynchronous)
+ * status OUT - Job exit code
+ * Return stdout+stderr of spawned program, value must be xfreed. */
+extern char *bb_run_script(char *script_type, char *script_path,
+			   char **script_argv, int max_wait, int *status);
+
+/* Make claim against resource limit for a user */
+extern void bb_limit_add(
+	uint32_t user_id, uint64_t bb_size, bb_state_t *state_ptr);
+
+/* Release claim against resource limit for a user */
+extern void bb_limit_rem(
+	uint32_t user_id, uint64_t bb_size, bb_state_t *state_ptr);
+
+/* Log creation of a persistent burst buffer in the database
+ * job_ptr IN - Pointer to job that created it, could be NULL at startup
+ * bb_alloc IN - Pointer to persistent burst buffer state info
+ * state_ptr IN - Pointer to burst_buffer plugin state info
+ */
+extern int bb_post_persist_create(struct job_record *job_ptr,
+				  bb_alloc_t *bb_alloc, bb_state_t *state_ptr);
+
+/* Log deletion of a persistent burst buffer in the database */
+extern int bb_post_persist_delete(bb_alloc_t *bb_alloc, bb_state_t *state_ptr);
+#endif	/* __BURST_BUFFER_COMMON_H__ */
diff --git a/src/plugins/burst_buffer/cray/Makefile.am b/src/plugins/burst_buffer/cray/Makefile.am
new file mode 100644
index 000000000..1ddaa196e
--- /dev/null
+++ b/src/plugins/burst_buffer/cray/Makefile.am
@@ -0,0 +1,22 @@
+# Makefile for the burst_buffer/cray plugin (built only if WITH_JSON_PARSER)
+
+AUTOMAKE_OPTIONS = foreign
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+
+if WITH_JSON_PARSER
+
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common $(DATAWARP_CPPFLAGS) $(JSON_CPPFLAGS)
+
+pkglib_LTLIBRARIES = burst_buffer_cray.la
+burst_buffer_cray_la_SOURCES = burst_buffer_cray.c
+burst_buffer_cray_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) $(DATAWARP_LDFLAGS) $(JSON_LDFLAGS)
+burst_buffer_cray_la_LIBADD = ../common/libburst_buffer_common.la
+
+force:
+$(burst_buffer_cray_la_LIBADD) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
+
+else
+EXTRA_burst_buffer_cray_la_SOURCES = burst_buffer_cray.c
+endif
diff --git a/src/plugins/burst_buffer/cray/Makefile.in b/src/plugins/burst_buffer/cray/Makefile.in
new file mode 100644
index 000000000..db24b282e
--- /dev/null
+++ b/src/plugins/burst_buffer/cray/Makefile.in
@@ -0,0 +1,824 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for burst_buffer/cray plugin
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/burst_buffer/cray
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+	$(top_srcdir)/auxdir/depcomp
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+  test -z "$$files" \
+    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+         $(am__cd) "$$dir" && rm -f $$files; }; \
+  }
+am__installdirs = "$(DESTDIR)$(pkglibdir)"
+LTLIBRARIES = $(pkglib_LTLIBRARIES)
+@WITH_JSON_PARSER_TRUE@burst_buffer_cray_la_DEPENDENCIES =  \
+@WITH_JSON_PARSER_TRUE@	../common/libburst_buffer_common.la
+am__burst_buffer_cray_la_SOURCES_DIST = burst_buffer_cray.c
+@WITH_JSON_PARSER_TRUE@am_burst_buffer_cray_la_OBJECTS =  \
+@WITH_JSON_PARSER_TRUE@	burst_buffer_cray.lo
+am__EXTRA_burst_buffer_cray_la_SOURCES_DIST = burst_buffer_cray.c
+burst_buffer_cray_la_OBJECTS = $(am_burst_buffer_cray_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 = 
+burst_buffer_cray_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \
+	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
+	$(AM_CFLAGS) $(CFLAGS) $(burst_buffer_cray_la_LDFLAGS) \
+	$(LDFLAGS) -o $@
+@WITH_JSON_PARSER_TRUE@am_burst_buffer_cray_la_rpath = -rpath \
+@WITH_JSON_PARSER_TRUE@	$(pkglibdir)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo "  CC      " $@;
+am__v_CC_1 = 
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo "  CCLD    " $@;
+am__v_CCLD_1 = 
+SOURCES = $(burst_buffer_cray_la_SOURCES) \
+	$(EXTRA_burst_buffer_cray_la_SOURCES)
+DIST_SOURCES = $(am__burst_buffer_cray_la_SOURCES_DIST) \
+	$(am__EXTRA_burst_buffer_cray_la_SOURCES_DIST)
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+@WITH_JSON_PARSER_TRUE@AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common $(DATAWARP_CPPFLAGS) $(JSON_CPPFLAGS)
+@WITH_JSON_PARSER_TRUE@pkglib_LTLIBRARIES = burst_buffer_cray.la
+@WITH_JSON_PARSER_TRUE@burst_buffer_cray_la_SOURCES = burst_buffer_cray.c
+@WITH_JSON_PARSER_TRUE@burst_buffer_cray_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) $(DATAWARP_LDFLAGS) $(JSON_LDFLAGS)
+@WITH_JSON_PARSER_TRUE@burst_buffer_cray_la_LIBADD = ../common/libburst_buffer_common.la
+@WITH_JSON_PARSER_FALSE@EXTRA_burst_buffer_cray_la_SOURCES = burst_buffer_cray.c
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/burst_buffer/cray/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/plugins/burst_buffer/cray/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	list2=; for p in $$list; do \
+	  if test -f $$p; then \
+	    list2="$$list2 $$p"; \
+	  else :; fi; \
+	done; \
+	test -z "$$list2" || { \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(pkglibdir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" || exit 1; \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \
+	}
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	for p in $$list; do \
+	  $(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; \
+	locs=`for p in $$list; do echo $$p; done | \
+	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+	      sort -u`; \
+	test -z "$$locs" || { \
+	  echo rm -f $${locs}; \
+	  rm -f $${locs}; \
+	}
+
+burst_buffer_cray.la: $(burst_buffer_cray_la_OBJECTS) $(burst_buffer_cray_la_DEPENDENCIES) $(EXTRA_burst_buffer_cray_la_DEPENDENCIES) 
+	$(AM_V_CCLD)$(burst_buffer_cray_la_LINK) $(am_burst_buffer_cray_la_rpath) $(burst_buffer_cray_la_OBJECTS) $(burst_buffer_cray_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/burst_buffer_cray.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+	for dir in "$(DESTDIR)$(pkglibdir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-pkglibLTLIBRARIES
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pkglibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-pkglibLTLIBRARIES cscopelist-am ctags \
+	ctags-am distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-pkglibLTLIBRARIES install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \
+	uninstall-pkglibLTLIBRARIES
+
+
+@WITH_JSON_PARSER_TRUE@force:
+@WITH_JSON_PARSER_TRUE@$(burst_buffer_cray_la_LIBADD) : force
+@WITH_JSON_PARSER_TRUE@	@cd `dirname $@` && $(MAKE) `basename $@`
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/burst_buffer/cray/burst_buffer_cray.c b/src/plugins/burst_buffer/cray/burst_buffer_cray.c
new file mode 100644
index 000000000..839cb93c9
--- /dev/null
+++ b/src/plugins/burst_buffer/cray/burst_buffer_cray.c
@@ -0,0 +1,4313 @@
+/*****************************************************************************\
+ *  burst_buffer_cray.c - Plugin for managing a Cray burst_buffer
+ *****************************************************************************
+ *  Copyright (C) 2014-2015 SchedMD LLC.
+ *  Written by Morris Jette <jette@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#define _GNU_SOURCE	/* For POLLRDHUP */
+#include <ctype.h>
+#include <poll.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#if HAVE_JSON
+#  include <json-c/json.h>
+#endif
+
+#include "slurm/slurm.h"
+
+#include "src/common/assoc_mgr.h"
+#include "src/common/fd.h"
+#include "src/common/list.h"
+#include "src/common/pack.h"
+#include "src/common/parse_config.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/timers.h"
+#include "src/common/uid.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/slurmctld/job_scheduler.h"
+#include "src/slurmctld/locks.h"
+#include "src/slurmctld/reservation.h"
+#include "src/slurmctld/slurmctld.h"
+#include "src/slurmctld/state_save.h"
+#include "src/plugins/burst_buffer/common/burst_buffer_common.h"
+
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *      <application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "burst_buffer" for SLURM burst_buffer) and <method> is a
+ * description of how this plugin satisfies that application.  SLURM will only
+ * load a burst_buffer plugin if the plugin_type string has a prefix of
+ * "burst_buffer/".
+ *
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
+ */
+const char plugin_name[]        = "burst_buffer cray plugin";
+const char plugin_type[]        = "burst_buffer/cray";
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
+
+/* Most state information is in a common structure so that we can more
+ * easily use common functions from multiple burst buffer plugins */
+static bb_state_t	bb_state;
+static uint32_t		last_persistent_id = 1;
+static char *		state_save_loc = NULL;
+
+/* Description of each Cray DW configuration entry
+ */
+typedef struct bb_configs {
+	uint32_t id;
+	uint32_t instance;
+} bb_configs_t;
+
+/* Description of each Cray DW instance entry, including persistent buffers
+ */
+typedef struct bb_instances {
+	uint32_t id;		/* Instance ID */
+	uint32_t bytes;		/* Capacity -- NOTE(review): uint32_t looks narrow for a byte count; confirm */
+	char *label;		/* Instance label string */
+} bb_instances_t;
+
+/* Description of each Cray DW pool entry
+ */
+typedef struct bb_pools {
+	char *id;		/* Pool name, e.g. "bytes"; compared against DefaultPool */
+	char *units;		/* Allocation units -- presumably "bytes"; TODO confirm */
+	uint64_t granularity;	/* Size of one allocation unit */
+	uint64_t quantity;	/* Total units in pool; total = quantity * granularity */
+	uint64_t free;		/* Unallocated units; used = (quantity - free) * granularity */
+} bb_pools_t;
+
+/* Description of each Cray DW session entry
+ */
+typedef struct bb_sessions {
+	uint32_t created;	/* Creation time -- presumably epoch seconds; TODO confirm */
+	uint32_t id;		/* Session ID */
+	char    *token;		/* Session token; used as the buffer name for lookups */
+	bool     used;
+	uint32_t user_id;	/* Owning user ID */
+} bb_sessions_t;
+
+typedef struct {
+	char   **args;
+	uint32_t job_id;
+	uint32_t user_id;
+} pre_run_args_t;
+
+typedef struct {
+	char   **args1;
+	char   **args2;
+	uint32_t job_id;
+	uint32_t timeout;
+	uint32_t user_id;
+} stage_args_t;
+
+typedef struct {		/* Used for scheduling */
+	char *   name;		/* BB GRES name, e.g. "nodes" */
+	uint64_t add_cnt;	/* Additional GRES required */
+	uint64_t avail_cnt;	/* Additional GRES available */
+} needed_gres_t;
+
+typedef struct create_buf_data {
+	char *access;		/* Access mode */
+	bool hurry;		/* Set to destroy in a hurry (no stage-out) */
+	uint32_t job_id;	/* Job ID to use */
+	char *job_script;	/* Path to job script */
+	char *name;		/* Name of the persistent burst buffer */
+	uint64_t size;		/* Size in bytes */
+	char *type;		/* Access type */
+	uint32_t user_id;
+} create_buf_data_t;
+
+static int	_alloc_job_bb(struct job_record *job_ptr, bb_job_t *bb_job,
+			      bool job_ready);
+static void	_apply_limits(void);
+static void *	_bb_agent(void *args);
+static void	_bb_free_configs(bb_configs_t *ents, int num_ent);
+static void	_bb_free_instances(bb_instances_t *ents, int num_ent);
+static void	_bb_free_pools(bb_pools_t *ents, int num_ent);
+static void	_bb_free_sessions(bb_sessions_t *ents, int num_ent);
+static bb_configs_t *_bb_get_configs(int *num_ent, bb_state_t *state_ptr);
+static bb_instances_t *_bb_get_instances(int *num_ent, bb_state_t *state_ptr);
+static bb_pools_t *_bb_get_pools(int *num_ent, bb_state_t *state_ptr);
+static bb_sessions_t *_bb_get_sessions(int *num_ent, bb_state_t *state_ptr);
+static int	_build_bb_script(struct job_record *job_ptr, char *script_file);
+static int	_create_bufs(struct job_record *job_ptr, bb_job_t *bb_job,
+			     bool job_ready);
+static void *	_create_persistent(void *x);
+static void *	_destroy_persistent(void *x);
+static void	_free_create_args(create_buf_data_t *create_args);
+static void	_free_script_argv(char **script_argv);
+static bb_job_t *_get_bb_job(struct job_record *job_ptr);
+static void	_job_queue_del(void *x);
+static bb_configs_t *_json_parse_configs_array(json_object *jobj, char *key,
+					       int *num);
+static bb_instances_t *_json_parse_instances_array(json_object *jobj, char *key,
+						   int *num);
+static struct bb_pools *_json_parse_pools_array(json_object *jobj, char *key,
+						int *num);
+static struct bb_sessions *_json_parse_sessions_array(json_object *jobj,
+						      char *key, int *num);
+static void	_json_parse_configs_object(json_object *jobj,
+					   bb_configs_t *ent);
+static void	_json_parse_instances_object(json_object *jobj,
+					     bb_instances_t *ent);
+static void	_json_parse_pools_object(json_object *jobj, bb_pools_t *ent);
+static void	_json_parse_sessions_object(json_object *jobj,
+					    bb_sessions_t *ent);
+static void	_log_script_argv(char **script_argv, char *resp_msg);
+static void	_load_state(bool init_config);
+static int	_open_part_state_file(char **state_file);
+static int	_parse_bb_opts(struct job_descriptor *job_desc,
+			       uint64_t *bb_size, uid_t submit_uid);
+static void	_parse_config_links(json_object *instance, bb_configs_t *ent);
+static void	_parse_instance_capacity(json_object *instance,
+					 bb_instances_t *ent);
+static int	_xlate_batch(struct job_descriptor *job_desc);
+static int	_xlate_interactive(struct job_descriptor *job_desc);
+static void	_pick_alloc_account(bb_alloc_t *bb_alloc);
+static void	_purge_bb_files(uint32_t job_id, struct job_record *job_ptr);
+static void	_purge_vestigial_bufs(void);
+static void	_python2json(char *buf);
+static void	_recover_bb_state(void);
+static int	_queue_stage_in(struct job_record *job_ptr, bb_job_t *bb_job);
+static int	_queue_stage_out(struct job_record *job_ptr);
+static void	_queue_teardown(uint32_t job_id, uint32_t user_id, bool hurry);
+static void	_reset_buf_state(uint32_t user_id, uint32_t job_id, char *name,
+				 int new_state);
+static void	_save_bb_state(void);
+static void	_set_assoc_mgr_ptrs(bb_alloc_t *bb_alloc);
+static void *	_start_pre_run(void *x);
+static void *	_start_stage_in(void *x);
+static void *	_start_stage_out(void *x);
+static void *	_start_teardown(void *x);
+static void	_test_config(void);
+static bool	_test_persistent_use_ready(bb_job_t *bb_job,
+					   struct job_record *job_ptr);
+static int	_test_size_limit(struct job_record *job_ptr, bb_job_t *bb_job);
+static void	_timeout_bb_rec(void);
+static int	_write_file(char *file_name, char *buf);
+static int	_write_nid_file(char *file_name, char *node_list,
+				uint32_t job_id);
+
+/* Convert a Python string to real JSON format. Specifically replace single
+ * quotes with double quotes and strip leading "u" before the single quotes.
+ * See: https://github.com/stedolan/jq/issues/312 */
+static void _python2json(char *buf)
+{
+	bool quoted = false;	/* true while inside a (single-)quoted string */
+	int i, o;		/* i = read index, o = write index */
+
+	if (!buf)
+		return;
+	for (i = 0, o = 0; ; i++) {	/* in-place rewrite; o <= i always */
+		if (buf[i] == '\'') {
+			buf[o++] = '\"';
+			quoted = !quoted;
+		} else if ((buf[i] == 'u') && (buf[i+1] == '\'') && !quoted) {
+			/* Drop "u" unicode prefix; quote handled next iteration */
+		} else {
+			buf[o++] = buf[i];
+			if (buf[i] == '\0')
+				break;
+		}
+	}
+}
+
+/* Free an array of xmalloced records. The array must be NULL terminated. */
+static void _free_script_argv(char **script_argv)
+{
+	int i;
+
+	for (i = 0; script_argv[i]; i++)	/* NOTE(review): script_argv itself must be non-NULL */
+		xfree(script_argv[i]);
+	xfree(script_argv);	/* free the array itself */
+}
+
+/* Log a command's arguments and its response message (debug_flag only). */
+static void _log_script_argv(char **script_argv, char *resp_msg)
+{
+	char *cmd_line = NULL;
+	int i;
+
+	if (!bb_state.bb_config.debug_flag)	/* log only when debug enabled */
+		return;
+
+	for (i = 0; script_argv[i]; i++) {
+		if (i)
+			xstrcat(cmd_line, " ");
+		xstrcat(cmd_line, script_argv[i]);
+	}
+	info("%s", cmd_line);	/* NOTE(review): cmd_line is NULL for an empty argv */
+	info("%s", resp_msg);
+	xfree(cmd_line);
+}
+
+static void _job_queue_del(void *x)	/* list destructor: free one queued job record */
+{
+	bb_job_queue_rec_t *job_rec = (bb_job_queue_rec_t *) x;
+	if (job_rec) {	/* defensive; x may be NULL */
+		xfree(job_rec);
+	}
+}
+
+/* Purge files we have created for the job.
+ * bb_state.bb_mutex is locked on function entry.
+ * job_ptr may be NULL if not found */
+static void _purge_bb_files(uint32_t job_id, struct job_record *job_ptr)
+
+{
+	char *hash_dir = NULL, *job_dir = NULL;
+	char *script_file = NULL, *path_file = NULL, *client_nids_file = NULL;
+	int hash_inx;
+
+	hash_inx = job_id % 10;
+	xstrfmtcat(hash_dir, "%s/hash.%d", state_save_loc, hash_inx);
+	(void) mkdir(hash_dir, 0700);	/* ensure dirs exist; errors ignored */
+	xstrfmtcat(job_dir, "%s/job.%u", hash_dir, job_id);
+	(void) mkdir(job_dir, 0700);
+
+	xstrfmtcat(client_nids_file, "%s/client_nids", job_dir);
+	(void) unlink(client_nids_file);
+	xfree(client_nids_file);
+
+	xstrfmtcat(path_file, "%s/pathfile", job_dir);
+	(void) unlink(path_file);
+	xfree(path_file);
+
+	if (!job_ptr || (job_ptr->batch_flag == 0)) {	/* keep script for batch jobs */
+		xstrfmtcat(script_file, "%s/script", job_dir);
+		(void) unlink(script_file);
+		xfree(script_file);
+	}
+
+	(void) unlink(job_dir);	/* NOTE(review): job_dir is a directory; unlink() fails (EISDIR), rmdir() likely intended */
+	xfree(job_dir);
+	xfree(hash_dir);
+}
+
+/* Validate plugin configuration, defaulting GetSysState to dw_wlm_cli if unset */
+static void _test_config(void)
+{
+	if (!bb_state.bb_config.get_sys_state) {
+		debug("%s: GetSysState is NULL", __func__);
+		bb_state.bb_config.get_sys_state =
+			xstrdup("/opt/cray/dw_wlm/default/bin/dw_wlm_cli");
+	}
+}
+
+/* Allocate resources to a job and begin stage-in. RET SLURM_SUCCESS, EAGAIN (buffer create pending), or stage-in error */
+static int _alloc_job_bb(struct job_record *job_ptr, bb_job_t *bb_job,
+			 bool job_ready)
+{
+	char jobid_buf[32];
+	int rc = SLURM_SUCCESS;
+
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: start job allocate %s", __func__,
+		     jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+	}
+
+	if (bb_job->buf_cnt &&
+	    (_create_bufs(job_ptr, bb_job, job_ready) > 0))
+		return EAGAIN;	/* persistent buffer creation still in progress */
+
+	if (bb_job->total_size || bb_job->swap_size) {
+		if (bb_job->state < BB_STATE_STAGING_IN) {
+			bb_job->state = BB_STATE_STAGING_IN;
+			rc = _queue_stage_in(job_ptr, bb_job);
+			if (rc != SLURM_SUCCESS) {
+				bb_job->state = BB_STATE_TEARDOWN;
+				_queue_teardown(job_ptr->job_id,
+						job_ptr->user_id, true);
+			}
+		}
+	} else {
+		bb_job->state = BB_STATE_STAGED_IN;	/* nothing to stage */
+	}
+
+	return rc;
+}
+
+/* Background agent: periodically reload DataWarp state, time out stale records, and save plugin state */
+static void *_bb_agent(void *args)
+{
+	/* Locks: write job */
+	slurmctld_lock_t job_write_lock = {
+		NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
+
+	while (!bb_state.term_flag) {
+		bb_sleep(&bb_state, AGENT_INTERVAL);
+		if (!bb_state.term_flag) {
+			_load_state(false);	/* Has own locking */
+			lock_slurmctld(job_write_lock);
+			pthread_mutex_lock(&bb_state.bb_mutex);
+			_timeout_bb_rec();
+			pthread_mutex_unlock(&bb_state.bb_mutex);
+			unlock_slurmctld(job_write_lock);
+		}
+		_save_bb_state();	/* Has own locks excluding file write */
+	}
+
+	return NULL;
+}
+
+/* Return the burst buffer specification of a job
+ * RET bb_job_t structure or NULL if none found
+ * NOTE: delete the cached record using bb_job_del() */
+static bb_job_t *_get_bb_job(struct job_record *job_ptr)
+{
+	char *bb_specs, *bb_hurry, *bb_name, *bb_type, *bb_access;	/* NOTE(review): bb_access/bb_name/bb_type may be used uninitialized below */
+	char *end_ptr = NULL, *save_ptr = NULL, *sub_tok, *tok;
+	bool have_bb = false;
+	uint64_t tmp_cnt;
+	int inx;
+	bb_job_t *bb_job;
+
+	if ((job_ptr->burst_buffer == NULL) ||
+	    (job_ptr->burst_buffer[0] == '\0'))
+		return NULL;
+
+	if ((bb_job = bb_job_find(&bb_state, job_ptr->job_id)))
+		return bb_job;	/* Cached data */
+
+	bb_job = bb_job_alloc(&bb_state, job_ptr->job_id);
+	bb_job->account = xstrdup(job_ptr->account);
+	if (job_ptr->part_ptr)
+		bb_job->partition = xstrdup(job_ptr->part_ptr->name);
+	if (job_ptr->qos_ptr) {
+		slurmdb_qos_rec_t *qos_ptr =
+			(slurmdb_qos_rec_t *)job_ptr->qos_ptr;
+		bb_job->qos = xstrdup(qos_ptr->name);
+	}
+	bb_job->state = BB_STATE_PENDING;
+	bb_specs = xstrdup(job_ptr->burst_buffer);
+	tok = strtok_r(bb_specs, "\n", &save_ptr);
+	while (tok) {
+		if ((tok[1] == 'B') && (tok[2] == 'B')) {	/* "#BB" directive; tok[0] is not checked */
+			tok += 3;
+			while (isspace(tok[0]))
+				tok++;
+			if (!strncmp(tok, "create_persistent", 17)) {
+				have_bb = true;
+				if ((sub_tok = strstr(tok, "access_mode="))) {
+					bb_access = xstrdup(sub_tok + 12);
+					sub_tok = strchr(bb_access, ' ');
+					if (sub_tok)
+						sub_tok[0] = '\0';
+				} else if ((sub_tok = strstr(tok, "access="))) {
+					bb_access = xstrdup(sub_tok + 7);
+					sub_tok = strchr(bb_access, ' ');
+					if (sub_tok)
+						sub_tok[0] = '\0';
+				}
+				if ((sub_tok = strstr(tok, "capacity="))) {
+					tmp_cnt = bb_get_size_num(
+						sub_tok + 9,
+						bb_state.bb_config.granularity);
+				} else {
+					tmp_cnt = 0;
+				}
+				if ((sub_tok = strstr(tok, "name="))) {
+					bb_name = xstrdup(sub_tok + 5);
+					sub_tok = strchr(bb_name, ' ');
+					if (sub_tok)
+						sub_tok[0] = '\0';
+				}
+				if ((sub_tok = strstr(tok, "type="))) {
+					bb_type = xstrdup(sub_tok + 5);
+					sub_tok = strchr(bb_type, ' ');
+					if (sub_tok)
+						sub_tok[0] = '\0';
+				}
+				inx = bb_job->buf_cnt++;
+				bb_job->buf_ptr = xrealloc(bb_job->buf_ptr,
+							   sizeof(bb_buf_t) *
+							   bb_job->buf_cnt);
+				bb_job->buf_ptr[inx].access = bb_access;	/* takes ownership */
+				bb_job->buf_ptr[inx].create = true;
+				//bb_job->buf_ptr[inx].hurry = false;
+				bb_job->buf_ptr[inx].name = bb_name;
+				bb_job->buf_ptr[inx].size = tmp_cnt;
+				bb_job->buf_ptr[inx].state = BB_STATE_PENDING;
+				bb_job->buf_ptr[inx].type = bb_type;
+				bb_access = NULL;
+				bb_name = NULL;
+				bb_type = NULL;
+			} else if (!strncmp(tok, "destroy_persistent", 17) ||
+				   !strncmp(tok, "delete_persistent", 16)) {	/* NOTE(review): lengths 17/16 are one short of the literals (18/17) */
+				have_bb = true;
+				if ((sub_tok = strstr(tok, "name="))) {
+					bb_name = xstrdup(sub_tok + 5);
+					sub_tok = strchr(bb_name, ' ');
+					if (sub_tok)
+						sub_tok[0] = '\0';
+				}
+				if ((sub_tok = strstr(tok, "type="))) {
+					bb_type = xstrdup(sub_tok + 5);
+					sub_tok = strchr(bb_type, ' ');
+					if (sub_tok)
+						sub_tok[0] = '\0';
+				}
+				bb_hurry = strstr(tok, "hurry");
+				inx = bb_job->buf_cnt++;
+				bb_job->buf_ptr = xrealloc(bb_job->buf_ptr,
+							   sizeof(bb_buf_t) *
+							   bb_job->buf_cnt);
+				//bb_job->buf_ptr[inx].access = NULL;
+				//bb_job->buf_ptr[inx].create = false;
+				bb_job->buf_ptr[inx].destroy = true;
+				bb_job->buf_ptr[inx].hurry = (bb_hurry != NULL);
+				bb_job->buf_ptr[inx].name = xstrdup(bb_name);	/* NOTE(review): bb_name itself is never freed here (leak) */
+				//bb_job->buf_ptr[inx].size = 0;
+				bb_job->buf_ptr[inx].state = BB_STATE_PENDING;
+				//bb_job->buf_ptr[inx].type = NULL;
+			} else {
+				/* Ignore other (future) options */
+			}
+		} else if ((tok[1] == 'D') && (tok[2] == 'W')) {	/* "#DW" directive */
+			tok += 3;
+			while (isspace(tok[0]))
+				tok++;
+			if (!strncmp(tok, "jobdw", 5)) {
+				have_bb = true;
+				if ((sub_tok = strstr(tok, "capacity="))) {
+					tmp_cnt = bb_get_size_num(
+						sub_tok + 9,
+						bb_state.bb_config.granularity);
+				} else {
+					tmp_cnt = 0;
+				}
+				bb_job->total_size += tmp_cnt;
+			} else if (!strncmp(tok, "persistentdw", 12)) {
+				have_bb = true;
+				if ((sub_tok = strstr(tok, "name="))) {
+					bb_name = xstrdup(sub_tok + 5);
+					sub_tok = strchr(bb_name, ' ');
+					if (sub_tok)
+						sub_tok[0] = '\0';
+				}
+				inx = bb_job->buf_cnt++;
+				bb_job->buf_ptr = xrealloc(bb_job->buf_ptr,
+							   sizeof(bb_buf_t) *
+							   bb_job->buf_cnt);
+				//bb_job->buf_ptr[inx].access = NULL;
+				//bb_job->buf_ptr[inx].create = false;
+				//bb_job->buf_ptr[inx].destroy = false;
+				//bb_job->buf_ptr[inx].hurry = false;
+				bb_job->buf_ptr[inx].name = xstrdup(bb_name);
+				//bb_job->buf_ptr[inx].size = 0;
+				bb_job->buf_ptr[inx].state = BB_STATE_PENDING;
+				//bb_job->buf_ptr[inx].type = NULL;
+			} else if (!strncmp(tok, "swap", 4)) {
+				have_bb = true;
+				tok += 4;
+				while (isspace(tok[0]))
+					tok++;
+				bb_job->swap_size = strtol(tok, &end_ptr, 10);
+				if (job_ptr->details &&
+				    job_ptr->details->max_nodes) {
+					bb_job->swap_nodes =
+						job_ptr->details->max_nodes;
+				} else if (job_ptr->details) {
+					bb_job->swap_nodes =
+						job_ptr->details->min_nodes;
+				} else {
+					bb_job->swap_nodes = 1;
+				}
+				bb_job->total_size += (bb_job->swap_size *
+						       bb_job->swap_nodes);
+			} else {
+				/* Ignore stage-in, stage-out, etc. */
+			}
+		}
+		tok = strtok_r(NULL, "\n", &save_ptr);
+	}
+	xfree(bb_specs);
+
+	if (!have_bb) {
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = FAIL_BURST_BUFFER_OP;
+		xstrfmtcat(job_ptr->state_desc,
+			   "%s: Invalid burst buffer spec (%s)",
+			   plugin_type, job_ptr->burst_buffer);
+		job_ptr->priority = 0;	/* hold the job */
+		info("Invalid burst buffer spec for job %u (%s)",
+		     job_ptr->job_id, job_ptr->burst_buffer);
+		bb_job_del(&bb_state, job_ptr->job_id);
+		return NULL;
+	}
+	if (bb_state.bb_config.debug_flag)
+		bb_job_log(&bb_state, bb_job);
+	return bb_job;
+}
+
+/* At slurmctld start up time, for every currently active burst buffer,
+ * update that user's limit */
+static void _apply_limits(void)
+{
+	bb_alloc_t *bb_alloc;
+	int i;
+
+	for (i = 0; i < BB_HASH_SIZE; i++) {	/* walk every hash bucket */
+		bb_alloc = bb_state.bb_ahash[i];
+		while (bb_alloc) {
+			_set_assoc_mgr_ptrs(bb_alloc);
+			bb_limit_add(bb_alloc->user_id,
+				     bb_alloc->size, &bb_state);	/* charge size to owner */
+			bb_alloc = bb_alloc->next;
+		}
+	}
+}
+
+/* Write current burst buffer state to a file so that we can preserve account,
+ * partition, and QOS information of persistent burst buffers as there is no
+ * place to store that information within the DataWarp data structures */
+static void _save_bb_state(void)
+{
+	static time_t last_save_time = 0;
+	static int high_buffer_size = 16 * 1024;
+	time_t save_time;
+	bb_alloc_t *bb_alloc;
+	uint32_t rec_count = 0;
+	Buf buffer;
+	char *old_file = NULL, *new_file = NULL, *reg_file = NULL;
+	int i, count_offset, offset, state_fd;
+	int error_code = 0;
+	uint16_t protocol_version = SLURM_15_08_PROTOCOL_VERSION;
+
+	if ((bb_state.last_update_time <= last_save_time) &&
+	    !bb_state.term_flag)
+		return;	/* nothing changed since last save */
+
+	/* Build buffer with name/account/partition/qos information for all
+	 * named burst buffers so we can preserve limits across restarts */
+	buffer = init_buf(high_buffer_size);
+	pack16(protocol_version, buffer);
+	count_offset = get_buf_offset(buffer);	/* record count back-filled below */
+	pack32(rec_count, buffer);
+	if (bb_state.bb_ahash) {
+		pthread_mutex_lock(&bb_state.bb_mutex);
+		for (i = 0; i < BB_HASH_SIZE; i++) {
+			bb_alloc = bb_state.bb_ahash[i];
+			while (bb_alloc) {
+				if (bb_alloc->name) {
+					packstr(bb_alloc->account,	buffer);
+					pack_time(bb_alloc->create_time,buffer);
+					pack32(bb_alloc->id,		buffer);
+					packstr(bb_alloc->name,		buffer);
+					packstr(bb_alloc->partition,	buffer);
+					packstr(bb_alloc->qos,		buffer);
+					pack32(bb_alloc->user_id,	buffer);
+					if (bb_state.bb_config.flags &
+					    BB_FLAG_EMULATE_CRAY)
+						pack64(bb_alloc->size,	buffer);
+					rec_count++;
+				}
+				bb_alloc = bb_alloc->next;
+			}
+		}
+		save_time = time(NULL);
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+		offset = get_buf_offset(buffer);
+		set_buf_offset(buffer, count_offset);
+		pack32(rec_count, buffer);	/* back-fill the real count */
+		set_buf_offset(buffer, offset);
+	}
+
+	xstrfmtcat(old_file, "%s/%s", slurmctld_conf.state_save_location,
+		   "burst_buffer_cray_state.old");
+	xstrfmtcat(reg_file, "%s/%s", slurmctld_conf.state_save_location,
+		   "burst_buffer_cray_state");
+	xstrfmtcat(new_file, "%s/%s", slurmctld_conf.state_save_location,
+		   "burst_buffer_cray_state.new");
+
+	state_fd = creat(new_file, 0600);
+	if (state_fd < 0) {
+		error("%s: Can't save state, error creating file %s, %m",
+		      __func__, new_file);
+		error_code = errno;
+	} else {
+		int pos = 0, nwrite = get_buf_offset(buffer), amount, rc;
+		char *data = (char *)get_buf_data(buffer);
+		high_buffer_size = MAX(nwrite, high_buffer_size);
+		while (nwrite > 0) {
+			amount = write(state_fd, &data[pos], nwrite);
+			if ((amount < 0) && (errno != EINTR)) {
+				error("Error writing file %s, %m", new_file);
+				break;
+			}
+			nwrite -= amount;	/* NOTE(review): on EINTR amount is -1 here, corrupting pos/nwrite */
+			pos    += amount;
+		}
+
+		rc = fsync_and_close(state_fd, "burst_buffer_cray");
+		if (rc && !error_code)
+			error_code = rc;
+	}
+	if (error_code)
+		(void) unlink(new_file);
+	else {			/* file shuffle */
+		last_save_time = save_time;	/* NOTE(review): save_time uninitialized if bb_ahash was NULL */
+		(void) unlink(old_file);
+		if (link(reg_file, old_file)) {
+			debug4("unable to create link for %s -> %s: %m",
+			       reg_file, old_file);
+		}
+		(void) unlink(reg_file);
+		if (link(new_file, reg_file)) {
+			debug4("unable to create link for %s -> %s: %m",
+			       new_file, reg_file);
+		}
+		(void) unlink(new_file);
+	}
+	xfree(old_file);
+	xfree(reg_file);
+	xfree(new_file);
+	free_buf(buffer);
+}
+
+/* Open the burst buffer state save file, or backup if necessary.
+ * state_file OUT - the name of the state save file used
+ * RET the file descriptor to read from or error code
+ */
+static int _open_part_state_file(char **state_file)
+{
+	int state_fd;
+	struct stat stat_buf;
+
+	*state_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(*state_file, "/burst_buffer_cray_state");
+	state_fd = open(*state_file, O_RDONLY);
+	if (state_fd < 0) {
+		error("Could not open burst buffer state file %s: %m",
+		      *state_file);
+	} else if (fstat(state_fd, &stat_buf) < 0) {
+		error("Could not stat burst buffer state file %s: %m",
+		      *state_file);
+		(void) close(state_fd);
+	} else if (stat_buf.st_size < 4) {	/* too small to hold a valid header */
+		error("Burst buffer state file %s too small", *state_file);
+		(void) close(state_fd);
+	} else	/* Success */
+		return state_fd;
+
+	error("NOTE: Trying backup burst buffer state save file. "
+	      "Information may be lost!");
+	xstrcat(*state_file, ".old");
+	state_fd = open(*state_file, O_RDONLY);
+	return state_fd;	/* may still be < 0; caller checks */
+}
+
+/* Recover saved burst buffer state and use it to preserve account, partition,
+ * and QOS information for persistent burst buffers. */
+static void _recover_bb_state(void)
+{
+	char *state_file = NULL, *data = NULL;
+	int data_allocated, data_read = 0;
+	uint16_t protocol_version = (uint16_t)NO_VAL;
+	uint32_t data_size = 0, rec_count = 0, name_len = 0;
+	uint32_t id = 0, user_id = 0;
+	uint64_t size;	/* only unpacked/used in EMULATE_CRAY mode */
+	int i, state_fd;
+	char *account = NULL, *name = NULL, *partition = NULL, *qos = NULL;
+	char *end_ptr = NULL;
+	time_t create_time = 0;
+	bb_alloc_t *bb_alloc;
+	Buf buffer;
+
+	state_fd = _open_part_state_file(&state_file);
+	if (state_fd < 0) {
+		info("No burst buffer state file (%s) to recover",
+		     state_file);
+		xfree(state_file);
+		return;
+	}
+	data_allocated = BUF_SIZE;
+	data = xmalloc(data_allocated);
+	while (1) {	/* slurp the whole file */
+		data_read = read(state_fd, &data[data_size], BUF_SIZE);
+		if (data_read < 0) {
+			if  (errno == EINTR)
+				continue;
+			else {
+				error("Read error on %s: %m", state_file);
+				break;
+			}
+		} else if (data_read == 0)     /* eof */
+			break;
+		data_size      += data_read;
+		data_allocated += data_read;
+		xrealloc(data, data_allocated);
+	}
+	close(state_fd);
+	xfree(state_file);
+
+	buffer = create_buf(data, data_size);	/* buffer takes ownership of data */
+	safe_unpack16(&protocol_version, buffer);
+	if (protocol_version == (uint16_t)NO_VAL) {
+		error("******************************************************************");
+		error("Can not recover burst_buffer/cray state, data version incompatible");
+		error("******************************************************************");
+		return;	/* NOTE(review): leaks "buffer"; free_buf() missing */
+	}
+
+	safe_unpack32(&rec_count, buffer);
+	for (i = 0; i < rec_count; i++) {
+		safe_unpackstr_xmalloc(&account,   &name_len, buffer);
+		safe_unpack_time(&create_time, buffer);
+		safe_unpack32(&id, buffer);
+		safe_unpackstr_xmalloc(&name,      &name_len, buffer);
+		safe_unpackstr_xmalloc(&partition, &name_len, buffer);
+		safe_unpackstr_xmalloc(&qos,       &name_len, buffer);
+		safe_unpack32(&user_id, buffer);
+		if (bb_state.bb_config.flags & BB_FLAG_EMULATE_CRAY)
+			safe_unpack64(&size, buffer);
+
+		if (bb_state.bb_config.flags & BB_FLAG_EMULATE_CRAY) {
+			bb_alloc = bb_alloc_name_rec(&bb_state, name, user_id);
+			bb_alloc->id = id;
+			last_persistent_id = MAX(last_persistent_id, id);
+			if (name && (name[0] >='0') && (name[0] <='9'))
+				bb_alloc->job_id = strtol(name, &end_ptr, 10);
+			bb_alloc->seen_time = time(NULL);
+			bb_alloc->size = size;
+		} else {
+			bb_alloc = bb_find_name_rec(name, user_id, &bb_state);
+		}
+		if (bb_alloc) {
+			xfree(bb_alloc->account);
+			bb_alloc->account = account;	/* take ownership */
+			account = NULL;
+			bb_alloc->create_time = create_time;
+			xfree(bb_alloc->partition);
+			bb_alloc->partition = partition;
+			partition = NULL;
+			xfree(bb_alloc->qos);
+			bb_alloc->qos = qos;
+			qos = NULL;
+		}
+		xfree(account);
+		xfree(name);
+		xfree(partition);
+		xfree(qos);
+	}
+
+	info("Recovered state of %d burst buffers", rec_count);
+	free_buf(buffer);
+	return;
+
+unpack_error:
+	error("Incomplete burst buffer data checkpoint file");
+	xfree(account);
+	xfree(name);
+	xfree(partition);
+	xfree(qos);
+	free_buf(buffer);
+	return;
+}
+
+/* We just found an unexpected session, set default account, QOS, & partition.
+ * Copy the information from any currently existing session for the same user.
+ * If none is found, use that user's default account and QOS. */
+static void _pick_alloc_account(bb_alloc_t *bb_alloc)
+{
+	/* read locks on assoc & qos */
+	assoc_mgr_lock_t assoc_locks = { READ_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+					 NO_LOCK, NO_LOCK, NO_LOCK };
+	slurmdb_assoc_rec_t assoc_rec;
+	slurmdb_qos_rec_t   qos_rec;
+	bb_alloc_t *bb_ptr = NULL;
+
+	bb_ptr = bb_state.bb_ahash[bb_alloc->user_id % BB_HASH_SIZE];	/* this user's hash chain */
+	while (bb_ptr) {
+		if ((bb_ptr          != bb_alloc) &&
+		    (bb_ptr->user_id == bb_alloc->user_id)) {	/* copy from existing session */
+			xfree(bb_alloc->account);
+			bb_alloc->account   = xstrdup(bb_ptr->account);
+			bb_alloc->assoc_ptr = bb_ptr->assoc_ptr;
+			xfree(bb_alloc->partition);
+			bb_alloc->partition = xstrdup(bb_ptr->partition);
+			xfree(bb_alloc->qos);
+			bb_alloc->qos       = xstrdup(bb_ptr->qos);
+			bb_alloc->qos_ptr = bb_ptr->qos_ptr;
+			xfree(bb_alloc->assocs);
+			bb_alloc->assocs    = xstrdup(bb_ptr->assocs);
+			return;
+		}
+		bb_ptr = bb_ptr->next;
+	}
+
+	/* Set default for this user */
+	bb_alloc->partition = xstrdup(default_part_name);
+	memset(&assoc_rec, 0, sizeof(slurmdb_assoc_rec_t));
+	memset(&qos_rec, 0, sizeof(slurmdb_qos_rec_t));
+	assoc_rec.partition = default_part_name;
+	assoc_rec.uid = bb_alloc->user_id;
+	assoc_mgr_lock(&assoc_locks);
+	if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
+				    accounting_enforce,
+				    &bb_alloc->assoc_ptr,
+				    true) == SLURM_SUCCESS) {
+		xfree(bb_alloc->account);
+		bb_alloc->account   = xstrdup(assoc_rec.acct);
+		xfree(bb_alloc->assocs);
+		if (bb_alloc->assoc_ptr)
+			bb_alloc->assocs =
+				xstrdup_printf(",%u,", bb_alloc->assoc_ptr->id);
+
+		assoc_mgr_get_default_qos_info(bb_alloc->assoc_ptr, &qos_rec);
+		if (assoc_mgr_fill_in_qos(acct_db_conn, &qos_rec,
+					  accounting_enforce,
+					  &bb_alloc->qos_ptr,
+					  true) == SLURM_SUCCESS) {
+			xfree(bb_alloc->qos);
+			if (bb_alloc->qos_ptr)
+				bb_alloc->qos =
+					xstrdup(bb_alloc->qos_ptr->name);
+		}
+	}
+	assoc_mgr_unlock(&assoc_locks);
+}
+
+/* For a given user/partition/account, set its assoc_ptr and qos_ptr */
+static void _set_assoc_mgr_ptrs(bb_alloc_t *bb_alloc)
+{
+	/* read locks on assoc */
+	assoc_mgr_lock_t assoc_locks = { READ_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+					 NO_LOCK, NO_LOCK, NO_LOCK };
+	slurmdb_assoc_rec_t assoc_rec;
+	slurmdb_qos_rec_t qos_rec;
+
+	memset(&assoc_rec, 0, sizeof(slurmdb_assoc_rec_t));
+	assoc_rec.acct      = bb_alloc->account;
+	assoc_rec.partition = bb_alloc->partition;
+	assoc_rec.uid       = bb_alloc->user_id;
+	assoc_mgr_lock(&assoc_locks);
+	if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
+				    accounting_enforce,
+				    &bb_alloc->assoc_ptr,
+				    true) == SLURM_SUCCESS) {
+		xfree(bb_alloc->assocs);
+		bb_alloc->assocs =
+			xstrdup_printf(",%u,", bb_alloc->assoc_ptr->id);
+	}
+
+	memset(&qos_rec, 0, sizeof(slurmdb_qos_rec_t));
+	qos_rec.name = bb_alloc->qos;
+	assoc_mgr_fill_in_qos(acct_db_conn, &qos_rec,
+			      accounting_enforce,
+			      &bb_alloc->qos_ptr,
+			      true);	/* NOTE(review): rc ignored; qos_ptr may remain unset */
+
+	assoc_mgr_unlock(&assoc_locks);
+}
+
+/*
+ * Determine the current actual burst buffer state by polling DataWarp
+ * (pools, instances, sessions and, at startup, configurations) and
+ * reconciling the results with this plugin's internal tables.
+ * init_config IN - true on initial startup/reconfiguration, in which case
+ *	saved state is also recovered and usage limits re-applied
+ */
+static void _load_state(bool init_config)
+{
+	burst_buffer_gres_t *gres_ptr;
+	bb_configs_t *configs;
+	bb_instances_t *instances;
+	bb_pools_t *pools;
+	bb_sessions_t *sessions;
+	bb_alloc_t *bb_alloc;
+	int num_configs = 0, num_instances = 0, num_pools = 0, num_sessions = 0;
+	int i, j;
+	char *end_ptr = NULL;
+	time_t now = time(NULL);
+	/* read locks on assoc and qos */
+	assoc_mgr_lock_t assoc_locks = { READ_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+					 NO_LOCK, NO_LOCK, NO_LOCK };
+
+	/*
+	 * Load the pools information
+	 */
+	pools = _bb_get_pools(&num_pools, &bb_state);
+	if (pools == NULL) {
+		error("%s: failed to find DataWarp entries, what now?",
+		      __func__);
+		return;
+	}
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	/* The first pool reported becomes the default if none configured */
+	if (!bb_state.bb_config.default_pool && (num_pools > 0)) {
+		info("%s: Setting DefaultPool to %s", __func__, pools[0].id);
+		bb_state.bb_config.default_pool = xstrdup(pools[0].id);
+	}
+
+	for (i = 0; i < num_pools; i++) {
+		/* ID: "bytes" */
+		if (strcmp(pools[i].id, bb_state.bb_config.default_pool) == 0) {
+			/* Default pool tracks total/used space directly */
+			bb_state.bb_config.granularity
+				= pools[i].granularity;
+			bb_state.total_space
+				= pools[i].quantity * pools[i].granularity;
+			/* In emulation mode keep locally accounted usage */
+			if (bb_state.bb_config.flags & BB_FLAG_EMULATE_CRAY)
+				continue;
+			bb_state.used_space
+				= (pools[i].quantity - pools[i].free) *
+				pools[i].granularity;
+
+			/* Everything else is a generic burst buffer resource */
+			bb_state.bb_config.gres_cnt = 0;
+		} else {
+			/* Grow the GRES table by one entry for this pool */
+			bb_state.bb_config.gres_ptr
+				= xrealloc(bb_state.bb_config.gres_ptr,
+					   sizeof(burst_buffer_gres_t) *
+					   (bb_state.bb_config.gres_cnt + 1));
+			gres_ptr = bb_state.bb_config.gres_ptr +
+				bb_state.bb_config.gres_cnt;
+			bb_state.bb_config.gres_cnt++;
+			gres_ptr->avail_cnt = pools[i].quantity;
+			gres_ptr->granularity = pools[i].granularity;
+			gres_ptr->name = xstrdup(pools[i].id);
+			if (bb_state.bb_config.flags & BB_FLAG_EMULATE_CRAY)
+				continue;
+			gres_ptr->used_cnt = pools[i].quantity - pools[i].free;
+		}
+	}
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+	_bb_free_pools(pools, num_pools);
+
+	/*
+	 * Load the instances information
+	 */
+	instances = _bb_get_instances(&num_instances, &bb_state);
+	if (instances == NULL) {
+		info("%s: failed to find DataWarp instances", __func__);
+		num_instances = 0;	/* Redundant, but fixes CLANG bug */
+	}
+	sessions = _bb_get_sessions(&num_sessions, &bb_state);
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	assoc_mgr_lock(&assoc_locks);
+	bb_state.last_load_time = time(NULL);
+	for (i = 0; i < num_sessions; i++) {
+		if (!init_config) {
+			/* Already known buffer: just refresh its timestamp */
+			bb_alloc = bb_find_name_rec(sessions[i].token,
+						    sessions[i].user_id,
+						    &bb_state);
+			if (bb_alloc) {
+				bb_alloc->seen_time = bb_state.last_load_time;
+				continue;
+			}
+			if (difftime(now, sessions[i].created) < 2) {
+				/* Newly created in other thread. Give that
+				 * thread a chance to add the entry */
+				continue;
+			}
+			error("%s: Unexpected burst buffer found: %s",
+			      __func__, sessions[i].token);
+		}
+
+		bb_alloc = bb_alloc_name_rec(&bb_state, sessions[i].token,
+					     sessions[i].user_id);
+		bb_alloc->create_time = sessions[i].created;
+		bb_alloc->id = sessions[i].id;
+		/* A token with a leading digit is treated as a job ID;
+		 * otherwise this is a persistent buffer and job_id stays 0 */
+		if ((sessions[i].token != NULL)  &&
+		    (sessions[i].token[0] >='0') &&
+		    (sessions[i].token[0] <='9')) {
+			bb_alloc->job_id =
+				strtol(sessions[i].token, &end_ptr, 10);
+		}
+		/* Sum the size of all instances labeled with this token */
+		for (j = 0; j < num_instances; j++) {
+			if (xstrcmp(sessions[i].token, instances[j].label))
+				continue;
+			bb_alloc->size += instances[j].bytes;
+		}
+		bb_alloc->seen_time = bb_state.last_load_time;
+
+		if (!init_config) {	/* Newly found buffer */
+			_pick_alloc_account(bb_alloc);
+			bb_limit_add(bb_alloc->user_id,
+				     bb_alloc->size, &bb_state);
+		}
+		if (bb_alloc->job_id == 0)
+			bb_post_persist_create(NULL, bb_alloc, &bb_state);
+	}
+	assoc_mgr_unlock(&assoc_locks);
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+	_bb_free_sessions(sessions, num_sessions);
+	_bb_free_instances(instances, num_instances);
+
+	if (!init_config)
+		return;
+
+	/*
+	 * Load the configurations information
+	 * NOTE: This information is currently unused
+	 */
+	configs = _bb_get_configs(&num_configs, &bb_state);
+	if (configs == NULL) {
+		info("%s: failed to find DataWarp configurations", __func__);
+		num_configs = 0;
+	}
+	_bb_free_configs(configs, num_configs);
+
+	_recover_bb_state();
+	_apply_limits();
+	bb_state.last_update_time = time(NULL);
+
+	return;
+}
+
+/* Write a string representing the NIDs of a job's nodes to an arbitrary
+ * file location, one entry per line.
+ * RET 0 or Slurm error code
+ */
+static int _write_nid_file(char *file_name, char *node_list, uint32_t job_id)
+{
+#if defined(HAVE_NATIVE_CRAY)
+	char *tmp, *sep, *tok, *save_ptr = NULL, *buf = NULL;
+	int i, rc;
+
+	xassert(file_name);
+	/* Reduce a hostlist like "nid[00010-00012]" to its numeric ranges
+	 * by stripping the trailing ']' and everything up to '[' */
+	tmp = xstrdup(node_list);
+	sep = strrchr(tmp, ']');
+	if (sep)
+		sep[0] = '\0';
+	sep = strchr(tmp, '[');
+	if (sep) {
+		sep++;
+	} else {
+		/* No brackets: skip past the leading non-digit prefix */
+		sep = tmp;
+		for (i = 0; !isdigit(sep[0]) && sep[0]; i++)
+			sep++;
+	}
+	tok = strtok_r(sep, ",", &save_ptr);
+	while (tok) {
+		xstrfmtcat(buf, "%s\n", tok);
+		tok = strtok_r(NULL, ",", &save_ptr);
+	}
+	xfree(tmp);
+
+	if (buf) {
+		rc = _write_file(file_name, buf);
+		xfree(buf);
+	} else {
+		error("%s: job %u has node list without numeric component (%s)",
+		      __func__, job_id, node_list);
+		rc = EINVAL;
+	}
+	return rc;
+#else
+	/* Non-Cray systems: write one expanded host name per line */
+	char *tok, *buf = NULL;
+	int rc;
+
+	xassert(file_name);
+	if (node_list && node_list[0]) {
+		hostlist_t hl = hostlist_create(node_list);
+		while ((tok = hostlist_shift(hl))) {
+			xstrfmtcat(buf, "%s\n", tok);
+			free(tok);
+		}
+		hostlist_destroy(hl);
+		rc = _write_file(file_name, buf);
+		xfree(buf);
+	} else {
+		error("%s: job %u lacks a node list",  __func__, job_id);
+		rc = EINVAL;
+	}
+	return rc;
+#endif
+}
+
+/* Write an arbitrary NUL-terminated string to an arbitrary file name.
+ * Any existing file is removed first; the file is created with mode 0600.
+ * RET SLURM_SUCCESS or a Slurm/errno error code */
+static int _write_file(char *file_name, char *buf)
+{
+	int amount, fd, nwrite, pos;
+
+	(void) unlink(file_name);
+	fd = creat(file_name, 0600);
+	if (fd < 0) {
+		error("Error creating file %s, %m", file_name);
+		return errno;
+	}
+
+	if (!buf) {
+		error("%s: buf is NULL", __func__);
+		(void) close(fd);	/* Previously leaked on this path */
+		return SLURM_ERROR;
+	}
+
+	nwrite = strlen(buf);
+	pos = 0;
+	while (nwrite > 0) {
+		amount = write(fd, &buf[pos], nwrite);
+		if (amount < 0) {
+			if (errno == EINTR)	/* Interrupted, just retry;
+						 * do not adjust counters with
+						 * a negative "amount" */
+				continue;
+			error("Error writing file %s, %m", file_name);
+			(void) close(fd);	/* Do not leak the fd */
+			return ESLURM_WRITING_TO_FILE;
+		}
+		nwrite -= amount;
+		pos    += amount;
+	}
+
+	(void) close(fd);
+	return SLURM_SUCCESS;
+}
+
+/* Build the "setup" and "data_in" command lines for a job's burst buffer
+ * and spawn a detached thread (_start_stage_in) to execute them.
+ * RET SLURM_SUCCESS (the commands themselves report errors asynchronously) */
+static int _queue_stage_in(struct job_record *job_ptr, bb_job_t *bb_job)
+{
+	char *hash_dir = NULL, *job_dir = NULL;
+	char *client_nodes_file_nid = NULL;
+	char **setup_argv, **data_in_argv;
+	stage_args_t *stage_args;
+	int hash_inx = job_ptr->job_id % 10;
+	pthread_attr_t stage_attr;
+	pthread_t stage_tid = 0;
+	int rc = SLURM_SUCCESS;
+
+	/* Job state files live under <state_save>/hash.<id%10>/job.<id>/ */
+	xstrfmtcat(hash_dir, "%s/hash.%d", state_save_loc, hash_inx);
+	(void) mkdir(hash_dir, 0700);
+	xstrfmtcat(job_dir, "%s/job.%u", hash_dir, job_ptr->job_id);
+	if (job_ptr->sched_nodes) {
+		xstrfmtcat(client_nodes_file_nid, "%s/client_nids", job_dir);
+		/* On write failure, clear the name so setup runs without it
+		 * (xfree NULLs the pointer) */
+		if (_write_nid_file(client_nodes_file_nid,
+				    job_ptr->sched_nodes, job_ptr->job_id))
+			xfree(client_nodes_file_nid);
+	}
+	setup_argv = xmalloc(sizeof(char *) * 20);	/* NULL terminated */
+	setup_argv[0] = xstrdup("dw_wlm_cli");
+	setup_argv[1] = xstrdup("--function");
+	setup_argv[2] = xstrdup("setup");
+	setup_argv[3] = xstrdup("--token");
+	xstrfmtcat(setup_argv[4], "%u", job_ptr->job_id);
+	setup_argv[5] = xstrdup("--caller");
+	setup_argv[6] = xstrdup("SLURM");
+	setup_argv[7] = xstrdup("--user");
+	xstrfmtcat(setup_argv[8], "%d", job_ptr->user_id);
+	setup_argv[9] = xstrdup("--capacity");
+	xstrfmtcat(setup_argv[10], "%s:%s",
+		   bb_state.bb_config.default_pool,
+		   bb_get_size_str(bb_job->total_size));
+	setup_argv[11] = xstrdup("--job");
+	xstrfmtcat(setup_argv[12], "%s/script", job_dir);
+	if (client_nodes_file_nid) {
+#if defined(HAVE_NATIVE_CRAY)
+		setup_argv[13] = xstrdup("--nidlistfile");
+#else
+		setup_argv[13] = xstrdup("--nodehostnamefile");
+#endif
+		setup_argv[14] = xstrdup(client_nodes_file_nid);
+	}
+
+	data_in_argv = xmalloc(sizeof(char *) * 10);	/* NULL terminated */
+	data_in_argv[0] = xstrdup("dw_wlm_cli");
+	data_in_argv[1] = xstrdup("--function");
+	data_in_argv[2] = xstrdup("data_in");
+	data_in_argv[3] = xstrdup("--token");
+	xstrfmtcat(data_in_argv[4], "%u", job_ptr->job_id);
+	data_in_argv[5] = xstrdup("--job");
+	xstrfmtcat(data_in_argv[6], "%s/script", job_dir);
+
+	/* Ownership of both argv arrays passes to the stage-in thread */
+	stage_args = xmalloc(sizeof(stage_args_t));
+	stage_args->job_id  = job_ptr->job_id;
+	stage_args->timeout = bb_state.bb_config.stage_in_timeout;
+	stage_args->args1   = setup_argv;
+	stage_args->args2   = data_in_argv;
+
+	slurm_attr_init(&stage_attr);
+	if (pthread_attr_setdetachstate(&stage_attr, PTHREAD_CREATE_DETACHED))
+		error("pthread_attr_setdetachstate error %m");
+	/* Retry thread creation on EAGAIN; otherwise run synchronously */
+	while (pthread_create(&stage_tid, &stage_attr, _start_stage_in,
+			      stage_args)) {
+		if (errno != EAGAIN) {
+			error("%s: pthread_create: %m", __func__);
+			_start_stage_in(stage_args);	/* Do in-line */
+			break;
+		}
+		usleep(100000);
+	}
+	slurm_attr_destroy(&stage_attr);
+
+	xfree(hash_dir);
+	xfree(job_dir);
+	xfree(client_nodes_file_nid);
+	return rc;
+}
+
+/* Thread to run the DataWarp "setup" then "dws_data_in" functions for one
+ * job, updating job and burst buffer state as each step completes. On any
+ * failure the job is held and a teardown is queued.
+ * x IN - stage_args_t * (ownership transfers here; freed before return)
+ * RET NULL */
+static void *_start_stage_in(void *x)
+{
+	stage_args_t *stage_args;
+	char **setup_argv, **data_in_argv, *resp_msg = NULL, *op = NULL;
+	int rc = SLURM_SUCCESS, status = 0, timeout;
+	slurmctld_lock_t job_read_lock =
+		{ NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
+	slurmctld_lock_t job_write_lock =
+		{ NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
+	struct job_record *job_ptr;
+	bb_alloc_t *bb_alloc = NULL;
+	bb_job_t *bb_job;
+	DEF_TIMERS;
+
+	stage_args = (stage_args_t *) x;
+	setup_argv   = stage_args->args1;
+	data_in_argv = stage_args->args2;
+
+	/* Phase 1: "setup" (default 5 second timeout) */
+	if (stage_args->timeout)
+		timeout = stage_args->timeout * 1000;
+	else
+		timeout = 5000;
+	op = "setup";
+	START_TIMER;
+	resp_msg = bb_run_script("setup",
+				 bb_state.bb_config.get_sys_state,
+				 setup_argv, timeout, &status);
+	END_TIMER;
+	if (DELTA_TIMER > 500000) {	/* 0.5 secs */
+		info("%s: setup for job %u ran for %s",
+		     __func__, stage_args->job_id, TIME_STR);
+	} else if (bb_state.bb_config.debug_flag) {
+		debug("%s: setup for job %u ran for %s",
+		      __func__, stage_args->job_id, TIME_STR);
+	}
+	_log_script_argv(setup_argv, resp_msg);
+	if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) {
+		error("%s: setup for job %u status:%u response:%s",
+		      __func__, stage_args->job_id, status, resp_msg);
+		rc = SLURM_ERROR;
+	} else {
+		/* Setup OK: record the allocation for this job */
+		lock_slurmctld(job_read_lock);
+		pthread_mutex_lock(&bb_state.bb_mutex);
+		job_ptr = find_job_record(stage_args->job_id);
+		bb_job = bb_job_find(&bb_state, stage_args->job_id);
+		if (!job_ptr) {
+			error("%s: unable to find job record for job %u",
+			      __func__, stage_args->job_id);
+			rc = SLURM_ERROR;
+		} else if (!bb_job) {
+			error("%s: unable to find bb_job record for job %u",
+			      __func__, stage_args->job_id);
+		} else {
+			bb_job->state = BB_STATE_STAGING_IN;
+			bb_alloc = bb_alloc_job(&bb_state, job_ptr, bb_job);
+			bb_alloc->create_time = time(NULL);
+		}
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+		unlock_slurmctld(job_read_lock);
+	}
+
+	/* Phase 2: "dws_data_in" (default timeout one day) */
+	if (rc == SLURM_SUCCESS) {
+		if (stage_args->timeout)
+			timeout = stage_args->timeout * 1000;
+		else
+			timeout = 24 * 60 * 60 * 1000;	/* One day */
+		xfree(resp_msg);
+		op = "dws_data_in";
+		START_TIMER;
+		resp_msg = bb_run_script("dws_data_in",
+					 bb_state.bb_config.get_sys_state,
+					 data_in_argv, timeout, &status);
+		END_TIMER;
+		if (DELTA_TIMER > 5000000) {	/* 5 secs */
+			info("%s: dws_data_in for job %u ran for %s",
+			     __func__, stage_args->job_id, TIME_STR);
+		} else if (bb_state.bb_config.debug_flag) {
+			debug("%s: dws_data_in for job %u ran for %s",
+			      __func__, stage_args->job_id, TIME_STR);
+		}
+		_log_script_argv(data_in_argv, resp_msg);
+		if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) {
+			error("%s: dws_data_in for job %u status:%u "
+			      "response:%s",
+			      __func__, stage_args->job_id, status, resp_msg);
+			rc = SLURM_ERROR;
+		}
+	}
+
+	/* Final bookkeeping: mark staged-in, or hold the job and queue a
+	 * teardown on failure */
+	lock_slurmctld(job_write_lock);
+	job_ptr = find_job_record(stage_args->job_id);
+	if (!job_ptr) {
+		error("%s: unable to find job record for job %u",
+		      __func__, stage_args->job_id);
+	} else if (rc == SLURM_SUCCESS) {
+		pthread_mutex_lock(&bb_state.bb_mutex);
+		bb_job = bb_job_find(&bb_state, stage_args->job_id);
+		if (bb_job)
+			bb_job->state = BB_STATE_STAGED_IN;
+		if (bb_job && bb_job->total_size) {
+			bb_alloc = bb_find_alloc_rec(&bb_state, job_ptr);
+			if (bb_alloc) {
+				bb_alloc->state = BB_STATE_STAGED_IN;
+				bb_alloc->state_time = time(NULL);
+				if (bb_state.bb_config.debug_flag) {
+					info("%s: Stage-in complete for job %u",
+					     __func__, stage_args->job_id);
+				}
+				queue_job_scheduler();
+				bb_state.last_update_time = time(NULL);
+			} else {
+				error("%s: unable to find bb_alloc record "
+				      "for job %u",
+				      __func__, stage_args->job_id);
+			}
+		}
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+	} else {
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = FAIL_BURST_BUFFER_OP;
+		xstrfmtcat(job_ptr->state_desc, "%s: %s: %s",
+			   plugin_type, op, resp_msg);
+		job_ptr->priority = 0;	/* Hold job */
+		bb_alloc = bb_find_alloc_rec(&bb_state, job_ptr);
+		if (bb_alloc) {
+			bb_alloc->state = BB_STATE_TEARDOWN;
+			bb_alloc->state_time = time(NULL);
+			bb_state.last_update_time = time(NULL);
+		}
+		_queue_teardown(job_ptr->job_id, job_ptr->user_id, true);
+	}
+	unlock_slurmctld(job_write_lock);
+
+	xfree(resp_msg);
+	_free_script_argv(setup_argv);
+	_free_script_argv(data_in_argv);
+	xfree(stage_args);
+	return NULL;
+}
+
+/* Build the "data_out" and "post_run" command lines for a job's burst
+ * buffer and spawn a detached thread (_start_stage_out) to execute them.
+ * RET SLURM_SUCCESS (the commands themselves report errors asynchronously) */
+static int _queue_stage_out(struct job_record *job_ptr)
+{
+	char *hash_dir = NULL, *job_dir = NULL;
+	char **post_run_argv, **data_out_argv;
+	stage_args_t *stage_args;
+	int hash_inx = job_ptr->job_id % 10, rc = SLURM_SUCCESS;
+	pthread_attr_t stage_attr;
+	pthread_t stage_tid = 0;
+
+	xstrfmtcat(hash_dir, "%s/hash.%d", state_save_loc, hash_inx);
+	xstrfmtcat(job_dir, "%s/job.%u", hash_dir, job_ptr->job_id);
+
+	data_out_argv = xmalloc(sizeof(char *) * 10);	/* NULL terminated */
+	data_out_argv[0] = xstrdup("dw_wlm_cli");
+	data_out_argv[1] = xstrdup("--function");
+	data_out_argv[2] = xstrdup("data_out");
+	data_out_argv[3] = xstrdup("--token");
+	xstrfmtcat(data_out_argv[4], "%u", job_ptr->job_id);
+	data_out_argv[5] = xstrdup("--job");
+	xstrfmtcat(data_out_argv[6], "%s/script", job_dir);
+
+	post_run_argv = xmalloc(sizeof(char *) * 10);	/* NULL terminated */
+	post_run_argv[0] = xstrdup("dw_wlm_cli");
+	post_run_argv[1] = xstrdup("--function");
+	post_run_argv[2] = xstrdup("post_run");
+	post_run_argv[3] = xstrdup("--token");
+	xstrfmtcat(post_run_argv[4], "%u", job_ptr->job_id);
+	post_run_argv[5] = xstrdup("--job");
+	xstrfmtcat(post_run_argv[6], "%s/script", job_dir);
+
+	/* Ownership of both argv arrays passes to the stage-out thread */
+	stage_args = xmalloc(sizeof(stage_args_t));
+	stage_args->args1   = data_out_argv;
+	stage_args->args2   = post_run_argv;
+	stage_args->job_id  = job_ptr->job_id;
+	stage_args->timeout = bb_state.bb_config.stage_out_timeout;
+	stage_args->user_id = job_ptr->user_id;
+
+	slurm_attr_init(&stage_attr);
+	if (pthread_attr_setdetachstate(&stage_attr, PTHREAD_CREATE_DETACHED))
+		error("pthread_attr_setdetachstate error %m");
+	/* Retry thread creation on EAGAIN; otherwise run synchronously */
+	while (pthread_create(&stage_tid, &stage_attr, _start_stage_out,
+			      stage_args)) {
+		if (errno != EAGAIN) {
+			error("%s: pthread_create: %m", __func__);
+			_start_stage_out(stage_args);	/* Do in-line */
+			break;
+		}
+		usleep(100000);
+	}
+	slurm_attr_destroy(&stage_attr);
+
+	xfree(hash_dir);
+	xfree(job_dir);
+	return rc;
+}
+
+/* Thread to run the DataWarp "dws_data_out" then "dws_post_run" functions
+ * for one job, then queue the buffer teardown on success. On failure the
+ * job's state reason/description are updated.
+ * x IN - stage_args_t * (ownership transfers here; freed before return)
+ * RET NULL */
+static void *_start_stage_out(void *x)
+{
+	stage_args_t *stage_args;
+	char **post_run_argv, **data_out_argv, *resp_msg = NULL, *op = NULL;
+	int rc = SLURM_SUCCESS, status = 0, timeout;
+	slurmctld_lock_t job_write_lock =
+		{ NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
+	struct job_record *job_ptr;
+	bb_alloc_t *bb_alloc = NULL;
+	bb_job_t *bb_job = NULL;
+	DEF_TIMERS;
+
+	stage_args = (stage_args_t *) x;
+	data_out_argv = stage_args->args1;
+	post_run_argv = stage_args->args2;
+
+	/* Phase 1: "dws_data_out" (default timeout one day) */
+	if (stage_args->timeout)
+		timeout = stage_args->timeout * 1000;
+	else
+		timeout = 24 * 60 * 60 * 1000;	/* One day */
+	op = "dws_data_out";
+	START_TIMER;
+	resp_msg = bb_run_script("dws_data_out",
+				 bb_state.bb_config.get_sys_state,
+				 data_out_argv, timeout, &status);
+	END_TIMER;
+	if (DELTA_TIMER > 5000000) {	/* 5 secs */
+		info("%s: dws_data_out for job %u ran for %s",
+		     __func__, stage_args->job_id, TIME_STR);
+	} else if (bb_state.bb_config.debug_flag) {
+		debug("%s: dws_data_out for job %u ran for %s",
+		      __func__, stage_args->job_id, TIME_STR);
+	}
+	_log_script_argv(data_out_argv, resp_msg);
+	if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) {
+		error("%s: dws_data_out for job %u status:%u response:%s",
+		      __func__, stage_args->job_id, status, resp_msg);
+		rc = SLURM_ERROR;
+	}
+
+	/* Phase 2: "dws_post_run" (default 5 second timeout) */
+	if (rc == SLURM_SUCCESS) {
+		if (stage_args->timeout)
+			timeout = stage_args->timeout * 1000;
+		else
+			timeout = 5000;
+		op = "dws_post_run";
+		START_TIMER;
+		xfree(resp_msg);
+		resp_msg = bb_run_script("dws_post_run",
+					 bb_state.bb_config.get_sys_state,
+					 post_run_argv, timeout, &status);
+		END_TIMER;
+		if (DELTA_TIMER > 500000) {	/* 0.5 secs */
+			info("%s: dws_post_run for job %u ran for %s",
+			     __func__, stage_args->job_id, TIME_STR);
+		} else if (bb_state.bb_config.debug_flag) {
+			debug("%s: dws_post_run for job %u ran for %s",
+			      __func__, stage_args->job_id, TIME_STR);
+		}
+		_log_script_argv(post_run_argv, resp_msg);
+		if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) {
+			error("%s: dws_post_run for job %u "
+			      "status:%u response:%s",
+			      __func__, stage_args->job_id, status, resp_msg);
+			rc = SLURM_ERROR;
+		}
+	}
+
+	/* Final bookkeeping: move buffer and bb_job to TEARDOWN state and,
+	 * on success, queue the teardown itself */
+	lock_slurmctld(job_write_lock);
+	job_ptr = find_job_record(stage_args->job_id);
+	if (!job_ptr) {
+		error("%s: unable to find job record for job %u",
+		      __func__, stage_args->job_id);
+	} else {
+		if (rc != SLURM_SUCCESS) {
+			job_ptr->state_reason = FAIL_BURST_BUFFER_OP;
+			xfree(job_ptr->state_desc);
+			xstrfmtcat(job_ptr->state_desc, "%s: %s: %s",
+				   plugin_type, op, resp_msg);
+		}
+		pthread_mutex_lock(&bb_state.bb_mutex);
+		bb_alloc = bb_find_alloc_rec(&bb_state, job_ptr);
+		if (bb_alloc) {
+			if (rc == SLURM_SUCCESS) {
+				if (bb_state.bb_config.debug_flag) {
+					info("%s: Stage-out complete for "
+					     "job %u",
+					     __func__, stage_args->job_id);
+				}
+				/* bb_alloc->state = BB_STATE_STAGED_OUT; */
+				bb_alloc->state = BB_STATE_TEARDOWN;
+				bb_alloc->state_time = time(NULL);
+			} else if (bb_state.bb_config.debug_flag) {
+				info("%s: Stage-out failed for job %u",
+				     __func__, stage_args->job_id);
+			}
+			bb_state.last_update_time = time(NULL);
+		} else {
+			error("%s: unable to find bb record for job %u",
+			      __func__, stage_args->job_id);
+		}
+		bb_job = _get_bb_job(job_ptr);
+		if (bb_job)
+			bb_job->state = BB_STATE_TEARDOWN;
+		if (rc == SLURM_SUCCESS) {
+			_queue_teardown(stage_args->job_id, stage_args->user_id,
+					false);
+		}
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+	}
+	unlock_slurmctld(job_write_lock);
+
+	xfree(resp_msg);
+	_free_script_argv(post_run_argv);
+	_free_script_argv(data_out_argv);
+	xfree(stage_args);
+	return NULL;
+}
+
+/* Spawn a detached thread to run the DataWarp "teardown" function for the
+ * given job. If the job's script file is missing (e.g. state was lost), a
+ * shared dummy script is created so teardown can still be invoked.
+ * hurry IN - true to pass "--hurry" (skip data retention work) */
+static void _queue_teardown(uint32_t job_id, uint32_t user_id, bool hurry)
+{
+	struct stat buf;
+	char *hash_dir = NULL, *job_script = NULL;
+	char **teardown_argv;
+	stage_args_t *teardown_args;
+	int fd, hash_inx = job_id % 10;
+	pthread_attr_t teardown_attr;
+	pthread_t teardown_tid = 0;
+
+	xstrfmtcat(hash_dir, "%s/hash.%d", state_save_loc, hash_inx);
+	xstrfmtcat(job_script, "%s/job.%u/script", hash_dir, job_id);
+	if (stat(job_script, &buf) == -1) {
+		/* No per-job script: fall back to a shared dummy script,
+		 * creating it on first use */
+		xfree(job_script);
+		xstrfmtcat(job_script, "%s/burst_buffer_script",
+			   state_save_loc);
+		if (stat(job_script, &buf) == -1) {
+			fd = creat(job_script, 0755);
+			if (fd >= 0) {
+				char *dummy_script = "#!/bin/bash\nexit 0\n";
+				ssize_t len = strlen(dummy_script);
+				/* Write only the script text; previously the
+				 * trailing NUL byte was also written into
+				 * the shell script */
+				if (write(fd, dummy_script, len) != len) {
+					error("%s: write(%s): %m",
+					      __func__, job_script);
+				}
+				close(fd);
+			}
+		}
+	}
+
+	teardown_argv = xmalloc(sizeof(char *) * 10);	/* NULL terminated */
+	teardown_argv[0] = xstrdup("dw_wlm_cli");
+	teardown_argv[1] = xstrdup("--function");
+	teardown_argv[2] = xstrdup("teardown");
+	teardown_argv[3] = xstrdup("--token");
+	xstrfmtcat(teardown_argv[4], "%u", job_id);
+	teardown_argv[5] = xstrdup("--job");
+	teardown_argv[6] = xstrdup(job_script);
+	if (hurry)
+		teardown_argv[7] = xstrdup("--hurry");
+
+	/* Ownership of teardown_argv passes to the teardown thread */
+	teardown_args = xmalloc(sizeof(stage_args_t));
+	teardown_args->job_id  = job_id;
+	teardown_args->user_id = user_id;
+	teardown_args->timeout = 0;
+	teardown_args->args1   = teardown_argv;
+
+	slurm_attr_init(&teardown_attr);
+	if (pthread_attr_setdetachstate(&teardown_attr,PTHREAD_CREATE_DETACHED))
+		error("pthread_attr_setdetachstate error %m");
+	/* Retry thread creation on EAGAIN; otherwise run synchronously */
+	while (pthread_create(&teardown_tid, &teardown_attr, _start_teardown,
+			      teardown_args)) {
+		if (errno != EAGAIN) {
+			error("%s: pthread_create: %m", __func__);
+			_start_teardown(teardown_args);	/* Do in-line */
+			break;
+		}
+		usleep(100000);
+	}
+	slurm_attr_destroy(&teardown_attr);
+
+	xfree(hash_dir);
+	xfree(job_script);
+}
+
+/* Thread to run the DataWarp "teardown" function for one job and release
+ * the associated burst buffer records and usage accounting.
+ * x IN - stage_args_t * (ownership transfers here; freed before return)
+ * RET NULL */
+static void *_start_teardown(void *x)
+{
+	stage_args_t *teardown_args;
+	char **teardown_argv, *resp_msg = NULL;
+	int status = 0, timeout;
+	struct job_record *job_ptr;
+	bb_alloc_t *bb_alloc = NULL;
+	bb_job_t *bb_job = NULL;
+	/* Locks: write job */
+	slurmctld_lock_t job_write_lock = {
+		NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
+	DEF_TIMERS;
+
+	teardown_args = (stage_args_t *) x;
+	teardown_argv = teardown_args->args1;
+
+	START_TIMER;
+	/* Default 5 second timeout (teardown_args->timeout is 0 when
+	 * queued by _queue_teardown) */
+	if (teardown_args->timeout)
+		timeout = teardown_args->timeout * 1000;
+	else
+		timeout = 5000;
+	resp_msg = bb_run_script("teardown",
+				 bb_state.bb_config.get_sys_state,
+				 teardown_argv, timeout, &status);
+	END_TIMER;
+	if ((DELTA_TIMER > 500000) ||	/* 0.5 secs */
+	    (bb_state.bb_config.debug_flag)) {
+		info("%s: teardown for job %u ran for %s",
+		     __func__, teardown_args->job_id, TIME_STR);
+	}
+	_log_script_argv(teardown_argv, resp_msg);
+	/* "Teardown" is run at every termination of every job that _might_
+	 * have a burst buffer, so an error of "token not found" should be
+	 * fairly common and not indicative of a problem. */
+	if ((!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) &&
+	    (!resp_msg || !strstr(resp_msg, "token not found"))) {
+		error("%s: %s: teardown for job %u status:%u response:%s",
+		      plugin_name, __func__, teardown_args->job_id, status,
+		      resp_msg);
+	} else {
+		/* Teardown done (or buffer already gone): purge our state */
+		lock_slurmctld(job_write_lock);
+		pthread_mutex_lock(&bb_state.bb_mutex);
+		job_ptr = find_job_record(teardown_args->job_id);
+		_purge_bb_files(teardown_args->job_id, job_ptr);
+		if (job_ptr) {
+			if ((bb_alloc = bb_find_alloc_rec(&bb_state, job_ptr))){
+				bb_limit_rem(bb_alloc->user_id,
+					     bb_alloc->size, &bb_state);
+				(void) bb_free_alloc_rec(&bb_state, bb_alloc);
+			}
+			if ((bb_job = _get_bb_job(job_ptr)))
+				bb_job->state = BB_STATE_COMPLETE;
+		} else {
+			/* This will happen when slurmctld restarts and needs
+			 * to clear vestigial buffers */
+			char buf_name[32];
+			snprintf(buf_name, sizeof(buf_name), "%u",
+				 teardown_args->job_id);
+			bb_alloc = bb_find_name_rec(buf_name,
+						    teardown_args->user_id,
+						    &bb_state);
+			if (bb_alloc) {
+				bb_limit_rem(bb_alloc->user_id,
+					     bb_alloc->size, &bb_state);
+				(void) bb_free_alloc_rec(&bb_state, bb_alloc);
+			}
+
+		}
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+		unlock_slurmctld(job_write_lock);
+	}
+
+	xfree(resp_msg);
+	_free_script_argv(teardown_argv);
+	xfree(teardown_args);
+	return NULL;
+}
+
+/* Release a needed_gres_t array and the GRES name strings it owns.
+ * gres_cnt IN - number of elements in the array */
+static void _free_needed_gres_struct(needed_gres_t *needed_gres_ptr,
+				     int gres_cnt)
+{
+	int i;
+	if (needed_gres_ptr == NULL)
+		return;
+
+	/* Free each element's name. Previously this freed element 0's name
+	 * repeatedly (xfree NULLs the pointer, so no double-free, but every
+	 * other element's name string was leaked). */
+	for (i = 0; i < gres_cnt; i++)
+		xfree(needed_gres_ptr[i].name);
+	xfree(needed_gres_ptr);
+}
+
+/* Sum the "used" count of the named GRES across all burst buffer
+ * reservation records that apply to this plugin instance (records whose
+ * name is NULL or matches bb_state.name).
+ * RET total reserved count, 0 if resv_bb is NULL */
+static uint64_t _get_bb_resv(char *gres_name, burst_buffer_info_msg_t *resv_bb)
+{
+	uint64_t total_used = 0;
+	int rec_inx, gres_inx;
+
+	if (resv_bb == NULL)
+		return 0;
+
+	for (rec_inx = 0; rec_inx < resv_bb->record_count; rec_inx++) {
+		burst_buffer_info_t *bb_rec;
+		bb_rec = &resv_bb->burst_buffer_array[rec_inx];
+		if (bb_rec->name && xstrcmp(bb_rec->name, bb_state.name))
+			continue;	/* Belongs to another plugin */
+		for (gres_inx = 0; gres_inx < bb_rec->gres_cnt; gres_inx++) {
+			burst_buffer_gres_t *gres;
+			gres = &bb_rec->gres_ptr[gres_inx];
+			if (xstrcmp(gres_name, gres->name) == 0)
+				total_used += gres->used_cnt;
+		}
+	}
+
+	return total_used;
+}
+
+/* Test if a job can be allocated a burst buffer.
+ * This may preempt currently active stage-in for higher priority jobs.
+ *
+ * RET 0: Job can be started now
+ *     1: Job exceeds configured limits, continue testing with next job
+ *     2: Job needs more resources than currently available can not start,
+ *        skip all remaining jobs
+ */
+static int _test_size_limit(struct job_record *job_ptr, bb_job_t *bb_job)
+{
+	burst_buffer_info_msg_t *resv_bb;
+	needed_gres_t *needed_gres_ptr = NULL;
+	struct preempt_bb_recs *preempt_ptr = NULL;
+	List preempt_list;
+	ListIterator preempt_iter;
+	int64_t tmp_g, tmp_r;
+	int64_t add_space, resv_space = 0;
+	int64_t tmp_f;	/* Could go negative due to reservations */
+	int64_t add_total_space_needed = 0, add_user_space_needed = 0;
+	int64_t add_total_space_avail  = 0, add_user_space_avail  = 0;
+	int64_t add_total_gres_needed  = 0, add_total_gres_avail  = 0;
+	time_t now = time(NULL);
+	bb_alloc_t *bb_ptr = NULL;
+	int d, i, j, k;
+	char jobid_buf[32];
+
+	xassert(bb_job);
+	add_space = bb_job->total_size + bb_job->persist_add;
+
+	/* Account for space reserved for other uses, rounded up to this
+	 * plugin's allocation granularity */
+	resv_bb = job_test_bb_resv(job_ptr, now);
+	if (resv_bb) {
+		burst_buffer_info_t *resv_bb_ptr;
+		for (i = 0, resv_bb_ptr = resv_bb->burst_buffer_array;
+		     i < resv_bb->record_count; i++, resv_bb_ptr++) {
+			if (xstrcmp(resv_bb_ptr->name, bb_state.name))
+				continue;
+			resv_bb_ptr->used_space =
+				bb_granularity(resv_bb_ptr->used_space,
+					       bb_state.bb_config.granularity);
+			resv_space += resv_bb_ptr->used_space;
+		}
+	}
+
+	/* Compute space and per-GRES shortfalls relative to what is
+	 * currently configured and free */
+	add_total_space_needed = bb_state.used_space + add_space + resv_space -
+		bb_state.total_space;
+	needed_gres_ptr = xmalloc(sizeof(needed_gres_t) * bb_job->gres_cnt);
+	for (i = 0; i < bb_job->gres_cnt; i++) {
+		needed_gres_ptr[i].name = xstrdup(bb_job->gres_ptr[i].name);
+		for (j = 0; j < bb_state.bb_config.gres_cnt; j++) {
+			if (strcmp(bb_job->gres_ptr[i].name,
+				   bb_state.bb_config.gres_ptr[j].name))
+				continue;
+			tmp_g = bb_granularity(bb_job->gres_ptr[i].count,
+					       bb_state.bb_config.gres_ptr[j].
+					       granularity);
+			bb_job->gres_ptr[i].count = tmp_g;
+			if (tmp_g > bb_state.bb_config.gres_ptr[j].avail_cnt) {
+				/* Can never be satisfied regardless of
+				 * preemption: reject this job */
+				debug("%s: %s requests more %s GRES than "
+				      "configured", __func__,
+				      jobid2fmt(job_ptr, jobid_buf,
+						sizeof(jobid_buf)),
+				      bb_job->gres_ptr[i].name);
+				_free_needed_gres_struct(needed_gres_ptr,
+							 bb_job->gres_cnt);
+				if (resv_bb)
+					slurm_free_burst_buffer_info_msg(
+						resv_bb);
+				return 1;
+			}
+			tmp_r = _get_bb_resv(bb_job->gres_ptr[i].name,resv_bb);
+			tmp_f = bb_state.bb_config.gres_ptr[j].avail_cnt -
+				bb_state.bb_config.gres_ptr[j].used_cnt - tmp_r;
+			if (tmp_g > tmp_f)
+				needed_gres_ptr[i].add_cnt = tmp_g - tmp_f;
+			add_total_gres_needed += needed_gres_ptr[i].add_cnt;
+			break;
+		}
+		if (j >= bb_state.bb_config.gres_cnt) {
+			debug("%s: %s requests %s GRES which are undefined",
+			      __func__,
+			      jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)),
+			      bb_job->gres_ptr[i].name);
+			_free_needed_gres_struct(needed_gres_ptr,
+						 bb_job->gres_cnt);
+			if (resv_bb)
+				slurm_free_burst_buffer_info_msg(resv_bb);
+			return 1;
+		}
+	}
+
+	if (resv_bb)
+		slurm_free_burst_buffer_info_msg(resv_bb);
+
+	/* No shortfall anywhere: the job can start now */
+	if ((add_total_space_needed <= 0) &&
+	    (add_user_space_needed  <= 0) && (add_total_gres_needed <= 0)) {
+		_free_needed_gres_struct(needed_gres_ptr, bb_job->gres_cnt);
+		return 0;
+	}
+
+	/* Identify candidate burst buffers to revoke for higher priority job */
+	preempt_list = list_create(bb_job_queue_del);
+	for (i = 0; i < BB_HASH_SIZE; i++) {
+		bb_ptr = bb_state.bb_ahash[i];
+		while (bb_ptr) {
+			if (bb_ptr->job_id &&
+			    (bb_ptr->use_time > now) &&
+			    (bb_ptr->use_time > job_ptr->start_time)) {
+				preempt_ptr = xmalloc(
+					sizeof(struct preempt_bb_recs));
+				preempt_ptr->bb_ptr = bb_ptr;
+				preempt_ptr->job_id = bb_ptr->job_id;
+				preempt_ptr->size = bb_ptr->size;
+				preempt_ptr->use_time = bb_ptr->use_time;
+				preempt_ptr->user_id = bb_ptr->user_id;
+				list_push(preempt_list, preempt_ptr);
+				add_total_space_avail += bb_ptr->size;
+				/* Previously a stray ";" after this "if" made
+				 * the user-space accumulation unconditional */
+				if (bb_ptr->user_id == job_ptr->user_id)
+					add_user_space_avail += bb_ptr->size;
+				if (add_total_gres_needed<add_total_gres_avail)
+					j = bb_ptr->gres_cnt;
+				else
+					j = 0;
+				/* NOTE(review): needed_gres_ptr has
+				 * bb_job->gres_cnt elements; if
+				 * bb_ptr->gres_cnt can exceed that, this
+				 * indexes past the array -- confirm */
+				for ( ; j < bb_ptr->gres_cnt; j++) {
+					d = needed_gres_ptr[j].add_cnt -
+						needed_gres_ptr[j].avail_cnt;
+					if (d <= 0)
+						continue;
+					for (k = 0; k < bb_job->gres_cnt; k++){
+						if (strcmp(needed_gres_ptr[j].
+							   name,
+							   bb_job->gres_ptr[k].
+							   name))
+							continue;
+						if (bb_job->gres_ptr[k].count <
+						    d) {
+							d = bb_job->gres_ptr[k].
+								count;
+						}
+						add_total_gres_avail += d;
+						needed_gres_ptr[j].avail_cnt+=d;
+					}
+				}
+			}
+			bb_ptr = bb_ptr->next;
+		}
+	}
+
+	/* If preemption can satisfy all shortfalls, tear down candidate
+	 * buffers (latest use_time first) until the needs are covered */
+	if ((add_total_space_avail >= add_total_space_needed) &&
+	    (add_user_space_avail  >= add_user_space_needed)  &&
+	    (add_total_gres_avail  >= add_total_gres_needed)) {
+		list_sort(preempt_list, bb_preempt_queue_sort);
+		preempt_iter = list_iterator_create(preempt_list);
+		while ((preempt_ptr = list_next(preempt_iter)) &&
+		       (add_total_space_needed || add_user_space_needed ||
+			add_total_gres_needed)) {
+			bool do_preempt = false;
+			if (add_user_space_needed &&
+			    (preempt_ptr->user_id == job_ptr->user_id)) {
+				do_preempt = true;
+				add_user_space_needed  -= preempt_ptr->size;
+				add_total_space_needed -= preempt_ptr->size;
+			}
+			if ((add_total_space_needed > add_user_space_needed) &&
+			    (preempt_ptr->user_id != job_ptr->user_id)) {
+				do_preempt = true;
+				add_total_space_needed -= preempt_ptr->size;
+			}
+			if (add_total_gres_needed) {
+				for (j = 0; j < bb_job->gres_cnt; j++) {
+					d = needed_gres_ptr[j].add_cnt;
+					if (d <= 0)
+						continue;
+					for (k = 0;
+					     k < preempt_ptr->bb_ptr->gres_cnt;
+					     k++) {
+						if (strcmp(needed_gres_ptr[j].
+							   name,
+							   preempt_ptr->bb_ptr->
+							   gres_ptr[k].name))
+							continue;
+						if (preempt_ptr->bb_ptr->
+						    gres_ptr[k].used_cnt < d) {
+							d = preempt_ptr->
+								bb_ptr->
+								gres_ptr[k].
+								used_cnt;
+						}
+						add_total_gres_needed -= d;
+						needed_gres_ptr[j].add_cnt -= d;
+						do_preempt = true;
+					}
+				}
+			}
+			if (do_preempt) {
+				preempt_ptr->bb_ptr->cancelled = true;
+				preempt_ptr->bb_ptr->end_time = 0;
+				preempt_ptr->bb_ptr->state = BB_STATE_TEARDOWN;
+				preempt_ptr->bb_ptr->state_time = time(NULL);
+				_queue_teardown(preempt_ptr->job_id,
+						preempt_ptr->user_id, true);
+				if (bb_state.bb_config.debug_flag) {
+					info("%s: %s: Preempting stage-in of "
+					     "job %u for %s", plugin_type,
+					     __func__, preempt_ptr->job_id,
+					     jobid2fmt(job_ptr, jobid_buf,
+						       sizeof(jobid_buf)));
+				}
+			}
+		}
+		list_iterator_destroy(preempt_iter);
+	}
+	FREE_NULL_LIST(preempt_list);
+	_free_needed_gres_struct(needed_gres_ptr, bb_job->gres_cnt);
+
+	return 2;
+}
+
+/* Handle timeout of burst buffer events:
+ * 1. Purge per-job burst buffer records when the stage-out has completed and
+ *    the job has been purged from Slurm
+ * 2. Test for StageInTimeout events
+ * 3. Test for StageOutTimeout events
+ * Caller is presumed to hold bb_state.bb_mutex -- TODO confirm. */
+static void _timeout_bb_rec(void)
+{
+	bb_alloc_t **bb_pptr, *bb_alloc = NULL;
+	struct job_record *job_ptr;
+	int i;
+
+	/* Nothing to reconcile in emulation mode */
+	if (bb_state.bb_config.flags & BB_FLAG_EMULATE_CRAY)
+		return;
+
+	for (i = 0; i < BB_HASH_SIZE; i++) {
+		/* bb_pptr tracks the link to patch when unlinking a record */
+		bb_pptr = &bb_state.bb_ahash[i];
+		bb_alloc = bb_state.bb_ahash[i];
+		while (bb_alloc) {
+			if (bb_alloc->seen_time < bb_state.last_load_time) {
+				/* Not reported by the last DataWarp poll:
+				 * the buffer no longer exists, so purge our
+				 * record.
+				 * NOTE(review): the "break" below moves on to
+				 * the next hash bucket, so at most one record
+				 * per bucket is purged per call -- presumably
+				 * later calls catch the rest; confirm. */
+				if (bb_alloc->job_id == 0) {
+					info("%s: Persistent burst buffer %s "
+					     "purged",
+					     __func__, bb_alloc->name);
+				} else if (bb_state.bb_config.debug_flag) {
+					info("%s: burst buffer for job %u "
+					     "purged",
+					     __func__, bb_alloc->job_id);
+				}
+				bb_limit_rem(bb_alloc->user_id,
+					     bb_alloc->size, &bb_state);
+				bb_post_persist_delete(bb_alloc, &bb_state);
+				*bb_pptr = bb_alloc->next;
+				bb_free_alloc_buf(bb_alloc);
+				break;
+			}
+			if (bb_alloc->state == BB_STATE_COMPLETE) {
+				job_ptr = find_job_record(bb_alloc->job_id);
+				if (!job_ptr || IS_JOB_PENDING(job_ptr)) {
+					/* Job purged or BB preempted */
+					*bb_pptr = bb_alloc->next;
+					bb_free_alloc_buf(bb_alloc);
+					break;
+				}
+			}
+			bb_pptr = &bb_alloc->next;
+			bb_alloc = bb_alloc->next;
+		}
+	}
+}
+
+/* Perform basic burst_buffer option validation
+ *
+ * job_desc IN/OUT - job submit request; the burst_buffer string is first
+ *		     normalized into "#BB"/"#DW" directive lines
+ * bb_size OUT - total burst buffer space requested by the job
+ * submit_uid IN - user submitting the job
+ * RET a SLURM errno */
+static int _parse_bb_opts(struct job_descriptor *job_desc, uint64_t *bb_size,
+			  uid_t submit_uid)
+{
+	char *bb_script, *save_ptr = NULL;
+	char *bb_name = NULL, *capacity;
+	char *end_ptr = NULL, *sub_tok, *tok;
+	uint64_t tmp_cnt;
+	int rc = SLURM_SUCCESS, swap_cnt = 0;	/* was read uninitialized */
+	bool enable_persist = false, have_bb = false;
+
+	xassert(bb_size);
+	*bb_size = 0;
+
+	/* Only operators may manage persistent buffers, unless the
+	 * configuration enables it for everyone */
+	if (validate_operator(submit_uid) ||
+	    (bb_state.bb_config.flags & BB_FLAG_ENABLE_PERSISTENT))
+		enable_persist = true;
+
+	if (job_desc->script)
+		rc = _xlate_batch(job_desc);
+	else
+		rc = _xlate_interactive(job_desc);
+	if ((rc != SLURM_SUCCESS) || (!job_desc->burst_buffer))
+		return rc;
+
+	bb_script = xstrdup(job_desc->burst_buffer);
+	tok = strtok_r(bb_script, "\n", &save_ptr);
+	while (tok) {
+		tmp_cnt = 0;
+		if (tok[0] != '#') {
+			break;	/* Quit at first non-comment */
+		} else if ((tok[1] == 'B') && (tok[2] == 'B')) {
+			/* Slurm-specific "#BB" directives */
+			tok += 3;
+			while (isspace(tok[0]))
+				tok++;
+			if (!strncmp(tok, "create_persistent", 17) &&
+			    !enable_persist) {
+				info("%s: User %d disabled from creating "
+				     "persistent burst buffer",
+				     __func__, submit_uid);
+				rc = ESLURM_INVALID_BURST_BUFFER_REQUEST;
+				break;
+			} else if (!strncmp(tok, "create_persistent", 17)) {
+				have_bb = true;
+				if ((sub_tok = strstr(tok, "capacity="))) {
+					tmp_cnt = bb_get_size_num(
+						sub_tok + 9,
+						bb_state.bb_config.granularity);
+					*bb_size += tmp_cnt;
+				}
+				/* Both capacity= and name= are required */
+				if (tmp_cnt == 0)
+					rc =ESLURM_INVALID_BURST_BUFFER_REQUEST;
+				if ((sub_tok = strstr(tok, "name="))) {
+					bb_name = xstrdup(sub_tok + 5);
+					if ((sub_tok = strchr(bb_name, ' ')))
+						sub_tok[0] = '\0';
+				} else {
+					rc =ESLURM_INVALID_BURST_BUFFER_REQUEST;
+				}
+				/* Reject names starting with a digit; guard
+				 * against NULL bb_name when name= was absent
+				 * (previously a NULL dereference) */
+				if (bb_name &&
+				    (bb_name[0] >= '0') && (bb_name[0] <= '9'))
+					rc =ESLURM_INVALID_BURST_BUFFER_REQUEST;
+				xfree(bb_name);
+				if (rc != SLURM_SUCCESS)
+					break;
+			} else if (!strncmp(tok, "destroy_persistent", 18) &&
+				   !enable_persist) {
+				/* Compare all 18 chars; 17 also matched
+				 * e.g. "destroy_persistenX" */
+				info("%s: User %d disabled from destroying "
+				     "persistent burst buffer",
+				     __func__, submit_uid);
+				rc = ESLURM_INVALID_BURST_BUFFER_REQUEST;
+				break;
+			} else if (!strncmp(tok, "destroy_persistent", 18)) {
+				have_bb = true;
+				if (!(sub_tok = strstr(tok, "name="))) {
+					rc =ESLURM_INVALID_BURST_BUFFER_REQUEST;
+					break;
+				}
+			} else {
+				/* Ignore other (future) options */
+			}
+		} else if ((tok[1] == 'D') && (tok[2] == 'W')) {
+			/* DataWarp "#DW" directives */
+			tok += 3;
+			while (isspace(tok[0]) && (tok[0] != '\0'))
+				tok++;
+			if (!strncmp(tok, "jobdw", 5) &&
+			    (capacity = strstr(tok, "capacity="))) {
+				have_bb = true;
+				tmp_cnt = bb_get_size_num(
+					capacity + 9,
+					bb_state.bb_config.granularity);
+				if (tmp_cnt == 0) {
+					rc =ESLURM_INVALID_BURST_BUFFER_REQUEST;
+					break;
+				}
+				*bb_size += tmp_cnt;
+			} else if (!strncmp(tok, "persistentdw", 12)) {
+				have_bb = true;
+			} else if (!strncmp(tok, "swap", 4)) {
+				have_bb = true;
+				tok += 4;
+				while (isspace(tok[0]) && (tok[0] != '\0'))
+					tok++;
+				/* Accumulate per-node swap across directives;
+				 * swap_cnt is now initialized to zero above */
+				swap_cnt += strtol(tok, &end_ptr, 10);
+				if ((job_desc->max_nodes == 0) ||
+				    (job_desc->max_nodes == NO_VAL)) {
+					info("%s: user %u submitted job with "
+					     "swap space specification, but "
+					     "no max node count specification",
+					     __func__, job_desc->user_id);
+					if (job_desc->min_nodes == NO_VAL)
+						job_desc->min_nodes = 1;
+					job_desc->max_nodes =
+						job_desc->min_nodes;
+				}
+				*bb_size += swap_cnt * job_desc->max_nodes;
+			}
+		}
+		tok = strtok_r(NULL, "\n", &save_ptr);
+	}
+	xfree(bb_script);
+
+	/* The request must contain at least one recognized directive */
+	if (!have_bb)
+		rc = ESLURM_INVALID_BURST_BUFFER_REQUEST;
+
+	return rc;
+}
+
+/* Copy a batch job's burst_buffer options into a separate buffer */
+static int _xlate_batch(struct job_descriptor *job_desc)
+{
+	char *script_copy, *sp = NULL, *line;
+	bool is_bb, is_dw;
+
+	/* Rebuild burst_buffer from the "#BB"/"#DW" lines found at the
+	 * top of the batch script */
+	xfree(job_desc->burst_buffer);
+	script_copy = xstrdup(job_desc->script);
+	for (line = strtok_r(script_copy, "\n", &sp); line;
+	     line = strtok_r(NULL, "\n", &sp)) {
+		if (line[0] != '#')
+			break;	/* Options end at the first non-comment */
+		is_bb = (line[1] == 'B') && (line[2] == 'B');
+		is_dw = (line[1] == 'D') && (line[2] == 'W');
+		if (!is_bb && !is_dw)
+			continue;
+		if (job_desc->burst_buffer)
+			xstrcat(job_desc->burst_buffer, "\n");
+		xstrcat(job_desc->burst_buffer, line);
+	}
+	xfree(script_copy);
+	return SLURM_SUCCESS;
+}
+
+/* Parse simple interactive burst_buffer options into a format identical to
+ * burst_buffer options in a batch script file
+ * RET a SLURM errno */
+static int _xlate_interactive(struct job_descriptor *job_desc)
+{
+	char *access = NULL, *type = NULL;
+	char *end_ptr = NULL, *tok;
+	uint32_t buf_size = 0, swap_cnt = 0;
+	int rc = SLURM_SUCCESS;
+
+	/* Already in batch-script form, or nothing to translate */
+	if (!job_desc->burst_buffer || (job_desc->burst_buffer[0] == '#'))
+		return rc;
+
+	if ((tok = strstr(job_desc->burst_buffer, "access="))) {
+		access = xstrdup(tok + 7);
+		/* Truncate the value at its "," or " " terminator */
+		tok = strchr(access, ',');
+		if (tok)
+			tok[0] = '\0';
+		tok = strchr(access, ' ');
+		if (tok)
+			tok[0] = '\0';
+	}
+
+	if ((tok = strstr(job_desc->burst_buffer, "capacity="))) {
+		buf_size = bb_get_size_num(tok + 9, 1);
+		if (buf_size == 0) {
+			rc = ESLURM_INVALID_BURST_BUFFER_CHANGE;
+			goto fini;
+		}
+	}
+
+	if ((tok = strstr(job_desc->burst_buffer, "swap=")))
+		swap_cnt = strtol(tok + 5, &end_ptr, 10);
+
+	if ((tok = strstr(job_desc->burst_buffer, "type="))) {
+		type = xstrdup(tok + 5);
+		/* Truncate "type" at its terminator. The original code
+		 * trimmed "access" here (copy-paste bug): "type" was never
+		 * trimmed and a NULL "access" could be dereferenced. */
+		tok = strchr(type, ',');
+		if (tok)
+			tok[0] = '\0';
+		tok = strchr(type, ' ');
+		if (tok)
+			tok[0] = '\0';
+	}
+
+	/* Rewrite the request as equivalent "#DW" directive lines */
+	if (rc == SLURM_SUCCESS)
+		xfree(job_desc->burst_buffer);
+	if ((rc == SLURM_SUCCESS) && (swap_cnt || buf_size)) {
+		if (swap_cnt) {
+			xstrfmtcat(job_desc->burst_buffer,
+				   "#DW swap %uGiB\n", swap_cnt);
+		}
+		if (buf_size) {
+			xstrfmtcat(job_desc->burst_buffer,
+				   "#DW jobdw capacity=%s",
+				   bb_get_size_str(buf_size));
+			if (access) {
+				xstrfmtcat(job_desc->burst_buffer,
+					   " access_mode=%s", access);
+			}
+			if (type) {
+				xstrfmtcat(job_desc->burst_buffer,
+					   " type=%s", type);
+			}
+			xstrfmtcat(job_desc->burst_buffer, "\n");
+		}
+	}
+
+fini:	xfree(access);
+	xfree(type);
+	return rc;
+}
+
+/* For interactive jobs, build a script containing the relevant DataWarp
+ * commands, as needed by the Cray API */
+static int _build_bb_script(struct job_record *job_ptr, char *script_file)
+{
+	char *script_body = xstrdup("#!/bin/bash\n");
+	int rc;
+
+	/* The job's burst_buffer string already holds "#DW" directives */
+	xstrcat(script_body, job_ptr->burst_buffer);
+	rc = _write_file(script_file, script_body);
+	xfree(script_body);
+
+	return rc;
+}
+
+/*
+ * init() is called when the plugin is loaded, before any other functions
+ * are called.  Read and validate configuration file here. Spawn thread to
+ * periodically read Datawarp state.
+ */
+extern int init(void)
+{
+	pthread_attr_t attr;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	bb_load_config(&bb_state, (char *)plugin_type); /* Removes "const" */
+	_test_config();
+	if (bb_state.bb_config.debug_flag)
+		info("%s: %s", plugin_type,  __func__);
+	bb_alloc_cache(&bb_state);
+	slurm_attr_init(&attr);
+	/* Retry thread creation on EAGAIN (transient resource shortage);
+	 * any other failure is fatal */
+	while (pthread_create(&bb_state.bb_thread, &attr, _bb_agent, NULL)) {
+		if (errno != EAGAIN) {
+			fatal("%s: Unable to start thread: %m", __func__);
+			break;
+		}
+		usleep(100000);
+	}
+	slurm_attr_destroy(&attr);
+	if (!state_save_loc)
+		state_save_loc = slurm_get_state_save_location();
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * fini() is called when the plugin is unloaded. Free all memory and shutdown
+ * threads.
+ */
+extern int fini(void)
+{
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	if (bb_state.bb_config.debug_flag)
+		info("%s: %s", plugin_type,  __func__);
+
+	/* Wake the _bb_agent thread so it notices term_flag and exits */
+	pthread_mutex_lock(&bb_state.term_mutex);
+	bb_state.term_flag = true;
+	pthread_cond_signal(&bb_state.term_cond);
+	pthread_mutex_unlock(&bb_state.term_mutex);
+
+	if (bb_state.bb_thread) {
+		/* Drop bb_mutex while joining so the agent thread can
+		 * acquire it on its way out (avoids deadlock) */
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+		pthread_join(bb_state.bb_thread, NULL);
+		pthread_mutex_lock(&bb_state.bb_mutex);
+		bb_state.bb_thread = 0;
+	}
+	bb_clear_config(&bb_state.bb_config, true);
+	bb_clear_cache(&bb_state);
+	xfree(state_save_loc);
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/* Identify and purge any vestigial buffers (i.e. we have a job buffer, but
+ * the matching job is either gone or completed) */
+static void _purge_vestigial_bufs(void)
+{
+	int inx;
+
+	for (inx = 0; inx < BB_HASH_SIZE; inx++) {
+		bb_alloc_t *rec;
+		for (rec = bb_state.bb_ahash[inx]; rec; rec = rec->next) {
+			/* Persistent buffers (job_id == 0) and buffers whose
+			 * job still exists are left alone */
+			if ((rec->job_id == 0) ||
+			    find_job_record(rec->job_id))
+				continue;
+			info("%s: Purging vestigial buffer for job %u",
+			     plugin_type, rec->job_id);
+			_queue_teardown(rec->job_id, rec->user_id, false);
+		}
+	}
+}
+
+/*
+ * Return the total burst buffer size in MB
+ */
+extern uint64_t bb_p_get_system_size(void)
+{
+	uint64_t size_mb;
+
+	/* total_space is in bytes; convert under the state lock */
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	size_mb = bb_state.total_space / (1024 * 1024);
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return size_mb;
+}
+
+/*
+ * Load the current burst buffer state (e.g. how much space is available now).
+ * Run at the beginning of each scheduling cycle in order to recognize external
+ * changes to the burst buffer state (e.g. capacity is added, removed, fails,
+ * etc.)
+ *
+ * init_config IN - true if called as part of slurmctld initialization
+ * Returns a SLURM errno.
+ */
+extern int bb_p_load_state(bool init_config)
+{
+	/* Periodic (non-init) calls are a no-op; see comment below */
+	if (!init_config)
+		return SLURM_SUCCESS;
+
+	/* In practice the Cray APIs are too slow to run inline on each
+	 * scheduling cycle. Do so on a periodic basis from _bb_agent(). */
+	if (bb_state.bb_config.debug_flag)
+		debug("%s: %s", plugin_type,  __func__);
+	_load_state(init_config);	/* Has own locking */
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	bb_set_tres_pos(&bb_state);
+	_purge_vestigial_bufs();
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	_save_bb_state();	/* Has own locks excluding file write */
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Note configuration may have changed. Handle changes in BurstBufferParameters.
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_p_reconfig(void)
+{
+	char *old_default_pool;
+	int i;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	if (bb_state.bb_config.debug_flag)
+		info("%s: %s", plugin_type,  __func__);
+	/* Preserve the previous default pool if the reloaded configuration
+	 * does not define one of its own */
+	old_default_pool = bb_state.bb_config.default_pool;
+	bb_state.bb_config.default_pool = NULL;
+	bb_load_config(&bb_state, (char *)plugin_type); /* Remove "const" */
+	if (!bb_state.bb_config.default_pool)
+		bb_state.bb_config.default_pool = old_default_pool;
+	else
+		xfree(old_default_pool);
+	_test_config();
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	/* reconfig is the place we make sure the pointers are correct */
+	/* NOTE(review): this hash traversal runs after bb_mutex is released;
+	 * presumably safe in the reconfig context — confirm */
+	for (i = 0; i < BB_HASH_SIZE; i++) {
+		bb_alloc_t *bb_alloc = bb_state.bb_ahash[i];
+		while (bb_alloc) {
+			_set_assoc_mgr_ptrs(bb_alloc);
+			bb_alloc = bb_alloc->next;
+		}
+	}
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Pack current burst buffer state information for network transmission to
+ * user (e.g. "scontrol show burst")
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_p_state_pack(uid_t uid, Buf buffer, uint16_t protocol_version)
+{
+	uint32_t packed_recs;
+
+	/* Pack order (name, state, buffers, usage) is fixed by the
+	 * unpacking side; do not reorder */
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	packstr(bb_state.name, buffer);
+	bb_pack_state(&bb_state, buffer, protocol_version);
+	if (!(bb_state.bb_config.flags & BB_FLAG_PRIVATE_DATA))
+		uid = 0;	/* Any user can see all data */
+	packed_recs = bb_pack_bufs(uid, &bb_state, buffer, protocol_version);
+	(void) bb_pack_usage(uid, &bb_state, buffer, protocol_version);
+	if (bb_state.bb_config.debug_flag)
+		debug("%s: %s: record_count:%u",
+		      plugin_type,  __func__, packed_recs);
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Preliminary validation of a job submit request with respect to burst buffer
+ * options. Performed after setting default account + qos, but prior to
+ * establishing job ID or creating script file.
+ *
+ * job_desc IN/OUT - job request; burst_buffer string is normalized and,
+ *		     on success, shared and tres_req_cnt are updated
+ * submit_uid IN - user submitting the job
+ * Returns a SLURM errno.
+ */
+extern int bb_p_job_validate(struct job_descriptor *job_desc,
+			     uid_t submit_uid)
+{
+	uint64_t bb_size = 0;
+	int i, rc;
+
+	xassert(job_desc);
+	xassert(job_desc->tres_req_cnt);
+
+	/* Syntax check; also computes the total requested buffer size */
+	rc = _parse_bb_opts(job_desc, &bb_size, submit_uid);
+	if (rc != SLURM_SUCCESS)
+		return rc;
+
+	if ((job_desc->burst_buffer == NULL) ||
+	    (job_desc->burst_buffer[0] == '\0'))
+		return rc;
+
+	if (job_desc->array_inx)	/* Job arrays not supported */
+		return ESLURM_INVALID_ARRAY;
+
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: %s: job_user_id:%u, submit_uid:%d",
+		     plugin_type, __func__, job_desc->user_id, submit_uid);
+		info("%s: burst_buffer:%s", __func__, job_desc->burst_buffer);
+	}
+
+	if (job_desc->user_id == 0) {
+		info("%s: User root can not allocate burst buffers", __func__);
+		return EPERM;
+	}
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	/* AllowUsers list: the user must be present to proceed */
+	if (bb_state.bb_config.allow_users) {
+		bool found_user = false;
+		for (i = 0; bb_state.bb_config.allow_users[i]; i++) {
+			if (job_desc->user_id ==
+			    bb_state.bb_config.allow_users[i]) {
+				found_user = true;
+				break;
+			}
+		}
+		if (!found_user) {
+			rc = ESLURM_BURST_BUFFER_PERMISSION;
+			goto fini;
+		}
+	}
+
+	/* DenyUsers list: the user must be absent to proceed */
+	if (bb_state.bb_config.deny_users) {
+		bool found_user = false;
+		for (i = 0; bb_state.bb_config.deny_users[i]; i++) {
+			if (job_desc->user_id ==
+			    bb_state.bb_config.deny_users[i]) {
+				found_user = true;
+				break;
+			}
+		}
+		if (found_user) {
+			rc = ESLURM_BURST_BUFFER_PERMISSION;
+			goto fini;
+		}
+	}
+
+	job_desc->shared = 0;	/* Compute nodes can not be shared */
+	/* Record the requested buffer space (MB) in the job's TRES counts */
+	job_desc->tres_req_cnt[bb_state.tres_pos] = bb_size / (1024 * 1024);
+
+fini:	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return rc;
+}
+
+/* Add key=value pairs from the DataWarp-generated environment file at
+ * "file_path" to the job's supplemental environment (details->env_sup) */
+static void _update_job_env(struct job_record *job_ptr, char *file_path)
+{
+	struct stat stat_buf;
+	char *data_buf = NULL, *start, *sep;
+	int path_fd, i, inx = 0, env_cnt = 0;
+	ssize_t read_size;	/* signed so read() errors are detectable */
+
+	/* Read the DataWarp generated environment variable file */
+	path_fd = open(file_path, 0);
+	if (path_fd == -1) {
+		error("%s: open error on file %s: %m", __func__, file_path);
+		return;
+	}
+	fd_set_close_on_exec(path_fd);
+	if (fstat(path_fd, &stat_buf) == -1) {
+		error("%s: stat error on file %s: %m", __func__, file_path);
+		stat_buf.st_size = 2048;	/* guess at a size */
+	} else if (stat_buf.st_size == 0)
+		goto fini;
+	data_buf = xmalloc(stat_buf.st_size + 1);
+	while (inx < stat_buf.st_size) {
+		/* Request only the space remaining in data_buf. The original
+		 * code always requested st_size bytes (overflowing the buffer
+		 * after a partial read) and stored the result in an unsigned
+		 * size_t, making the error (-1) branch unreachable. */
+		read_size = read(path_fd, data_buf + inx,
+				 stat_buf.st_size - inx);
+		if (read_size > 0) {
+			inx += read_size;
+		} else if (read_size == 0) {	/* EOF */
+			break;
+		} else {			/* error */
+			if ((errno == EAGAIN) || (errno == EINTR))
+				continue;
+			error("%s: read error on file %s: %m", __func__,
+			      file_path);
+			break;
+		}
+	}
+	if (bb_state.bb_config.debug_flag)
+		info("%s: %s", __func__, data_buf);
+
+	/* Get count of environment variables in the file */
+	env_cnt = 0;
+	if (data_buf) {
+		for (i = 0; data_buf[i]; i++) {
+			if (data_buf[i] == '=')
+				env_cnt++;
+		}
+	}
+
+	/* Add to supplemental environment variables (in job record) */
+	if (env_cnt) {
+		job_ptr->details->env_sup =
+			xrealloc(job_ptr->details->env_sup,
+				 sizeof(char *) *
+				 (job_ptr->details->env_cnt + env_cnt));
+		start = data_buf;
+		/* Each newline-terminated line becomes one env_sup entry */
+		for (i = 0; (i < env_cnt) && start[0]; i++) {
+			sep = strchr(start, '\n');
+			if (sep)
+				sep[0] = '\0';
+			job_ptr->details->env_sup[job_ptr->details->env_cnt++] =
+				xstrdup(start);
+			if (sep)
+				start = sep + 1;
+			else
+				break;
+		}
+	}
+
+fini:	xfree(data_buf);
+	close(path_fd);
+}
+
+/*
+ * Secondary validation of a job submit request with respect to burst buffer
+ * options. Performed after establishing job ID and creating script file.
+ *
+ * NOTE: We run several DW APIs at job submit time so that we can notify the
+ * user immediately if there is some error, although that can be a relatively
+ * slow operation.
+ *
+ * job_ptr IN - job to validate
+ * err_msg OUT - optional; replaced with an error string on failure
+ * Returns a SLURM errno.
+ */
+extern int bb_p_job_validate2(struct job_record *job_ptr, char **err_msg)
+{
+	char *hash_dir = NULL, *job_dir = NULL, *script_file = NULL;
+	char *path_file = NULL, *resp_msg = NULL, **script_argv;
+	char *dw_cli_path;
+	int hash_inx, rc = SLURM_SUCCESS, status = 0;
+	char jobid_buf[32];
+	bb_job_t *bb_job;
+	DEF_TIMERS;
+
+	if ((job_ptr->burst_buffer == NULL) ||
+	    (job_ptr->burst_buffer[0] == '\0'))
+		return rc;
+
+	if (job_ptr->array_recs) {
+		if (err_msg) {
+			xfree(*err_msg);
+			xstrfmtcat(*err_msg,
+				   "%s: Burst buffers not currently "
+				   "supported for job arrays",
+				   plugin_type);
+		}
+		return ESLURM_INVALID_BURST_BUFFER_REQUEST;
+	}
+
+	/* Initialization */
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	bb_job = _get_bb_job(job_ptr);
+	if (bb_job == NULL) {
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+		return rc;
+	}
+
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: %s: %s", plugin_type, __func__,
+		     jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+	}
+	/* NOTE(review): dw_cli_path is copied but bb_run_script() below reads
+	 * bb_state.bb_config.get_sys_state directly (without the lock) —
+	 * confirm intent */
+	dw_cli_path = xstrdup(bb_state.bb_config.get_sys_state);
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	/* State files live in <state_save_loc>/hash.<job_id%10>/job.<job_id> */
+	hash_inx = job_ptr->job_id % 10;
+	xstrfmtcat(hash_dir, "%s/hash.%d", state_save_loc, hash_inx);
+	(void) mkdir(hash_dir, 0700);
+	xstrfmtcat(job_dir, "%s/job.%u", hash_dir, job_ptr->job_id);
+	(void) mkdir(job_dir, 0700);
+	xstrfmtcat(script_file, "%s/script", job_dir);
+	xstrfmtcat(path_file, "%s/pathfile", job_dir);
+	/* NOTE(review): a _build_bb_script() failure does not skip running
+	 * the scripts below — confirm that is intended */
+	if (job_ptr->batch_flag == 0)
+		rc = _build_bb_script(job_ptr, script_file);
+
+	/* Run "job_process" function, validates user script */
+	script_argv = xmalloc(sizeof(char *) * 10);	/* NULL terminated */
+	script_argv[0] = xstrdup("dw_wlm_cli");
+	script_argv[1] = xstrdup("--function");
+	script_argv[2] = xstrdup("job_process");
+	script_argv[3] = xstrdup("--job");
+	xstrfmtcat(script_argv[4], "%s", script_file);
+	START_TIMER;
+	resp_msg = bb_run_script("job_process",
+				 bb_state.bb_config.get_sys_state,
+				 script_argv, 2000, &status);
+	END_TIMER;
+	if (DELTA_TIMER > 200000)	/* 0.2 secs */
+		info("%s: job_process ran for %s", __func__, TIME_STR);
+	else if (bb_state.bb_config.debug_flag)
+		debug("%s: job_process ran for %s", __func__, TIME_STR);
+	_log_script_argv(script_argv, resp_msg);
+	if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) {
+		error("%s: job_process for job %u status:%u response:%s",
+		      __func__, job_ptr->job_id, status, resp_msg);
+		if (err_msg) {
+			xfree(*err_msg);
+			xstrfmtcat(*err_msg, "%s: %s", plugin_type, resp_msg);
+		}
+		rc = ESLURM_INVALID_BURST_BUFFER_REQUEST;
+	}
+	xfree(resp_msg);
+	_free_script_argv(script_argv);
+
+	/* Run "paths" function, get DataWarp environment variables */
+	script_argv = xmalloc(sizeof(char *) * 10);	/* NULL terminated */
+	script_argv[0] = xstrdup("dw_wlm_cli");
+	script_argv[1] = xstrdup("--function");
+	script_argv[2] = xstrdup("paths");
+	script_argv[3] = xstrdup("--job");
+	xstrfmtcat(script_argv[4], "%s", script_file);
+	script_argv[5] = xstrdup("--token");
+	xstrfmtcat(script_argv[6], "%u", job_ptr->job_id);
+	script_argv[7] = xstrdup("--pathfile");
+	script_argv[8] = xstrdup(path_file);
+	START_TIMER;
+	resp_msg = bb_run_script("paths",
+				 bb_state.bb_config.get_sys_state,
+				 script_argv, 2000, &status);
+	END_TIMER;
+	if (DELTA_TIMER > 200000)	/* 0.2 secs */
+		info("%s: paths ran for %s", __func__, TIME_STR);
+	else if (bb_state.bb_config.debug_flag)
+		debug("%s: paths ran for %s", __func__, TIME_STR);
+	_log_script_argv(script_argv, resp_msg);
+	if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) {
+		error("%s: paths for job %u status:%u response:%s",
+		      __func__, job_ptr->job_id, status, resp_msg);
+		if (err_msg) {
+			xfree(*err_msg);
+			xstrfmtcat(*err_msg, "%s: %s", plugin_type, resp_msg);
+		}
+		rc = ESLURM_INVALID_BURST_BUFFER_REQUEST;
+	} else {
+		/* On success, merge the generated pathfile into the job's
+		 * supplemental environment */
+		_update_job_env(job_ptr, path_file);
+	}
+	xfree(resp_msg);
+	_free_script_argv(script_argv);
+	xfree(path_file);
+
+	/* Clean-up */
+	if (rc != SLURM_SUCCESS) {
+		pthread_mutex_lock(&bb_state.bb_mutex);
+		bb_job_del(&bb_state, job_ptr->job_id);
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+	}
+
+	xfree(hash_dir);
+	xfree(job_dir);
+	xfree(script_file);
+	xfree(dw_cli_path);
+
+	return rc;
+}
+
+/*
+ * Fill in the tres_cnt (in MB) based off the job record
+ * NOTE: Based upon job-specific burst buffers, excludes persistent buffers
+ * IN job_ptr - job record
+ * IN/OUT tres_cnt - fill in this already allocated array with tres_cnts
+ * IN locked - if the assoc_mgr tres read locked is locked or not
+ */
+extern void bb_p_job_set_tres_cnt(struct job_record *job_ptr,
+				  uint64_t *tres_cnt,
+				  bool locked)
+{
+	bb_job_t *bb_job;
+
+	if (!tres_cnt) {
+		error("%s: No tres_cnt given when looking at job %u",
+		      __func__, job_ptr->job_id);
+		/* Previously fell through and dereferenced NULL below */
+		return;
+	}
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	if ((bb_job = _get_bb_job(job_ptr))) {
+		/* Report the job's buffer size in MB */
+		tres_cnt[bb_state.tres_pos] =
+			bb_job->total_size / (1024 * 1024);
+	}
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+}
+
+/*
+ * For a given job, return our best guess if when it might be able to start
+ *
+ * job_ptr IN - job to test
+ * RET estimated start time (defaults to "now" when no delay is expected)
+ */
+extern time_t bb_p_job_get_est_start(struct job_record *job_ptr)
+{
+	time_t est_start = time(NULL);
+	bb_job_t *bb_job;
+	char jobid_buf[32];
+	int rc;
+
+	/* No burst buffer request: no buffer-related delay */
+	if ((job_ptr->burst_buffer == NULL) ||
+	    (job_ptr->burst_buffer[0] == '\0'))
+		return est_start;
+
+	/* Job array meta record: no per-task estimate available */
+	if (job_ptr->array_recs && (job_ptr->array_task_id == NO_VAL))
+		return est_start;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	if ((bb_job = _get_bb_job(job_ptr)) == NULL) {
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+		return est_start;
+	}
+
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: %s: %s",
+		     plugin_type, __func__,
+		     jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+	}
+
+	if ((bb_job->persist_add == 0) && (bb_job->swap_size == 0) &&
+	    (bb_job->total_size == 0)) {
+		/* Only deleting or using persistent buffers */
+		if (!_test_persistent_use_ready(bb_job, job_ptr))
+			est_start += 60 * 60;	/* one hour, guess... */
+	} else if (bb_job->state == BB_STATE_PENDING) {
+		rc = _test_size_limit(job_ptr, bb_job);
+		if (rc == 0) {		/* Could start now */
+			;
+		} else if (rc == 1) {	/* Exceeds configured limits */
+			est_start += 365 * 24 * 60 * 60;
+		} else {		/* No space currently available */
+			est_start = MAX(est_start, bb_state.next_end_time);
+		}
+	} else {	/* Allocation or staging in progress */
+		est_start++;
+	}
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return est_start;
+}
+
+/*
+ * Attempt to allocate resources and begin file staging for pending jobs.
+ *
+ * job_queue IN - list of pending job records
+ * Returns a SLURM errno.
+ */
+extern int bb_p_job_try_stage_in(List job_queue)
+{
+	bb_job_queue_rec_t *job_rec;
+	List job_candidates;
+	ListIterator job_iter;
+	struct job_record *job_ptr;
+	bb_job_t *bb_job;
+	int rc;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	if (bb_state.bb_config.debug_flag)
+		info("%s: %s", plugin_type,  __func__);
+
+	/* Identify candidates to be allocated burst buffers */
+	job_candidates = list_create(_job_queue_del);
+	job_iter = list_iterator_create(job_queue);
+	while ((job_ptr = list_next(job_iter))) {
+		/* Skip jobs that are not pending, not schedulable, or have
+		 * no burst buffer request */
+		if (!IS_JOB_PENDING(job_ptr) ||
+		    (job_ptr->start_time == 0) ||
+		    (job_ptr->burst_buffer == NULL) ||
+		    (job_ptr->burst_buffer[0] == '\0'))
+			continue;
+		/* Skip job array meta records */
+		if (job_ptr->array_recs && (job_ptr->array_task_id == NO_VAL))
+			continue;
+		bb_job = _get_bb_job(job_ptr);
+		if (bb_job == NULL)
+			continue;
+		if (bb_job->state == BB_STATE_COMPLETE)
+			bb_job->state = BB_STATE_PENDING;     /* job requeued */
+		job_rec = xmalloc(sizeof(bb_job_queue_rec_t));
+		job_rec->job_ptr = job_ptr;
+		job_rec->bb_job = bb_job;
+		list_push(job_candidates, job_rec);
+	}
+	list_iterator_destroy(job_iter);
+
+	/* Sort in order of expected start time */
+	list_sort(job_candidates, bb_job_queue_sort);
+
+	bb_set_use_time(&bb_state);
+	job_iter = list_iterator_create(job_candidates);
+	while ((job_rec = list_next(job_iter))) {
+		job_ptr = job_rec->job_ptr;
+		bb_job = job_rec->bb_job;
+		if (bb_job->state >= BB_STATE_STAGING_IN)
+			continue;	/* Job was already allocated a buffer */
+
+		rc = _test_size_limit(job_ptr, bb_job);
+		if (rc == 0)		/* Could start now */
+			(void) _alloc_job_bb(job_ptr, bb_job, true);
+		else if (rc == 1)	/* Exceeds configured limits */
+			continue;
+		else			/* No space currently available */
+			break;
+	}
+	list_iterator_destroy(job_iter);
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+	FREE_NULL_LIST(job_candidates);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Determine if a job's burst buffer stage-in is complete
+ * job_ptr IN - Job to test
+ * test_only IN - If false, then attempt to allocate burst buffer if possible
+ *
+ * RET: 0 - stage-in is underway
+ *      1 - stage-in complete
+ *     -1 - stage-in not started or burst buffer in some unexpected state
+ */
+extern int bb_p_job_test_stage_in(struct job_record *job_ptr, bool test_only)
+{
+	bb_job_t *bb_job;
+	int rc = 1;
+	char jobid_buf[32];
+
+	/* No burst buffer request: nothing to stage in */
+	if ((job_ptr->burst_buffer == NULL) ||
+	    (job_ptr->burst_buffer[0] == '\0'))
+		return 1;
+
+	/* Job array meta records are not supported */
+	if (job_ptr->array_recs && (job_ptr->array_task_id == NO_VAL))
+		return -1;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: %s: %s test_only:%d",
+		     plugin_type, __func__,
+		     jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)),
+		     (int) test_only);
+	}
+	bb_job = _get_bb_job(job_ptr);
+	if (bb_job && (bb_job->state == BB_STATE_COMPLETE))
+		bb_job->state = BB_STATE_PENDING;	/* job requeued */
+	if (bb_job == NULL) {
+		rc = -1;
+	} else if (bb_job->state < BB_STATE_STAGING_IN) {
+		/* Job buffer not allocated, create now if space available */
+		rc = -1;
+		if ((test_only == false) &&
+		    (_test_size_limit(job_ptr, bb_job) == 0) &&
+		    (_alloc_job_bb(job_ptr, bb_job, false) == SLURM_SUCCESS)) {
+			if (bb_job->total_size == 0)
+				rc = 1;	/* Persistent only, space available */
+			else
+				rc = 0;	/* Stage-in job buffer now */
+		}
+	} else if (bb_job->state == BB_STATE_STAGING_IN) {
+		rc = 0;
+	} else if (bb_job->state >= BB_STATE_STAGED_IN) {
+		rc = 1;
+	} else {
+		error("%s: Unexpected burst buffer state (%d) for job %u",
+		      __func__, bb_job->state, job_ptr->job_id);
+		rc = -1;
+	}
+
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return rc;
+}
+
+/* Attempt to claim burst buffer resources.
+ * At this time, bb_g_job_test_stage_in() should have been run sucessfully AND
+ * the compute nodes selected for the job.
+ *
+ * Runs the DataWarp "pre_run" function in a detached thread; job launch is
+ * blocked (prolog_running) until that completes.
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_p_job_begin(struct job_record *job_ptr)
+{
+	char  *client_nodes_file_nid = NULL;
+	pre_run_args_t *pre_run_args;
+	char **pre_run_argv = NULL;
+	char *job_dir = NULL;
+	int hash_inx, rc = SLURM_SUCCESS;
+	bb_job_t *bb_job;
+	char jobid_buf[64];
+	pthread_attr_t pre_run_attr;
+	pthread_t pre_run_tid = 0;
+
+	if ((job_ptr->burst_buffer == NULL) ||
+	    (job_ptr->burst_buffer[0] == '\0'))
+		return SLURM_SUCCESS;
+
+	if (!job_ptr->job_resrcs || !job_ptr->job_resrcs->nodes) {
+		error("%s: %s lacks node allocation", __func__,
+		      jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+		return SLURM_ERROR;
+	}
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: %s: %s",
+		     plugin_type, __func__,
+		     jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+	}
+	bb_job = _get_bb_job(job_ptr);
+	if (!bb_job) {
+		error("%s: %s: no job record buffer for job %u",
+		      plugin_type, __func__, job_ptr->job_id);
+		xfree(job_ptr->state_desc);
+		job_ptr->state_desc =
+			xstrdup("Could not find burst buffer record");
+		job_ptr->state_reason = FAIL_BURST_BUFFER_OP;
+		_queue_teardown(job_ptr->job_id, job_ptr->user_id, true);
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+		return SLURM_ERROR;
+	}
+
+	/* Confirm that persistent burst buffers work has been completed */
+	if ((_create_bufs(job_ptr, bb_job, true) > 0)) {
+		xfree(job_ptr->state_desc);
+		job_ptr->state_desc =
+			xstrdup("Error managing persistent burst buffers");
+		job_ptr->state_reason = FAIL_BURST_BUFFER_OP;
+		_queue_teardown(job_ptr->job_id, job_ptr->user_id, true);
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+		return SLURM_ERROR;
+	}
+
+	hash_inx = job_ptr->job_id % 10;
+	xstrfmtcat(job_dir, "%s/hash.%d/job.%u", state_save_loc, hash_inx,
+		   job_ptr->job_id);
+	xstrfmtcat(client_nodes_file_nid, "%s/client_nids", job_dir);
+	bb_job->state = BB_STATE_RUNNING;
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	/* On write failure, drop the nid file argument (xfree NULLs the
+	 * pointer, so the later xfree is still safe) */
+	if (_write_nid_file(client_nodes_file_nid, job_ptr->job_resrcs->nodes,
+			    job_ptr->job_id)) {
+		xfree(client_nodes_file_nid);
+	}
+
+	pre_run_argv = xmalloc(sizeof(char *) * 10);
+	pre_run_argv[0] = xstrdup("dw_wlm_cli");
+	pre_run_argv[1] = xstrdup("--function");
+	pre_run_argv[2] = xstrdup("pre_run");
+	pre_run_argv[3] = xstrdup("--token");
+	xstrfmtcat(pre_run_argv[4], "%u", job_ptr->job_id);
+	pre_run_argv[5] = xstrdup("--job");
+	xstrfmtcat(pre_run_argv[6], "%s/script", job_dir);
+	if (client_nodes_file_nid) {
+#if defined(HAVE_NATIVE_CRAY)
+		pre_run_argv[7] = xstrdup("--nidlistfile");
+#else
+		pre_run_argv[7] = xstrdup("--nodehostnamefile");
+#endif
+		pre_run_argv[8] = xstrdup(client_nodes_file_nid);
+	}
+	pre_run_args = xmalloc(sizeof(pre_run_args_t));
+	pre_run_args->args    = pre_run_argv;
+	pre_run_args->job_id  = job_ptr->job_id;
+	pre_run_args->user_id = job_ptr->user_id;
+	if (job_ptr->details)	/* Prevent launch until "pre_run" completes */
+		job_ptr->details->prolog_running++;
+
+	slurm_attr_init(&pre_run_attr);
+	if (pthread_attr_setdetachstate(&pre_run_attr, PTHREAD_CREATE_DETACHED))
+		error("pthread_attr_setdetachstate error %m");
+	while (pthread_create(&pre_run_tid, &pre_run_attr, _start_pre_run,
+			      pre_run_args)) {
+		if (errno != EAGAIN) {
+			error("%s: pthread_create: %m", __func__);
+			/* Run in-line. Must pass pre_run_args (the argument
+			 * _start_pre_run expects); the original passed
+			 * pre_run_argv (a char **), which would be
+			 * misinterpreted and leak pre_run_args */
+			_start_pre_run(pre_run_args);
+			break;
+		}
+		usleep(100000);
+	}
+	slurm_attr_destroy(&pre_run_attr);
+
+	xfree(job_dir);
+	xfree(client_nodes_file_nid);
+	return rc;
+}
+
+/* Thread entry: run the DataWarp "pre_run" function for one job, then
+ * either release the job for launch or queue a teardown on failure.
+ * x IN - pre_run_args_t * (freed here); RET NULL */
+static void *_start_pre_run(void *x)
+{
+	/* Locks: write job */
+	slurmctld_lock_t job_write_lock = {
+		NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
+	pre_run_args_t *pre_run_args = (pre_run_args_t *) x;
+	char *resp_msg = NULL;
+	char jobid_buf[64];
+	bb_job_t *bb_job;
+	int status = 0;
+	struct job_record *job_ptr;
+	DEF_TIMERS;
+
+	/* Run the external script before taking any locks */
+	START_TIMER;
+	resp_msg = bb_run_script("dws_pre_run",
+				 bb_state.bb_config.get_sys_state,
+				 pre_run_args->args, 2000, &status);
+	END_TIMER;
+
+	lock_slurmctld(job_write_lock);
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	job_ptr = find_job_record(pre_run_args->job_id);
+	if (job_ptr) {
+		jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf));
+	} else {
+		error("%s: Could not find job record for job %u", __func__,
+		      pre_run_args->job_id);
+		snprintf(jobid_buf, sizeof(jobid_buf), "%u",
+			 pre_run_args->job_id);
+	}
+	if (DELTA_TIMER > 500000) {	/* 0.5 secs */
+		info("%s: dws_pre_run for %s ran for %s", __func__,
+		     jobid_buf, TIME_STR);
+	} else if (bb_state.bb_config.debug_flag) {
+		debug("%s: dws_pre_run for %s ran for %s", __func__,
+		      jobid_buf, TIME_STR);
+	}
+	_log_script_argv(pre_run_args->args, resp_msg);
+	/* NOTE(review): failure handling deliberately disabled below; the
+	 * "if (0)" keeps jobs launching despite the bogus exit code */
+//	if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) {
+	if (0) { // FIXME: Cray API is always returning an exit code of 1
+		time_t now = time(NULL);
+		error("%s: dws_pre_run for %s status:%u response:%s", __func__,
+		      jobid_buf, status, resp_msg);
+		if (job_ptr) {
+			xfree(job_ptr->state_desc);
+			job_ptr->state_desc =
+				xstrdup("Burst buffer pre_run error");
+			job_ptr->state_reason = FAIL_BAD_CONSTRAINTS;
+			last_job_update = now;
+			bb_job = _get_bb_job(job_ptr);
+			if (bb_job)
+				bb_job->state = BB_STATE_TEARDOWN;
+		}
+		_queue_teardown(pre_run_args->job_id, pre_run_args->user_id,
+				true);
+	} else {
+		/* Success: allow the job to launch */
+		prolog_running_decr(job_ptr);
+	}
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+	unlock_slurmctld(job_write_lock);
+
+	xfree(resp_msg);
+	_free_script_argv(pre_run_args->args);
+	xfree(pre_run_args);
+	return NULL;
+}
+
+/*
+ * Trigger a job's burst buffer stage-out to begin
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_p_job_start_stage_out(struct job_record *job_ptr)
+{
+	bb_job_t *bb_job;
+	char jobid_buf[32];
+
+	/* No burst buffer specification for this job: nothing to stage out */
+	if ((job_ptr->burst_buffer == NULL) ||
+	    (job_ptr->burst_buffer[0] == '\0'))
+		return SLURM_SUCCESS;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: %s: %s", plugin_type, __func__,
+		     jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+	}
+	bb_job = _get_bb_job(job_ptr);
+	if (!bb_job) {
+		/* No job buffers. Assuming use of persistent buffers only */
+		verbose("%s: %s bb job record not found", __func__,
+			jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+	} else if (bb_job->total_size == 0) {
+		/* No job-specific buffer space: queue teardown only */
+		bb_job->state = BB_STATE_TEARDOWN;
+		_queue_teardown(job_ptr->job_id, job_ptr->user_id, false);
+	} else if (bb_job->state < BB_STATE_STAGING_OUT) {
+		bb_job->state = BB_STATE_STAGING_OUT;
+		_queue_stage_out(job_ptr);
+	}
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Determine if a job's burst buffer stage-out is complete
+ *
+ * RET: 0 - stage-out is underway
+ *      1 - stage-out complete
+ *     -1 - fatal error
+ */
+extern int bb_p_job_test_stage_out(struct job_record *job_ptr)
+{
+	bb_job_t *bb_job;
+	int rc = -1;
+	char jobid_buf[32];
+
+	/* No burst buffer specification: treat stage-out as complete */
+	if ((job_ptr->burst_buffer == NULL) ||
+	    (job_ptr->burst_buffer[0] == '\0'))
+		return 1;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: %s: %s", plugin_type, __func__,
+		     jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+	}
+	bb_job = _get_bb_job(job_ptr);
+	if (!bb_job) {
+		/* No job buffers. Assuming use of persistent buffers only */
+		verbose("%s: %s bb job record not found", __func__,
+			jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+		rc =  1;
+	} else {
+		if (bb_job->state < BB_STATE_STAGING_OUT) {
+			/* Stage-out was never started: report fatal error */
+			rc = -1;
+		} else if (bb_job->state == BB_STATE_STAGING_OUT) {
+			rc =  0;
+		} else { /* bb_job->state > BB_STATE_STAGING_OUT) */
+			rc =  1;
+		}
+	}
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return rc;
+}
+
+/*
+ * Terminate any file staging and completely release burst buffer resources
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_p_job_cancel(struct job_record *job_ptr)
+{
+	bb_job_t *bb_job;
+	bb_alloc_t *bb_alloc;
+	char jobid_buf[32];
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: %s: %s", plugin_type, __func__,
+		     jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+	}
+
+	bb_job = _get_bb_job(job_ptr);
+	if (!bb_job || (bb_job->state == BB_STATE_PENDING)) {
+		/* Nothing to clean up */
+	} else {
+		/* Note: Persistent burst buffer actions already completed
+		 * for the job are not reversed */
+		bb_job->state = BB_STATE_TEARDOWN;
+		/* Mark any existing allocation for teardown as well */
+		bb_alloc = bb_find_alloc_rec(&bb_state, job_ptr);
+		if (bb_alloc) {
+			bb_alloc->state = BB_STATE_TEARDOWN;
+			bb_alloc->state_time = time(NULL);
+			bb_state.last_update_time = time(NULL);
+
+		}
+		_queue_teardown(job_ptr->job_id, job_ptr->user_id, true);
+	}
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/* Free a create_buf_data_t structure and all strings it owns */
+static void _free_create_args(create_buf_data_t *create_args)
+{
+	if (create_args) {
+		xfree(create_args->access);
+		xfree(create_args->job_script);
+		xfree(create_args->name);
+		xfree(create_args->type);
+		xfree(create_args);
+	}
+}
+
+/* Create/destroy persistent burst buffers
+ * job_ptr IN - job to operate upon
+ * bb_job IN - job's burst buffer data
+ * job_ready IN - if true, job is ready to run now, if false then do not
+ *                delete persistent buffers
+ * Returns count of buffer create/destroy requests which are pending */
+static int _create_bufs(struct job_record *job_ptr, bb_job_t *bb_job,
+			bool job_ready)
+{
+	pthread_attr_t create_attr;
+	pthread_t create_tid = 0;
+	create_buf_data_t *create_args;
+	bb_buf_t *buf_ptr;
+	bb_alloc_t *bb_alloc;
+	int i, hash_inx, rc = 0;
+
+	xassert(bb_job);
+	for (i = 0, buf_ptr = bb_job->buf_ptr; i < bb_job->buf_cnt;
+	     i++, buf_ptr++) {
+		if ((buf_ptr->state == BB_STATE_ALLOCATING) ||
+		    (buf_ptr->state == BB_STATE_DELETING)) {
+			/* Operation already in progress: count it as pending */
+			rc++;
+		} else if (buf_ptr->state != BB_STATE_PENDING) {
+			;	/* Nothing to do */
+		} else if (buf_ptr->create) {	/* Create the buffer */
+			bb_alloc = bb_find_name_rec(buf_ptr->name,
+						    job_ptr->user_id,
+						    &bb_state);
+			if (bb_alloc) {
+				info("Attempt by job %u to create duplicate "
+				     "persistent burst buffer named %s",
+				     job_ptr->job_id, buf_ptr->name);
+				continue;
+			}
+			rc++;
+			/* Charge the buffer size against the user's limit
+			 * before the asynchronous create starts */
+			bb_limit_add(job_ptr->user_id,
+				     buf_ptr->size, &bb_state);
+			bb_job->state = BB_STATE_ALLOCATING;
+			buf_ptr->state = BB_STATE_ALLOCATING;
+			create_args = xmalloc(sizeof(create_buf_data_t));
+			create_args->access = xstrdup(buf_ptr->access);
+			create_args->job_id = job_ptr->job_id;
+			create_args->name = xstrdup(buf_ptr->name);
+			create_args->size = buf_ptr->size;
+			create_args->type = xstrdup(buf_ptr->type);
+			create_args->user_id = job_ptr->user_id;
+			/* Spawn a detached thread; on persistent pthread
+			 * failure run the create in-line */
+			slurm_attr_init(&create_attr);
+			if (pthread_attr_setdetachstate(
+				    &create_attr, PTHREAD_CREATE_DETACHED))
+				error("pthread_attr_setdetachstate error %m");
+			while (pthread_create(&create_tid, &create_attr,
+					      _create_persistent,
+					      create_args)) {
+				if (errno != EAGAIN) {
+					error("%s: pthread_create: %m",
+					      __func__);
+					_create_persistent(create_args);
+					break;
+				}
+				usleep(100000);
+			}
+			slurm_attr_destroy(&create_attr);
+		} else if (buf_ptr->destroy && job_ready) {
+			/* Delete the buffer */
+			bb_alloc = bb_find_name_rec(buf_ptr->name,
+						    job_ptr->user_id,
+						    &bb_state);
+			if (!bb_alloc) {
+				/* Ignore request if named buffer not found */
+				info("%s: destroy_persistent: No burst buffer "
+				     "with name '%s' found for job %u",
+				     plugin_type, buf_ptr->name,
+				     job_ptr->job_id);
+				continue;
+			}
+			rc++;
+			if ((bb_alloc->user_id != job_ptr->user_id) &&
+			    !validate_super_user(job_ptr->user_id)) {
+				info("%s: destroy_persistent: Attempt by "
+				     "user %u job %u to destroy buffer %s "
+				     "owned by user %u",
+				     plugin_type, job_ptr->user_id,
+				     job_ptr->job_id, buf_ptr->name,
+				     bb_alloc->user_id);
+				job_ptr->state_reason = FAIL_BURST_BUFFER_OP;
+				xstrfmtcat(job_ptr->state_desc,
+					   "%s: Delete buffer %s permission "
+					   "denied",
+					   plugin_type, buf_ptr->name);
+				job_ptr->priority = 0;  /* Hold job */
+				continue;
+			}
+
+			bb_job->state = BB_STATE_DELETING;
+			buf_ptr->state = BB_STATE_DELETING;
+			create_args = xmalloc(sizeof(create_buf_data_t));
+			create_args->hurry = buf_ptr->hurry;
+			create_args->job_id = job_ptr->job_id;
+			hash_inx = job_ptr->job_id % 10;
+			xstrfmtcat(create_args->job_script,
+				   "%s/hash.%d/job.%u/script",
+				   state_save_loc, hash_inx, job_ptr->job_id);
+			create_args->name = xstrdup(buf_ptr->name);
+			create_args->user_id = job_ptr->user_id;
+			slurm_attr_init(&create_attr);
+			if (pthread_attr_setdetachstate(
+				    &create_attr, PTHREAD_CREATE_DETACHED))
+				error("pthread_attr_setdetachstate error %m");
+			while (pthread_create(&create_tid, &create_attr,
+					      _destroy_persistent,
+					      create_args)) {
+				if (errno != EAGAIN) {
+					error("%s: pthread_create: %m",
+					      __func__);
+					_destroy_persistent(create_args);
+					break;
+				}
+				usleep(100000);
+			}
+			slurm_attr_destroy(&create_attr);
+		} else if (buf_ptr->destroy) {
+			rc++;
+		} else {
+			/* Buffer used, not created or destroyed.
+			 * Just check for existence */
+			bb_alloc = bb_find_name_rec(buf_ptr->name,
+						    job_ptr->user_id,
+						    &bb_state);
+			if (bb_alloc && (bb_alloc->state == BB_STATE_ALLOCATED))
+				bb_job->state = BB_STATE_ALLOCATED;
+			else
+				rc++;
+		}
+	}
+
+	return rc;
+}
+
+/* Test for the existence of persistent burst buffers to be used (but not
+ * created) by this job. Return TRUE if they are all ready */
+static bool _test_persistent_use_ready(bb_job_t *bb_job,
+				       struct job_record *job_ptr)
+{
+	int i, not_ready_cnt = 0;
+	bb_alloc_t *bb_alloc;
+	bb_buf_t *buf_ptr;
+
+	xassert(bb_job);
+	for (i = 0, buf_ptr = bb_job->buf_ptr; i < bb_job->buf_cnt;
+	     i++, buf_ptr++) {
+		/* Only "use" buffers matter; create/destroy handled elsewhere */
+		if (buf_ptr->create || buf_ptr->destroy)
+			continue;
+		bb_alloc = bb_find_name_rec(buf_ptr->name, job_ptr->user_id,
+					    &bb_state);
+		if (bb_alloc && (bb_alloc->state == BB_STATE_ALLOCATED)) {
+			bb_job->state = BB_STATE_ALLOCATED;
+		} else {
+			/* One missing buffer is enough to stop looking */
+			not_ready_cnt++;
+			break;
+		}
+	}
+	if (not_ready_cnt != 0)
+		return false;
+	return true;
+}
+
+/* Update the state of the named buffer in a job's burst buffer records,
+ * releasing any limit charge if a create/delete was backed out, and
+ * advance the overall job buffer state when no buffer remains active */
+static void _reset_buf_state(uint32_t user_id, uint32_t job_id, char *name,
+			     int new_state)
+{
+	bb_buf_t *buf_ptr;
+	bb_job_t *bb_job;
+	int i, old_state;
+	bool active_buf = false;
+
+	bb_job = bb_job_find(&bb_state, job_id);
+	if (!bb_job) {
+		error("%s: Could not find job record for %u", __func__, job_id);
+		return;
+	}
+
+	/* Update the buffer's state in job record */
+	for (i = 0, buf_ptr = bb_job->buf_ptr; i < bb_job->buf_cnt;
+	     i++, buf_ptr++) {
+		if (strcmp(name, buf_ptr->name))
+			continue;
+		old_state = buf_ptr->state;
+		buf_ptr->state = new_state;
+		/* Operation backed out: release the earlier limit charge */
+		if ((old_state == BB_STATE_ALLOCATING) &&
+		    (new_state == BB_STATE_PENDING))
+			bb_limit_rem(user_id, buf_ptr->size, &bb_state);
+		if ((old_state == BB_STATE_DELETING) &&
+		    (new_state == BB_STATE_PENDING))
+			bb_limit_rem(user_id, buf_ptr->size, &bb_state);
+		break;
+	}
+
+	/* Scan ALL of the job's buffers for one still in an active state.
+	 * (The "break" was previously unconditional, so only the first
+	 * buffer was ever examined.) */
+	for (i = 0, buf_ptr = bb_job->buf_ptr; i < bb_job->buf_cnt;
+	     i++, buf_ptr++) {
+		old_state = buf_ptr->state;
+		if ((old_state == BB_STATE_PENDING)    ||
+		    (old_state == BB_STATE_ALLOCATING) ||
+		    (old_state == BB_STATE_DELETING)   ||
+		    (old_state == BB_STATE_TEARDOWN)) {
+			active_buf = true;
+			break;
+		}
+	}
+	if (!active_buf) {
+		if (bb_job->state == BB_STATE_ALLOCATING)
+			bb_job->state = BB_STATE_ALLOCATED;
+		else if (bb_job->state == BB_STATE_DELETING)
+			bb_job->state = BB_STATE_DELETED;
+		queue_job_scheduler();
+	}
+}
+
+/* Create a persistent burst buffer based upon user specifications.
+ * Pthread function; the argument is a create_buf_data_t *, freed here. */
+static void *_create_persistent(void *x)
+{
+	slurmctld_lock_t job_write_lock =
+		{ NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
+	create_buf_data_t *create_args = (create_buf_data_t *) x;
+	struct job_record *job_ptr;
+	bb_alloc_t *bb_alloc;
+	char **script_argv, *resp_msg;
+	int i, status = 0;
+	DEF_TIMERS;
+
+	script_argv = xmalloc(sizeof(char *) * 20);	/* NULL terminated */
+	script_argv[0] = xstrdup("dw_wlm_cli");
+	script_argv[1] = xstrdup("--function");
+	script_argv[2] = xstrdup("create_persistent");
+	script_argv[3] = xstrdup("-c");
+	script_argv[4] = xstrdup("CLI");
+	script_argv[5] = xstrdup("-t");		/* name */
+	script_argv[6] = xstrdup(create_args->name);
+	script_argv[7] = xstrdup("-u");		/* user iD */
+	xstrfmtcat(script_argv[8], "%u", create_args->user_id);
+	script_argv[9] = xstrdup("-C");		/* configuration */
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	xstrfmtcat(script_argv[10], "%s:%"PRIu64"",
+		   bb_state.bb_config.default_pool, create_args->size);
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+	i = 11;
+	if (create_args->access) {
+		script_argv[i++] = xstrdup("-a");
+		script_argv[i++] = xstrdup(create_args->access);
+	}
+	if (create_args->type) {
+		script_argv[i++] = xstrdup("-T");
+		script_argv[i++] = xstrdup(create_args->type);
+	}
+	/* NOTE: There is an optional group ID parameter available and
+	 * currently not used by Slurm */
+
+	START_TIMER;
+	resp_msg = bb_run_script("create_persistent",
+				 bb_state.bb_config.get_sys_state,
+				 script_argv, 3000, &status);
+	_log_script_argv(script_argv, resp_msg);
+	_free_script_argv(script_argv);
+	END_TIMER;
+	if (bb_state.bb_config.debug_flag)
+		debug("%s: ran for %s", __func__, TIME_STR);
+//	if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) {
+	if (0) { //FIXME: Cray bug: API exit code NOT 0 on success as documented
+		error("%s: For JobID=%u Name=%s status:%u response:%s",
+		      __func__, create_args->job_id, create_args->name,
+		      status, resp_msg);
+		lock_slurmctld(job_write_lock);
+		job_ptr = find_job_record(create_args->job_id);
+		if (!job_ptr) {
+			error("%s: unable to find job record for job %u",
+			      __func__, create_args->job_id);
+		} else {
+			job_ptr->state_reason = FAIL_BAD_CONSTRAINTS;
+			job_ptr->priority = 0;
+			xfree(job_ptr->state_desc);
+			xstrfmtcat(job_ptr->state_desc, "%s: %s: %s",
+				   plugin_type, __func__, resp_msg);
+			/* NOTE(review): xstrfmtcat() copied resp_msg, so
+			 * NULLing it here (rather than freeing) leaks the
+			 * buffer; harmless while this path is disabled */
+			resp_msg = NULL;
+		}
+		pthread_mutex_lock(&bb_state.bb_mutex);
+		_reset_buf_state(create_args->user_id,
+				 create_args->job_id,
+				 create_args->name, BB_STATE_PENDING);
+		bb_state.last_update_time = time(NULL);
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+		unlock_slurmctld(job_write_lock);
+	} else if (resp_msg && strstr(resp_msg, "created")) {
+		assoc_mgr_lock_t assoc_locks =
+			{ READ_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+			  NO_LOCK, NO_LOCK, NO_LOCK };
+		lock_slurmctld(job_write_lock);
+		job_ptr = find_job_record(create_args->job_id);
+		if (!job_ptr) {
+			error("%s: unable to find job record for job %u",
+			      __func__, create_args->job_id);
+		}
+		pthread_mutex_lock(&bb_state.bb_mutex);
+		_reset_buf_state(create_args->user_id,
+				 create_args->job_id, create_args->name,
+				 BB_STATE_ALLOCATED);
+		bb_alloc = bb_alloc_name_rec(&bb_state, create_args->name,
+					     create_args->user_id);
+		bb_alloc->size = create_args->size;
+		assoc_mgr_lock(&assoc_locks);
+		if (job_ptr) {
+			bb_alloc->account   = xstrdup(job_ptr->account);
+			if (job_ptr->assoc_ptr) {
+				/* Only add the direct association id
+				 * here, we don't need to keep track
+				 * of the tree.
+				 */
+				slurmdb_assoc_rec_t *assoc = job_ptr->assoc_ptr;
+				bb_alloc->assoc_ptr = assoc;
+				xfree(bb_alloc->assocs);
+				bb_alloc->assocs = xstrdup_printf(
+					",%u,", assoc->id);
+			}
+			if (job_ptr->qos_ptr) {
+				slurmdb_qos_rec_t *qos_ptr =
+					(slurmdb_qos_rec_t *)job_ptr->qos_ptr;
+				bb_alloc->qos_ptr = qos_ptr;
+				bb_alloc->qos = xstrdup(qos_ptr->name);
+			}
+
+			if (job_ptr->part_ptr) {
+				bb_alloc->partition =
+					xstrdup(job_ptr->part_ptr->name);
+			}
+		}
+		if (bb_state.bb_config.flags & BB_FLAG_EMULATE_CRAY) {
+			bb_alloc->create_time = time(NULL);
+			bb_alloc->id = ++last_persistent_id;
+		} else {
+			/* Look up the new buffer's session to record its
+			 * real creation time and ID */
+			bb_sessions_t *sessions;
+			int  num_sessions = 0;
+			sessions = _bb_get_sessions(&num_sessions, &bb_state);
+			for (i = 0; i < num_sessions; i++) {
+				if (xstrcmp(sessions[i].token,
+					    create_args->name))
+					continue;
+				bb_alloc->create_time = sessions[i].created;
+				bb_alloc->id = sessions[i].id;
+				break;
+			}
+			_bb_free_sessions(sessions, num_sessions);
+		}
+		(void) bb_post_persist_create(job_ptr, bb_alloc, &bb_state);
+		bb_state.last_update_time = time(NULL);
+		assoc_mgr_unlock(&assoc_locks);
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+		unlock_slurmctld(job_write_lock);
+	}
+	xfree(resp_msg);
+	_free_create_args(create_args);
+	return NULL;
+}
+
+/* Destroy a persistent burst buffer.
+ * Pthread function; the argument is a create_buf_data_t *, freed here. */
+static void *_destroy_persistent(void *x)
+{
+	slurmctld_lock_t job_write_lock =
+		{ NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
+	create_buf_data_t *destroy_args = (create_buf_data_t *) x;
+	struct job_record *job_ptr;
+	bb_alloc_t *bb_alloc;
+	char **script_argv, *resp_msg;
+	int status = 0;
+	DEF_TIMERS;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	bb_alloc = bb_find_name_rec(destroy_args->name, destroy_args->user_id,
+				    &bb_state);
+	if (!bb_alloc) {
+		info("%s: destroy_persistent: No burst buffer with name "
+		     "'%s' found for job %u",
+		     plugin_type, destroy_args->name, destroy_args->job_id);
+	}
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	script_argv = xmalloc(sizeof(char *) * 10);	/* NULL terminated */
+	script_argv[0] = xstrdup("dw_wlm_cli");
+	script_argv[1] = xstrdup("--function");
+	script_argv[2] = xstrdup("teardown");
+	script_argv[3] = xstrdup("--token");	/* name */
+	script_argv[4] = xstrdup(destroy_args->name);
+	script_argv[5] = xstrdup("--job");	/* script */
+	script_argv[6] = xstrdup(destroy_args->job_script);
+	if (destroy_args->hurry)
+		script_argv[7] = xstrdup("--hurry");
+
+	START_TIMER;
+	resp_msg = bb_run_script("destroy_persistent",
+				 bb_state.bb_config.get_sys_state,
+				 script_argv, 3000, &status);
+	_log_script_argv(script_argv, resp_msg);
+	_free_script_argv(script_argv);
+	END_TIMER;
+	if (bb_state.bb_config.debug_flag)
+		debug("%s: destroy_persistent ran for %s", __func__, TIME_STR);
+	if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) {
+		error("%s: destroy_persistent for JobID=%u Name=%s "
+		      "status:%u response:%s",
+		      __func__, destroy_args->job_id, destroy_args->name,
+		      status, resp_msg);
+		lock_slurmctld(job_write_lock);
+		job_ptr = find_job_record(destroy_args->job_id);
+		if (!job_ptr) {
+			error("%s: unable to find job record for job %u",
+			      __func__, destroy_args->job_id);
+		} else {
+			job_ptr->state_reason = FAIL_BAD_CONSTRAINTS;
+			xfree(job_ptr->state_desc);
+			/* Build the state description while resp_msg is
+			 * still valid; previously resp_msg was NULLed
+			 * before being used as the format argument,
+			 * yielding "(null)" in the message. resp_msg is
+			 * freed at the end of this function. */
+			xstrfmtcat(job_ptr->state_desc, "%s: %s: %s",
+				   plugin_type, __func__, resp_msg);
+		}
+		pthread_mutex_lock(&bb_state.bb_mutex);
+		_reset_buf_state(destroy_args->user_id,
+				 destroy_args->job_id, destroy_args->name,
+				 BB_STATE_PENDING);
+		bb_state.last_update_time = time(NULL);
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+		unlock_slurmctld(job_write_lock);
+	} else {
+		assoc_mgr_lock_t assoc_locks =
+			{ READ_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+			  NO_LOCK, NO_LOCK, NO_LOCK };
+		pthread_mutex_lock(&bb_state.bb_mutex);
+		_reset_buf_state(destroy_args->user_id,
+				 destroy_args->job_id, destroy_args->name,
+				 BB_STATE_DELETED);
+
+		/* Modify internal buffer record for purging */
+		if (bb_alloc) {
+			bb_alloc->state = BB_STATE_COMPLETE;
+			bb_alloc->job_id = destroy_args->job_id;
+			bb_alloc->state_time = time(NULL);
+			bb_limit_rem(bb_alloc->user_id,
+				     bb_alloc->size, &bb_state);
+
+			assoc_mgr_lock(&assoc_locks);
+			(void) bb_post_persist_delete(bb_alloc, &bb_state);
+			assoc_mgr_unlock(&assoc_locks);
+
+			(void) bb_free_alloc_rec(&bb_state, bb_alloc);
+		}
+		bb_state.last_update_time = time(NULL);
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+	}
+	xfree(resp_msg);
+	_free_create_args(destroy_args);
+	return NULL;
+}
+
+/* _bb_get_configs()
+ *
+ * Handle the JSON stream with configuration info (instance use details).
+ * Returns an xmalloc'ed array of *num_ent entries (NULL on failure);
+ * caller must release it with _bb_free_configs(). */
+static bb_configs_t *
+_bb_get_configs(int *num_ent, bb_state_t *state_ptr)
+{
+	bb_configs_t *ents = NULL;
+	json_object *j;
+	json_object_iter iter;
+	int status = 0;
+	DEF_TIMERS;
+	char *resp_msg;
+	char **script_argv;
+
+	script_argv = xmalloc(sizeof(char *) * 10);	/* NULL terminated */
+	script_argv[0] = xstrdup("dw_wlm_cli");
+	script_argv[1] = xstrdup("--function");
+	script_argv[2] = xstrdup("show_configurations");
+
+	START_TIMER;
+	resp_msg = bb_run_script("show_configurations",
+				 state_ptr->bb_config.get_sys_state,
+				 script_argv, 3000, &status);
+	END_TIMER;
+	if (bb_state.bb_config.debug_flag)
+		debug("%s: show_configurations ran for %s", __func__, TIME_STR);
+	_log_script_argv(script_argv, resp_msg);
+	_free_script_argv(script_argv);
+//FIXME: Cray API returning error if no configurations
+//	if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) {
+	if (0) {
+		error("%s: show_configurations status:%u response:%s",
+		      __func__, status, resp_msg);
+	}
+	if (resp_msg == NULL) {
+		info("%s: %s returned no configurations",
+		     __func__, state_ptr->bb_config.get_sys_state);
+		return ents;
+	}
+
+
+	/* Convert Python-style output to JSON before parsing */
+	_python2json(resp_msg);
+	j = json_tokener_parse(resp_msg);
+	if (j == NULL) {
+		error("%s: json parser failed on %s", __func__, resp_msg);
+		xfree(resp_msg);
+		return ents;
+	}
+	xfree(resp_msg);
+
+	json_object_object_foreachC(j, iter) {
+		ents = _json_parse_configs_array(j, iter.key, num_ent);
+	}
+	json_object_put(j);	/* Frees json memory */
+
+	return ents;
+}
+
+/* _bb_get_instances()
+ *
+ * Handle the JSON stream with instance info (resource reservations).
+ * Returns an xmalloc'ed array of *num_ent entries (NULL on failure);
+ * caller must release it with _bb_free_instances(). */
+static bb_instances_t *
+_bb_get_instances(int *num_ent, bb_state_t *state_ptr)
+{
+	bb_instances_t *ents = NULL;
+	json_object *j;
+	json_object_iter iter;
+	int status = 0;
+	DEF_TIMERS;
+	char *resp_msg;
+	char **script_argv;
+
+	script_argv = xmalloc(sizeof(char *) * 10);	/* NULL terminated */
+	script_argv[0] = xstrdup("dw_wlm_cli");
+	script_argv[1] = xstrdup("--function");
+	script_argv[2] = xstrdup("show_instances");
+
+	START_TIMER;
+	resp_msg = bb_run_script("show_instances",
+				 state_ptr->bb_config.get_sys_state,
+				 script_argv, 3000, &status);
+	END_TIMER;
+	if (bb_state.bb_config.debug_flag)
+		debug("%s: show_instances ran for %s", __func__, TIME_STR);
+	_log_script_argv(script_argv, resp_msg);
+	_free_script_argv(script_argv);
+//FIXME: Cray API returning error if no instances
+//	if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) {
+	if (0) {
+		error("%s: show_instances status:%u response:%s",
+		      __func__, status, resp_msg);
+	}
+	if (resp_msg == NULL) {
+		info("%s: %s returned no instances",
+		     __func__, state_ptr->bb_config.get_sys_state);
+		return ents;
+	}
+
+	/* Convert Python-style output to JSON before parsing */
+	_python2json(resp_msg);
+	j = json_tokener_parse(resp_msg);
+	if (j == NULL) {
+		error("%s: json parser failed on %s", __func__, resp_msg);
+		xfree(resp_msg);
+		return ents;
+	}
+	xfree(resp_msg);
+
+	json_object_object_foreachC(j, iter) {
+		ents = _json_parse_instances_array(j, iter.key, num_ent);
+	}
+	json_object_put(j);	/* Frees json memory */
+
+	return ents;
+}
+
+/* _bb_get_pools()
+ *
+ * Handle the JSON stream with resource pool info (available resource type).
+ * Returns an xmalloc'ed array of *num_ent entries (NULL on failure);
+ * caller must release it with _bb_free_pools(). */
+static bb_pools_t *
+_bb_get_pools(int *num_ent, bb_state_t *state_ptr)
+{
+	bb_pools_t *ents = NULL;
+	json_object *j;
+	json_object_iter iter;
+	int status = 0;
+	DEF_TIMERS;
+	char *resp_msg;
+	char **script_argv;
+
+	script_argv = xmalloc(sizeof(char *) * 10);	/* NULL terminated */
+	script_argv[0] = xstrdup("dw_wlm_cli");
+	script_argv[1] = xstrdup("--function");
+	script_argv[2] = xstrdup("pools");
+
+	START_TIMER;
+	resp_msg = bb_run_script("pools",
+				 state_ptr->bb_config.get_sys_state,
+				 script_argv, 3000, &status);
+	END_TIMER;
+	if (bb_state.bb_config.debug_flag) {
+		/* Only log pools data if different to limit volume of logs */
+		static uint32_t last_csum = 0;
+		uint32_t i, resp_csum = 0;
+		debug("%s: pools ran for %s", __func__, TIME_STR);
+		/* Guard against a NULL response; the NULL check below
+		 * runs only after this block */
+		for (i = 0; resp_msg && resp_msg[i]; i++)
+			resp_csum += ((i * resp_msg[i]) % 1000000);
+		if (last_csum != resp_csum)
+			_log_script_argv(script_argv, resp_msg);
+		last_csum = resp_csum;
+	}
+	_free_script_argv(script_argv);
+	if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) {
+		error("%s: pools status:%u response:%s",
+		      __func__, status, resp_msg);
+	}
+	if (resp_msg == NULL) {
+		error("%s: %s returned no pools",
+		      __func__, state_ptr->bb_config.get_sys_state);
+		return ents;
+	}
+
+	/* Convert Python-style output to JSON before parsing */
+	_python2json(resp_msg);
+	j = json_tokener_parse(resp_msg);
+	if (j == NULL) {
+		error("%s: json parser failed on %s", __func__, resp_msg);
+		xfree(resp_msg);
+		return ents;
+	}
+	xfree(resp_msg);
+
+	json_object_object_foreachC(j, iter) {
+		ents = _json_parse_pools_array(j, iter.key, num_ent);
+	}
+	json_object_put(j);	/* Frees json memory */
+
+	return ents;
+}
+
+/* Handle the JSON stream with session info. Returns an xmalloc'ed array
+ * of *num_ent entries (NULL on failure); caller must release it with
+ * _bb_free_sessions(). */
+static bb_sessions_t *
+_bb_get_sessions(int *num_ent, bb_state_t *state_ptr)
+{
+	bb_sessions_t *ents = NULL;
+	json_object *j;
+	json_object_iter iter;
+	int status = 0;
+	DEF_TIMERS;
+	char *resp_msg;
+	char **script_argv;
+
+	script_argv = xmalloc(sizeof(char *) * 10);	/* NULL terminated */
+	script_argv[0] = xstrdup("dw_wlm_cli");
+	script_argv[1] = xstrdup("--function");
+	script_argv[2] = xstrdup("show_sessions");
+
+	START_TIMER;
+	resp_msg = bb_run_script("show_sessions",
+				 state_ptr->bb_config.get_sys_state,
+				 script_argv, 3000, &status);
+	END_TIMER;
+	if (bb_state.bb_config.debug_flag)
+		debug("%s: show_sessions ran for %s", __func__, TIME_STR);
+	_log_script_argv(script_argv, resp_msg);
+	_free_script_argv(script_argv);
+//FIXME: Cray API returning error if no sessions
+//	if (!WIFEXITED(status) || (WEXITSTATUS(status) != 0)) {
+	if (0) {
+		error("%s: show_sessions status:%u response:%s",
+		      __func__, status, resp_msg);
+	}
+	if (resp_msg == NULL) {
+		info("%s: %s returned no sessions",
+		     __func__, state_ptr->bb_config.get_sys_state);
+		/* NOTE: script_argv was already freed above; the second
+		 * _free_script_argv() call here was a double free */
+		return ents;
+	}
+
+	/* Convert Python-style output to JSON before parsing */
+	_python2json(resp_msg);
+	j = json_tokener_parse(resp_msg);
+	if (j == NULL) {
+		error("%s: json parser failed on %s", __func__, resp_msg);
+		xfree(resp_msg);
+		return ents;
+	}
+	xfree(resp_msg);
+
+	json_object_object_foreachC(j, iter) {
+		ents = _json_parse_sessions_array(j, iter.key, num_ent);
+	}
+	json_object_put(j);	/* Frees json memory */
+
+	return ents;
+}
+
+/* _bb_free_configs()
+ * Free a configurations array returned by _bb_get_configs() */
+static void
+_bb_free_configs(bb_configs_t *ents, int num_ent)
+{
+	xfree(ents);
+}
+
+/* _bb_free_instances()
+ * Free an instances array returned by _bb_get_instances(),
+ * including each entry's label string */
+static void
+_bb_free_instances(bb_instances_t *ents, int num_ent)
+{
+	int i;
+
+	for (i = 0; i < num_ent; i++) {
+		xfree(ents[i].label);
+	}
+
+	xfree(ents);
+}
+
+/* _bb_free_pools()
+ * Free a pools array returned by _bb_get_pools(),
+ * including each entry's id and units strings */
+static void
+_bb_free_pools(bb_pools_t *ents, int num_ent)
+{
+	int i;
+
+	for (i = 0; i < num_ent; i++) {
+		xfree(ents[i].id);
+		xfree(ents[i].units);
+	}
+
+	xfree(ents);
+}
+
+/* _bb_free_sessions()
+ * Free a sessions array returned by _bb_get_sessions(),
+ * including each entry's token string */
+static void
+_bb_free_sessions(bb_sessions_t *ents, int num_ent)
+{
+	int i;
+
+	for (i = 0; i < num_ent; i++) {
+		xfree(ents[i].token);
+	}
+
+	xfree(ents);
+}
+
+/* _json_parse_configs_array()
+ * Parse the JSON array found under "key" into a newly xmalloc'ed array
+ * of bb_configs_t and set *num to the element count */
+static bb_configs_t *
+_json_parse_configs_array(json_object *jobj, char *key, int *num)
+{
+	json_object *jarray;
+	int i;
+	json_object *jvalue;
+	bb_configs_t *ents;
+
+	jarray = jobj;
+	json_object_object_get_ex(jobj, key, &jarray);
+
+	*num = json_object_array_length(jarray);
+	ents = xmalloc(*num * sizeof(bb_configs_t));
+
+	for (i = 0; i < *num; i++) {
+		jvalue = json_object_array_get_idx(jarray, i);
+		_json_parse_configs_object(jvalue, &ents[i]);
+	}
+
+	return ents;
+}
+
+/* _json_parse_instances_array()
+ * Parse the JSON array found under "key" into a newly xmalloc'ed array
+ * of bb_instances_t and set *num to the element count */
+static bb_instances_t *
+_json_parse_instances_array(json_object *jobj, char *key, int *num)
+{
+	json_object *jarray;
+	int i;
+	json_object *jvalue;
+	bb_instances_t *ents;
+
+	jarray = jobj;
+	json_object_object_get_ex(jobj, key, &jarray);
+
+	*num = json_object_array_length(jarray);
+	ents = xmalloc(*num * sizeof(bb_instances_t));
+
+	for (i = 0; i < *num; i++) {
+		jvalue = json_object_array_get_idx(jarray, i);
+		_json_parse_instances_object(jvalue, &ents[i]);
+	}
+
+	return ents;
+}
+
+/* _json_parse_pools_array()
+ * Parse the JSON array found under "key" into a newly xmalloc'ed array
+ * of bb_pools_t and set *num to the element count */
+static bb_pools_t *
+_json_parse_pools_array(json_object *jobj, char *key, int *num)
+{
+	json_object *jarray;
+	int i;
+	json_object *jvalue;
+	bb_pools_t *ents;
+
+	jarray = jobj;
+	json_object_object_get_ex(jobj, key, &jarray);
+
+	*num = json_object_array_length(jarray);
+	ents = xmalloc(*num * sizeof(bb_pools_t));
+
+	for (i = 0; i < *num; i++) {
+		jvalue = json_object_array_get_idx(jarray, i);
+		_json_parse_pools_object(jvalue, &ents[i]);
+	}
+
+	return ents;
+}
+
+/* _json_parse_sessions_array()
+ * Parse the JSON array found under "key" into a newly xmalloc'ed array
+ * of bb_sessions_t and set *num to the element count */
+static bb_sessions_t *
+_json_parse_sessions_array(json_object *jobj, char *key, int *num)
+{
+	json_object *jarray;
+	int i;
+	json_object *jvalue;
+	bb_sessions_t *ents;
+
+	jarray = jobj;
+	json_object_object_get_ex(jobj, key, &jarray);
+
+	*num = json_object_array_length(jarray);
+	/* Allocate using the sessions element size; the original used
+	 * sizeof(bb_pools_t) by copy/paste error */
+	ents = xmalloc(*num * sizeof(bb_sessions_t));
+
+	for (i = 0; i < *num; i++) {
+		jvalue = json_object_array_get_idx(jarray, i);
+		_json_parse_sessions_object(jvalue, &ents[i]);
+	}
+
+	return ents;
+}
+
+/* Parse "links" object in the "configuration" object */
+static void
+_parse_config_links(json_object *instance, bb_configs_t *ent)
+{
+	enum json_type type;
+	struct json_object_iter iter;
+	int64_t x;	/* was int; avoid truncating json_object_get_int64() */
+
+	json_object_object_foreachC(instance, iter) {
+		type = json_object_get_type(iter.val);
+		switch (type) {
+		case json_type_int:
+			x = json_object_get_int64(iter.val);
+			if (!strcmp(iter.key, "instance"))
+				ent->instance = x;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/* _json_parse_configs_object()
+ * Fill in one bb_configs_t from a JSON "configuration" object:
+ * the "id" field and the nested "links" object */
+static void
+_json_parse_configs_object(json_object *jobj, bb_configs_t *ent)
+{
+	enum json_type type;
+	struct json_object_iter iter;
+	int64_t x;
+
+	json_object_object_foreachC(jobj, iter) {
+		type = json_object_get_type(iter.val);
+		switch (type) {
+		case json_type_object:
+			if (strcmp(iter.key, "links") == 0)
+				_parse_config_links(iter.val, ent);
+			break;
+		case json_type_int:
+			x = json_object_get_int64(iter.val);
+			if (strcmp(iter.key, "id") == 0) {
+				ent->id = x;
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/* Parse "capacity" object in the "instance" object */
+static void
+_parse_instance_capacity(json_object *instance, bb_instances_t *ent)
+{
+	enum json_type type;
+	struct json_object_iter iter;
+	int64_t x;	/* was int; byte counts over 2GiB would truncate */
+
+	json_object_object_foreachC(instance, iter) {
+		type = json_object_get_type(iter.val);
+		switch (type) {
+		case json_type_int:
+			x = json_object_get_int64(iter.val);
+			if (!strcmp(iter.key, "bytes"))
+				ent->bytes = x;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/* _json_parse_instances_object()
+ * Fill in one bb_instances_t from a JSON "instance" object:
+ * "id", "label" (xstrdup'ed) and the nested "capacity" object */
+static void
+_json_parse_instances_object(json_object *jobj, bb_instances_t *ent)
+{
+	enum json_type type;
+	struct json_object_iter iter;
+	int64_t x;
+	const char *p;
+
+	json_object_object_foreachC(jobj, iter) {
+		type = json_object_get_type(iter.val);
+		switch (type) {
+		case json_type_object:
+			if (strcmp(iter.key, "capacity") == 0)
+				_parse_instance_capacity(iter.val, ent);
+			break;
+		case json_type_int:
+			x = json_object_get_int64(iter.val);
+			if (strcmp(iter.key, "id") == 0) {
+				ent->id = x;
+			}
+			break;
+		case json_type_string:
+			p = json_object_get_string(iter.val);
+			if (strcmp(iter.key, "label") == 0) {
+				ent->label = xstrdup(p);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/* _json_parse_pools_object()
+ * Fill in one bb_pools_t from a JSON "pool" object: integer fields
+ * "granularity", "quantity", "free" plus strings "id" and "units" */
+static void
+_json_parse_pools_object(json_object *jobj, bb_pools_t *ent)
+{
+	enum json_type type;
+	struct json_object_iter iter;
+	int64_t x;
+	const char *p;
+
+	json_object_object_foreachC(jobj, iter) {
+		type = json_object_get_type(iter.val);
+		switch (type) {
+		case json_type_int:
+			x = json_object_get_int64(iter.val);
+			if (strcmp(iter.key, "granularity") == 0) {
+				ent->granularity = x;
+			} else if (strcmp(iter.key, "quantity") == 0) {
+				ent->quantity = x;
+			} else if (strcmp(iter.key, "free") == 0) {
+				ent->free = x;
+			}
+			break;
+		case json_type_string:
+			p = json_object_get_string(iter.val);
+			if (strcmp(iter.key, "id") == 0) {
+				ent->id = xstrdup(p);
+			} else if (strcmp(iter.key, "units") == 0) {
+				ent->units = xstrdup(p);
+			}
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/* _json_parse_sessions_object()
+ * Fill in one bb_sessions_t from a JSON "session" object: integer fields
+ * "created", "id", "owner" plus the "token" string (xstrdup'ed) */
+static void
+_json_parse_sessions_object(json_object *jobj, bb_sessions_t *ent)
+{
+	enum json_type type;
+	struct json_object_iter iter;
+	int64_t x;
+	const char *p;
+
+	json_object_object_foreachC(jobj, iter) {
+		type = json_object_get_type(iter.val);
+		switch (type) {
+		case json_type_int:
+			x = json_object_get_int64(iter.val);
+			if (strcmp(iter.key, "created") == 0) {
+				ent->created = x;
+			} else if (strcmp(iter.key, "id") == 0) {
+				ent->id = x;
+			} else if (strcmp(iter.key, "owner") == 0) {
+				ent->user_id = x;
+			}
+			break;
+		case json_type_string:
+			p = json_object_get_string(iter.val);
+			if (strcmp(iter.key, "token") == 0) {
+				ent->token = xstrdup(p);
+			}
+			break;	/* was missing: fell through to default */
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * Translate a burst buffer string to it's equivalent TRES string
+ * (e.g. "cray:2G,generic:4M" -> "1004=2048,1005=4")
+ * Caller must xfree the return value
+ */
+extern char *bb_p_xlate_bb_2_tres_str(char *burst_buffer)
+{
+	char *save_ptr = NULL, *sep, *tmp, *tok;
+	char *result = NULL;
+	uint64_t size, total = 0;
+
+	if (!burst_buffer || (bb_state.tres_id < 1))
+		return result;
+
+	/* Work on a private copy; strtok_r() modifies its input */
+	tmp = xstrdup(burst_buffer);
+	tok = strtok_r(tmp, ",", &save_ptr);
+	while (tok) {
+		sep = strchr(tok, ':');
+		if (sep) {
+			if (!strncmp(tok, "cray:", 5))
+				tok += 5;
+			else
+				tok = NULL;	/* Not this plugin's TRES */
+		}
+
+		if (tok) {
+			uint64_t mb_xlate = 1024 * 1024;
+			/* Accumulate size in MB, rounding up */
+			size = bb_get_size_num(tok,
+					       bb_state.bb_config.granularity);
+			total += (size + mb_xlate - 1) / mb_xlate;
+		}
+
+		tok = strtok_r(NULL, ",", &save_ptr);
+	}
+	xfree(tmp);	/* Was never freed: memory leak */
+
+	if (total)
+		xstrfmtcat(result, "%d=%"PRIu64, bb_state.tres_id, total);
+
+	return result;
+}
diff --git a/src/plugins/burst_buffer/cray/dw_wlm_cli b/src/plugins/burst_buffer/cray/dw_wlm_cli
new file mode 100755
index 000000000..24c66ee02
--- /dev/null
+++ b/src/plugins/burst_buffer/cray/dw_wlm_cli
@@ -0,0 +1,49 @@
+#!/bin/bash
+# Emulate dw_wlm_cli (Cray's DataWarp interface) for testing purposes
+# See "Flags=EmulateCray" in burst_buffer.conf man page
+#
+# $2 selects the DataWarp operation to emulate.  Query operations print
+# canned JSON; state-changing operations just sleep to simulate latency.
+# Positional parameters are quoted so a missing argument compares as an
+# empty string instead of triggering a bash "unary operator" error.
+if [ "$2" == "create_persistent" ]; then
+   echo 'created'
+fi
+if [ "$2" == "data_in" ]; then
+   sleep 1
+fi
+if [ "$2" == "data_out" ]; then
+   sleep 1
+fi
+if [ "$2" == "destroy_persistent" ]; then
+   sleep 0.1
+fi
+if [ "$2" == "job_process" ]; then
+   sleep 0.1
+fi
+if [ "$2" == "paths" ]; then
+   if [ "$7" == "--pathfile" ]; then
+      echo 'DWPATHS=/tmp/dw' > "$8"
+   fi
+fi
+if [ "$2" == "pre_run" ]; then
+   sleep 0.1
+fi
+if [ "$2" == "post_run" ]; then
+   sleep 0.1
+fi
+if [ "$2" == "pools" ]; then
+   echo '{ "pools": [ { "id":"dwcache", "units":"bytes", "granularity":16777216, "quantity":2048, "free":2048 } ] }'
+fi
+if [ "$2" == "setup" ]; then
+   sleep 0.1
+fi
+if [ "$2" == "show_configurations" ]; then
+   echo '{ "configurations": [ ] }'
+fi
+if [ "$2" == "show_instances" ]; then
+   echo '{ "instances": [ ] }'
+fi
+if [ "$2" == "show_sessions" ]; then
+   echo '{ "sessions": [ ] }'
+fi
+if [ "$2" == "teardown" ]; then
+   sleep 0.1
+fi
+
+exit 0
diff --git a/src/plugins/burst_buffer/generic/Makefile.am b/src/plugins/burst_buffer/generic/Makefile.am
new file mode 100644
index 000000000..e3fa3cd77
--- /dev/null
+++ b/src/plugins/burst_buffer/generic/Makefile.am
@@ -0,0 +1,22 @@
+# Makefile for burst_buffer/generic plugin
+
+AUTOMAKE_OPTIONS = foreign
+
+# Example burst buffer management scripts shipped in the dist tarball
+EXTRA_DIST =				\
+	bb_get_state.example		\
+	bb_start_stage_in.example	\
+	bb_start_stage_out.example	\
+	bb_stop_stage_out.example
+
+# Standard Slurm plugin link flags: build a dlopen-able libtool module
+# with no version suffix, exporting symbols for the plugin loader.
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
+
+pkglib_LTLIBRARIES = burst_buffer_generic.la
+burst_buffer_generic_la_SOURCES = burst_buffer_generic.c
+burst_buffer_generic_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+burst_buffer_generic_la_LIBADD = ../common/libburst_buffer_common.la
+
+# "force" is always out of date, so the common burst buffer convenience
+# library is re-made whenever this plugin is built.
+force:
+$(burst_buffer_generic_la_LIBADD) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
diff --git a/src/plugins/burst_buffer/generic/Makefile.in b/src/plugins/burst_buffer/generic/Makefile.in
new file mode 100644
index 000000000..d6b35d5c3
--- /dev/null
+++ b/src/plugins/burst_buffer/generic/Makefile.in
@@ -0,0 +1,823 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for burst_buffer/generic plugin
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/burst_buffer/generic
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+	$(top_srcdir)/auxdir/depcomp
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+  test -z "$$files" \
+    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+         $(am__cd) "$$dir" && rm -f $$files; }; \
+  }
+am__installdirs = "$(DESTDIR)$(pkglibdir)"
+LTLIBRARIES = $(pkglib_LTLIBRARIES)
+burst_buffer_generic_la_DEPENDENCIES =  \
+	../common/libburst_buffer_common.la
+am_burst_buffer_generic_la_OBJECTS = burst_buffer_generic.lo
+burst_buffer_generic_la_OBJECTS =  \
+	$(am_burst_buffer_generic_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 = 
+burst_buffer_generic_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \
+	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
+	$(AM_CFLAGS) $(CFLAGS) $(burst_buffer_generic_la_LDFLAGS) \
+	$(LDFLAGS) -o $@
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo "  CC      " $@;
+am__v_CC_1 = 
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo "  CCLD    " $@;
+am__v_CCLD_1 = 
+SOURCES = $(burst_buffer_generic_la_SOURCES)
+DIST_SOURCES = $(burst_buffer_generic_la_SOURCES)
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+EXTRA_DIST = \
+	bb_get_state.example		\
+	bb_start_stage_in.example	\
+	bb_start_stage_out.example	\
+	bb_stop_stage_out.example
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
+pkglib_LTLIBRARIES = burst_buffer_generic.la
+burst_buffer_generic_la_SOURCES = burst_buffer_generic.c
+burst_buffer_generic_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+burst_buffer_generic_la_LIBADD = ../common/libburst_buffer_common.la
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/burst_buffer/generic/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/plugins/burst_buffer/generic/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	list2=; for p in $$list; do \
+	  if test -f $$p; then \
+	    list2="$$list2 $$p"; \
+	  else :; fi; \
+	done; \
+	test -z "$$list2" || { \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(pkglibdir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" || exit 1; \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \
+	}
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	for p in $$list; do \
+	  $(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; \
+	locs=`for p in $$list; do echo $$p; done | \
+	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+	      sort -u`; \
+	test -z "$$locs" || { \
+	  echo rm -f $${locs}; \
+	  rm -f $${locs}; \
+	}
+
+burst_buffer_generic.la: $(burst_buffer_generic_la_OBJECTS) $(burst_buffer_generic_la_DEPENDENCIES) $(EXTRA_burst_buffer_generic_la_DEPENDENCIES) 
+	$(AM_V_CCLD)$(burst_buffer_generic_la_LINK) -rpath $(pkglibdir) $(burst_buffer_generic_la_OBJECTS) $(burst_buffer_generic_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/burst_buffer_generic.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+	for dir in "$(DESTDIR)$(pkglibdir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-pkglibLTLIBRARIES
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pkglibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-pkglibLTLIBRARIES cscopelist-am ctags \
+	ctags-am distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-pkglibLTLIBRARIES install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \
+	uninstall-pkglibLTLIBRARIES
+
+
+# Hand-carried rule from Makefile.am (survives automake regeneration via
+# the .am file): rebuild the common burst buffer convenience library on
+# demand so this plugin always links an up-to-date copy.
+force:
+$(burst_buffer_generic_la_LIBADD) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/burst_buffer/generic/bb_get_state.example b/src/plugins/burst_buffer/generic/bb_get_state.example
new file mode 100755
index 000000000..36bc8053f
--- /dev/null
+++ b/src/plugins/burst_buffer/generic/bb_get_state.example
@@ -0,0 +1,70 @@
+#!/bin/bash
+#
+# bb_get_state
+#
+# NOTE: Sample script used for burst buffer management. Used for development
+# purposes and is not intended for production use.
+#
+# File system formats:
+# I. BB_DIRECTORY (environment variable): Defines head of burst buffer file
+#    system
+#    A. .slurm_state, child of BB_DIRECTORY, contains state information, file
+#       format is "TotalSize=#"
+#    B. Slurm job ID number, child directories of BB_DIRECTORY, one per job
+#       1. .slurm_state, child of Slurm job ID directory, contains state
+#          information, file format is
+#          "UserID=# JobID=# Name=name State=name Size=#"
+#          (NOTE: "UserID" must be first)
+#          (NOTE: Either the JobID or Name should be specified, but not both)
+#
+# NOTE: cat arguments are double-quoted so paths containing whitespace
+# (e.g. under an unusual $HOME) do not undergo word splitting; the
+# [[ ... ]] tests were already split-safe.
+
+# Validate execute line
+valid=0
+if [[ $# -eq 1 && $1 = "get_sys" ]]; then
+	valid=1
+fi
+if [[ $# -eq 2 && $1 = "get_job" ]]; then
+	job_id=$2
+	valid=1
+fi
+if [[ $valid -eq 0 ]]; then
+	echo "Usage: $0 [get_sys | get_job #]"
+	exit 1
+fi
+
+# Default BB_DIRECTORY for testing is $HOME/slurm_bb
+if [[ -z "$BB_DIRECTORY" ]] ; then
+	BB_DIRECTORY=$HOME/slurm_bb
+fi
+
+# Get information about a specific job's burst buffer state
+if [[ $1 = "get_job" ]]; then
+	if [[ -e $BB_DIRECTORY/$job_id/.slurm_state ]]; then
+		cat "$BB_DIRECTORY/$job_id/.slurm_state"
+		exit 0
+	else
+		echo "ENOENT=$BB_DIRECTORY/$job_id/.slurm_state"
+		exit 1
+	fi
+fi
+
+# Get global state information
+if [[ -e $BB_DIRECTORY/.slurm_state ]]; then
+	cat "$BB_DIRECTORY/.slurm_state"
+else
+	echo "ENOENT=$BB_DIRECTORY/.slurm_state"
+fi
+
+# First set bash option to avoid unmatched patterns expand as result values
+shopt -s nullglob
+# Then store matching file names into array
+filearray=( "$BB_DIRECTORY"/* )
+# Then print state of each job's burst buffer state
+for file in "${filearray[@]}"; do
+	if [[ -e $file/.slurm_state ]]; then
+		cat "$file/.slurm_state"
+	else
+		echo "ENOENT=$file/.slurm_state"
+	fi
+done
+exit 0
diff --git a/src/plugins/burst_buffer/generic/bb_start_stage_in.example b/src/plugins/burst_buffer/generic/bb_start_stage_in.example
new file mode 100644
index 000000000..64d87dc45
--- /dev/null
+++ b/src/plugins/burst_buffer/generic/bb_start_stage_in.example
@@ -0,0 +1,76 @@
+#!/bin/bash
+#
+# bb_start_stage_in
+# Perform stage-in of files for a specific job
+#
+# NOTE: Sample script used for burst buffer management. Used for development
+# purposes and is not intended for production use.
+#
+# File system formats:
+# I. BB_DIRECTORY (environment variable): Defines head of burst buffer file
+#    system
+#    A. .slurm_state, child of BB_DIRECTORY, contains state information, file
+#       format is "TotalSize=#"
+#    B. Slurm job ID number, child directories of BB_DIRECTORY, one per job
+#       1. .slurm_state, child of Slurm job ID directory, contains state
+#          information, file format is
+#          "UserID=# JobID=# Name=name State=name Size=#"
+#          (NOTE: "UserID" must be first)
+#          (NOTE: Either the JobID or Name should be specified, but not both)
+#
+
+# Validate execute line
+valid=0
+if [[ $# -ge 4 && $1 = "start_stage_in" ]]; then
+	job_id=$2
+	user_id=$3
+	buf_size=$4
+	valid=1
+fi
+if [[ $valid -eq 0 ]]; then
+	echo "Usage: $0 start_stage_in <jobid> <userid> <size> [script]"
+	exit 1
+fi
+
+# Default BB_DIRECTORY for testing is $HOME/slurm_bb
+if [[ -z "$BB_DIRECTORY" ]] ; then
+	BB_DIRECTORY=$HOME/slurm_bb
+fi
+
+if [[ ! -d $BB_DIRECTORY/$job_id ]]; then
+	mkdir $BB_DIRECTORY/$job_id
+fi
+
+# Set "State=staging-in" in burst buffer's state file
+if [[ -e $BB_DIRECTORY/$job_id/.slurm_state ]]; then
+	cp $BB_DIRECTORY/$job_id/.slurm_state $BB_DIRECTORY/$job_id/.slurm_state2
+	cat $BB_DIRECTORY/$job_id/.slurm_state2 | awk 'BEGIN {FS=" "}{print $1, $2, "State=staging-in", $4}' >$BB_DIRECTORY/$job_id/.slurm_state
+	rm $BB_DIRECTORY/$job_id/.slurm_state2
+else
+	echo "UserID=$user_id JobId=$job_id State=staging-in Size=$buf_size" >$BB_DIRECTORY/$job_id/.slurm_state
+fi
+
+################################################################################
+# Move files here
+#
+# For testing purposes, we just store the script here, then briefly sleep
+shift
+shift
+rm -f $BB_DIRECTORY/$job_id/stage_in_script
+while (($#)); do
+	echo "$1" >>$BB_DIRECTORY/$job_id/stage_in_script
+	shift
+done
+sleep 10
+################################################################################
+
+# Set "State=staged-in" in burst buffer's state file
+if [[ -e $BB_DIRECTORY/$job_id/.slurm_state ]]; then
+	cp $BB_DIRECTORY/$job_id/.slurm_state $BB_DIRECTORY/$job_id/.slurm_state2
+	cat $BB_DIRECTORY/$job_id/.slurm_state2 | awk 'BEGIN {FS=" "}{print $1, $2, "State=staged-in", $4}' >$BB_DIRECTORY/$job_id/.slurm_state
+	rm $BB_DIRECTORY/$job_id/.slurm_state2
+	exit 0
+else
+	echo "UserID=$user_id JobId=$job_id State=staged-in Size=$buf_size" >$BB_DIRECTORY/$job_id/.slurm_state
+	exit 1
+fi
diff --git a/src/plugins/burst_buffer/generic/bb_start_stage_out.example b/src/plugins/burst_buffer/generic/bb_start_stage_out.example
new file mode 100755
index 000000000..6d5d98a09
--- /dev/null
+++ b/src/plugins/burst_buffer/generic/bb_start_stage_out.example
@@ -0,0 +1,77 @@
+#!/bin/bash
+#
+# bb_start_stage_out
+# Perform stage-out of files for a specific job
+#
+# NOTE: Sample script used for burst buffer management. Used for development
+# purposes and is not intended for production use.
+#
+# File system formats:
+# I. BB_DIRECTORY (environment variable): Defines head of burst buffer file
+#    system
+#    A. .slurm_state, child of BB_DIRECTORY, contains state information, file
+#       format is "TotalSize=#"
+#    B. Slurm job ID number, child directories of BB_DIRECTORY, one per job
+#       1. .slurm_state, child of Slurm job ID directory, contains state
+#          information, file format is
+#          "UserID=# JobID=# Name=name State=name Size=#"
+#          (NOTE: "UserID" must be first)
+#          (NOTE: Either the JobID or Name should be specified, but not both)
+#
+
+# Validate execute line
+valid=0
+if [[ $# -ge 4 && $1 = "start_stage_out" ]]; then
+	job_id=$2
+	user_id=$3
+	buf_size=$4
+	valid=1
+fi
+if [[ $valid -eq 0 ]]; then
+	echo "Usage: $0 start_stage_out <jobid> <userid> <size> [script]"
+	exit 1
+fi
+
+# Default BB_DIRECTORY for testing is $HOME/slurm_bb
+if [[ -z "$BB_DIRECTORY" ]] ; then
+	BB_DIRECTORY=$HOME/slurm_bb
+fi
+
+if [[ ! -d $BB_DIRECTORY/$job_id ]]; then
+	mkdir $BB_DIRECTORY/$job_id
+fi
+
+
+# Set "State=staging-out" in burst buffer's state file
+if [[ -e $BB_DIRECTORY/$job_id/.slurm_state ]]; then
+	cp $BB_DIRECTORY/$job_id/.slurm_state $BB_DIRECTORY/$job_id/.slurm_state2
+	cat $BB_DIRECTORY/$job_id/.slurm_state2 | awk 'BEGIN {FS=" "}{print $1, $2, "State=staging-out", $4}' >$BB_DIRECTORY/$job_id/.slurm_state
+	rm $BB_DIRECTORY/$job_id/.slurm_state2
+else
+	echo "UserID=$user_id JobId=$job_id State=staging-out Size=$buf_size" >$BB_DIRECTORY/$job_id/.slurm_state
+fi
+
+################################################################################
+# Move files here
+#
+# For testing purposes, we just store the script here, then briefly sleep
+shift
+shift
+rm -f $BB_DIRECTORY/$job_id/stage_out_script
+while (($#)); do
+	echo "$1" >>$BB_DIRECTORY/$job_id/stage_out_script
+	shift
+done
+sleep 10
+################################################################################
+
+# Set "State=staged-out" in burst buffer's state file
+if [[ -e $BB_DIRECTORY/$job_id/.slurm_state ]]; then
+	cp $BB_DIRECTORY/$job_id/.slurm_state $BB_DIRECTORY/$job_id/.slurm_state2
+	cat $BB_DIRECTORY/$job_id/.slurm_state2 | awk 'BEGIN {FS=" "}{print $1, $2, "State=staged-out", $4}' >$BB_DIRECTORY/$job_id/.slurm_state
+	rm $BB_DIRECTORY/$job_id/.slurm_state2
+	exit 0
+else
+	echo "UserID=$user_id JobId=$job_id State=staged-out Size=$buf_size" >$BB_DIRECTORY/$job_id/.slurm_state
+	exit 1
+fi
diff --git a/src/plugins/burst_buffer/generic/bb_stop_stage_out.example b/src/plugins/burst_buffer/generic/bb_stop_stage_out.example
new file mode 100755
index 000000000..b61f2ba89
--- /dev/null
+++ b/src/plugins/burst_buffer/generic/bb_stop_stage_out.example
@@ -0,0 +1,48 @@
+#!/bin/bash
+#
+# bb_stop_stage_out
+# Terminate any file staging and completely release burst buffer resources
+#
+# NOTE: Sample script used for burst buffer management. Used for development
+# purposes and is not intended for production use.
+#
+# File system formats:
+# I. BB_DIRECTORY (environment variable): Defines head of burst buffer file
+#    system
+#    A. .slurm_state, child of BB_DIRECTORY, contains state information, file
+#       format is "TotalSize=#"
+#    B. Slurm job ID number, child directories of BB_DIRECTORY, one per job
+#       1. .slurm_state, child of Slurm job ID directory, contains state
+#          information, file format is
+#          "UserID=# JobID=# Name=name State=name Size=#"
+#          (NOTE: "UserID" must be first)
+#          (NOTE: Either the JobID or Name should be specified, but not both)
+#
+
+# Validate execute line
+valid=0
+if [[ $# -ge 2 && $1 = "stop_stage_in" ]]; then
+	job_id=$2
+	valid=1
+fi
+if [[ $# -ge 2 && $1 = "stop_stage_out" ]]; then
+	job_id=$2
+	valid=1
+fi
+if [[ $valid -eq 0 ]]; then
+	echo "Usage: $0 stop_stage_out|stop_stage_in <jobid>"
+	exit 1
+fi
+
+# Default BB_DIRECTORY for testing is $HOME/slurm_bb
+if [[ -z "$BB_DIRECTORY" ]] ; then
+	BB_DIRECTORY=$HOME/slurm_bb
+fi
+
+while [[ -d $BB_DIRECTORY/$job_id ]]; do
+	rm -rf $BB_DIRECTORY/$job_id
+	if [[ -d $BB_DIRECTORY/$job_id ]] ; then
+		sleep 2
+	fi
+done
+exit 0
diff --git a/src/plugins/burst_buffer/generic/burst_buffer_generic.c b/src/plugins/burst_buffer/generic/burst_buffer_generic.c
new file mode 100644
index 000000000..2c389a899
--- /dev/null
+++ b/src/plugins/burst_buffer/generic/burst_buffer_generic.c
@@ -0,0 +1,1372 @@
+/*****************************************************************************\
+ *  burst_buffer_generic.c - Generic library for managing a burst_buffer
+ *****************************************************************************
+ *  Copyright (C) 2014-2015 SchedMD LLC.
+ *  Written by Morris Jette <jette@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if     HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <poll.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "slurm/slurm.h"
+
+#include "src/common/list.h"
+#include "src/common/pack.h"
+#include "src/common/parse_config.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/timers.h"
+#include "src/common/uid.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/slurmctld/locks.h"
+#include "src/slurmctld/reservation.h"
+#include "src/slurmctld/slurmctld.h"
+#include "src/plugins/burst_buffer/common/burst_buffer_common.h"
+
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *      <application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "burst_buffer" for SLURM burst_buffer) and <method> is a
+ * description of how this plugin satisfies that application.  SLURM will only
+ * load a burst_buffer plugin if the plugin_type string has a prefix of
+ * "burst_buffer/".
+ *
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
+ */
+const char plugin_name[]        = "burst_buffer generic plugin";
+const char plugin_type[]        = "burst_buffer/generic";
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
+
+/* Most state information is in a common structure so that we can more
+ * easily use common functions from multiple burst buffer plugins */
+static bb_state_t 	bb_state;
+
+/* Local function definitions */
+static void	_alloc_job_bb(struct job_record *job_ptr, uint64_t bb_size);
+static void *	_bb_agent(void *args);
+static char **	_build_stage_args(char *cmd, char *opt,
+				  struct job_record *job_ptr,
+				  uint64_t bb_size);
+static void	_destroy_job_info(void *data);
+static bb_alloc_t *_find_bb_name_rec(char *name, uint32_t user_id);
+static uint64_t	_get_bb_size(struct job_record *job_ptr);
+static void	_load_state(uint32_t job_id);
+static int	_parse_job_info(void **dest, slurm_parser_enum_t type,
+				const char *key, const char *value,
+				const char *line, char **leftover);
+static void	_stop_stage_in(uint32_t job_id);
+static void	_stop_stage_out(uint32_t job_id);
+static void	_test_config(void);
+static int	_test_size_limit(struct job_record *job_ptr,uint64_t add_space);
+static void	_timeout_bb_rec(void);
+
+/* Validate that our configuration is valid for this plugin type */
+static void _test_config(void)
+{
+	if (!bb_state.bb_config.get_sys_state)
+		fatal("%s: GetSysState is NULL", __func__);
+	if (!bb_state.bb_config.start_stage_in)
+		fatal("%s: StartStageIn is NULL", __func__);
+	if (!bb_state.bb_config.start_stage_out)
+		fatal("%s: StartStageOUT is NULL", __func__);
+	if (!bb_state.bb_config.stop_stage_in)
+		fatal("%s: StopStageIn is NULL", __func__);
+	if (!bb_state.bb_config.stop_stage_out)
+		fatal("%s: StopStageOUT is NULL", __func__);
+}
+
+/* Return the burst buffer size requested by a job */
+static uint64_t _get_bb_size(struct job_record *job_ptr)
+{
+	char *tok;
+	uint64_t bb_size_u = 0;
+
+	if (job_ptr->burst_buffer) {
+		tok = strstr(job_ptr->burst_buffer, "size=");
+		if (tok)
+			bb_size_u = bb_get_size_num(tok + 5,
+						bb_state.bb_config.granularity);
+	}
+
+	return bb_size_u;
+}
+
+static char **_build_stage_args(char *cmd, char *opt,
+				struct job_record *job_ptr, uint64_t bb_size)
+{
+	char **script_argv = NULL;
+	char *save_ptr = NULL, *script, *tok;
+	int script_argc = 0, size;
+	char jobid_buf[32];
+
+	if (job_ptr->batch_flag == 0)
+		return script_argv;
+
+	script = get_job_script(job_ptr);
+	if (!script) {
+		error("%s: failed to get script for %s", __func__,
+		      jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+		return script_argv;
+	}
+
+	size = 20;
+	script_argv = xmalloc(sizeof(char *) * size);
+	tok = strrchr(cmd, '/');
+	if (tok)
+		xstrfmtcat(script_argv[0], "%s", tok + 1);
+	else
+		xstrfmtcat(script_argv[0], "%s", cmd);
+	xstrfmtcat(script_argv[1], "%s", opt);
+	xstrfmtcat(script_argv[2], "%u", job_ptr->job_id);
+	xstrfmtcat(script_argv[3], "%u", job_ptr->user_id);
+	xstrfmtcat(script_argv[4], "%"PRIu64"", bb_size);
+	script_argc += 5;
+	tok = strtok_r(script, "\n", &save_ptr);
+	while (tok) {
+		if (tok[0] != '#')
+			break;
+		if (tok[1] != '!') {
+			if ((script_argc + 1) >= size) {
+				size *= 2;
+				script_argv = xrealloc(script_argv,
+						       sizeof(char *) * size);
+			}
+			script_argv[script_argc++] = xstrdup(tok);
+		}
+		tok = strtok_r(NULL, "\n", &save_ptr);
+	}
+	xfree(script);
+
+	return script_argv;
+}
+
+static void _stop_stage_in(uint32_t job_id)
+{
+	char **script_argv = NULL;
+	char *resp, *tok;
+	int i, status = 0;
+
+	if (!bb_state.bb_config.stop_stage_in)
+		return;
+
+	script_argv = xmalloc(sizeof(char *) * 4);
+	tok = strrchr(bb_state.bb_config.stop_stage_in, '/');
+	if (tok) {
+		xstrfmtcat(script_argv[0], "%s", tok + 1);
+	} else {
+		xstrfmtcat(script_argv[0], "%s",
+			   bb_state.bb_config.stop_stage_in);
+	}
+	xstrfmtcat(script_argv[1], "%s", "stop_stage_in");
+	xstrfmtcat(script_argv[2], "%u", job_id);
+
+	resp = bb_run_script("StopStageIn",
+			     bb_state.bb_config.stop_stage_in,
+			     script_argv, -1, &status);
+	if (resp) {
+		error("%s: StopStageIn: %s", __func__, resp);
+		xfree(resp);
+	}
+	for (i = 0; script_argv[i]; i++)
+		xfree(script_argv[i]);
+	xfree(script_argv);
+}
+
+static void _stop_stage_out(uint32_t job_id)
+{
+	char **script_argv = NULL;
+	char *resp, *tok;
+	int i, status = 0;
+
+	if (!bb_state.bb_config.stop_stage_out)
+		return;
+
+	script_argv = xmalloc(sizeof(char *) * 4);
+	tok = strrchr(bb_state.bb_config.stop_stage_out, '/');
+	if (tok)
+		xstrfmtcat(script_argv[0], "%s", tok + 1);
+	else
+		xstrfmtcat(script_argv[0], "%s",
+			   bb_state.bb_config.stop_stage_out);
+	xstrfmtcat(script_argv[1], "%s", "stop_stage_out");
+	xstrfmtcat(script_argv[2], "%u", job_id);
+
+	resp = bb_run_script("StopStageOut", bb_state.bb_config.stop_stage_out,
+			     script_argv, -1, &status);
+	if (resp) {
+		error("%s: StopStageOut: %s", __func__, resp);
+		xfree(resp);
+	}
+	for (i = 0; script_argv[i]; i++)
+		xfree(script_argv[i]);
+	xfree(script_argv);
+}
+
+/* Find a per-job burst buffer record with a specific name.
+ * If not found, return NULL. */
+static bb_alloc_t * _find_bb_name_rec(char *name, uint32_t user_id)
+{
+	bb_alloc_t *bb_ptr = NULL;
+
+	xassert(bb_state.bb_ahash);
+	bb_ptr = bb_state.bb_ahash[user_id % BB_HASH_SIZE];
+	while (bb_ptr) {
+		if (!xstrcmp(bb_ptr->name, name))
+			return bb_ptr;
+		bb_ptr = bb_ptr->next;
+	}
+	return bb_ptr;
+}
+
+/* Handle timeout of burst buffer events:
+ * 1. Purge per-job burst buffer records when the stage-out has completed and
+ *    the job has been purged from Slurm
+ * 2. Test for StageInTimeout events
+ * 3. Test for StageOutTimeout events
+ */
+static void _timeout_bb_rec(void)
+{
+	struct job_record *job_ptr;
+	bb_alloc_t **bb_pptr, *bb_ptr = NULL;
+	uint32_t age;
+	time_t now = time(NULL);
+	int i;
+
+	for (i = 0; i < BB_HASH_SIZE; i++) {
+		bb_pptr = &bb_state.bb_ahash[i];
+		bb_ptr = bb_state.bb_ahash[i];
+		while (bb_ptr) {
+			if (bb_ptr->seen_time < bb_state.last_load_time) {
+				if (bb_ptr->job_id == 0) {
+					info("%s: Persistent burst buffer %s "
+					     "purged",
+					     __func__, bb_ptr->name);
+				} else if (bb_state.bb_config.debug_flag) {
+					info("%s: burst buffer for job %u "
+					     "purged",
+					     __func__, bb_ptr->job_id);
+				}
+//FIXME: VESTIGIAL: Use bb_limit_rem
+//				bb_remove_user_load(bb_ptr, &bb_state);
+				*bb_pptr = bb_ptr->next;
+				bb_free_alloc_buf(bb_ptr);
+				break;
+			}
+			if ((bb_ptr->job_id != 0) &&
+			    (bb_ptr->state >= BB_STATE_STAGED_OUT) &&
+			    !find_job_record(bb_ptr->job_id)) {
+				_stop_stage_out(bb_ptr->job_id);
+				bb_ptr->cancelled = true;
+				bb_ptr->end_time = 0;
+				*bb_pptr = bb_ptr->next;
+				bb_free_alloc_buf(bb_ptr);
+				break;
+			}
+			age = difftime(now, bb_ptr->state_time);
+			if ((bb_ptr->job_id != 0) &&
+			    bb_state.bb_config.stop_stage_in &&
+			    (bb_ptr->state == BB_STATE_STAGING_IN) &&
+			    (bb_state.bb_config.stage_in_timeout != 0) &&
+			    (!bb_ptr->cancelled) &&
+			    (age >= bb_state.bb_config.stage_in_timeout)) {
+				_stop_stage_in(bb_ptr->job_id);
+				bb_ptr->cancelled = true;
+				bb_ptr->end_time = 0;
+				job_ptr = find_job_record(bb_ptr->job_id);
+				if (job_ptr) {
+					error("%s: StageIn timed out, holding "
+					      "job %u",
+					      __func__, bb_ptr->job_id);
+					job_ptr->priority = 0;
+					job_ptr->direct_set_prio = 1;
+					job_ptr->state_reason = WAIT_HELD;
+					xfree(job_ptr->state_desc);
+					job_ptr->state_desc = xstrdup(
+						"Burst buffer stage-in timeout");
+					last_job_update = now;
+				} else {
+					error("%s: StageIn timed out for "
+					      "vestigial job %u ",
+					      __func__, bb_ptr->job_id);
+				}
+			}
+			if ((bb_ptr->job_id != 0) &&
+			    bb_state.bb_config.stop_stage_out &&
+			    (bb_ptr->state == BB_STATE_STAGING_OUT) &&
+			    (bb_state.bb_config.stage_out_timeout != 0) &&
+			    (!bb_ptr->cancelled) &&
+			    (age >= bb_state.bb_config.stage_out_timeout)) {
+				error("%s: StageOut for job %u timed out",
+				      __func__, bb_ptr->job_id);
+				_stop_stage_out(bb_ptr->job_id);
+				bb_ptr->cancelled = true;
+				bb_ptr->end_time = 0;
+			}
+			bb_pptr = &bb_ptr->next;
+			bb_ptr = bb_ptr->next;
+		}
+	}
+}
+
+/* Test if a job can be allocated a burst buffer.
+ * This may preempt currently active stage-in for higher priority jobs.
+ *
+ * RET 0: Job can be started now
+ *     1: Job exceeds configured limits, continue testing with next job
+ *     2: Job needs more resources than currently available can not start,
+ *        skip all remaining jobs
+ */
+static int _test_size_limit(struct job_record *job_ptr, uint64_t add_space)
+{
+	burst_buffer_info_msg_t *resv_bb;
+	struct preempt_bb_recs *preempt_ptr = NULL;
+	List preempt_list;
+	ListIterator preempt_iter;
+	uint64_t resv_space = 0;
+	int add_total_space_needed = 0, add_user_space_needed = 0;
+	int add_total_space_avail  = 0, add_user_space_avail  = 0;
+	time_t now = time(NULL), when;
+	bb_alloc_t *bb_ptr = NULL;
+	int i;
+	char jobid_buf[32];
+
+	if (job_ptr->start_time <= now)
+		when = now;
+	else
+		when = job_ptr->start_time;
+	resv_bb = job_test_bb_resv(job_ptr, when);
+	if (resv_bb) {
+		burst_buffer_info_t *resv_bb_ptr;
+		for (i = 0, resv_bb_ptr = resv_bb->burst_buffer_array;
+		     i < resv_bb->record_count; i++, resv_bb_ptr++) {
+			if (resv_bb_ptr->name &&
+			    strcmp(resv_bb_ptr->name, bb_state.name))
+				continue;
+			resv_bb_ptr->used_space =
+				bb_granularity(resv_bb_ptr->used_space,
+					       bb_state.bb_config.granularity);
+			resv_space += resv_bb_ptr->used_space;
+		}
+		slurm_free_burst_buffer_info_msg(resv_bb);
+	}
+
+	add_total_space_needed = bb_state.used_space + add_space + resv_space -
+				 bb_state.total_space;
+
+	if ((add_total_space_needed <= 0) &&
+	    (add_user_space_needed  <= 0))
+		return 0;
+
+	/* Identify candidate burst buffers to revoke for higher priority job */
+	preempt_list = list_create(bb_job_queue_del);
+	for (i = 0; i < BB_HASH_SIZE; i++) {
+		bb_ptr = bb_state.bb_ahash[i];
+		while (bb_ptr) {
+			if (bb_ptr->job_id &&
+			    (bb_ptr->use_time > now) &&
+			    (bb_ptr->use_time > job_ptr->start_time)) {
+				preempt_ptr = xmalloc(sizeof(
+						struct preempt_bb_recs));
+				preempt_ptr->bb_ptr = bb_ptr;
+				preempt_ptr->job_id = bb_ptr->job_id;
+				preempt_ptr->size = bb_ptr->size;
+				preempt_ptr->use_time = bb_ptr->use_time;
+				preempt_ptr->user_id = bb_ptr->user_id;
+				list_push(preempt_list, preempt_ptr);
+
+				add_total_space_avail += bb_ptr->size;
+				if (bb_ptr->user_id == job_ptr->user_id)
+					add_user_space_avail += bb_ptr->size;
+			}
+			bb_ptr = bb_ptr->next;
+		}
+	}
+
+	if ((add_total_space_avail >= add_total_space_needed) &&
+	    (add_user_space_avail  >= add_user_space_needed)) {
+		list_sort(preempt_list, bb_preempt_queue_sort);
+		preempt_iter = list_iterator_create(preempt_list);
+		while ((preempt_ptr = list_next(preempt_iter)) &&
+		       (add_total_space_needed || add_user_space_needed)) {
+			if (add_user_space_needed &&
+			    (preempt_ptr->user_id == job_ptr->user_id)) {
+				_stop_stage_in(preempt_ptr->job_id);
+				preempt_ptr->bb_ptr->cancelled = true;
+				preempt_ptr->bb_ptr->end_time = 0;
+				if (bb_state.bb_config.debug_flag) {
+					info("%s: %s: Preempting stage-in of "
+					     "job %u for %s", plugin_type,
+					     __func__, preempt_ptr->job_id,
+					     jobid2fmt(job_ptr, jobid_buf,
+						       sizeof(jobid_buf)));
+				}
+				add_user_space_needed  -= preempt_ptr->size;
+				add_total_space_needed -= preempt_ptr->size;
+			}
+			if ((add_total_space_needed > add_user_space_needed) &&
+			    (preempt_ptr->user_id != job_ptr->user_id)) {
+				_stop_stage_in(preempt_ptr->job_id);
+				preempt_ptr->bb_ptr->cancelled = true;
+				preempt_ptr->bb_ptr->end_time = 0;
+				if (bb_state.bb_config.debug_flag) {
+					info("%s: %s: Preempting stage-in of "
+					     "job %u for %s", plugin_type,
+					     __func__, preempt_ptr->job_id,
+					     jobid2fmt(job_ptr, jobid_buf,
+						       sizeof(jobid_buf)));
+				}
+				add_total_space_needed -= preempt_ptr->size;
+			}
+		}
+		list_iterator_destroy(preempt_iter);
+	}
+	FREE_NULL_LIST(preempt_list);
+
+	return 2;
+}
+
+static int _parse_job_info(void **dest, slurm_parser_enum_t type,
+			   const char *key, const char *value,
+			   const char *line, char **leftover)
+{
+	s_p_hashtbl_t *job_tbl;
+	char *name = NULL, *tmp = NULL, local_name[64] = "";
+	uint64_t size = 0;
+	uint32_t job_id = 0, user_id = 0;
+	uint16_t state = 0;
+	bb_alloc_t *bb_ptr;
+	struct job_record *job_ptr = NULL;
+	bb_job_t *bb_spec;
+	static s_p_options_t _job_options[] = {
+		{"JobID",S_P_STRING},
+		{"Name", S_P_STRING},
+		{"Size", S_P_STRING},
+		{"State", S_P_STRING},
+		{NULL}
+	};
+
+	*dest = NULL;
+	user_id = strtol(value, NULL, 10);
+	job_tbl = s_p_hashtbl_create(_job_options);
+	s_p_parse_line(job_tbl, *leftover, leftover);
+	if (s_p_get_string(&tmp, "JobID", job_tbl)) {
+		job_id = strtol(tmp, NULL, 10);
+		xfree(tmp);
+	}
+	if (s_p_get_string(&name, "Name", job_tbl)) {
+		snprintf(local_name, sizeof(local_name), "%s", name);
+		xfree(name);
+	}
+	if (s_p_get_string(&tmp, "Size", job_tbl)) {
+		size =  bb_get_size_num(tmp, bb_state.bb_config.granularity);
+		xfree(tmp);
+	}
+	if (s_p_get_string(&tmp, "State", job_tbl)) {
+		state = bb_state_num(tmp);
+		xfree(tmp);
+	}
+	s_p_hashtbl_destroy(job_tbl);
+
+#if 0
+	info("%s: JobID:%u Name:%s Size:%"PRIu64" State:%u UserID:%u",
+	     __func__, job_id, local_name, size, state, user_id);
+#endif
+	if (job_id) {
+		job_ptr = find_job_record(job_id);
+		if (!job_ptr && (state == BB_STATE_STAGED_OUT)) {
+			struct job_record job_rec;
+			job_rec.job_id  = job_id;
+			job_rec.user_id = user_id;
+			bb_ptr = bb_find_alloc_rec(&bb_state, &job_rec);
+			_stop_stage_out(job_id);	/* Purge buffer */
+			if (bb_ptr) {
+				bb_ptr->cancelled = true;
+				bb_ptr->end_time = 0;
+			} else {
+				/* Slurm knows nothing about this job,
+				 * may be result of slurmctld cold start */
+				error("%s: Vestigial buffer for purged job %u",
+				      plugin_type, job_id);
+			}
+			return SLURM_SUCCESS;
+		} else if (!job_ptr &&
+			   ((state == BB_STATE_STAGING_IN) ||
+			    (state == BB_STATE_STAGED_IN))) {
+			struct job_record job_rec;
+			job_rec.job_id  = job_id;
+			job_rec.user_id = user_id;
+			bb_ptr = bb_find_alloc_rec(&bb_state, &job_rec);
+			_stop_stage_in(job_id);		/* Purge buffer */
+			if (bb_ptr) {
+				bb_ptr->cancelled = true;
+				bb_ptr->end_time = 0;
+			} else {
+				/* Slurm knows nothing about this job,
+				 * may be result of slurmctld cold start */
+				error("%s: Vestigial buffer for purged job %u",
+				      plugin_type, job_id);
+			}
+			return SLURM_SUCCESS;
+		} else if (!job_ptr) {
+			error("%s: Vestigial buffer for job ID %u. "
+			      "Clear manually",
+			      plugin_type, job_id);
+		}
+		snprintf(local_name, sizeof(local_name), "VestigialJob%u",
+			 job_id);
+	}
+	if (job_ptr) {
+		bb_ptr = bb_find_alloc_rec(&bb_state, job_ptr);
+		if (bb_ptr == NULL) {
+			bb_spec = xmalloc(sizeof(bb_job_t));
+			bb_spec->total_size = _get_bb_size(job_ptr);
+			bb_ptr = bb_alloc_job_rec(&bb_state, job_ptr, bb_spec);
+			xfree(bb_spec);
+			bb_ptr->state = state;
+			/* bb_ptr->state_time set in bb_alloc_job_rec() */
+		}
+	} else {
+		if ((bb_ptr = _find_bb_name_rec(local_name, user_id)) == NULL) {
+			bb_ptr = bb_alloc_name_rec(&bb_state, local_name,
+						   user_id);
+			bb_ptr->size = size;
+			bb_ptr->state = state;
+//FIXME: VESTIGIAL: Use bb_limit_add
+//			bb_add_user_load(bb_ptr, &bb_state);
+			return SLURM_SUCCESS;
+		}
+	}
+	bb_ptr->seen_time = time(NULL); /* used to purge defunct recs */
+
+	/* UserID set to 0 on some failure modes */
+	if ((bb_ptr->user_id != user_id) && (user_id != 0)) {
+		error("%s: User ID mismatch (%u != %u). "
+		      "BB UserID=%u JobID=%u Name=%s",
+		      plugin_type, bb_ptr->user_id, user_id,
+		      bb_ptr->user_id, bb_ptr->job_id, bb_ptr->name);
+	}
+	if ((bb_ptr->state == BB_STATE_RUNNING) &&
+	    (state == BB_STATE_STAGED_IN))
+		state = BB_STATE_RUNNING;	/* More precise state info */
+	if (bb_ptr->state != state) {
+		/* State is subject to real-time changes */
+		debug("%s: State changed (%s to %s). "
+		      "BB UserID=%u JobID=%u Name=%s",
+		      plugin_type, bb_state_string(bb_ptr->state),
+		      bb_state_string(state),
+		      bb_ptr->user_id, bb_ptr->job_id, bb_ptr->name);
+		bb_ptr->state = state;
+		bb_ptr->state_time = time(NULL);
+		if (bb_ptr->state == BB_STATE_STAGED_OUT) {
+			if (bb_ptr->size != 0) {
+//FIXME: VESTIGIAL: Use bb_limit_rem
+//				bb_remove_user_load(bb_ptr, &bb_state);
+				bb_ptr->size = 0;
+			}
+		}
+		if (bb_ptr->state == BB_STATE_STAGED_IN)
+			queue_job_scheduler();
+	}
+	if ((bb_ptr->state != BB_STATE_STAGED_OUT) && (bb_ptr->size != size)) {
+//FIXME: VESTIGIAL: Use bb_limit_rem
+//		bb_remove_user_load(bb_ptr, &bb_state);
+		if (size != 0) {
+			error("%s: Size mismatch (%"PRIu64" != %"PRIu64"). "
+			      "BB UserID=%u JobID=%u Name=%s",
+			      plugin_type, bb_ptr->size, size,
+			      bb_ptr->user_id, bb_ptr->job_id, bb_ptr->name);
+		}
+		bb_ptr->size = MAX(bb_ptr->size, size);
+//FIXME: VESTIGIAL: Use bb_limit_add
+//		bb_add_user_load(bb_ptr, &bb_state);
+	}
+
+	return SLURM_SUCCESS;
+}
+
+/* Destroy any records created by _parse_job_info(), currently none */
+static void _destroy_job_info(void *data)
+{
+}
+
+/*
+ * Determine the current actual burst buffer state.
+ * Run the program "get_sys_state" and parse stdout for details.
+ * job_id IN - specific job to get information about, or 0 for all jobs
+ */
+static void _load_state(uint32_t job_id)
+{
+	static uint64_t last_total_space = 0;
+	char *save_ptr = NULL, *tok, *leftover = NULL, *resp, *tmp = NULL;
+	char *script_args[4], job_id_str[32];
+	s_p_hashtbl_t *state_hashtbl = NULL;
+	static s_p_options_t state_options[] = {
+		{"ENOENT", S_P_STRING},
+		{"UserID", S_P_ARRAY, _parse_job_info, _destroy_job_info},
+		{"TotalSize", S_P_STRING},
+		{NULL}
+	};
+	int status = 0;
+	DEF_TIMERS;
+
+	if (!bb_state.bb_config.get_sys_state)
+		return;
+
+	bb_state.last_load_time = time(NULL);
+
+	tok = strrchr(bb_state.bb_config.get_sys_state, '/');
+	if (tok)
+		script_args[0] = tok + 1;
+	else
+		script_args[0] = bb_state.bb_config.get_sys_state;
+	if (job_id) {
+		script_args[1] = "get_job";
+		snprintf(job_id_str, sizeof(job_id_str), "%u", job_id);
+		script_args[2] = job_id_str; script_args[3] = NULL;
+	} else {
+		script_args[1] = "get_sys";
+		script_args[2] = NULL;
+	}
+	START_TIMER;
+	resp = bb_run_script("GetSysState", bb_state.bb_config.get_sys_state,
+			     script_args, 2000, &status);
+	if (resp == NULL)
+		return;
+	END_TIMER;
+	if (DELTA_TIMER > 200000)	/* 0.2 secs */
+		info("%s: GetSysState ran for %s", __func__, TIME_STR);
+	else if (bb_state.bb_config.debug_flag)
+		debug("%s: GetSysState ran for %s", __func__, TIME_STR);
+
+	state_hashtbl = s_p_hashtbl_create(state_options);
+	tok = strtok_r(resp, "\n", &save_ptr);
+	while (tok) {
+		s_p_parse_line(state_hashtbl, tok, &leftover);
+		tok = strtok_r(NULL, "\n", &save_ptr);
+	}
+	if (s_p_get_string(&tmp, "TotalSize", state_hashtbl)) {
+		bb_state.total_space = bb_get_size_num(tmp,
+						bb_state.bb_config.granularity);
+		xfree(tmp);
+		if (bb_state.bb_config.debug_flag &&
+		    (bb_state.total_space != last_total_space)) {
+			info("%s: total_space:%"PRIu64"",  __func__,
+			     bb_state.total_space);
+		}
+		last_total_space = bb_state.total_space;
+	} else if (job_id == 0) {
+		error("%s: GetSysState failed to respond with TotalSize",
+		      plugin_type);
+	}
+	s_p_hashtbl_destroy(state_hashtbl);
+	xfree(resp);
+}
+
+/* Perform periodic background activities */
+static void *_bb_agent(void *args)
+{
+	/* Locks: write job */
+	slurmctld_lock_t job_write_lock = {
+		NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
+
+	while (!bb_state.term_flag) {
+		bb_sleep(&bb_state, AGENT_INTERVAL);
+		if (bb_state.term_flag)
+			break;
+		lock_slurmctld(job_write_lock);
+		pthread_mutex_lock(&bb_state.bb_mutex);
+		_load_state(0);
+		_timeout_bb_rec();
+		pthread_mutex_unlock(&bb_state.bb_mutex);
+		unlock_slurmctld(job_write_lock);
+	}
+	return NULL;
+}
+
+/*
+ * init() is called when the plugin is loaded, before any other functions
+ * are called.  Put global initialization here.
+ */
+extern int init(void)
+{
+	pthread_attr_t attr;
+
+	pthread_mutex_init(&bb_state.bb_mutex, NULL);
+	pthread_cond_init(&bb_state.term_cond, NULL);
+	pthread_mutex_init(&bb_state.term_mutex, NULL);
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	bb_load_config(&bb_state, (char *)plugin_type); /* Remove "const" */
+	_test_config();
+	if (bb_state.bb_config.debug_flag)
+		info("%s: %s", plugin_type,  __func__);
+	bb_alloc_cache(&bb_state);
+	slurm_attr_init(&attr);
+	if (pthread_create(&bb_state.bb_thread, &attr, _bb_agent, NULL))
+		error("Unable to start backfill thread: %m");
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * fini() is called when the plugin is unloaded. Free all memory.
+ */
+extern int fini(void)
+{
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	if (bb_state.bb_config.debug_flag)
+		info("%s: %s", plugin_type,  __func__);
+
+	pthread_mutex_lock(&bb_state.term_mutex);
+	bb_state.term_flag = true;
+	pthread_cond_signal(&bb_state.term_cond);
+	pthread_mutex_unlock(&bb_state.term_mutex);
+
+	if (bb_state.bb_thread) {
+		pthread_join(bb_state.bb_thread, NULL);
+		bb_state.bb_thread = 0;
+	}
+	bb_clear_config(&bb_state.bb_config, true);
+	bb_clear_cache(&bb_state);
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Return the total burst buffer size in MB
+ */
+extern uint64_t bb_p_get_system_size(void)
+{
+	uint64_t size = 0;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	size = bb_state.total_space / (1024 * 1024);	/* bytes to MB */
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+	return size;
+}
+
+/*
+ * Load the current burst buffer state (e.g. how much space is available now).
+ * Run at the beginning of each scheduling cycle in order to recognize external
+ * changes to the burst buffer state (e.g. capacity is added, removed, fails,
+ * etc.)
+ *
+ * init_config IN - true if called as part of slurmctld initialization
+ * Returns a SLURM errno.
+ */
+extern int bb_p_load_state(bool init_config)
+{
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	if (bb_state.bb_config.debug_flag)
+		info("%s: %s", plugin_type,  __func__);
+	_load_state(0);
+	if (init_config)
+		bb_set_tres_pos(&bb_state);
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Note configuration may have changed. Handle changes in BurstBufferParameters.
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_p_reconfig(void)
+{
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	if (bb_state.bb_config.debug_flag)
+		info("%s: %s", plugin_type,  __func__);
+	bb_load_config(&bb_state, (char *)plugin_type); /* Remove "const" */
+	_test_config();
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Pack current burst buffer state information for network transmission to
+ * user (e.g. "scontrol show burst")
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_p_state_pack(uid_t uid, Buf buffer, uint16_t protocol_version)
+{
+	uint32_t rec_count = 0;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	packstr(bb_state.name, buffer);
+	bb_pack_state(&bb_state, buffer, protocol_version);
+	if ((bb_state.bb_config.flags & BB_FLAG_PRIVATE_DATA) == 0)
+		uid = 0;	/* User can see all data */
+	rec_count = bb_pack_bufs(uid, &bb_state, buffer, protocol_version);
+	(void) bb_pack_usage(uid, &bb_state, buffer, protocol_version);
+	if (bb_state.bb_config.debug_flag) {
+		debug("%s: %s: record_count:%u",
+		      plugin_type,  __func__, rec_count);
+	}
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Preliminary validation of a job submit request with respect to burst buffer
+ * options. Performed after setting default account + qos, but prior to
+ * establishing job ID or creating script file.
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_p_job_validate(struct job_descriptor *job_desc,
+			     uid_t submit_uid)
+{
+	int64_t bb_size = 0;
+	char *key;
+	int i;
+
+	xassert(job_desc);
+	xassert(job_desc->tres_req_cnt);
+
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: %s: job_user_id:%u, submit_uid:%d",
+		     plugin_type, __func__, job_desc->user_id, submit_uid);
+		info("%s: burst_buffer:%s", __func__, job_desc->burst_buffer);
+		info("%s: script:%s", __func__, job_desc->script);
+	}
+
+	if (job_desc->burst_buffer) {
+		key = strstr(job_desc->burst_buffer, "size=");
+		if (key) {
+			bb_size = bb_get_size_num(key + 5,
+					bb_state.bb_config.granularity);
+		}
+	}
+	if (bb_size == 0)
+		return SLURM_SUCCESS;
+	if (bb_size < 0)
+		return ESLURM_BURST_BUFFER_LIMIT;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	if (bb_state.bb_config.allow_users) {
+		for (i = 0; bb_state.bb_config.allow_users[i]; i++) {
+			if (job_desc->user_id ==
+			    bb_state.bb_config.allow_users[i])
+				break;
+		}
+		if (bb_state.bb_config.allow_users[i] == 0) {
+			pthread_mutex_unlock(&bb_state.bb_mutex);
+			return ESLURM_BURST_BUFFER_PERMISSION;
+		}
+	}
+
+	if (bb_state.bb_config.deny_users) {
+		for (i = 0; bb_state.bb_config.deny_users[i]; i++) {
+			if (job_desc->user_id ==
+			    bb_state.bb_config.deny_users[i])
+				break;
+		}
+		if (bb_state.bb_config.deny_users[i] != 0) {
+			pthread_mutex_unlock(&bb_state.bb_mutex);
+			return ESLURM_BURST_BUFFER_PERMISSION;
+		}
+	}
+
+	if (bb_size > bb_state.total_space) {
+		info("Job from user %u requested burst buffer size of "
+		     "%"PRIu64", but total space is only %"PRIu64"",
+		     job_desc->user_id, bb_size, bb_state.total_space);
+	}
+
+	job_desc->tres_req_cnt[bb_state.tres_pos] = bb_size / (1024 * 1024);
+
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Secondary validation of a job submit request with respect to burst buffer
+ * options. Performed after establishing job ID and creating script file.
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_p_job_validate2(struct job_record *job_ptr, char **err_msg)
+{
+	/* This function is unused by this plugin type */
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Fill in the tres_cnt (in MB) based off the job record
+ * NOTE: Based upon job-specific burst buffers, excludes persistent buffers
+ * IN job_ptr - job record
+ * IN/OUT tres_cnt - fill in this already allocated array with tres_cnts
+ * IN locked - if the assoc_mgr tres read locked is locked or not
+ */
+extern void bb_p_job_set_tres_cnt(struct job_record *job_ptr,
+				  uint64_t *tres_cnt,
+				  bool locked)
+{
+	if (!tres_cnt) {
+		error("%s: No tres_cnt given when looking at job %u",
+		      __func__, job_ptr->job_id);
+	}
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	tres_cnt[bb_state.tres_pos] = _get_bb_size(job_ptr) / (1024 * 1024);
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+}
+
+/*
+ * For a given job, return our best guess if when it might be able to start
+ */
+extern time_t bb_p_job_get_est_start(struct job_record *job_ptr)
+{
+	bb_alloc_t *bb_ptr;
+	time_t est_start = time(NULL);
+	uint64_t bb_size;
+	int rc;
+	char jobid_buf[32];
+
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: %s: %s", plugin_type, __func__,
+		     jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+	}
+
+	if ((job_ptr->burst_buffer == NULL) ||
+	    (job_ptr->burst_buffer[0] == '\0') ||
+	    ((bb_size = _get_bb_size(job_ptr)) == 0))
+		return est_start;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	bb_ptr = bb_find_alloc_rec(&bb_state, job_ptr);
+	if (!bb_ptr) {
+		rc = _test_size_limit(job_ptr, bb_size);
+		if (rc == 0) {		/* Could start now */
+			;
+		} else if (rc == 1) {	/* Exceeds configured limits */
+			est_start += 365 * 24 * 60 * 60;
+		} else {		/* No space currently available */
+			est_start = MAX(est_start, bb_state.next_end_time);
+		}
+	} else if (bb_ptr->state < BB_STATE_STAGED_IN) {
+		est_start++;
+	}
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return est_start;
+}
+
+static void _alloc_job_bb(struct job_record *job_ptr, uint64_t bb_size)
+{
+	char **script_argv, *resp;
+	bb_alloc_t *bb_ptr;
+	int i, status = 0;
+	bb_job_t *bb_spec;
+	char jobid_buf[32];
+
+	bb_spec = xmalloc(sizeof(bb_job_t));
+	bb_spec->total_size = bb_size;
+	bb_ptr = bb_alloc_job(&bb_state, job_ptr, bb_spec);
+	xfree(bb_spec);
+
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: start stage-in %s", __func__,
+		     jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+	}
+	script_argv = _build_stage_args(bb_state.bb_config.start_stage_in,
+					"start_stage_in", job_ptr, bb_size);
+	if (script_argv) {
+		bb_ptr->state = BB_STATE_STAGING_IN;
+		bb_ptr->state_time = time(NULL);
+		resp = bb_run_script("StartStageIn",
+				     bb_state.bb_config.start_stage_in,
+				     script_argv, -1, &status);
+		if (resp) {
+			error("%s: StartStageIn: %s", __func__, resp);
+			xfree(resp);
+		}
+		for (i = 0; script_argv[i]; i++)
+			xfree(script_argv[i]);
+		xfree(script_argv);
+	} else {
+		bb_ptr->state = BB_STATE_STAGED_IN;
+		bb_ptr->state_time = time(NULL);
+	}
+}
+
+/*
+ * Attempt to allocate resources and begin file staging for pending jobs.
+ */
+extern int bb_p_job_try_stage_in(List job_queue)
+{
+	bb_job_queue_rec_t *job_rec;
+	List job_candidates;
+	ListIterator job_iter;
+	struct job_record *job_ptr;
+	uint64_t bb_size;
+	int rc;
+
+	if (bb_state.bb_config.debug_flag)
+		info("%s: %s", plugin_type,  __func__);
+
+	if (!bb_state.bb_config.start_stage_in)
+		return SLURM_ERROR;
+
+	/* Identify candidates to be allocated burst buffers */
+	job_candidates = list_create(bb_job_queue_del);
+	job_iter = list_iterator_create(job_queue);
+	while ((job_ptr = list_next(job_iter))) {
+		if (!IS_JOB_PENDING(job_ptr) ||
+		    (job_ptr->start_time == 0) ||
+		    (job_ptr->burst_buffer == NULL) ||
+		    (job_ptr->burst_buffer[0] == '\0'))
+			continue;
+		if (job_ptr->array_recs && (job_ptr->array_task_id == NO_VAL))
+			continue;
+		bb_size = _get_bb_size(job_ptr);
+		if (bb_size == 0)
+			continue;
+		job_rec = xmalloc(sizeof(bb_job_queue_rec_t));
+		job_rec->job_ptr = job_ptr;
+		job_rec->bb_size = bb_size;
+		list_push(job_candidates, job_rec);
+	}
+	list_iterator_destroy(job_iter);
+
+	/* Sort in order of expected start time */
+	list_sort(job_candidates, bb_job_queue_sort);
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	bb_set_use_time(&bb_state);
+	job_iter = list_iterator_create(job_candidates);
+	while ((job_rec = list_next(job_iter))) {
+		job_ptr = job_rec->job_ptr;
+		bb_size = job_rec->bb_size;
+
+		if (bb_find_alloc_rec(&bb_state, job_ptr))
+			continue;
+
+		rc = _test_size_limit(job_ptr, bb_size);
+		if (rc == 1)
+			continue;
+		else if (rc == 2)
+			break;
+
+		_alloc_job_bb(job_ptr, bb_size);
+	}
+	list_iterator_destroy(job_iter);
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+	FREE_NULL_LIST(job_candidates);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Determine if a job's burst buffer stage-in is complete
+ * job_ptr IN - Job to test
+ * test_only IN - If false, then attempt to allocate burst buffer if possible
+ *
+ * RET: 0 - stage-in is underway
+ *      1 - stage-in complete
+ *     -1 - stage-in not started or burst buffer in some unexpected state
+ */
+extern int bb_p_job_test_stage_in(struct job_record *job_ptr, bool test_only)
+{
+	bb_alloc_t *bb_ptr;
+	uint64_t bb_size = 0;
+	int rc = 1;
+	char jobid_buf[32];
+
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: %s: %s", plugin_type, __func__,
+		     jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+	}
+
+	if ((job_ptr->burst_buffer == NULL) ||
+	    (job_ptr->burst_buffer[0] == '\0') ||
+	    ((bb_size = _get_bb_size(job_ptr)) == 0))
+		return rc;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	bb_ptr = bb_find_alloc_rec(&bb_state, job_ptr);
+	if (!bb_ptr) {
+		debug("%s: %s bb_rec not found", __func__,
+		      jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+		rc = -1;
+		if ((test_only == false) &&
+		    (_test_size_limit(job_ptr, bb_size) == 0))
+			_alloc_job_bb(job_ptr, bb_size);
+	} else {
+		if (bb_ptr->state < BB_STATE_STAGED_IN)
+			_load_state(job_ptr->job_id);
+		if (bb_ptr->state < BB_STATE_STAGED_IN) {
+			rc = 0;
+		} else if (bb_ptr->state == BB_STATE_STAGED_IN) {
+			rc = 1;
+		} else {
+			error("%s: %s bb_state:%u", __func__,
+			      jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)),
+			      bb_ptr->state);
+			rc = -1;
+		}
+	}
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+	return rc;
+}
+
+/* Attempt to claim burst buffer resources.
+ * At this time, bb_g_job_test_stage_in() should have been run successfully AND
+ * the compute nodes selected for the job.
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_p_job_begin(struct job_record *job_ptr)
+{
+	bb_alloc_t *bb_ptr;
+
+	if ((job_ptr->burst_buffer == NULL) ||
+	    (job_ptr->burst_buffer[0] == '\0') ||
+	    (_get_bb_size(job_ptr) == 0))
+		return SLURM_SUCCESS;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	bb_ptr = bb_find_alloc_rec(&bb_state, job_ptr);
+	if (bb_ptr)
+		bb_ptr->state = BB_STATE_RUNNING;
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Trigger a job's burst buffer stage-out to begin
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_p_job_start_stage_out(struct job_record *job_ptr)
+{
+//FIXME: How to handle various job terminate states (e.g. requeue, failure), user script controlled?
+//FIXME: Test for memory leaks
+	bb_alloc_t *bb_ptr;
+	char **script_argv, *resp;
+	int i, status = 0;
+	char jobid_buf[32];
+
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: %s: %s", plugin_type, __func__,
+		     jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+	}
+
+	if (!bb_state.bb_config.start_stage_out)
+		return SLURM_ERROR;
+
+	if ((job_ptr->burst_buffer == NULL) ||
+	    (job_ptr->burst_buffer[0] == '\0') ||
+	    (_get_bb_size(job_ptr) == 0))
+		return SLURM_SUCCESS;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	bb_ptr = bb_find_alloc_rec(&bb_state, job_ptr);
+	if (!bb_ptr) {
+		/* No job buffers. Assuming use of persistent buffers only */
+		debug("%s: %s bb_rec not found", __func__,
+		      jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+	} else {
+		script_argv = _build_stage_args(bb_state.bb_config.start_stage_out,
+						"start_stage_out", job_ptr,
+						bb_ptr->size);
+		if (script_argv) {
+			bb_ptr->state = BB_STATE_STAGING_OUT;
+			bb_ptr->state_time = time(NULL);
+			resp = bb_run_script("StartStageOut",
+					     bb_state.bb_config.start_stage_out,
+					     script_argv, -1, &status);
+			if (resp) {
+				error("%s: StartStageOut: %s", __func__, resp);
+				xfree(resp);
+			}
+			for (i = 0; script_argv[i]; i++)
+				xfree(script_argv[i]);
+			xfree(script_argv);
+		} else {
+			bb_ptr->state = BB_STATE_STAGED_OUT;
+			bb_ptr->state_time = time(NULL);
+		}
+	}
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Determine if a job's burst buffer stage-out is complete
+ *
+ * RET: 0 - stage-out is underway
+ *      1 - stage-out complete
+ *     -1 - fatal error
+ */
+extern int bb_p_job_test_stage_out(struct job_record *job_ptr)
+{
+	bb_alloc_t *bb_ptr;
+	int rc = -1;
+	char jobid_buf[32];
+
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: %s: %s", plugin_type, __func__,
+		     jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+	}
+
+	if ((job_ptr->burst_buffer == NULL) ||
+	    (job_ptr->burst_buffer[0] == '\0') ||
+	    (_get_bb_size(job_ptr) == 0))
+		return 1;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	bb_ptr = bb_find_alloc_rec(&bb_state, job_ptr);
+	if (!bb_ptr) {
+		/* No job buffers. Assuming use of persistent buffers only */
+		debug("%s: %s bb_rec not found", __func__,
+		      jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+		rc =  1;
+	} else {
+		if (bb_ptr->state < BB_STATE_STAGED_OUT)
+			_load_state(job_ptr->job_id);
+		if (bb_ptr->state == BB_STATE_STAGING_OUT) {
+			rc =  0;
+		} else if (bb_ptr->state == BB_STATE_STAGED_OUT) {
+			if (bb_ptr->size != 0) {
+//FIXME: VESTIGIAL: Use bb_limit_rem
+//				bb_remove_user_load(bb_ptr, &bb_state);
+				bb_ptr->size = 0;
+			}
+			rc =  1;
+		} else {
+			error("%s: %s bb_state:%u", __func__,
+			      jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)),
+			      bb_ptr->state);
+			rc = -1;
+		}
+	}
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return rc;
+}
+
+/*
+ * Terminate any file staging and completely release burst buffer resources
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_p_job_cancel(struct job_record *job_ptr)
+{
+	bb_alloc_t *bb_ptr;
+	char **script_argv, *resp;
+	int i, status = 0;
+	char jobid_buf[32];
+
+	if (bb_state.bb_config.debug_flag) {
+		info("%s: %s: %s", plugin_type, __func__,
+		     jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+	}
+
+	if (!bb_state.bb_config.stop_stage_out)
+		return SLURM_ERROR;
+
+	if ((job_ptr->burst_buffer == NULL) ||
+	    (job_ptr->burst_buffer[0] == '\0') ||
+	    (_get_bb_size(job_ptr) == 0))
+		return SLURM_SUCCESS;
+
+	pthread_mutex_lock(&bb_state.bb_mutex);
+	bb_ptr = bb_find_alloc_rec(&bb_state, job_ptr);
+	if (!bb_ptr) {
+		_stop_stage_out(job_ptr->job_id);
+	} else {
+		script_argv = _build_stage_args(bb_state.bb_config.stop_stage_out,
+						"stop_stage_out", job_ptr, 0);
+		if (script_argv) {
+			bb_ptr->state = BB_STATE_STAGED_OUT;
+			bb_ptr->state_time = time(NULL);
+			resp = bb_run_script("StopStageOut",
+					     bb_state.bb_config.stop_stage_out,
+					     script_argv, -1, &status);
+			if (resp) {
+				error("%s: StopStageOut: %s", __func__, resp);
+				xfree(resp);
+			}
+			for (i = 0; script_argv[i]; i++)
+				xfree(script_argv[i]);
+			xfree(script_argv);
+		} else {
+			_stop_stage_out(job_ptr->job_id);
+			bb_ptr->cancelled = true;
+			bb_ptr->end_time = 0;
+			bb_ptr->state = BB_STATE_STAGED_OUT;
+			bb_ptr->state_time = time(NULL);
+		}
+	}
+	pthread_mutex_unlock(&bb_state.bb_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * Translate a burst buffer string to its equivalent TRES string
+ * Caller must xfree the return value
+ */
+extern char *bb_p_xlate_bb_2_tres_str(char *burst_buffer)
+{
+	return NULL;
+}
diff --git a/src/plugins/checkpoint/Makefile.in b/src/plugins/checkpoint/Makefile.in
index 3e68f8ba9..a40344fcc 100644
--- a/src/plugins/checkpoint/Makefile.in
+++ b/src/plugins/checkpoint/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/checkpoint/aix/Makefile.in b/src/plugins/checkpoint/aix/Makefile.in
index 2b77c2ce3..80a16cb90 100644
--- a/src/plugins/checkpoint/aix/Makefile.in
+++ b/src/plugins/checkpoint/aix/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -278,6 +281,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -327,8 +332,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -347,6 +356,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -390,6 +402,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -413,6 +426,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/checkpoint/aix/checkpoint_aix.c b/src/plugins/checkpoint/aix/checkpoint_aix.c
index c00c2b7e4..f36cb8379 100644
--- a/src/plugins/checkpoint/aix/checkpoint_aix.c
+++ b/src/plugins/checkpoint/aix/checkpoint_aix.c
@@ -134,15 +134,12 @@ static void  _ckpt_signal_step(struct ckpt_timeout_info *rec);
  * only load checkpoint plugins if the plugin_type string has a
  * prefix of "checkpoint/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the checkpoint API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Checkpoint AIX plugin";
 const char plugin_type[]       	= "checkpoint/aix";
-const uint32_t plugin_version	= 100;
+const uint32_t plugin_version	= SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -343,14 +340,6 @@ extern int slurm_ckpt_pack_job(check_jobinfo_t jobinfo, Buf buffer,
 		set_buf_offset(buffer, x);
 		pack32(z - y, buffer);
 		set_buf_offset(buffer, z);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		pack16(check_ptr->disabled, buffer);
-		pack16(check_ptr->node_cnt, buffer);
-		pack16(check_ptr->reply_cnt, buffer);
-		pack16(check_ptr->wait_time, buffer);
-		pack32(check_ptr->error_code, buffer);
-		packstr(check_ptr->error_msg, buffer);
-		pack_time(check_ptr->time_stamp, buffer);
 	}
 
 	return SLURM_SUCCESS;
@@ -383,15 +372,6 @@ extern int slurm_ckpt_unpack_job(check_jobinfo_t jobinfo, Buf buffer,
 					       &uint32_tmp, buffer);
 			safe_unpack_time(&check_ptr->time_stamp, buffer);
 		}
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		safe_unpack16(&check_ptr->disabled, buffer);
-		safe_unpack16(&check_ptr->node_cnt, buffer);
-		safe_unpack16(&check_ptr->reply_cnt, buffer);
-		safe_unpack16(&check_ptr->wait_time, buffer);
-		safe_unpack32(&check_ptr->error_code, buffer);
-		safe_unpackstr_xmalloc(&check_ptr->error_msg,
-				       &uint32_tmp, buffer);
-		safe_unpack_time(&check_ptr->time_stamp, buffer);
 	}
 
 	return SLURM_SUCCESS;
diff --git a/src/plugins/checkpoint/blcr/Makefile.in b/src/plugins/checkpoint/blcr/Makefile.in
index 15537f561..084ee29aa 100644
--- a/src/plugins/checkpoint/blcr/Makefile.in
+++ b/src/plugins/checkpoint/blcr/Makefile.in
@@ -103,6 +103,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -111,10 +112,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -127,7 +130,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -284,6 +287,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -333,8 +338,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -353,6 +362,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -396,6 +408,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -419,6 +432,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/checkpoint/blcr/checkpoint_blcr.c b/src/plugins/checkpoint/blcr/checkpoint_blcr.c
index 4cfd8b46a..59afe0068 100644
--- a/src/plugins/checkpoint/blcr/checkpoint_blcr.c
+++ b/src/plugins/checkpoint/blcr/checkpoint_blcr.c
@@ -148,15 +148,12 @@ static pthread_cond_t ckpt_agent_cond = PTHREAD_COND_INITIALIZER;
  * only load checkpoint plugins if the plugin_type string has a
  * prefix of "checkpoint/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the checkpoint API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "BLCR checkpoint plugin";
 const char plugin_type[]       	= "checkpoint/blcr";
-const uint32_t plugin_version	= 100;
+const uint32_t plugin_version	= SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -367,11 +364,6 @@ extern int slurm_ckpt_pack_job(check_jobinfo_t jobinfo, Buf buffer,
 		set_buf_offset(buffer, x);
 		pack32(z - y, buffer);
 		set_buf_offset(buffer, z);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		pack16(check_ptr->disabled, buffer);
-		pack_time(check_ptr->time_stamp, buffer);
-		pack32(check_ptr->error_code, buffer);
-		packstr(check_ptr->error_msg, buffer);
 	}
 
 	return SLURM_SUCCESS;
@@ -402,13 +394,8 @@ extern int slurm_ckpt_unpack_job(check_jobinfo_t jobinfo, Buf buffer,
 					       &uint32_tmp, buffer);
 		}
 
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		safe_unpack16(&check_ptr->disabled, buffer);
-		safe_unpack_time(&check_ptr->time_stamp, buffer);
-		safe_unpack32(&check_ptr->error_code, buffer);
-		safe_unpackstr_xmalloc(&check_ptr->error_msg,
-				       &uint32_tmp, buffer);
 	}
+
 	return SLURM_SUCCESS;
 
     unpack_error:
diff --git a/src/plugins/checkpoint/none/Makefile.in b/src/plugins/checkpoint/none/Makefile.in
index f9cfe8005..6a35a4400 100644
--- a/src/plugins/checkpoint/none/Makefile.in
+++ b/src/plugins/checkpoint/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/checkpoint/none/checkpoint_none.c b/src/plugins/checkpoint/none/checkpoint_none.c
index 69e0cd473..d9e9c006a 100644
--- a/src/plugins/checkpoint/none/checkpoint_none.c
+++ b/src/plugins/checkpoint/none/checkpoint_none.c
@@ -77,15 +77,12 @@
  * only load checkpoint plugins if the plugin_type string has a
  * prefix of "checkpoint/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the checkpoint API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Checkpoint NONE plugin";
 const char plugin_type[]       	= "checkpoint/none";
-const uint32_t plugin_version	= 100;
+const uint32_t plugin_version	= SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
diff --git a/src/plugins/checkpoint/ompi/Makefile.in b/src/plugins/checkpoint/ompi/Makefile.in
index 53b7fb346..7285011e8 100644
--- a/src/plugins/checkpoint/ompi/Makefile.in
+++ b/src/plugins/checkpoint/ompi/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/checkpoint/ompi/checkpoint_ompi.c b/src/plugins/checkpoint/ompi/checkpoint_ompi.c
index 856b2a850..0b2d346ff 100644
--- a/src/plugins/checkpoint/ompi/checkpoint_ompi.c
+++ b/src/plugins/checkpoint/ompi/checkpoint_ompi.c
@@ -93,15 +93,12 @@ static int _ckpt_step(struct step_record * step_ptr, uint16_t wait, int vacate);
  * only load checkpoint plugins if the plugin_type string has a
  * prefix of "checkpoint/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the checkpoint API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "OpenMPI checkpoint plugin";
 const char plugin_type[]       	= "checkpoint/ompi";
-const uint32_t plugin_version	= 100;
+const uint32_t plugin_version	= SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -275,13 +272,6 @@ extern int slurm_ckpt_pack_job(check_jobinfo_t jobinfo, Buf buffer,
 		set_buf_offset(buffer, x);
 		pack32(z - y, buffer);
 		set_buf_offset(buffer, z);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		pack16(check_ptr->disabled, buffer);
-		pack16(check_ptr->reply_cnt, buffer);
-		pack16(check_ptr->wait_time, buffer);
-		pack32(check_ptr->error_code, buffer);
-		packstr(check_ptr->error_msg, buffer);
-		pack_time(check_ptr->time_stamp, buffer);
 	}
 
 	return SLURM_SUCCESS;
@@ -313,14 +303,6 @@ extern int slurm_ckpt_unpack_job(check_jobinfo_t jobinfo, Buf buffer,
 					       &uint32_tmp, buffer);
 			safe_unpack_time(&check_ptr->time_stamp, buffer);
 		}
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		safe_unpack16(&check_ptr->disabled, buffer);
-		safe_unpack16(&check_ptr->reply_cnt, buffer);
-		safe_unpack16(&check_ptr->wait_time, buffer);
-		safe_unpack32(&check_ptr->error_code, buffer);
-		safe_unpackstr_xmalloc(&check_ptr->error_msg,
-				       &uint32_tmp, buffer);
-		safe_unpack_time(&check_ptr->time_stamp, buffer);
 	}
 
 	return SLURM_SUCCESS;
diff --git a/src/plugins/checkpoint/poe/Makefile.in b/src/plugins/checkpoint/poe/Makefile.in
index 881fc8ca5..128d0bdc1 100644
--- a/src/plugins/checkpoint/poe/Makefile.in
+++ b/src/plugins/checkpoint/poe/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -278,6 +281,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -327,8 +332,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -347,6 +356,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -390,6 +402,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -413,6 +426,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/checkpoint/poe/checkpoint_poe.c b/src/plugins/checkpoint/poe/checkpoint_poe.c
index 401a1c296..718a96750 100644
--- a/src/plugins/checkpoint/poe/checkpoint_poe.c
+++ b/src/plugins/checkpoint/poe/checkpoint_poe.c
@@ -142,15 +142,12 @@ static void  _ckpt_signal_step(struct ckpt_timeout_info *rec);
  * only load checkpoint plugins if the plugin_type string has a
  * prefix of "checkpoint/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the checkpoint API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Checkpoint POE plugin";
 const char plugin_type[]       	= "checkpoint/poe";
-const uint32_t plugin_version	= 100;
+const uint32_t plugin_version	= SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -349,14 +346,6 @@ extern int slurm_ckpt_pack_job(check_jobinfo_t jobinfo, Buf buffer,
 		set_buf_offset(buffer, x);
 		pack32(z - y, buffer);
 		set_buf_offset(buffer, z);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		pack16(check_ptr->disabled, buffer);
-		pack16(check_ptr->node_cnt, buffer);
-		pack16(check_ptr->reply_cnt, buffer);
-		pack16(check_ptr->wait_time, buffer);
-		pack32(check_ptr->error_code, buffer);
-		packstr(check_ptr->error_msg, buffer);
-		pack_time(check_ptr->time_stamp, buffer);
 	}
 
 	return SLURM_SUCCESS;
@@ -389,15 +378,6 @@ extern int slurm_ckpt_unpack_job(check_jobinfo_t jobinfo, Buf buffer,
 					       &uint32_tmp, buffer);
 			safe_unpack_time(&check_ptr->time_stamp, buffer);
 		}
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		safe_unpack16(&check_ptr->disabled, buffer);
-		safe_unpack16(&check_ptr->node_cnt, buffer);
-		safe_unpack16(&check_ptr->reply_cnt, buffer);
-		safe_unpack16(&check_ptr->wait_time, buffer);
-		safe_unpack32(&check_ptr->error_code, buffer);
-		safe_unpackstr_xmalloc(&check_ptr->error_msg,
-				       &uint32_tmp, buffer);
-		safe_unpack_time(&check_ptr->time_stamp, buffer);
 	}
 
 	return SLURM_SUCCESS;
diff --git a/src/plugins/core_spec/Makefile.in b/src/plugins/core_spec/Makefile.in
index 62916c810..e29fb4dbe 100644
--- a/src/plugins/core_spec/Makefile.in
+++ b/src/plugins/core_spec/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/core_spec/cray/Makefile.in b/src/plugins/core_spec/cray/Makefile.in
index 84637d99d..f43344f07 100644
--- a/src/plugins/core_spec/cray/Makefile.in
+++ b/src/plugins/core_spec/cray/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/core_spec/cray/core_spec_cray.c b/src/plugins/core_spec/cray/core_spec_cray.c
index 0decfd390..70990e9a7 100644
--- a/src/plugins/core_spec/cray/core_spec_cray.c
+++ b/src/plugins/core_spec/cray/core_spec_cray.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  core_spec_cray.c - Cray core specialization plugin.
  *****************************************************************************
- *  Copyright (C) 2014 SchedMD LLC
+ *  Copyright (C) 2014-2015 SchedMD LLC
  *  Written by Morris Jette <jette@schemd.com>
  *
  *  This file is part of SLURM, a resource management program.
@@ -61,6 +61,7 @@
 
 #include <stdio.h>
 
+#include "slurm/slurm.h"
 #include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 
@@ -94,13 +95,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incoming
- *                    messages that this plugin can accept
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Cray core specialization plugin";
 const char plugin_type[]       	= "core_spec/cray";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 // If job_set_corespec fails, retry this many times to wait
 // for suspends to complete.
@@ -126,7 +126,20 @@ extern int fini(void)
 extern int core_spec_p_set(uint64_t cont_id, uint16_t core_count)
 {
 #if _DEBUG
-	info("core_spec_p_set(%"PRIu64") to %u", cont_id, core_count);
+	char *spec_type;
+	int spec_count;
+	if (core_count == (uint16_t) NO_VAL) {
+		spec_type  = "Cores";
+		spec_count = 0;
+	} else if (core_count & CORE_SPEC_THREAD) {
+		spec_type  = "Threads";
+		spec_count = core_count & (~CORE_SPEC_THREAD);
+	} else {
+		spec_type  = "Cores";
+		spec_count = core_count;
+	}
+	info("core_spec_p_set(%"PRIu64") to %d %s",
+	     cont_id, spec_count, spec_type);
 #endif
 
 #ifdef HAVE_NATIVE_CRAY
@@ -136,9 +149,11 @@ extern int core_spec_p_set(uint64_t cont_id, uint16_t core_count)
 	int i;
 
 	// Skip core spec setup for no specialized cores
-	if ((core_count == (uint16_t) NO_VAL) || (core_count < 1)) {
+	if ((core_count == (uint16_t) NO_VAL) ||
+	    (core_count == CORE_SPEC_THREAD)) {
 		return SLURM_SUCCESS;
 	}
+	core_count &= (~CORE_SPEC_THREAD);
 
 	// Set the core spec information
 	// Retry because there's a small timing window during preemption
@@ -223,7 +238,20 @@ extern int core_spec_p_clear(uint64_t cont_id)
 extern int core_spec_p_suspend(uint64_t cont_id, uint16_t core_count)
 {
 #if _DEBUG
-	info("core_spec_p_suspend(%"PRIu64") count %u", cont_id, core_count);
+	char *spec_type;
+	int spec_count;
+	if (core_count == (uint16_t) NO_VAL) {
+		spec_type  = "Cores";
+		spec_count = 0;
+	} else if (core_count & CORE_SPEC_THREAD) {
+		spec_type  = "Threads";
+		spec_count = core_count & (~CORE_SPEC_THREAD);
+	} else {
+		spec_type  = "Cores";
+		spec_count = core_count;
+	}
+	info("core_spec_p_suspend(%"PRIu64") count %d %s",
+	     cont_id, spec_count, spec_type);
 #endif
 	// The code that was here is now performed by
 	// switch_p_job_step_{pre,post}_suspend()
@@ -238,7 +266,20 @@ extern int core_spec_p_suspend(uint64_t cont_id, uint16_t core_count)
 extern int core_spec_p_resume(uint64_t cont_id, uint16_t core_count)
 {
 #if _DEBUG
-	info("core_spec_p_resume(%"PRIu64") count %u", cont_id, core_count);
+	char *spec_type;
+	int spec_count;
+	if (core_count == (uint16_t) NO_VAL) {
+		spec_type  = "Cores";
+		spec_count = 0;
+	} else if (core_count & CORE_SPEC_THREAD) {
+		spec_type  = "Threads";
+		spec_count = core_count & (~CORE_SPEC_THREAD);
+	} else {
+		spec_type  = "Cores";
+		spec_count = core_count;
+	}
+	info("core_spec_p_resume(%"PRIu64") count %d %s",
+	     cont_id, spec_count, spec_type);
 #endif
 	// The code that was here is now performed by
 	// switch_p_job_step_{pre,post}_resume()
diff --git a/src/plugins/core_spec/none/Makefile.in b/src/plugins/core_spec/none/Makefile.in
index ead109da1..59827614f 100644
--- a/src/plugins/core_spec/none/Makefile.in
+++ b/src/plugins/core_spec/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/core_spec/none/core_spec_none.c b/src/plugins/core_spec/none/core_spec_none.c
index 71d13efb8..2d4749d15 100644
--- a/src/plugins/core_spec/none/core_spec_none.c
+++ b/src/plugins/core_spec/none/core_spec_none.c
@@ -1,7 +1,7 @@
 /*****************************************************************************\
  *  core_spec_none.c - NO-OP slurm core specialization plugin.
  *****************************************************************************
- *  Copyright (C) 2014 SchedMD LLC
+ *  Copyright (C) 2014-2015 SchedMD LLC
  *  Written by Morris Jette <jette@schemd.com>
  *
  *  This file is part of SLURM, a resource management program.
@@ -61,6 +61,7 @@
 
 #include <stdio.h>
 
+#include "slurm/slurm.h"
 #include "slurm/slurm_errno.h"
 #include "src/common/slurm_xlator.h"
 
@@ -89,13 +90,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incoming
- *                    messages that this plugin can accept
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Null core specialization plugin";
 const char plugin_type[]       	= "core_spec/none";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 extern int init(void)
 {
@@ -115,7 +115,20 @@ extern int fini(void)
 extern int core_spec_p_set(uint64_t cont_id, uint16_t core_count)
 {
 #if _DEBUG
-	info("core_spec_p_set(%"PRIu64") to %u", cont_id, core_count);
+	char *spec_type;
+	int spec_count;
+	if (core_count == (uint16_t) NO_VAL) {
+		spec_type  = "Cores";
+		spec_count = 0;
+	} else if (core_count & CORE_SPEC_THREAD) {
+		spec_type  = "Threads";
+		spec_count = core_count & (~CORE_SPEC_THREAD);
+	} else {
+		spec_type  = "Cores";
+		spec_count = core_count;
+	}
+	info("core_spec_p_set(%"PRIu64") to %d %s",
+	     cont_id, spec_count, spec_type);
 #endif
 	return SLURM_SUCCESS;
 }
@@ -141,7 +154,20 @@ extern int core_spec_p_clear(uint64_t cont_id)
 extern int core_spec_p_suspend(uint64_t cont_id, uint16_t core_count)
 {
 #if _DEBUG
-	info("core_spec_p_suspend(%"PRIu64") count %u", cont_id, core_count);
+	char *spec_type;
+	int spec_count;
+	if (core_count == (uint16_t) NO_VAL) {
+		spec_type  = "Cores";
+		spec_count = 0;
+	} else if (core_count & CORE_SPEC_THREAD) {
+		spec_type  = "Threads";
+		spec_count = core_count & (~CORE_SPEC_THREAD);
+	} else {
+		spec_type  = "Cores";
+		spec_count = core_count;
+	}
+	info("core_spec_p_suspend(%"PRIu64") count %d %s",
+	     cont_id, spec_count, spec_type);
 #endif
 	return SLURM_SUCCESS;
 }
@@ -154,7 +180,20 @@ extern int core_spec_p_suspend(uint64_t cont_id, uint16_t core_count)
 extern int core_spec_p_resume(uint64_t cont_id, uint16_t core_count)
 {
 #if _DEBUG
-	info("core_spec_p_resume(%"PRIu64") count %u", cont_id, core_count);
+	char *spec_type;
+	int spec_count;
+	if (core_count == (uint16_t) NO_VAL) {
+		spec_type  = "Cores";
+		spec_count = 0;
+	} else if (core_count & CORE_SPEC_THREAD) {
+		spec_type  = "Threads";
+		spec_count = core_count & (~CORE_SPEC_THREAD);
+	} else {
+		spec_type  = "Cores";
+		spec_count = core_count;
+	}
+	info("core_spec_p_resume(%"PRIu64") count %d %s",
+	     cont_id, spec_count, spec_type);
 #endif
 	return SLURM_SUCCESS;
 }
diff --git a/src/plugins/crypto/Makefile.in b/src/plugins/crypto/Makefile.in
index d4bccc204..c4ada14da 100644
--- a/src/plugins/crypto/Makefile.in
+++ b/src/plugins/crypto/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/crypto/munge/Makefile.in b/src/plugins/crypto/munge/Makefile.in
index 02718e385..2d794e35a 100644
--- a/src/plugins/crypto/munge/Makefile.in
+++ b/src/plugins/crypto/munge/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -275,6 +278,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -324,8 +329,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -344,6 +353,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -387,6 +399,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -410,6 +423,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/crypto/munge/crypto_munge.c b/src/plugins/crypto/munge/crypto_munge.c
index 3a0a436ca..14947c82e 100644
--- a/src/plugins/crypto/munge/crypto_munge.c
+++ b/src/plugins/crypto/munge/crypto_munge.c
@@ -90,16 +90,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the authentication API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "Munge cryptographic signature plugin";
 const char plugin_type[]        = "crypto/munge";
-const uint32_t plugin_version   = 90;
-
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 /*
  *  Error codes local to this plugin:
@@ -113,9 +109,7 @@ enum local_error_code {
 
 static uid_t slurm_user = 0;
 
-/* Convert AuthInfo to a socket path. Accepts two input formats:
- * 1) <path>		(Old format)
- * 2) socket=<path>[,]	(New format)
+/* Convert AuthInfo to a socket path. Parses input format "socket=<path>[,]".
  * NOTE: Caller must xfree return value
  */
 static char *_auth_opts_to_socket(void)
@@ -123,19 +117,15 @@ static char *_auth_opts_to_socket(void)
 	char *socket = NULL, *sep, *tmp;
 	char *opts = slurm_get_auth_info();
 
-	if (!opts)
-		return NULL;
-
-	tmp = strstr(opts, "socket=");
-	if (tmp) {	/* New format */
-		socket = xstrdup(tmp + 7);
-		sep = strchr(socket, ',');
-		if (sep)
-			sep[0] = '\0';
-	} else if (strchr(opts, '=')) {
-		;	/* New format, but socket not specified */
-	} else {
-		socket = xstrdup(tmp);	/* Old format */
+	if (opts) {
+		tmp = strstr(opts, "socket=");
+		if (tmp) {	/* New format */
+			socket = xstrdup(tmp + 7);
+			sep = strchr(socket, ',');
+			if (sep)
+				sep[0] = '\0';
+		}
+		xfree(opts);
 	}
 
 	return socket;
diff --git a/src/plugins/crypto/openssl/Makefile.in b/src/plugins/crypto/openssl/Makefile.in
index 06a907a11..1227bb1ca 100644
--- a/src/plugins/crypto/openssl/Makefile.in
+++ b/src/plugins/crypto/openssl/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -280,6 +283,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -329,8 +334,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -349,6 +358,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -392,6 +404,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -415,6 +428,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/crypto/openssl/crypto_openssl.c b/src/plugins/crypto/openssl/crypto_openssl.c
index f110a6250..2fa9767aa 100644
--- a/src/plugins/crypto/openssl/crypto_openssl.c
+++ b/src/plugins/crypto/openssl/crypto_openssl.c
@@ -86,15 +86,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the authentication API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "OpenSSL cryptographic signature plugin";
 const char plugin_type[]        = "crypto/openssl";
-const uint32_t plugin_version   = 90;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
diff --git a/src/plugins/ext_sensors/Makefile.in b/src/plugins/ext_sensors/Makefile.in
index 480e65262..2e35ca9a8 100644
--- a/src/plugins/ext_sensors/Makefile.in
+++ b/src/plugins/ext_sensors/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/ext_sensors/none/Makefile.in b/src/plugins/ext_sensors/none/Makefile.in
index 784410111..2e1519450 100644
--- a/src/plugins/ext_sensors/none/Makefile.in
+++ b/src/plugins/ext_sensors/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/ext_sensors/none/ext_sensors_none.c b/src/plugins/ext_sensors/none/ext_sensors_none.c
index ab39beba5..a0f309a5c 100644
--- a/src/plugins/ext_sensors/none/ext_sensors_none.c
+++ b/src/plugins/ext_sensors/none/ext_sensors_none.c
@@ -77,16 +77,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "ExtSensors NONE plugin";
 const char plugin_type[] = "ext_sensors/none";
-const uint32_t plugin_version = 100;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 
 extern int ext_sensors_read_conf(void)
diff --git a/src/plugins/ext_sensors/rrd/Makefile.in b/src/plugins/ext_sensors/rrd/Makefile.in
index c7e4e90c2..35cf90c93 100644
--- a/src/plugins/ext_sensors/rrd/Makefile.in
+++ b/src/plugins/ext_sensors/rrd/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -282,6 +285,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -331,8 +336,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -351,6 +360,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -394,6 +406,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -417,6 +430,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/ext_sensors/rrd/ext_sensors_rrd.c b/src/plugins/ext_sensors/rrd/ext_sensors_rrd.c
index 6646248cd..d6aa23467 100644
--- a/src/plugins/ext_sensors/rrd/ext_sensors_rrd.c
+++ b/src/plugins/ext_sensors/rrd/ext_sensors_rrd.c
@@ -90,16 +90,12 @@ enum ext_sensors_value_type {
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "ExtSensors rrd plugin";
 const char plugin_type[] = "ext_sensors/rrd";
-const uint32_t plugin_version = 100;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 static uint64_t debug_flags = 0;
 static ext_sensors_conf_t ext_sensors_conf;
@@ -122,7 +118,7 @@ static rrd_value_t _validate_watt(rrd_value_t *v);
 static char* _get_node_rrd_path(char* component_name,
 				enum ext_sensors_value_type sensor_type);
 static uint32_t _rrd_get_last_one(char* filename, char* rra_name);
-static uint32_t _rrd_consolidate_one(time_t t0, time_t t1,
+static uint64_t _rrd_consolidate_one(time_t t0, time_t t1,
 				     char* filename, char* rra_name,
 				     bool flag_approximate);
 
@@ -247,7 +243,7 @@ static uint32_t _rrd_get_last_one(char* filename, char* rra_name)
 	return temperature;
 }
 
-static uint32_t _rrd_consolidate_one(time_t t0, time_t t1,
+static uint64_t _rrd_consolidate_one(time_t t0, time_t t1,
 				     char* filename, char* rra_name,
 				     bool flag_approximate)
 {
@@ -405,14 +401,14 @@ static uint32_t _rrd_consolidate_one(time_t t0, time_t t1,
 	free(ds_names);
 	free(rrd_data);
 
-	return (uint32_t)consumed_energy;
+	return (uint64_t)consumed_energy;
 }
 
-extern uint32_t RRD_consolidate(time_t step_starttime, time_t step_endtime,
+extern uint64_t RRD_consolidate(time_t step_starttime, time_t step_endtime,
 				bitstr_t* bitmap_of_nodes)
 {
-	uint32_t consumed_energy = 0;
-	uint32_t tmp;
+	uint64_t consumed_energy = 0;
+	uint64_t tmp;
 	char *node_name = NULL;
 	hostlist_t hl;
 	char* path;
@@ -423,14 +419,14 @@ extern uint32_t RRD_consolidate(time_t step_starttime, time_t step_endtime,
 	while ((node_name = hostlist_shift(hl))) {
 		if (!(path = _get_node_rrd_path(node_name,
 						EXT_SENSORS_VALUE_ENERGY)))
-			consumed_energy = NO_VAL;
+			consumed_energy = (uint64_t)NO_VAL;
 		free(node_name);
 		if ((tmp = _rrd_consolidate_one(
 			     step_starttime, step_endtime, path,
 			     ext_sensors_cnf->energy_rra_name, true)) == NO_VAL)
-			consumed_energy = NO_VAL;
+			consumed_energy = (uint64_t)NO_VAL;
 		xfree(path);
-		if (consumed_energy == NO_VAL)
+		if (consumed_energy == (uint64_t)NO_VAL)
 			break;
 		consumed_energy += tmp;
 	}
@@ -443,7 +439,8 @@ static int _update_node_data(void)
 {
 	int i;
 	char* path;
-	uint32_t tmp;
+	uint32_t tmp32;
+	uint64_t tmp;
 	ext_sensors_data_t *ext_sensors;
 	time_t now = time(NULL);
 
@@ -459,7 +456,7 @@ static int _update_node_data(void)
 			if (!(path = _get_node_rrd_path(
 				      node_record_table_ptr[i].name,
 				      EXT_SENSORS_VALUE_ENERGY))) {
-				ext_sensors->consumed_energy = NO_VAL;
+				ext_sensors->consumed_energy = (uint64_t)NO_VAL;
 				ext_sensors->current_watts = NO_VAL;
 				continue;
 			}
@@ -469,11 +466,12 @@ static int _update_node_data(void)
 				ext_sensors_cnf->energy_rra_name,
 				false);
 			xfree(path);
-			if ((tmp != NO_VAL) && (tmp != 0) &&
+			if ((tmp != (uint64_t)NO_VAL) && (tmp != 0) &&
 			    (last_valid_time != 0) &&
 			    (last_valid_watt != (rrd_value_t)NO_VAL)) {
 				if ((ext_sensors->consumed_energy <= 0) ||
-				    (ext_sensors->consumed_energy == NO_VAL)) {
+				    (ext_sensors->consumed_energy ==
+				     (uint64_t)NO_VAL)) {
 					ext_sensors->consumed_energy = tmp;
 				} else {
 					ext_sensors->consumed_energy += tmp;
@@ -495,13 +493,13 @@ static int _update_node_data(void)
 				ext_sensors->temperature = NO_VAL;
 				continue;
 			}
-			tmp = _rrd_get_last_one(path,
-						ext_sensors_cnf->temp_rra_name);
+			tmp32 = _rrd_get_last_one(
+				path, ext_sensors_cnf->temp_rra_name);
 			xfree(path);
-			if (tmp != NO_VAL &&
-			    tmp > ext_sensors_cnf->min_temp &&
-			    tmp < ext_sensors_cnf->max_temp) {
-				ext_sensors->temperature = tmp;
+			if (tmp32 != NO_VAL &&
+			    tmp32 > ext_sensors_cnf->min_temp &&
+			    tmp32 < ext_sensors_cnf->max_temp) {
+				ext_sensors->temperature = tmp32;
 			} else {
 				ext_sensors->temperature = NO_VAL;
 			}
@@ -676,7 +674,8 @@ extern int ext_sensors_p_get_stependdata(struct step_record *step_rec)
 					step_rec->step_node_bitmap);
 		if (step_rec->jobacct &&
 		    (!step_rec->jobacct->energy.consumed_energy
-		     || (step_rec->jobacct->energy.consumed_energy == NO_VAL))) {
+		     || (step_rec->jobacct->energy.consumed_energy ==
+			 (uint64_t)NO_VAL))) {
 			step_rec->jobacct->energy.consumed_energy =
 				step_rec->ext_sensors->consumed_energy;
 		}
diff --git a/src/plugins/ext_sensors/rrd/ext_sensors_rrd.h b/src/plugins/ext_sensors/rrd/ext_sensors_rrd.h
index 8288350e7..8c41809ab 100644
--- a/src/plugins/ext_sensors/rrd/ext_sensors_rrd.h
+++ b/src/plugins/ext_sensors/rrd/ext_sensors_rrd.h
@@ -80,7 +80,7 @@ extern int ext_sensors_p_get_stependdata(struct step_record *step_rec);
 extern List ext_sensors_p_get_config(void);
 
 /* consolidate RRD data */
-extern uint32_t RRD_consolidate(time_t step_starttime, time_t step_endtime,
+extern uint64_t RRD_consolidate(time_t step_starttime, time_t step_endtime,
 				bitstr_t* bitmap_of_nodes);
 
 extern int init(void);
diff --git a/src/plugins/gres/Makefile.in b/src/plugins/gres/Makefile.in
index cca22bde2..94c6b3701 100644
--- a/src/plugins/gres/Makefile.in
+++ b/src/plugins/gres/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/gres/gpu/Makefile.in b/src/plugins/gres/gpu/Makefile.in
index ea65fc903..f840ce0e0 100644
--- a/src/plugins/gres/gpu/Makefile.in
+++ b/src/plugins/gres/gpu/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -272,6 +275,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -321,8 +326,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -341,6 +350,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -384,6 +396,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -407,6 +420,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/gres/gpu/gres_gpu.c b/src/plugins/gres/gpu/gres_gpu.c
index b8d117e0e..5afbf7006 100644
--- a/src/plugins/gres/gpu/gres_gpu.c
+++ b/src/plugins/gres/gpu/gres_gpu.c
@@ -36,6 +36,7 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
+#define _GNU_SOURCE
 #if HAVE_CONFIG_H
 #  include "config.h"
 #  if STDC_HEADERS
@@ -61,9 +62,10 @@
 #  include <string.h>
 #endif /* HAVE_CONFIG_H */
 
+#include <ctype.h>
+#include <sched.h>
 #include <stdio.h>
 #include <stdlib.h>
-#include <ctype.h>
 
 #include "slurm/slurm.h"
 #include "slurm/slurm_errno.h"
@@ -98,12 +100,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - Specifies the version number of the plugin. This would
- * typically be the same for all plugins.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char	plugin_name[]		= "Gres GPU plugin";
 const char	plugin_type[]		= "gres/gpu";
-const uint32_t	plugin_version		= 120;
+const uint32_t	plugin_version		= SLURM_VERSION_NUMBER;
 
 static char	gres_name[]		= "gpu";
 
@@ -124,6 +126,7 @@ extern int fini(void)
 
 	return SLURM_SUCCESS;
 }
+
 /*
  * We could load gres state or validate it using various mechanisms here.
  * This only validates that the configuration was specified in gres.conf.
@@ -282,8 +285,7 @@ extern void job_set_env(char ***job_env_ptr, void *gres_ptr)
 	} else if (gres_job_ptr && (gres_job_ptr->gres_cnt_alloc > 0)) {
 		/* The gres.conf file must identify specific device files
 		 * in order to set the CUDA_VISIBLE_DEVICES env var */
-		error("gres/gpu unable to set CUDA_VISIBLE_DEVICES, "
-		      "no device files configured");
+		debug("gres/gpu unable to set CUDA_VISIBLE_DEVICES, no device files configured");
 	} else {
 		xstrcat(local_list, "NoDevFiles");
 	}
@@ -343,9 +345,71 @@ extern void step_set_env(char ***job_env_ptr, void *gres_ptr)
 	}
 
 	if (dev_list) {
-		env_array_overwrite(job_env_ptr,"CUDA_VISIBLE_DEVICES",
+		env_array_overwrite(job_env_ptr, "CUDA_VISIBLE_DEVICES",
 				    dev_list);
-		env_array_overwrite(job_env_ptr,"GPU_DEVICE_ORDINAL",
+		env_array_overwrite(job_env_ptr, "GPU_DEVICE_ORDINAL",
+				    dev_list);
+		xfree(dev_list);
+	}
+}
+
+/*
+ * Reset environment variables as appropriate for a job (i.e. this one tasks)
+ * based upon the job step's GRES state and assigned CPUs.
+ */
+extern void step_reset_env(char ***job_env_ptr, void *gres_ptr,
+			   bitstr_t *usable_gres)
+{
+	int i, len, local_inx = 0, first_match = -1;
+	char *dev_list = NULL;
+	gres_step_state_t *gres_step_ptr = (gres_step_state_t *) gres_ptr;
+	bool use_local_dev_index = _use_local_device_index();
+
+	if ((gres_step_ptr != NULL) &&
+	    (gres_step_ptr->node_cnt == 1) &&
+	    (gres_step_ptr->gres_bit_alloc != NULL) &&
+	    (gres_step_ptr->gres_bit_alloc[0] != NULL) &&
+	    (usable_gres != NULL)) {
+		len = MIN(bit_size(gres_step_ptr->gres_bit_alloc[0]),
+			  bit_size(usable_gres));
+		for (i = 0; i < len; i++) {
+			if (!bit_test(gres_step_ptr->gres_bit_alloc[0], i))
+				continue;
+			if (first_match == -1)
+				first_match = i;
+			if (!bit_test(usable_gres, i))
+				continue;
+			if (!dev_list)
+				dev_list = xmalloc(128);
+			else
+				xstrcat(dev_list, ",");
+			if (use_local_dev_index) {
+				xstrfmtcat(dev_list, "%d", local_inx++);
+			} else if (gpu_devices && (i < nb_available_files) &&
+				   (gpu_devices[i] >= 0)) {
+				xstrfmtcat(dev_list, "%d", gpu_devices[i]);
+			} else {
+				xstrfmtcat(dev_list, "%d", i);
+			}
+		}
+		if (!dev_list && (first_match != -1)) {
+			i = first_match;
+			dev_list = xmalloc(128);
+			if (use_local_dev_index) {
+				xstrfmtcat(dev_list, "%d", local_inx++);
+			} else if (gpu_devices && (i < nb_available_files) &&
+				   (gpu_devices[i] >= 0)) {
+				xstrfmtcat(dev_list, "%d", gpu_devices[i]);
+			} else {
+				xstrfmtcat(dev_list, "%d", i);
+			}
+		}
+	}
+
+	if (dev_list) {
+		env_array_overwrite(job_env_ptr, "CUDA_VISIBLE_DEVICES",
+				    dev_list);
+		env_array_overwrite(job_env_ptr, "GPU_DEVICE_ORDINAL",
 				    dev_list);
 		xfree(dev_list);
 	}
diff --git a/src/plugins/gres/mic/Makefile.in b/src/plugins/gres/mic/Makefile.in
index b1a937cdc..007673055 100644
--- a/src/plugins/gres/mic/Makefile.in
+++ b/src/plugins/gres/mic/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -272,6 +275,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -321,8 +326,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -341,6 +350,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -384,6 +396,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -407,6 +420,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/gres/mic/gres_mic.c b/src/plugins/gres/mic/gres_mic.c
index e1d763676..5c418e3ab 100644
--- a/src/plugins/gres/mic/gres_mic.c
+++ b/src/plugins/gres/mic/gres_mic.c
@@ -99,12 +99,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - Specifies the version number of the plugin. This would
- * typically be the same for all plugins.
+ *  * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char	plugin_name[]		= "Gres MIC plugin";
 const char	plugin_type[]		= "gres/mic";
-const uint32_t	plugin_version		= 120;
+const uint32_t	plugin_version		= SLURM_VERSION_NUMBER;
 
 static char	gres_name[]		= "mic";
 
@@ -252,6 +252,56 @@ extern void step_set_env(char ***job_env_ptr, void *gres_ptr)
 	}
 }
 
+/*
+ * Reset environment variables as appropriate for a job (i.e. this one tasks)
+ * based upon the job step's GRES state and assigned CPUs.
+ */
+extern void step_reset_env(char ***job_env_ptr, void *gres_ptr,
+			   bitstr_t *usable_gres)
+{
+	int i, len, first_match = -1;
+	char *dev_list = NULL;
+	gres_step_state_t *gres_step_ptr = (gres_step_state_t *) gres_ptr;
+
+	if ((gres_step_ptr != NULL) &&
+	    (gres_step_ptr->node_cnt == 1) &&
+	    (gres_step_ptr->gres_bit_alloc != NULL) &&
+	    (gres_step_ptr->gres_bit_alloc[0] != NULL) &&
+	    (usable_gres != NULL)) {
+		len = MIN(bit_size(gres_step_ptr->gres_bit_alloc[0]),
+			  bit_size(usable_gres));
+		for (i = 0; i < len; i++) {
+			if (!bit_test(gres_step_ptr->gres_bit_alloc[0], i))
+				continue;
+			if (first_match == -1)
+				first_match = i;
+			if (!bit_test(usable_gres, i))
+				continue;
+			if (!dev_list)
+				dev_list = xmalloc(128);
+			else
+				xstrcat(dev_list, ",");
+			if (mic_devices && (mic_devices[i] >= 0))
+				xstrfmtcat(dev_list, "%d", mic_devices[i]);
+			else
+				xstrfmtcat(dev_list, "%d", i);
+		}
+		if (!dev_list && (first_match != -1)) {
+			i = first_match;
+			dev_list = xmalloc(128);
+			if (mic_devices && (mic_devices[i] >= 0))
+				xstrfmtcat(dev_list, "%d", mic_devices[i]);
+			else
+				xstrfmtcat(dev_list, "%d", i);
+		}
+	}
+	if (dev_list) {
+		env_array_overwrite(job_env_ptr,"OFFLOAD_DEVICES",
+				    dev_list);
+		xfree(dev_list);
+	}
+}
+
 /* Send GRES information to slurmstepd on the specified file descriptor */
 extern void send_stepd(int fd)
 {
diff --git a/src/plugins/gres/nic/Makefile.in b/src/plugins/gres/nic/Makefile.in
index 32229b3b0..a55c3f465 100644
--- a/src/plugins/gres/nic/Makefile.in
+++ b/src/plugins/gres/nic/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -272,6 +275,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -321,8 +326,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -341,6 +350,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -384,6 +396,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -407,6 +420,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/gres/nic/gres_nic.c b/src/plugins/gres/nic/gres_nic.c
index 5022f3977..1fa13a8b4 100644
--- a/src/plugins/gres/nic/gres_nic.c
+++ b/src/plugins/gres/nic/gres_nic.c
@@ -61,6 +61,7 @@
 #  include <string.h>
 #endif /* HAVE_CONFIG_H */
 
+#include <ctype.h>
 #include <stdio.h>
 #include <stdlib.h>
 
@@ -69,8 +70,11 @@
 
 #include "src/common/slurm_xlator.h"
 #include "src/common/bitstring.h"
+#include "src/common/env.h"
 #include "src/common/gres.h"
 #include "src/common/list.h"
+#include "src/common/xcgroup_read_config.c"
+#include "src/common/xstring.h"
 
 /*
  * These variables are required by the generic plugin interface.  If they
@@ -94,15 +98,33 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - Specifies the version number of the plugin. This would
- * typically be the same for all plugins.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char	plugin_name[]		= "Gres NIC plugin";
 const char	plugin_type[]		= "gres/nic";
-const uint32_t	plugin_version		= 120;
+const uint32_t	plugin_version		= SLURM_VERSION_NUMBER;
 
 static char	gres_name[]		= "nic";
 
+static int *nic_devices = NULL;
+static int nb_available_files = 0;
+
+extern int init(void)
+{
+	debug("%s: %s loaded", __func__, plugin_name);
+
+	return SLURM_SUCCESS;
+}
+extern int fini(void)
+{
+	debug("%s: unloading %s", __func__, plugin_name);
+	xfree(nic_devices);
+	nb_available_files = 0;
+
+	return SLURM_SUCCESS;
+}
+
 /*
  * We could load gres state or validate it using various mechanisms here.
  * This only validates that the configuration was specified in gres.conf.
@@ -110,31 +132,160 @@ static char	gres_name[]		= "nic";
  */
 extern int node_config_load(List gres_conf_list)
 {
-	int rc = SLURM_ERROR;
+	int i, rc = SLURM_SUCCESS;
 	ListIterator iter;
 	gres_slurmd_conf_t *gres_slurmd_conf;
+	int nb_nic = 0;	/* Number of NICs in the list */
+	int available_files_index = 0;
 
 	xassert(gres_conf_list);
 	iter = list_iterator_create(gres_conf_list);
 	while ((gres_slurmd_conf = list_next(iter))) {
-		if (strcmp(gres_slurmd_conf->name, gres_name) == 0) {
-			rc = SLURM_SUCCESS;
+		if (strcmp(gres_slurmd_conf->name, gres_name))
+			continue;
+		if (gres_slurmd_conf->file)
+			nb_nic++;
+	}
+	list_iterator_destroy(iter);
+	xfree(nic_devices);	/* No-op if NULL */
+	nb_available_files = -1;
+	/* (Re-)Allocate memory if number of files changed */
+	if (nb_nic > nb_available_files) {
+		nic_devices = (int *) xmalloc(sizeof(int) * nb_nic);
+		nb_available_files = nb_nic;
+		for (i = 0; i < nb_available_files; i++)
+			nic_devices[i] = -1;
+	}
+
+	iter = list_iterator_create(gres_conf_list);
+	while ((gres_slurmd_conf = list_next(iter))) {
+		if ((strcmp(gres_slurmd_conf->name, gres_name) == 0) &&
+		    gres_slurmd_conf->file) {
+			/* Populate nic_devices array with number
+			 * at end of the file name */
+			char *bracket, *fname, *tmp_name;
+			hostlist_t hl;
+			bracket = strrchr(gres_slurmd_conf->file, '[');
+			if (bracket)
+				tmp_name = xstrdup(bracket);
+			else
+				tmp_name = xstrdup(gres_slurmd_conf->file);
+			hl = hostlist_create(tmp_name);
+			xfree(tmp_name);
+			if (!hl) {
+				rc = EINVAL;
+				break;
+			}
+			while ((fname = hostlist_shift(hl))) {
+				if (available_files_index ==
+				    nb_available_files) {
+					nb_available_files++;
+					xrealloc(nic_devices, sizeof(int) *
+						 nb_available_files);
+					nic_devices[available_files_index] = -1;
+				}
+				for (i = 0; fname[i]; i++) {
+					if (!isdigit(fname[i]))
+						continue;
+					nic_devices[available_files_index] =
+						atoi(fname + i);
+					break;
+				}
+				available_files_index++;
+				free(fname);
+			}
+			hostlist_destroy(hl);
 		}
 	}
 	list_iterator_destroy(iter);
 
 	if (rc != SLURM_SUCCESS)
 		fatal("%s failed to load configuration", plugin_name);
+
+	for (i = 0; i < nb_available_files; i++)
+		info("nic %d is device number %d", i, nic_devices[i]);
+
 	return rc;
 }
 
+/*
+ * Test if OMPI_MCA_btl_openib_if_include should be set to global device ID or a
+ * device ID that always starts at zero (based upon what the application can see).
+ * RET true if TaskPlugin=task/cgroup AND ConstrainDevices=yes (in cgroup.conf).
+ */
+static bool _use_local_device_index(void)
+{
+	slurm_cgroup_conf_t slurm_cgroup_conf;
+	char *task_plugin = slurm_get_task_plugin();
+	bool use_cgroup = false, use_local_index = false;
+
+	if (!task_plugin)
+		return use_local_index;
+
+	if (strstr(task_plugin, "cgroup"))
+		use_cgroup = true;
+	xfree(task_plugin);
+	if (!use_cgroup)
+		return use_local_index;
+
+	/* Read and parse cgroup.conf */
+	bzero(&slurm_cgroup_conf, sizeof(slurm_cgroup_conf_t));
+	if (read_slurm_cgroup_conf(&slurm_cgroup_conf) != SLURM_SUCCESS)
+		return use_local_index;
+	if (slurm_cgroup_conf.constrain_devices)
+		use_local_index = true;
+	free_slurm_cgroup_conf(&slurm_cgroup_conf);
+
+	return use_local_index;
+}
+
 /*
  * Set environment variables as appropriate for a job (i.e. all tasks) based
  * upon the job's GRES state.
  */
 extern void job_set_env(char ***job_env_ptr, void *gres_ptr)
 {
-	/* EMPTY */
+	int i, len, local_inx = 0;
+	char *dev_list = NULL;
+	gres_job_state_t *gres_job_ptr = (gres_job_state_t *) gres_ptr;
+	bool use_local_dev_index = _use_local_device_index();
+
+	if ((gres_job_ptr != NULL) &&
+	    (gres_job_ptr->node_cnt == 1) &&
+	    (gres_job_ptr->gres_bit_alloc != NULL) &&
+	    (gres_job_ptr->gres_bit_alloc[0] != NULL)) {
+		len = bit_size(gres_job_ptr->gres_bit_alloc[0]);
+		for (i = 0; i < len; i++) {
+			if (!bit_test(gres_job_ptr->gres_bit_alloc[0], i))
+				continue;
+			if (!dev_list)
+				dev_list = xmalloc(128);
+			else
+				xstrcat(dev_list, ",");
+			if (use_local_dev_index) {
+				xstrfmtcat(dev_list, "mlx4_%d", local_inx++);
+			} else if (nic_devices && (i < nb_available_files) &&
+				   (nic_devices[i] >= 0)) {
+				xstrfmtcat(dev_list, "mlx4_%d", nic_devices[i]);
+			} else {
+				xstrfmtcat(dev_list, "mlx4_%d", i);
+			}
+		}
+	} else if (gres_job_ptr && (gres_job_ptr->gres_cnt_alloc > 0)) {
+		/* The gres.conf file must identify specific device files
+		 * in order to set the OMPI_MCA_btl_openib_if_include env var */
+		debug("gres/nic unable to set OMPI_MCA_btl_openib_if_include, no device files configured");
+	} else {
+		xstrcat(dev_list, "NoDevFiles");
+	}
+
+	if (dev_list) {
+		/* we assume mellanox cards and OpenMPI programm */
+		env_array_overwrite(job_env_ptr,
+				    "OMPI_MCA_btl_openib_if_include",
+				    dev_list);
+		xfree(dev_list);
+	}
 }
 
 /*
@@ -143,19 +294,135 @@ extern void job_set_env(char ***job_env_ptr, void *gres_ptr)
  */
 extern void step_set_env(char ***job_env_ptr, void *gres_ptr)
 {
-	/* EMPTY */
+	int i, len, local_inx = 0;
+	char *dev_list = NULL;
+	gres_step_state_t *gres_step_ptr = (gres_step_state_t *) gres_ptr;
+	bool use_local_dev_index = _use_local_device_index();
+
+	if ((gres_step_ptr != NULL) &&
+	    (gres_step_ptr->node_cnt == 1) &&
+	    (gres_step_ptr->gres_bit_alloc != NULL) &&
+	    (gres_step_ptr->gres_bit_alloc[0] != NULL)) {
+		len = bit_size(gres_step_ptr->gres_bit_alloc[0]);
+		for (i = 0; i < len; i++) {
+			if (!bit_test(gres_step_ptr->gres_bit_alloc[0], i))
+				continue;
+			if (!dev_list)
+				dev_list = xmalloc(128);
+			else
+				xstrcat(dev_list, ",");
+			if (use_local_dev_index) {
+				xstrfmtcat(dev_list, "mlx4_%d", local_inx++);
+			} else if (nic_devices && (i < nb_available_files) &&
+				   (nic_devices[i] >= 0)) {
+				xstrfmtcat(dev_list, "mlx4_%d", nic_devices[i]);
+			} else {
+				xstrfmtcat(dev_list, "mlx4_%d", i);
+			}
+		}
+	} else if (gres_step_ptr && (gres_step_ptr->gres_cnt_alloc > 0)) {
+		/* The gres.conf file must identify specific device files
+		 * in order to set the OMPI_MCA_btl_openib_if_include env var */
+		error("gres/nic unable to set OMPI_MCA_btl_openib_if_include, "
+		      "no device files configured");
+	} else {
+		xstrcat(dev_list, "NoDevFiles");
+	}
+
+	if (dev_list) {
+		/* we assume mellanox cards and OpenMPI programm */
+		env_array_overwrite(job_env_ptr,
+				    "OMPI_MCA_btl_openib_if_include",
+				    dev_list);
+		xfree(dev_list);
+	}
+}
+
+/*
+ * Reset environment variables as appropriate for a job (i.e. this one tasks)
+ * based upon the job step's GRES state and assigned CPUs.
+ */
+extern void step_reset_env(char ***job_env_ptr, void *gres_ptr,
+			   bitstr_t *usable_gres)
+{
+	int i, len, first_match = -1;
+	char *dev_list = NULL;
+	gres_step_state_t *gres_step_ptr = (gres_step_state_t *) gres_ptr;
+
+	if ((gres_step_ptr != NULL) &&
+	    (gres_step_ptr->node_cnt == 1) &&
+	    (gres_step_ptr->gres_bit_alloc != NULL) &&
+	    (gres_step_ptr->gres_bit_alloc[0] != NULL) &&
+	    (usable_gres != NULL)) {
+		len = MIN(bit_size(gres_step_ptr->gres_bit_alloc[0]),
+			  bit_size(usable_gres));
+		for (i = 0; i < len; i++) {
+			if (!bit_test(gres_step_ptr->gres_bit_alloc[0], i))
+				continue;
+			if (first_match == -1)
+				first_match = i;
+			if (!bit_test(usable_gres, i))
+				continue;
+			if (!dev_list)
+				dev_list = xmalloc(128);
+			else
+				xstrcat(dev_list, ",");
+			if (nic_devices && (i < nb_available_files) &&
+			    (nic_devices[i] >= 0)) {
+				xstrfmtcat(dev_list, "mlx4_%d", nic_devices[i]);
+			} else {
+				xstrfmtcat(dev_list, "mlx4_%d", i);
+			}
+		}
+		if (!dev_list && (first_match != -1)) {
+			i = first_match;
+			dev_list = xmalloc(128);
+			if (nic_devices && (i < nb_available_files) &&
+			    (nic_devices[i] >= 0)) {
+				xstrfmtcat(dev_list, "mlx4_%d", nic_devices[i]);
+			} else {
+				xstrfmtcat(dev_list, "mlx4_%d", i);
+			}
+		}
+	}
+
+	if (dev_list) {
+		/* we assume mellanox cards and OpenMPI programm */
+		env_array_overwrite(job_env_ptr,
+				    "OMPI_MCA_btl_openib_if_include",
+				    dev_list);
+		xfree(dev_list);
+	}
 }
 
 /* Send GRES information to slurmstepd on the specified file descriptor*/
 extern void send_stepd(int fd)
 {
-	/* EMPTY */
+	int i;
+
+	safe_write(fd, &nb_available_files, sizeof(int));
+	for (i = 0; i < nb_available_files; i++)
+		safe_write(fd, &nic_devices[i], sizeof(int));
+	return;
+
+rwfail:	error("gres_plugin_send_stepd failed");
 }
 
-/* Receive GRES information from slurmd on the specified file descriptor*/
+/* Receive GRES information from slurmd on the specified file descriptor */
 extern void recv_stepd(int fd)
 {
-	/* EMPTY */
+	int i;
+
+	safe_read(fd, &nb_available_files, sizeof(int));
+	if (nb_available_files > 0) {
+		xfree(nic_devices);	/* No-op if NULL */
+		nic_devices = xmalloc(sizeof(int) * nb_available_files);
+	}
+	for (i = 0; i < nb_available_files; i++)
+		safe_read(fd, &nic_devices[i], sizeof(int));
+	return;
+
+rwfail:	error("gres_plugin_recv_stepd failed");
 }
 
 extern int job_info(gres_job_state_t *job_gres_data, uint32_t node_inx,
diff --git a/src/plugins/job_container/Makefile.in b/src/plugins/job_container/Makefile.in
index b7abe2b5f..c3fc60cf8 100644
--- a/src/plugins/job_container/Makefile.in
+++ b/src/plugins/job_container/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/job_container/cncu/Makefile.in b/src/plugins/job_container/cncu/Makefile.in
index 788a937df..8992ccbad 100644
--- a/src/plugins/job_container/cncu/Makefile.in
+++ b/src/plugins/job_container/cncu/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/job_container/cncu/job_container_cncu.c b/src/plugins/job_container/cncu/job_container_cncu.c
index 996475d66..4f5b74750 100644
--- a/src/plugins/job_container/cncu/job_container_cncu.c
+++ b/src/plugins/job_container/cncu/job_container_cncu.c
@@ -78,15 +78,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "job_container cncu plugin";
 const char plugin_type[]        = "job_container/cncu";
-const uint32_t plugin_version   = 101;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 static uint32_t *job_id_array = NULL;
 static uint32_t  job_id_count = 0;
diff --git a/src/plugins/job_container/none/Makefile.in b/src/plugins/job_container/none/Makefile.in
index e0c787823..5d2f192f0 100644
--- a/src/plugins/job_container/none/Makefile.in
+++ b/src/plugins/job_container/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/job_container/none/job_container_none.c b/src/plugins/job_container/none/job_container_none.c
index 947057679..2705068a6 100644
--- a/src/plugins/job_container/none/job_container_none.c
+++ b/src/plugins/job_container/none/job_container_none.c
@@ -66,15 +66,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "job_container none plugin";
 const char plugin_type[]        = "job_container/none";
-const uint32_t plugin_version   = 101;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 extern void container_p_reconfig(void)
 {
diff --git a/src/plugins/job_submit/Makefile.in b/src/plugins/job_submit/Makefile.in
index 9f2fa85c1..7ee545f21 100644
--- a/src/plugins/job_submit/Makefile.in
+++ b/src/plugins/job_submit/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/job_submit/all_partitions/Makefile.in b/src/plugins/job_submit/all_partitions/Makefile.in
index 0c9158b61..3aaf981b2 100644
--- a/src/plugins/job_submit/all_partitions/Makefile.in
+++ b/src/plugins/job_submit/all_partitions/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -275,6 +278,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -324,8 +329,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -344,6 +353,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -387,6 +399,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -410,6 +423,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/job_submit/all_partitions/job_submit_all_partitions.c b/src/plugins/job_submit/all_partitions/job_submit_all_partitions.c
index 5dea0d9fa..2d0024477 100644
--- a/src/plugins/job_submit/all_partitions/job_submit_all_partitions.c
+++ b/src/plugins/job_submit/all_partitions/job_submit_all_partitions.c
@@ -91,14 +91,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incoming
- *                    messages that this plugin can accept
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Job submit all_partitions plugin";
 const char plugin_type[]       	= "job_submit/all_partitions";
-const uint32_t plugin_version   = 110;
-const uint32_t min_plug_version = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 /* Set a job's default partition to all partitions in the cluster */
 extern int job_submit(struct job_descriptor *job_desc, uint32_t submit_uid,
diff --git a/src/plugins/job_submit/cnode/Makefile.in b/src/plugins/job_submit/cnode/Makefile.in
index 356ea6e4a..fc17be546 100644
--- a/src/plugins/job_submit/cnode/Makefile.in
+++ b/src/plugins/job_submit/cnode/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/job_submit/cnode/job_submit_cnode.c b/src/plugins/job_submit/cnode/job_submit_cnode.c
index 70b8d6062..eddb98f61 100644
--- a/src/plugins/job_submit/cnode/job_submit_cnode.c
+++ b/src/plugins/job_submit/cnode/job_submit_cnode.c
@@ -100,14 +100,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incoming
- *                    messages that this plugin can accept
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Job submit cnode plugin";
 const char plugin_type[]       	= "job_submit/cnode";
-const uint32_t plugin_version   = 110;
-const uint32_t min_plug_version = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 static void _rebuild_licenses(char **license_ptr, uint32_t cnode_cnt)
 {
diff --git a/src/plugins/job_submit/cray/Makefile.in b/src/plugins/job_submit/cray/Makefile.in
index 15d4cf30c..0f56b73e0 100644
--- a/src/plugins/job_submit/cray/Makefile.in
+++ b/src/plugins/job_submit/cray/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/job_submit/cray/job_submit_cray.c b/src/plugins/job_submit/cray/job_submit_cray.c
index 569729d38..f8b12393a 100644
--- a/src/plugins/job_submit/cray/job_submit_cray.c
+++ b/src/plugins/job_submit/cray/job_submit_cray.c
@@ -91,14 +91,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incoming
- *                    messages that this plugin can accept
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Job submit Cray plugin";
 const char plugin_type[]       	= "job_submit/cray";
-const uint32_t plugin_version   = 100;
-const uint32_t min_plug_version = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 #define CRAY_GRES "craynetwork"
 #define CRAY_GRES_POSTFIX CRAY_GRES":1"
diff --git a/src/plugins/job_submit/defaults/Makefile.in b/src/plugins/job_submit/defaults/Makefile.in
index ac432d366..a21a156a2 100644
--- a/src/plugins/job_submit/defaults/Makefile.in
+++ b/src/plugins/job_submit/defaults/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/job_submit/defaults/job_submit_defaults.c b/src/plugins/job_submit/defaults/job_submit_defaults.c
index 18ad14256..3c9402be6 100644
--- a/src/plugins/job_submit/defaults/job_submit_defaults.c
+++ b/src/plugins/job_submit/defaults/job_submit_defaults.c
@@ -91,14 +91,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incoming
- *                    messages that this plugin can accept
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Job submit defaults plugin";
 const char plugin_type[]       	= "job_submit/defaults";
-const uint32_t plugin_version   = 110;
-const uint32_t min_plug_version = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 /*****************************************************************************\
  * We've provided a simple example of the type of things you can do with this
diff --git a/src/plugins/job_submit/logging/Makefile.in b/src/plugins/job_submit/logging/Makefile.in
index b2138bb38..32187fd61 100644
--- a/src/plugins/job_submit/logging/Makefile.in
+++ b/src/plugins/job_submit/logging/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/job_submit/logging/job_submit_logging.c b/src/plugins/job_submit/logging/job_submit_logging.c
index e5932e23f..a6ec3a0f6 100644
--- a/src/plugins/job_submit/logging/job_submit_logging.c
+++ b/src/plugins/job_submit/logging/job_submit_logging.c
@@ -91,14 +91,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incoming
- *                    messages that this plugin can accept
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Job submit logging plugin";
 const char plugin_type[]       	= "job_submit/logging";
-const uint32_t plugin_version   = 110;
-const uint32_t min_plug_version = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 /*****************************************************************************\
  * We've provided a simple example of the type of things you can do with this
diff --git a/src/plugins/job_submit/lua/Makefile.in b/src/plugins/job_submit/lua/Makefile.in
index 80af05349..bb0fd999d 100644
--- a/src/plugins/job_submit/lua/Makefile.in
+++ b/src/plugins/job_submit/lua/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -278,6 +281,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -327,8 +332,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -347,6 +356,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -390,6 +402,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -413,6 +426,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/job_submit/lua/job_submit_lua.c b/src/plugins/job_submit/lua/job_submit_lua.c
index a8bbdb0e3..bf247a1e9 100644
--- a/src/plugins/job_submit/lua/job_submit_lua.c
+++ b/src/plugins/job_submit/lua/job_submit_lua.c
@@ -2,6 +2,7 @@
  *  job_submit_lua.c - Set defaults in job submit request specifications.
  *****************************************************************************
  *  Copyright (C) 2010 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2010-2015 SchedMD LLC <http://www.schedmd.com>.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
@@ -94,14 +95,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incoming
- *                    messages that this plugin can accept
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Job submit lua plugin";
 const char plugin_type[]       	= "job_submit/lua";
-const uint32_t plugin_version   = 110;
-const uint32_t min_plug_version = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 static const char lua_script_path[] = DEFAULT_SCRIPT_DIR "/job_submit.lua";
 static time_t lua_script_last_loaded = (time_t) 0;
@@ -237,6 +236,43 @@ static char *_get_default_account(uint32_t user_id)
 	}
 }
 
+/* Get the default QOS for an association (or NULL if not present) */
+static char *_get_default_qos(uint32_t user_id, char *account, char *partition)
+{
+	slurmdb_assoc_rec_t assoc;
+	slurmdb_assoc_rec_t *assoc_ptr;
+	slurmdb_qos_rec_t qos;
+	uint32_t qos_id;
+
+	memset(&assoc, 0, sizeof(slurmdb_assoc_rec_t));
+	assoc.uid = user_id;
+	assoc.partition = partition;
+	if (account) {
+		assoc.acct = account;
+	} else {
+		assoc.acct = _get_default_account(user_id);
+	}
+	if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc, 0,
+				    &assoc_ptr, false) != SLURM_ERROR) {
+		qos_id = assoc_ptr->def_qos_id;
+	} else {
+		return NULL;
+	}
+
+	if (!qos_id) {
+		return NULL;
+	}
+
+	memset(&qos, 0, sizeof(slurmdb_qos_rec_t));
+	qos.id = qos_id;
+	if (assoc_mgr_fill_in_qos(acct_db_conn,
+				  &qos, 0, NULL, false) != SLURM_ERROR) {
+		return qos.name;
+	} else {
+		return NULL;
+	}
+}
+
 /* Get fields in an existing slurmctld job record.
  *
  * This is an incomplete list of job record fields. Add more as needed and
@@ -250,6 +286,8 @@ static int _job_rec_field(const struct job_record *job_ptr,
 		lua_pushnil (L);
 	} else if (!strcmp(name, "account")) {
 		lua_pushstring (L, job_ptr->account);
+	} else if (!strcmp(name, "burst_buffer")) {
+		lua_pushstring (L, job_ptr->burst_buffer);
 	} else if (!strcmp(name, "comment")) {
 		lua_pushstring (L, job_ptr->comment);
 	} else if (!strcmp(name, "direct_set_prio")) {
@@ -291,6 +329,14 @@ static int _job_rec_field(const struct job_record *job_ptr,
 		lua_pushstring (L, job_ptr->partition);
 	} else if (!strcmp(name, "priority")) {
 		lua_pushnumber (L, job_ptr->priority);
+	} else if (!strcmp(name, "qos")) {
+		if (job_ptr->qos_ptr) {
+			slurmdb_qos_rec_t *qos_ptr =
+				(slurmdb_qos_rec_t *)job_ptr->qos_ptr;
+			lua_pushstring (L, qos_ptr->name);
+		} else {
+			lua_pushnil (L);
+		}
 	} else if (!strcmp(name, "req_switch")) {
 		lua_pushnumber (L, job_ptr->req_switch);
 	} else if (!strcmp(name, "time_limit")) {
@@ -308,14 +354,6 @@ static int _job_rec_field(const struct job_record *job_ptr,
 	return 1;
 }
 
-static int _get_job_rec_field(lua_State *L)
-{
-	const struct job_record *job_ptr = lua_touserdata(L, 1);
-	const char *name = luaL_checkstring(L, 2);
-
-	return _job_rec_field(job_ptr, name);
-}
-
 /* Get fields in an existing slurmctld job_record */
 static int _job_rec_field_index(lua_State *L)
 {
@@ -382,8 +420,6 @@ static int _resv_field(const slurmctld_resv_t *resv_ptr,
 		lua_pushstring(L, resv_ptr->accounts);
 	} else if (!strcmp(name, "assoc_list")) {
 		lua_pushstring(L, resv_ptr->assoc_list);
-	} else if (!strcmp(name, "cpu_cnt")) {
-		lua_pushnumber(L, resv_ptr->cpu_cnt);
 	} else if (!strcmp(name, "duration")) {
 		lua_pushnumber(L, resv_ptr->duration);
 	} else if (!strcmp(name, "end_time")) {
@@ -471,11 +507,124 @@ static void _update_resvs_global(void)
 	lua_pop(L, 1);
 }
 
-static int _job_req_field(const struct job_descriptor *job_desc,
-                          const char *name)
+/* Set fields in the job request structure on job submit or modify */
+static int _set_job_env_field(lua_State *L)
+{
+	const char *name, *value_str;
+	struct job_descriptor *job_desc;
+	char *name_eq = NULL;
+	int i, j, name_len;
+
+	name = luaL_checkstring(L, 2);
+	name_eq = xstrdup(name);
+	xstrcat(name_eq, "=");
+	name_len = strlen(name_eq);
+	lua_getmetatable(L, -3);
+	lua_getfield(L, -1, "_job_desc");
+	job_desc = lua_touserdata(L, -1);
+	if (job_desc == NULL) {
+		error("%s: job_desc is NULL", __func__);
+	} else {
+		value_str = luaL_checkstring(L, 3);
+		for (i = 0; job_desc->environment[i]; i++) {
+			if (!strncmp(job_desc->environment[i], name_eq,
+				     name_len)) {
+				job_desc->environment[i][name_len] = '\0';
+				xstrcat(job_desc->environment[i], value_str);
+				break;
+			}
+		}
+		if (!job_desc->environment[i]) {
+			job_desc->environment = xrealloc(job_desc->environment,
+							 sizeof(char*) * (i+2));
+			for (j = i; j >= 1; j--) {
+				job_desc->environment[j] =
+					job_desc->environment[j-1];
+			}
+			job_desc->environment[0] = xstrdup(name_eq);
+			xstrcat(job_desc->environment[0], value_str);
+		}
+	}
+	xfree(name_eq);
+
+	return 0;
+}
+
+static int _job_env_field(const struct job_descriptor *job_desc,
+			  const char *name)
+{
+	char *name_eq = "";
+	int i, name_len;
+
+	name_eq = xstrdup(name);
+	xstrcat(name_eq, "=");
+	name_len = strlen(name_eq);
+	if (job_desc == NULL) {
+		error("%s: job_desc is NULL", __func__);
+		lua_pushnil (L);
+	} else if (job_desc->environment == NULL) {
+		error("%s: job_desc->environment is NULL", __func__);
+		lua_pushnil (L);
+	} else {
+		for (i = 0; job_desc->environment[i]; i++) {
+			if (!strncmp(job_desc->environment[i], name_eq,
+				     name_len)) {
+				lua_pushstring (L, job_desc->environment[i] +
+						   name_len);
+				break;
+			}
+		}
+		if (!job_desc->environment[i])
+			lua_pushnil (L);
+	}
+	xfree(name_eq);
+
+	return 1;
+}
+
+/* Get fields in the job request record on job submit or modify */
+static int _get_job_env_field_name(lua_State *L)
+{
+	const struct job_descriptor *job_desc = lua_touserdata(L, 1);
+	const char *name = luaL_checkstring(L, 2);
+	return _job_env_field(job_desc, name);
+}
+
+/* Get fields in an existing slurmctld job_descriptor record */
+static int _job_env_field_index(lua_State *L)
+{
+	const char *name;
+	struct job_descriptor *job_desc;
+
+	name = luaL_checkstring(L, 2);
+	lua_getmetatable(L, -2);
+	lua_getfield(L, -1, "_job_desc");
+	job_desc = lua_touserdata(L, -1);
+	return _job_env_field(job_desc, name);
+}
+
+static void _push_job_env(struct job_descriptor *job_desc)
+{
+	lua_newtable(L);
+
+	lua_newtable(L);
+	lua_pushcfunction(L, _job_env_field_index);
+	lua_setfield(L, -2, "__index");
+	lua_pushcfunction(L, _set_job_env_field);
+	lua_setfield(L, -2, "__newindex");
+	/* Store the job descriptor in the metatable, so the index
+	 * function knows which struct it's getting data for.
+	 */
+	lua_pushlightuserdata(L, job_desc);
+	lua_setfield(L, -2, "_job_desc");
+	lua_setmetatable(L, -2);
+}
+
+static int _get_job_req_field(const struct job_descriptor *job_desc,
+			      const char *name)
 {
 	if (job_desc == NULL) {
-		error("_job_req_field: job_desc is NULL");
+		error("%s: job_desc is NULL", __func__);
 		lua_pushnil (L);
 	} else if (!strcmp(name, "account")) {
 		lua_pushstring (L, job_desc->account);
@@ -485,20 +634,38 @@ static int _job_req_field(const struct job_descriptor *job_desc,
 		lua_pushstring (L, job_desc->alloc_node);
 	} else if (!strcmp(name, "begin_time")) {
 		lua_pushnumber (L, job_desc->begin_time);
+	} else if (!strcmp(name, "boards_per_node")) {
+		lua_pushnumber (L, job_desc->boards_per_node);
+	} else if (!strcmp(name, "burst_buffer")) {
+		lua_pushstring (L, job_desc->burst_buffer);
+	} else if (!strcmp(name, "clusters")) {
+		lua_pushstring (L, job_desc->clusters);
 	} else if (!strcmp(name, "comment")) {
 		lua_pushstring (L, job_desc->comment);
 	} else if (!strcmp(name, "contiguous")) {
 		lua_pushnumber (L, job_desc->contiguous);
 	} else if (!strcmp(name, "cores_per_socket")) {
 		lua_pushnumber (L, job_desc->cores_per_socket);
+	} else if (!strcmp(name, "cpu_freq_min")) {
+		lua_pushnumber (L, job_desc->cpu_freq_min);
+	} else if (!strcmp(name, "cpu_freq_max")) {
+		lua_pushnumber (L, job_desc->cpu_freq_max);
+	} else if (!strcmp(name, "cpu_freq_gov")) {
+		lua_pushnumber (L, job_desc->cpu_freq_gov);
 	} else if (!strcmp(name, "cpus_per_task")) {
 		lua_pushnumber (L, job_desc->cpus_per_task);
 	} else if (!strcmp(name, "default_account")) {
 		lua_pushstring (L, _get_default_account(job_desc->user_id));
+	} else if (!strcmp(name, "default_qos")) {
+		lua_pushstring (L, _get_default_qos(job_desc->user_id,
+						    job_desc->account,
+						    job_desc->partition));
 	} else if (!strcmp(name, "dependency")) {
 		lua_pushstring (L, job_desc->dependency);
 	} else if (!strcmp(name, "end_time")) {
 		lua_pushnumber (L, job_desc->end_time);
+	} else if (!strcmp(name, "environment")) {
+		_push_job_env ((struct job_descriptor *)job_desc); // No const
 	} else if (!strcmp(name, "exc_nodes")) {
 		lua_pushstring (L, job_desc->exc_nodes);
 	} else if (!strcmp(name, "features")) {
@@ -525,12 +692,20 @@ static int _job_req_field(const struct job_descriptor *job_desc,
 		lua_pushstring (L, job_desc->name);
 	} else if (!strcmp(name, "nice")) {
 		lua_pushnumber (L, job_desc->nice);
+	} else if (!strcmp(name, "ntasks_per_board")) {
+		lua_pushnumber (L, job_desc->ntasks_per_board);
+	} else if (!strcmp(name, "ntasks_per_core")) {
+		lua_pushnumber (L, job_desc->ntasks_per_core);
 	} else if (!strcmp(name, "ntasks_per_node")) {
 		lua_pushnumber (L, job_desc->ntasks_per_node);
+	} else if (!strcmp(name, "ntasks_per_socket")) {
+		lua_pushnumber (L, job_desc->ntasks_per_socket);
 	} else if (!strcmp(name, "num_tasks")) {
 		lua_pushnumber (L, job_desc->num_tasks);
 	} else if (!strcmp(name, "partition")) {
 		lua_pushstring (L, job_desc->partition);
+	} else if (!strcmp(name, "power_flags")) {
+		lua_pushnumber (L, job_desc->power_flags);
 	} else if (!strcmp(name, "pn_min_cpus")) {
 		lua_pushnumber (L, job_desc->pn_min_cpus);
 	} else if (!strcmp(name, "pn_min_memory")) {
@@ -549,8 +724,14 @@ static int _job_req_field(const struct job_descriptor *job_desc,
 		lua_pushnumber (L, job_desc->requeue);
 	} else if (!strcmp(name, "reservation")) {
 		lua_pushstring (L, job_desc->reservation);
+	} else if (!strcmp(name, "script")) {
+		lua_pushstring (L, job_desc->script);
 	} else if (!strcmp(name, "shared")) {
 		lua_pushnumber (L, job_desc->shared);
+	} else if (!strcmp(name, "sicp_mode")) {
+		lua_pushnumber (L, job_desc->sicp_mode);
+	} else if (!strcmp(name, "sockets_per_board")) {
+		lua_pushnumber (L, job_desc->sockets_per_board);
 	} else if (!strcmp(name, "sockets_per_node")) {
 		lua_pushnumber (L, job_desc->sockets_per_node);
 	} else if (!strcmp(name, "std_err")) {
@@ -573,16 +754,6 @@ static int _job_req_field(const struct job_descriptor *job_desc,
 		lua_pushstring (L, job_desc->work_dir);
 	} else if (!strcmp(name, "wckey")) {
 		lua_pushstring (L, job_desc->wckey);
-	} else if (!strcmp(name, "ntasks_per_core")) {
-		lua_pushnumber (L, job_desc->ntasks_per_core);
-	} else if (!strcmp(name, "boards_per_node")) {
-		lua_pushnumber (L, job_desc->boards_per_node);
-	} else if (!strcmp(name, "ntasks_per_board")) {
-		lua_pushnumber (L, job_desc->ntasks_per_board);
-	} else if (!strcmp(name, "ntasks_per_socket")) {
-		lua_pushnumber (L, job_desc->ntasks_per_socket);
-	} else if (!strcmp(name, "sockets_per_board")) {
-		lua_pushnumber (L, job_desc->sockets_per_board);
 	} else {
 		lua_pushnil (L);
 	}
@@ -591,25 +762,26 @@ static int _job_req_field(const struct job_descriptor *job_desc,
 }
 
 /* Get fields in the job request record on job submit or modify */
-static int _get_job_req_field(lua_State *L)
+static int _get_job_req_field_name(lua_State *L)
 {
 	const struct job_descriptor *job_desc = lua_touserdata(L, 1);
 	const char *name = luaL_checkstring(L, 2);
 
-	return _job_req_field(job_desc, name);
+	return _get_job_req_field(job_desc, name);
 }
 
 /* Get fields in an existing slurmctld job_descriptor record */
-static int _job_req_field_index(lua_State *L)
+static int _get_job_req_field_index(lua_State *L)
 {
-	const char *name = luaL_checkstring(L, 2);
+	const char *name;
 	struct job_descriptor *job_desc;
 
+	name = luaL_checkstring(L, 2);
 	lua_getmetatable(L, -2);
 	lua_getfield(L, -1, "_job_desc");
 	job_desc = lua_touserdata(L, -1);
 
-	return _job_req_field(job_desc, name);
+	return _get_job_req_field(job_desc, name);
 }
 
 /* Set fields in the job request structure on job submit or modify */
@@ -636,6 +808,16 @@ static int _set_job_req_field(lua_State *L)
 			job_desc->acctg_freq = xstrdup(value_str);
 	} else if (!strcmp(name, "begin_time")) {
 		job_desc->begin_time = luaL_checknumber(L, 3);
+	} else if (!strcmp(name, "burst_buffer")) {
+		value_str = luaL_checkstring(L, 3);
+		xfree(job_desc->burst_buffer);
+		if (strlen(value_str))
+			job_desc->burst_buffer = xstrdup(value_str);
+	} else if (!strcmp(name, "clusters")) {
+		value_str = luaL_checkstring(L, 3);
+		xfree(job_desc->clusters);
+		if (strlen(value_str))
+			job_desc->clusters = xstrdup(value_str);
 	} else if (!strcmp(name, "comment")) {
 		value_str = luaL_checkstring(L, 3);
 		xfree(job_desc->comment);
@@ -647,6 +829,12 @@ static int _set_job_req_field(lua_State *L)
 		job_desc->cores_per_socket = luaL_checknumber(L, 3);
 	} else if (!strcmp(name, "cpus_per_task")) {
 		job_desc->cpus_per_task = luaL_checknumber(L, 3);
+	} else if (!strcmp(name, "cpu_freq_min")) {
+		job_desc->cpu_freq_min = luaL_checknumber(L, 3);
+	} else if (!strcmp(name, "cpu_freq_max")) {
+		job_desc->cpu_freq_max = luaL_checknumber(L, 3);
+	} else if (!strcmp(name, "cpu_freq_gov")) {
+		job_desc->cpu_freq_gov = luaL_checknumber(L, 3);
 	} else if (!strcmp(name, "dependency")) {
 		value_str = luaL_checkstring(L, 3);
 		xfree(job_desc->dependency);
@@ -700,6 +888,8 @@ static int _set_job_req_field(lua_State *L)
 		xfree(job_desc->partition);
 		if (strlen(value_str))
 			job_desc->partition = xstrdup(value_str);
+	} else if (!strcmp(name, "power_flags")) {
+		job_desc->power_flags = luaL_checknumber(L, 3);
 	} else if (!strcmp(name, "pn_min_cpus")) {
 		job_desc->pn_min_cpus = luaL_checknumber(L, 3);
 	} else if (!strcmp(name, "pn_min_memory")) {
@@ -727,8 +917,15 @@ static int _set_job_req_field(lua_State *L)
 		xfree(job_desc->reservation);
 		if (strlen(value_str))
 			job_desc->reservation = xstrdup(value_str);
+	} else if (!strcmp(name, "script")) {
+		value_str = luaL_checkstring(L, 3);
+		xfree(job_desc->script);
+		if (strlen(value_str))
+			job_desc->script = xstrdup(value_str);
 	} else if (!strcmp(name, "shared")) {
 		job_desc->shared = luaL_checknumber(L, 3);
+	} else if (!strcmp(name, "sicp_mode")) {
+		job_desc->sicp_mode = luaL_checknumber(L, 3);
 	} else if (!strcmp(name, "sockets_per_node")) {
 		job_desc->sockets_per_node = luaL_checknumber(L, 3);
 	} else if (!strcmp(name, "std_err")) {
@@ -773,15 +970,10 @@ static int _set_job_req_field(lua_State *L)
 
 static void _push_job_desc(struct job_descriptor *job_desc)
 {
-#if 0
-	lua_newtable(L);
-	lua_pushlightuserdata(L, job_desc);
-	lua_setfield(L, -2, "job_desc_ptr");
-#else
 	lua_newtable(L);
 
 	lua_newtable(L);
-	lua_pushcfunction(L, _job_req_field_index);
+	lua_pushcfunction(L, _get_job_req_field_index);
 	lua_setfield(L, -2, "__index");
 	lua_pushcfunction(L, _set_job_req_field);
 	lua_setfield(L, -2, "__newindex");
@@ -791,16 +983,10 @@ static void _push_job_desc(struct job_descriptor *job_desc)
 	lua_pushlightuserdata(L, job_desc);
 	lua_setfield(L, -2, "_job_desc");
 	lua_setmetatable(L, -2);
-#endif
 }
 
 static void _push_job_rec(struct job_record *job_ptr)
 {
-#if 0
-	lua_newtable(L);
-	lua_pushlightuserdata(L, job_ptr);
-	lua_setfield(L, -2, "job_rec_ptr");
-#else
 	lua_newtable(L);
 
 	lua_newtable(L);
@@ -812,7 +998,6 @@ static void _push_job_rec(struct job_record *job_ptr)
 	lua_pushlightuserdata(L, job_ptr);
 	lua_setfield(L, -2, "_job_rec_ptr");
 	lua_setmetatable(L, -2);
-#endif
 }
 
 /* Get fields in an existing slurmctld partition record
@@ -826,6 +1011,8 @@ static int _part_rec_field(const struct part_record *part_ptr,
 	if (part_ptr == NULL) {
 		error("_get_part_field: part_ptr is NULL");
 		lua_pushnil (L);
+	} else if (!strcmp(name, "allow_qos")) {
+		lua_pushstring (L, part_ptr->allow_qos);
 	} else if (!strcmp(name, "default_time")) {
 		lua_pushnumber (L, part_ptr->default_time);
 	} else if (!strcmp(name, "flag_default")) {
@@ -851,6 +1038,8 @@ static int _part_rec_field(const struct part_record *part_ptr,
 		lua_pushstring (L, part_ptr->nodes);
 	} else if (!strcmp(name, "priority")) {
 		lua_pushnumber (L, part_ptr->priority);
+	} else if (!strcmp(name, "qos")) {
+		lua_pushstring (L, part_ptr->qos_char);
 	} else if (!strcmp(name, "state_up")) {
 		lua_pushnumber (L, part_ptr->state_up);
 	} else {
@@ -879,11 +1068,6 @@ static int _part_rec_field_index(lua_State *L)
 
 	return _part_rec_field(part_ptr, name);
 }
-#if 0
-/* Filter before packing list of partitions */
-	char *allow_groups;	/* comma delimited list of groups */
-	uid_t *allow_uids;	/* zero terminated list of allowed users */
-#endif
 
 static bool _user_can_use_part(uint32_t user_id, uint32_t submit_uid,
 			       struct part_record *part_ptr)
@@ -919,10 +1103,7 @@ static void _push_partition_list(uint32_t user_id, uint32_t submit_uid)
 	while ((part_ptr = (struct part_record *) list_next(part_iterator))) {
 		if (!_user_can_use_part(user_id, submit_uid, part_ptr))
 			continue;
-#if 0
-		lua_pushlightuserdata(L, part_ptr);
-		lua_rawseti(L, -2, i++);
-#else
+
 		/* Create an empty table, with a metatable that looks up the
 		 * data for the partition.
 		 */
@@ -940,7 +1121,6 @@ static void _push_partition_list(uint32_t user_id, uint32_t submit_uid)
 
 		lua_setfield(L, -2, part_ptr->name);
 	}
-#endif
 	list_iterator_destroy(part_iterator);
 }
 
@@ -997,9 +1177,11 @@ static void _register_lua_slurm_output_functions (void)
 	lua_pushnumber (L, MAIL_JOB_END);
 	lua_setfield (L, -2, "MAIL_JOB_END");
 	lua_pushnumber (L, MAIL_JOB_FAIL);
-	lua_setfield (L, -2, "MAIL_FAIL");
+	lua_setfield (L, -2, "MAIL_JOB_FAIL");
 	lua_pushnumber (L, MAIL_JOB_REQUEUE);
 	lua_setfield (L, -2, "MAIL_JOB_REQUEUE");
+	lua_pushnumber (L, MAIL_JOB_STAGE_OUT);
+	lua_setfield (L, -2, "MAIL_JOB_STAGE_OUT");
 	lua_pushnumber (L, MEM_PER_CPU);
 	lua_setfield (L, -2, "MEM_PER_CPU");
 	lua_pushnumber (L, NICE_OFFSET);
@@ -1017,10 +1199,12 @@ static void _register_lua_slurm_output_functions (void)
 
 static void _register_lua_slurm_struct_functions (void)
 {
-	lua_pushcfunction(L, _get_job_rec_field);
-	lua_setglobal(L, "_get_job_rec_field");
-	lua_pushcfunction(L, _get_job_req_field);
-	lua_setglobal(L, "_get_job_req_field");
+	lua_pushcfunction(L, _get_job_env_field_name);
+	lua_setglobal(L, "_get_job_env_field_name");
+	lua_pushcfunction(L, _get_job_req_field_name);
+	lua_setglobal(L, "_get_job_req_field_name");
+	lua_pushcfunction(L, _set_job_env_field);
+	lua_setglobal(L, "_set_job_env_field");
 	lua_pushcfunction(L, _set_job_req_field);
 	lua_setglobal(L, "_set_job_req_field");
 	lua_pushcfunction(L, _get_part_rec_field);
diff --git a/src/plugins/job_submit/partition/Makefile.in b/src/plugins/job_submit/partition/Makefile.in
index 5808376d8..c27206324 100644
--- a/src/plugins/job_submit/partition/Makefile.in
+++ b/src/plugins/job_submit/partition/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/job_submit/partition/job_submit_partition.c b/src/plugins/job_submit/partition/job_submit_partition.c
index df2bbfe56..936e17866 100644
--- a/src/plugins/job_submit/partition/job_submit_partition.c
+++ b/src/plugins/job_submit/partition/job_submit_partition.c
@@ -91,14 +91,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incoming
- *                    messages that this plugin can accept
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Job submit partition plugin";
 const char plugin_type[]       	= "job_submit/partition";
-const uint32_t plugin_version   = 110;
-const uint32_t min_plug_version = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 /*****************************************************************************\
  * We've provided a simple example of the type of things you can do with this
diff --git a/src/plugins/job_submit/pbs/Makefile.in b/src/plugins/job_submit/pbs/Makefile.in
index 6034deb56..326af6b43 100644
--- a/src/plugins/job_submit/pbs/Makefile.in
+++ b/src/plugins/job_submit/pbs/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -279,6 +282,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -328,8 +333,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -348,6 +357,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -391,6 +403,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -414,6 +427,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/job_submit/pbs/job_submit_pbs.c b/src/plugins/job_submit/pbs/job_submit_pbs.c
index c2b1def77..69f186a6a 100644
--- a/src/plugins/job_submit/pbs/job_submit_pbs.c
+++ b/src/plugins/job_submit/pbs/job_submit_pbs.c
@@ -88,14 +88,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incoming
- *                    messages that this plugin can accept
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Job submit PBS plugin";
 const char plugin_type[]       	= "job_submit/pbs";
-const uint32_t plugin_version   = 100;
-const uint32_t min_plug_version = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 static pthread_mutex_t depend_mutex = PTHREAD_MUTEX_INITIALIZER;
 
diff --git a/src/plugins/job_submit/require_timelimit/Makefile.in b/src/plugins/job_submit/require_timelimit/Makefile.in
index 015e00376..afbe1e734 100644
--- a/src/plugins/job_submit/require_timelimit/Makefile.in
+++ b/src/plugins/job_submit/require_timelimit/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -275,6 +278,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -324,8 +329,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -344,6 +353,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -387,6 +399,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -410,6 +423,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/job_submit/require_timelimit/job_submit_require_timelimit.c b/src/plugins/job_submit/require_timelimit/job_submit_require_timelimit.c
index fb2cf1251..03c245516 100644
--- a/src/plugins/job_submit/require_timelimit/job_submit_require_timelimit.c
+++ b/src/plugins/job_submit/require_timelimit/job_submit_require_timelimit.c
@@ -38,10 +38,35 @@
 
 #include "src/slurmctld/slurmctld.h"
 
-const char plugin_name[]="Require time limit jobsubmit plugin";
-const char plugin_type[]="job_submit/require_timelimit";
-const uint32_t plugin_version   = 110;
-const uint32_t min_plug_version = 100;
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *	<application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "auth" for SLURM authentication) and <method> is a
+ * description of how this plugin satisfies that application.  SLURM will
+ * only load authentication plugins if the plugin_type string has a prefix
+ * of "auth/".
+ *
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
+ */
+
+const char plugin_name[] = "Require time limit jobsubmit plugin";
+const char plugin_type[] = "job_submit/require_timelimit";
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 extern int job_submit(struct job_descriptor *job_desc, uint32_t submit_uid,
 		      char **err_msg)
diff --git a/src/plugins/job_submit/throttle/Makefile.in b/src/plugins/job_submit/throttle/Makefile.in
index 1240698c5..d53df003e 100644
--- a/src/plugins/job_submit/throttle/Makefile.in
+++ b/src/plugins/job_submit/throttle/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/job_submit/throttle/job_submit_throttle.c b/src/plugins/job_submit/throttle/job_submit_throttle.c
index 4f535bcad..896067df5 100644
--- a/src/plugins/job_submit/throttle/job_submit_throttle.c
+++ b/src/plugins/job_submit/throttle/job_submit_throttle.c
@@ -93,14 +93,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - specifies the version number of the plugin.
- * min_plug_version - specifies the minumum version number of incoming
- *                    messages that this plugin can accept
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Job submit throttle plugin";
 const char plugin_type[]       	= "job_submit/throttle";
-const uint32_t plugin_version   = 110;
-const uint32_t min_plug_version = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 typedef struct thru_put {
 	uint32_t    uid;
diff --git a/src/plugins/jobacct_gather/Makefile.am b/src/plugins/jobacct_gather/Makefile.am
index 21525a1a8..3c675eb3a 100644
--- a/src/plugins/jobacct_gather/Makefile.am
+++ b/src/plugins/jobacct_gather/Makefile.am
@@ -1,3 +1,3 @@
 # Makefile for jobacct plugins
 
-SUBDIRS = linux aix cgroup none
+SUBDIRS = linux aix cgroup none common
diff --git a/src/plugins/jobacct_gather/Makefile.in b/src/plugins/jobacct_gather/Makefile.in
index 9aacce28b..2acfa6f66 100644
--- a/src/plugins/jobacct_gather/Makefile.in
+++ b/src/plugins/jobacct_gather/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -447,7 +461,7 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = linux aix cgroup none
+SUBDIRS = linux aix cgroup none common
 all: all-recursive
 
 .SUFFIXES:
diff --git a/src/plugins/jobacct_gather/aix/Makefile.in b/src/plugins/jobacct_gather/aix/Makefile.in
index 5b8c5d2a3..816604915 100644
--- a/src/plugins/jobacct_gather/aix/Makefile.in
+++ b/src/plugins/jobacct_gather/aix/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c b/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
index d3d17d3d0..a90d5bbf6 100644
--- a/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
+++ b/src/plugins/jobacct_gather/aix/jobacct_gather_aix.c
@@ -78,16 +78,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "Job accounting gather AIX plugin";
 const char plugin_type[] = "jobacct_gather/aix";
-const uint32_t plugin_version = 200;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 /* Other useful declarations */
 static int pagesize = 0;
@@ -240,7 +236,7 @@ static List _get_precs(List task_list, bool pgid_plugin, uint64_t cont_id,
  *    wrong.
  */
 extern void jobacct_gather_p_poll_data(
-	List task_list, bool pgid_plugin, uint64_t cont_id)
+	List task_list, bool pgid_plugin, uint64_t cont_id, bool profile)
 {
 	static jag_callbacks_t callbacks;
 	static bool first = 1;
@@ -252,7 +248,8 @@ extern void jobacct_gather_p_poll_data(
 		callbacks.get_offspring_data = _get_offspring_data;
 	}
 
-	jag_common_poll_data(task_list, pgid_plugin, cont_id, &callbacks);
+	jag_common_poll_data(task_list, pgid_plugin, cont_id, &callbacks,
+			     profile);
 	return;
 }
 
diff --git a/src/plugins/jobacct_gather/cgroup/Makefile.in b/src/plugins/jobacct_gather/cgroup/Makefile.in
index 2749f2190..03149a0f0 100644
--- a/src/plugins/jobacct_gather/cgroup/Makefile.in
+++ b/src/plugins/jobacct_gather/cgroup/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -277,6 +280,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -326,8 +331,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -346,6 +355,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -389,6 +401,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -412,6 +425,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/jobacct_gather/cgroup/jobacct_gather_cgroup.c b/src/plugins/jobacct_gather/cgroup/jobacct_gather_cgroup.c
index 893e6fec8..61efb0505 100644
--- a/src/plugins/jobacct_gather/cgroup/jobacct_gather_cgroup.c
+++ b/src/plugins/jobacct_gather/cgroup/jobacct_gather_cgroup.c
@@ -45,10 +45,11 @@
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_protocol_defs.h"
 #include "src/common/slurm_acct_gather_energy.h"
-#include "src/slurmd/slurmd/slurmd.h"
 #include "src/common/xstring.h"
-#include "src/plugins/jobacct_gather/cgroup/jobacct_gather_cgroup.h"
 #include "src/slurmd/common/proctrack.h"
+#include "src/slurmd/common/xcpuinfo.h"
+#include "src/slurmd/slurmd/slurmd.h"
+#include "src/plugins/jobacct_gather/cgroup/jobacct_gather_cgroup.h"
 #include "../common/common_jag.h"
 
 #define _DEBUG 0
@@ -88,16 +89,12 @@ int bg_recover = NOT_FROM_CONTROLLER;
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "Job accounting gather cgroup plugin";
 const char plugin_type[] = "jobacct_gather/cgroup";
-const uint32_t plugin_version = 200;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 /* Other useful declarations */
 static slurm_cgroup_conf_t slurm_cgroup_conf;
@@ -285,7 +282,7 @@ extern int fini (void)
  *    wrong.
  */
 extern void jobacct_gather_p_poll_data(
-	List task_list, bool pgid_plugin, uint64_t cont_id)
+	List task_list, bool pgid_plugin, uint64_t cont_id, bool profile)
 {
 	static jag_callbacks_t callbacks;
 	static bool first = 1;
@@ -296,7 +293,8 @@ extern void jobacct_gather_p_poll_data(
 		callbacks.prec_extra = _prec_extra;
 	}
 
-	jag_common_poll_data(task_list, pgid_plugin, cont_id, &callbacks);
+	jag_common_poll_data(task_list, pgid_plugin, cont_id, &callbacks,
+			     profile);
 
 	return;
 }
diff --git a/src/plugins/jobacct_gather/cgroup/jobacct_gather_cgroup_cpuacct.c b/src/plugins/jobacct_gather/cgroup/jobacct_gather_cgroup_cpuacct.c
index 0d90f0093..5a62b67db 100644
--- a/src/plugins/jobacct_gather/cgroup/jobacct_gather_cgroup_cpuacct.c
+++ b/src/plugins/jobacct_gather/cgroup/jobacct_gather_cgroup_cpuacct.c
@@ -229,13 +229,17 @@ jobacct_gather_cgroup_cpuacct_attach_task(pid_t pid, jobacct_id_t *jobacct_id)
 	/* build job step cgroup relative path if not set (may not be) */
 	if (*jobstep_cgroup_path == '\0') {
 		int len;
-		if (stepid == SLURM_BATCH_SCRIPT)
+		if (stepid == SLURM_BATCH_SCRIPT) {
 			len = snprintf(jobstep_cgroup_path, PATH_MAX,
 				       "%s/step_batch", job_cgroup_path);
-		else
+		} else if (stepid == SLURM_EXTERN_CONT) {
+			len = snprintf(jobstep_cgroup_path, PATH_MAX,
+				       "%s/step_extern", job_cgroup_path);
+		} else {
 			len = snprintf(jobstep_cgroup_path, PATH_MAX,
 				       "%s/step_%u",
 				       job_cgroup_path, stepid);
+		}
 		if (len >= PATH_MAX) {
 			error("jobacct_gather/cgroup: unable to build job step "
 			      "%u cpuacct cg relative path : %m", stepid);
diff --git a/src/plugins/jobacct_gather/common/Makefile.in b/src/plugins/jobacct_gather/common/Makefile.in
index f483add94..5f9d5d80f 100644
--- a/src/plugins/jobacct_gather/common/Makefile.in
+++ b/src/plugins/jobacct_gather/common/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -242,6 +245,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -291,8 +296,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -311,6 +320,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -354,6 +366,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -377,6 +390,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/jobacct_gather/common/common_jag.c b/src/plugins/jobacct_gather/common/common_jag.c
index d8889ebca..02bc90cdb 100644
--- a/src/plugins/jobacct_gather/common/common_jag.c
+++ b/src/plugins/jobacct_gather/common/common_jag.c
@@ -41,6 +41,8 @@
 #include <dirent.h>
 #include <fcntl.h>
 #include <signal.h>
+#include <time.h>
+#include <ctype.h>
 
 #include "src/common/slurm_xlator.h"
 #include "src/common/slurm_jobacct_gather.h"
@@ -58,11 +60,24 @@ static long hertz = 0;
 static int my_pagesize = 0;
 static DIR  *slash_proc = NULL;
 static int energy_profile = ENERGY_DATA_JOULES_TASK;
+static uint64_t debug_flags = 0;
+
+static int _find_prec(void *x, void *key)
+{
+	jag_prec_t *prec = (jag_prec_t *) x;
+	struct jobacctinfo *jobacct = (struct jobacctinfo *) key;
+
+	if (prec->pid == jobacct->pid)
+		return 1;
+
+	return 0;
+}
 
 /* return weighted frequency in mhz */
 static uint32_t _update_weighted_freq(struct jobacctinfo *jobacct,
 				      char * sbuf)
 {
+	uint32_t tot_cpu;
 	int thisfreq = 0;
 
 	if (cpunfo_frequency)
@@ -73,26 +88,99 @@ static uint32_t _update_weighted_freq(struct jobacctinfo *jobacct,
 
 	jobacct->current_weighted_freq =
 		jobacct->current_weighted_freq +
-		jobacct->this_sampled_cputime * thisfreq;
-	if (jobacct->tot_cpu) {
-		return (jobacct->current_weighted_freq /
-			jobacct->tot_cpu);
+		(uint32_t)jobacct->this_sampled_cputime * thisfreq;
+	tot_cpu = (uint32_t) jobacct->tot_cpu;	/* Cast from double */
+	if (tot_cpu) {
+		return (uint32_t) (jobacct->current_weighted_freq / tot_cpu);
 	} else
 		return thisfreq;
 }
 
-static char *_skipdot (char *str)
+/* Parse /proc/cpuinfo file for CPU frequency.
+ * Store the value in global variable cpunfo_frequency
+ * RET: True if read valid CPU frequency */
+inline static bool _get_freq(char *str)
 {
-	int pntr = 0;
-	while (str[pntr]) {
-		if (str[pntr] == '.') {
-			str[pntr] = '0';
-			break;
-		}
-		pntr++;
+	char *sep = NULL;
+	double cpufreq_value;
+	int cpu_mult;
+
+	if (strstr(str, "MHz"))
+		cpu_mult = 1;
+	else if (strstr(str, "GHz"))
+		cpu_mult = 1000;	/* Scale to MHz */
+	else
+		return false;
+
+	sep = strchr(str, ':');
+	if (!sep)
+		return false;
+
+	if (sscanf(sep + 2, "%lf", &cpufreq_value) < 1)
+		return false;
+
+	cpunfo_frequency = cpufreq_value * cpu_mult;
+	debug2("cpunfo_frequency=%d", cpunfo_frequency);
+
+	return true;
+}
+
+/*
+ * collects the Pss value from /proc/<pid>/smaps
+ */
+static int _get_pss(char *proc_smaps_file, jag_prec_t *prec)
+{
+        uint64_t pss;
+	uint64_t p;
+        char line[128];
+        FILE *fp;
+	int i;
+
+	fp = fopen(proc_smaps_file, "r");
+        if (!fp) {
+                return -1;
+        }
+
+	fcntl(fileno(fp), F_SETFD, FD_CLOEXEC);
+	pss = 0;
+
+        while (fgets(line,sizeof(line),fp)) {
+
+                if (strncmp(line, "Pss:", 4) != 0) {
+                        continue;
+                }
+
+                for (i = 4; i < sizeof(line); i++) {
+
+                        if (!isdigit(line[i])) {
+                                continue;
+                        }
+                        if (sscanf(&line[i],"%"PRIu64"", &p) == 1) {
+                                pss += p;
+                        }
+                        break;
+                }
+        }
+
+	/* Check for error
+	 */
+	if (ferror(fp)) {
+		error("%s: ferror() indicates error on file %s",
+		      __func__, proc_smaps_file);
+		fclose(fp);
+		return -1;
 	}
-	str[pntr+3] = '\0';
-	return str;
+
+        fclose(fp);
+        /* Sanity checks */
+        if (pss > 0 && prec->rss > pss) {
+                prec->rss = pss;
+        }
+
+	debug3("%s: read pss %"PRIu64" for process %s",
+	       __func__, pss, proc_smaps_file);
+
+        return 0;
 }
 
 static int _get_sys_interface_freq_line(uint32_t cpu, char *filename,
@@ -102,7 +190,6 @@ static int _get_sys_interface_freq_line(uint32_t cpu, char *filename,
 	FILE *sys_fp = NULL;
 	char freq_file[80];
 	char cpunfo_line [128];
-	char cpufreq_line [10];
 
 	if (cpunfo_frequency)
 		/* scaling not enabled, static freq obtained */
@@ -124,22 +211,16 @@ static int _get_sys_interface_freq_line(uint32_t cpu, char *filename,
 		fclose(sys_fp);
 	} else {
 		/* frequency scaling not enabled */
-		if (!cpunfo_frequency){
+		if (!cpunfo_frequency) {
 			snprintf(freq_file, 14, "/proc/cpuinfo");
 			debug2("_get_sys_interface_freq_line: filename = %s ",
 			       freq_file);
 			if ((sys_fp = fopen(freq_file, "r")) != NULL) {
-				while (fgets(cpunfo_line, sizeof cpunfo_line,
+				while (fgets(cpunfo_line, sizeof(cpunfo_line),
 					     sys_fp) != NULL) {
-					if (strstr(cpunfo_line, "cpu MHz") ||
-					    strstr(cpunfo_line, "cpu GHz")) {
+					if (_get_freq(cpunfo_line))
 						break;
-					}
 				}
-				strncpy(cpufreq_line, cpunfo_line+11, 8);
-				_skipdot(cpufreq_line);
-				sscanf(cpufreq_line, "%d", &cpunfo_frequency);
-				debug2("cpunfo_frequency= %d",cpunfo_frequency);
 				fclose(sys_fp);
 			}
 		}
@@ -359,10 +440,11 @@ static int _get_process_io_data_line(int in, jag_prec_t *prec) {
 	return 1;
 }
 
-static void _handle_stats(List prec_list, char *proc_stat_file,
-			  char *proc_io_file, jag_callbacks_t *callbacks)
+static void _handle_stats(List prec_list, char *proc_stat_file, char *proc_io_file,
+			  char *proc_smaps_file, jag_callbacks_t *callbacks)
 {
 	static int no_share_data = -1;
+	static int use_pss = -1;
 	FILE *stat_fp = NULL;
 	FILE *io_fp = NULL;
 	int fd, fd2;
@@ -374,6 +456,11 @@ static void _handle_stats(List prec_list, char *proc_stat_file,
 			no_share_data = 1;
 		else
 			no_share_data = 0;
+
+		if (acct_params && strstr(acct_params, "UsePss"))
+			use_pss = 1;
+		else
+			use_pss = 0;
 		xfree(acct_params);
 	}
 
@@ -393,22 +480,35 @@ static void _handle_stats(List prec_list, char *proc_stat_file,
 	fcntl(fd, F_SETFD, FD_CLOEXEC);
 
 	prec = xmalloc(sizeof(jag_prec_t));
-	if (_get_process_data_line(fd, prec)) {
-		if (no_share_data)
-			_remove_share_data(proc_stat_file, prec);
-		list_append(prec_list, prec);
-		if ((io_fp = fopen(proc_io_file, "r"))) {
-			fd2 = fileno(io_fp);
-			fcntl(fd2, F_SETFD, FD_CLOEXEC);
-			_get_process_io_data_line(fd2, prec);
-			fclose(io_fp);
-		}
-		if (callbacks->prec_extra)
-			(*(callbacks->prec_extra))(prec);
-	} else
+	if (!_get_process_data_line(fd, prec)) {
 		xfree(prec);
+		fclose(stat_fp);
+		return;
+	}
 	fclose(stat_fp);
 
+	/* Remove shared data from rss */
+	if (no_share_data)
+		_remove_share_data(proc_stat_file, prec);
+
+	/* Use PSS instead if RSS */
+	if (use_pss) {
+		if (_get_pss(proc_smaps_file, prec) == -1) {
+			xfree(prec);
+			return;
+		}
+	}
+
+	list_append(prec_list, prec);
+
+	if ((io_fp = fopen(proc_io_file, "r"))) {
+		fd2 = fileno(io_fp);
+		fcntl(fd2, F_SETFD, FD_CLOEXEC);
+		_get_process_io_data_line(fd2, prec);
+		fclose(io_fp);
+	}
+	if (callbacks->prec_extra)
+		(*(callbacks->prec_extra))(prec);
 }
 
 static List _get_precs(List task_list, bool pgid_plugin, uint64_t cont_id,
@@ -417,6 +517,7 @@ static List _get_precs(List task_list, bool pgid_plugin, uint64_t cont_id,
 	List prec_list = list_create(destroy_jag_prec);
 	char	proc_stat_file[256];	/* Allow ~20x extra length */
 	char	proc_io_file[256];	/* Allow ~20x extra length */
+	char	proc_smaps_file[256];	/* Allow ~20x extra length */
 	static	int	slash_proc_open = 0;
 	int i;
 
@@ -427,16 +528,14 @@ static List _get_precs(List task_list, bool pgid_plugin, uint64_t cont_id,
 		proctrack_g_get_pids(cont_id, &pids, &npids);
 		if (!npids) {
 			/* update consumed energy even if pids do not exist */
-			ListIterator itr = list_iterator_create(task_list);
 			struct jobacctinfo *jobacct = NULL;
-			if ((jobacct = list_next(itr))) {
+			if ((jobacct = list_peek(task_list))) {
 				acct_gather_energy_g_get_data(
 					energy_profile,
 					&jobacct->energy);
-				debug2("getjoules_task energy = %u",
+				debug2("getjoules_task energy = %"PRIu64"",
 				       jobacct->energy.consumed_energy);
 			}
-			list_iterator_destroy(itr);
 
 			debug4("no pids in this container %"PRIu64"", cont_id);
 			goto finished;
@@ -444,8 +543,9 @@ static List _get_precs(List task_list, bool pgid_plugin, uint64_t cont_id,
 		for (i = 0; i < npids; i++) {
 			snprintf(proc_stat_file, 256, "/proc/%d/stat", pids[i]);
 			snprintf(proc_io_file, 256, "/proc/%d/io", pids[i]);
+			snprintf(proc_smaps_file, 256, "/proc/%d/smaps", pids[i]);
 			_handle_stats(prec_list, proc_stat_file, proc_io_file,
-				      callbacks);
+				      proc_smaps_file, callbacks);
 		}
 		xfree(pids);
 	} else {
@@ -464,6 +564,7 @@ static List _get_precs(List task_list, bool pgid_plugin, uint64_t cont_id,
 		}
 		strcpy(proc_stat_file, "/proc/");
 		strcpy(proc_io_file, "/proc/");
+		strcpy(proc_smaps_file, "/proc/");
 
 		while ((slash_proc_entry = readdir(slash_proc))) {
 
@@ -513,8 +614,27 @@ static List _get_precs(List task_list, bool pgid_plugin, uint64_t cont_id,
 			} while (*iptr);
 			*optr2 = 0;
 
+			optr2 = proc_smaps_file + sizeof("/proc");
+			iptr = slash_proc_entry->d_name;
+			i = 0;
+			do {
+				if ((*iptr < '0') ||
+				    ((*optr2++ = *iptr++) > '9')) {
+					i = -1;
+					break;
+				}
+			} while (*iptr);
+			if (i == -1)
+				continue;
+			iptr = (char*)"/smaps";
+
+			do {
+				*optr2++ = *iptr++;
+			} while (*iptr);
+			*optr2 = 0;
+
 			_handle_stats(prec_list, proc_stat_file, proc_io_file,
-				      callbacks);
+				      proc_smaps_file,callbacks);
 		}
 	}
 
@@ -523,17 +643,111 @@ finished:
 	return prec_list;
 }
 
+static void _record_profile(struct jobacctinfo *jobacct)
+{
+	enum {
+		FIELD_CPUFREQ,
+		FIELD_CPUTIME,
+		FIELD_CPUUTIL,
+		FIELD_RSS,
+		FIELD_VMSIZE,
+		FIELD_PAGES,
+		FIELD_READ,
+		FIELD_WRITE,
+		FIELD_CNT
+	};
+
+	acct_gather_profile_dataset_t dataset[] = {
+		{ "CPUFrequency", PROFILE_FIELD_UINT64 },
+		{ "CPUTime", PROFILE_FIELD_DOUBLE },
+		{ "CPUUtilization", PROFILE_FIELD_DOUBLE },
+		{ "RSS", PROFILE_FIELD_UINT64 },
+		{ "VMSize", PROFILE_FIELD_UINT64 },
+		{ "Pages", PROFILE_FIELD_UINT64 },
+		{ "ReadMB", PROFILE_FIELD_DOUBLE },
+		{ "WriteMB", PROFILE_FIELD_DOUBLE },
+		{ NULL, PROFILE_FIELD_NOT_SET }
+	};
+
+	static int profile_gid = -1;
+	double et;
+	union {
+		double d;
+		uint64_t u64;
+	} data[FIELD_CNT];
+
+	if (profile_gid == -1)
+		profile_gid = acct_gather_profile_g_create_group("Tasks");
+
+	/* Create the dataset first */
+	if (jobacct->dataset_id < 0) {
+		char ds_name[32];
+		snprintf(ds_name, sizeof(ds_name), "%u", jobacct->id.taskid);
+
+		jobacct->dataset_id = acct_gather_profile_g_create_dataset(
+			ds_name, profile_gid, dataset);
+		if (jobacct->dataset_id == SLURM_ERROR) {
+			error("JobAcct: Failed to create the dataset for "
+			      "task %d",
+			      jobacct->pid);
+			return;
+		}
+	}
+
+	if (jobacct->dataset_id < 0)
+		return;
+
+	data[FIELD_CPUFREQ].u64 = jobacct->act_cpufreq;
+	data[FIELD_RSS].u64 = jobacct->tot_rss;
+	data[FIELD_VMSIZE].u64 = jobacct->tot_vsize;
+	data[FIELD_PAGES].u64 = jobacct->tot_pages;
+
+	/* delta from last snapshot */
+	if (!jobacct->last_time) {
+		data[FIELD_CPUTIME].d = 0;
+		data[FIELD_CPUUTIL].d = 0.0;
+		data[FIELD_READ].d = 0.0;
+		data[FIELD_WRITE].d = 0.0;
+	} else {
+		data[FIELD_CPUTIME].d =
+			jobacct->tot_cpu - jobacct->last_total_cputime;
+		et = (jobacct->cur_time - jobacct->last_time);
+		if (!et)
+			data[FIELD_CPUUTIL].d = 0.0;
+		else
+			data[FIELD_CPUUTIL].d =
+				(100.0 * (double)data[FIELD_CPUTIME].d) /
+				((double) et);
+
+		data[FIELD_READ].d = jobacct->tot_disk_read -
+			jobacct->last_tot_disk_read;
+
+		data[FIELD_WRITE].d = jobacct->tot_disk_write -
+			jobacct->last_tot_disk_write;
+	}
+
+	if (debug_flags & DEBUG_FLAG_PROFILE) {
+		char str[256];
+		info("PROFILE-Task: %s", acct_gather_profile_dataset_str(
+			     dataset, data, str, sizeof(str)));
+	}
+	acct_gather_profile_g_add_sample_data(jobacct->dataset_id,
+	                                      (void *)data, jobacct->cur_time);
+}
+
 extern void jag_common_init(long in_hertz)
 {
 	uint32_t profile_opt;
 
+	debug_flags = slurm_get_debug_flags();
+
 	acct_gather_profile_g_get(ACCT_GATHER_PROFILE_RUNNING,
 				  &profile_opt);
 	/* If we are profiling energy it will be checked at a
 	   different rate, so just grab the last one.
 	*/
 	if (profile_opt & ACCT_GATHER_PROFILE_ENERGY)
-		energy_profile = ENERGY_DATA_STRUCT;
+		energy_profile = ENERGY_DATA_NODE_ENERGY;
 
 	if (in_hertz) {
 		hertz = in_hertz;
@@ -577,23 +791,23 @@ extern void print_jag_prec(jag_prec_t *prec)
 
 extern void jag_common_poll_data(
 	List task_list, bool pgid_plugin, uint64_t cont_id,
-	jag_callbacks_t *callbacks)
+	jag_callbacks_t *callbacks, bool profile)
 {
 	/* Update the data */
 	List prec_list = NULL;
 	uint64_t total_job_mem = 0, total_job_vsize = 0;
 	ListIterator itr;
-	ListIterator itr2;
 	jag_prec_t *prec = NULL;
 	struct jobacctinfo *jobacct = NULL;
 	static int processing = 0;
-	char		sbuf[72];
+	char sbuf[72];
 	int energy_counted = 0;
-	static int first = 1;
+	time_t ct;
+	static int no_over_memory_kill = -1;
 
 	xassert(callbacks);
 
-	if (!pgid_plugin && (cont_id == (uint64_t)NO_VAL)) {
+	if (!pgid_plugin && (cont_id == NO_VAL64)) {
 		debug("cont_id hasn't been set yet not running poll");
 		return;
 	}
@@ -604,9 +818,19 @@ extern void jag_common_poll_data(
 	}
 	processing = 1;
 
+	if (no_over_memory_kill == -1) {
+		char *acct_params = slurm_get_jobacct_gather_params();
+		if (acct_params && strstr(acct_params, "NoOverMemoryKill"))
+			no_over_memory_kill = 1;
+		else
+			no_over_memory_kill = 0;
+		xfree(acct_params);
+	}
+
 	if (!callbacks->get_precs)
 		callbacks->get_precs = _get_precs;
 
+	ct = time(NULL);
 	prec_list = (*(callbacks->get_precs))(task_list, pgid_plugin, cont_id,
 					      callbacks);
 
@@ -615,97 +839,104 @@ extern void jag_common_poll_data(
 
 	itr = list_iterator_create(task_list);
 	while ((jobacct = list_next(itr))) {
-		itr2 = list_iterator_create(prec_list);
-		while ((prec = list_next(itr2))) {
-			if (prec->pid == jobacct->pid) {
-				uint32_t cpu_calc;
+		double cpu_calc;
+		double last_total_cputime;
+		if (!(prec = list_find_first(prec_list, _find_prec, jobacct)))
+			continue;
+
 #if _DEBUG
-				info("pid:%u ppid:%u rss:%d KB",
-				     prec->pid, prec->ppid, prec->rss);
+		info("pid:%u ppid:%u rss:%d KB",
+		     prec->pid, prec->ppid, prec->rss);
 #endif
-				/* find all my descendents */
-				if (callbacks->get_offspring_data)
-					(*(callbacks->get_offspring_data))
-						(prec_list, prec, prec->pid);
-				cpu_calc = (prec->ssec + prec->usec)/hertz;
-				/* tally their usage */
-				jobacct->max_rss =
-					MAX(jobacct->max_rss, prec->rss);
-				jobacct->tot_rss = prec->rss;
-				total_job_mem += prec->rss;
-				jobacct->max_vsize =
-					MAX(jobacct->max_vsize, prec->vsize);
-				jobacct->tot_vsize = prec->vsize;
-				total_job_vsize += prec->vsize;
-				jobacct->max_pages =
-					MAX(jobacct->max_pages, prec->pages);
-				jobacct->tot_pages = prec->pages;
-				jobacct->max_disk_read = MAX(
-					jobacct->max_disk_read,
-					prec->disk_read);
-				jobacct->tot_disk_read = prec->disk_read;
-				jobacct->max_disk_write = MAX(
-					jobacct->max_disk_write,
-					prec->disk_write);
-				jobacct->tot_disk_write = prec->disk_write;
-				jobacct->min_cpu =
-					MAX(jobacct->min_cpu, cpu_calc);
-				jobacct->last_total_cputime = jobacct->tot_cpu;
-				/* Update the cpu times
-				 */
-				jobacct->tot_cpu = cpu_calc;
-				jobacct->user_cpu_sec = prec->usec/hertz;
-				jobacct->sys_cpu_sec = prec->ssec/hertz;
-				debug2("%s: %d mem size %"PRIu64" %"PRIu64" "
-				       "time %u(%u+%u)", __func__,
-				       jobacct->pid, jobacct->max_rss,
-				       jobacct->max_vsize, jobacct->tot_cpu,
-				       jobacct->user_cpu_sec,
-				       jobacct->sys_cpu_sec);
-				/* compute frequency */
-				jobacct->this_sampled_cputime =
-					cpu_calc - jobacct->last_total_cputime;
-				_get_sys_interface_freq_line(
-					prec->last_cpu,
-					"cpuinfo_cur_freq", sbuf);
-				jobacct->act_cpufreq =
-					_update_weighted_freq(jobacct, sbuf);
-				debug2("%s: Task average frequency = %u "
-				       "pid %d mem size %"PRIu64" %"PRIu64" "
-				       "time %u(%u+%u)", __func__,
-				       jobacct->act_cpufreq,
-				       jobacct->pid, jobacct->max_rss,
-				       jobacct->max_vsize, jobacct->tot_cpu,
-				       jobacct->user_cpu_sec,
-				       jobacct->sys_cpu_sec);
-				/* get energy consumption
-  				 * only once is enough since we
- 				 * report per node energy consumption */
-				debug2("energycounted = %d", energy_counted);
-				if (energy_counted == 0) {
-					acct_gather_energy_g_get_data(
-						energy_profile,
-						&jobacct->energy);
-					debug2("getjoules_task energy = %u",
-					       jobacct->energy.consumed_energy);
-					energy_counted = 1;
-				}
-				/* We only profile on after the first poll. */
-				if (!first)
-					acct_gather_profile_g_add_sample_data(
-						ACCT_GATHER_PROFILE_TASK,
-						jobacct);
-				break;
-			}
+		/* find all my descendents */
+		if (callbacks->get_offspring_data)
+			(*(callbacks->get_offspring_data))
+				(prec_list, prec, prec->pid);
+
+		last_total_cputime = jobacct->tot_cpu;
+
+		cpu_calc = (double)(prec->ssec + prec->usec)/(double)hertz;
+		/* tally their usage */
+		jobacct->max_rss =
+			MAX(jobacct->max_rss, prec->rss);
+		jobacct->tot_rss = prec->rss;
+		total_job_mem += prec->rss;
+		jobacct->max_vsize =
+			MAX(jobacct->max_vsize, prec->vsize);
+		jobacct->tot_vsize = prec->vsize;
+		total_job_vsize += prec->vsize;
+		jobacct->max_pages =
+			MAX(jobacct->max_pages, prec->pages);
+		jobacct->tot_pages = prec->pages;
+		jobacct->max_disk_read = MAX(
+			jobacct->max_disk_read,
+			prec->disk_read);
+		jobacct->tot_disk_read = prec->disk_read;
+		jobacct->max_disk_write = MAX(
+			jobacct->max_disk_write,
+			prec->disk_write);
+
+		jobacct->tot_disk_write = prec->disk_write;
+		jobacct->min_cpu =
+			MAX((double)jobacct->min_cpu, cpu_calc);
+
+		/* Update the cpu times
+		 */
+		jobacct->tot_cpu = cpu_calc;
+		jobacct->user_cpu_sec = prec->usec/hertz;
+		jobacct->sys_cpu_sec = prec->ssec/hertz;
+		debug2("%s: %d mem size %"PRIu64" %"PRIu64" "
+		       "time %f(%u+%u)", __func__,
+		       jobacct->pid, jobacct->max_rss,
+		       jobacct->max_vsize, jobacct->tot_cpu,
+		       jobacct->user_cpu_sec,
+		       jobacct->sys_cpu_sec);
+		/* compute frequency */
+		jobacct->this_sampled_cputime =
+			cpu_calc - last_total_cputime;
+		_get_sys_interface_freq_line(
+			prec->last_cpu,
+			"cpuinfo_cur_freq", sbuf);
+		jobacct->act_cpufreq =
+			_update_weighted_freq(jobacct, sbuf);
+		debug("%s: Task average frequency = %u "
+		       "pid %d mem size %"PRIu64" %"PRIu64" "
+		       "time %f(%u+%u)", __func__,
+		       jobacct->act_cpufreq,
+		       jobacct->pid, jobacct->max_rss,
+		       jobacct->max_vsize, jobacct->tot_cpu,
+		       jobacct->user_cpu_sec,
+		       jobacct->sys_cpu_sec);
+		/* get energy consumption
+		 * only once is enough since we
+		 * report per node energy consumption */
+		debug2("energycounted = %d", energy_counted);
+		if (energy_counted == 0) {
+			acct_gather_energy_g_get_data(
+				energy_profile,
+				&jobacct->energy);
+			debug2("getjoules_task energy = %"PRIu64,
+			       jobacct->energy.consumed_energy);
+			energy_counted = 1;
+		}
+		if (profile &&
+		    acct_gather_profile_g_is_active(ACCT_GATHER_PROFILE_TASK)) {
+			jobacct->cur_time = ct;
+
+			_record_profile(jobacct);
+
+			jobacct->last_tot_disk_read = jobacct->tot_disk_read;
+			jobacct->last_tot_disk_write = jobacct->tot_disk_write;
+			jobacct->last_total_cputime = jobacct->tot_cpu;
+			jobacct->last_time = jobacct->cur_time;
 		}
-		list_iterator_destroy(itr2);
 	}
 	list_iterator_destroy(itr);
 
-	jobacct_gather_handle_mem_limit(total_job_mem, total_job_vsize);
+	if (!no_over_memory_kill)
+		jobacct_gather_handle_mem_limit(total_job_mem, total_job_vsize);
 
 finished:
-	list_destroy(prec_list);
+	FREE_NULL_LIST(prec_list);
 	processing = 0;
-	first = 0;
 }
diff --git a/src/plugins/jobacct_gather/common/common_jag.h b/src/plugins/jobacct_gather/common/common_jag.h
index 40c21bb38..30d215ada 100644
--- a/src/plugins/jobacct_gather/common/common_jag.h
+++ b/src/plugins/jobacct_gather/common/common_jag.h
@@ -72,6 +72,6 @@ extern void print_jag_prec(jag_prec_t *prec);
 
 extern void jag_common_poll_data(
 	List task_list, bool pgid_plugin, uint64_t cont_id,
-	jag_callbacks_t *callbacks);
+	jag_callbacks_t *callbacks, bool profile);
 
 #endif
diff --git a/src/plugins/jobacct_gather/linux/Makefile.in b/src/plugins/jobacct_gather/linux/Makefile.in
index 30409410b..259e17afb 100644
--- a/src/plugins/jobacct_gather/linux/Makefile.in
+++ b/src/plugins/jobacct_gather/linux/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -275,6 +278,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -324,8 +329,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -344,6 +353,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -387,6 +399,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -410,6 +423,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c b/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
index 8a9fa6796..b094882f9 100644
--- a/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
+++ b/src/plugins/jobacct_gather/linux/jobacct_gather_linux.c
@@ -75,16 +75,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "Job accounting gather LINUX plugin";
 const char plugin_type[] = "jobacct_gather/linux";
-const uint32_t plugin_version = 200;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 /*
  * _get_offspring_data() -- collect memory usage data for the offspring
@@ -189,7 +185,7 @@ extern int fini (void)
  *    wrong.
  */
 extern void jobacct_gather_p_poll_data(
-	List task_list, bool pgid_plugin, uint64_t cont_id)
+	List task_list, bool pgid_plugin, uint64_t cont_id, bool profile)
 {
 	static jag_callbacks_t callbacks;
 	static bool first = 1;
@@ -202,7 +198,8 @@ extern void jobacct_gather_p_poll_data(
 		callbacks.get_offspring_data = _get_offspring_data;
 	}
 
-	jag_common_poll_data(task_list, pgid_plugin, cont_id, &callbacks);
+	jag_common_poll_data(task_list, pgid_plugin, cont_id, &callbacks,
+			     profile);
 	return;
 }
 
diff --git a/src/plugins/jobacct_gather/none/Makefile.in b/src/plugins/jobacct_gather/none/Makefile.in
index 2636852aa..abb535fff 100644
--- a/src/plugins/jobacct_gather/none/Makefile.in
+++ b/src/plugins/jobacct_gather/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/jobacct_gather/none/jobacct_gather_none.c b/src/plugins/jobacct_gather/none/jobacct_gather_none.c
index 8658aa617..9e19bfaa7 100644
--- a/src/plugins/jobacct_gather/none/jobacct_gather_none.c
+++ b/src/plugins/jobacct_gather/none/jobacct_gather_none.c
@@ -64,16 +64,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "Job accounting gather NOT_INVOKED plugin";
 const char plugin_type[] = "jobacct_gather/none";
-const uint32_t plugin_version = 100;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
diff --git a/src/plugins/jobcomp/Makefile.am b/src/plugins/jobcomp/Makefile.am
index 1898e0a06..e8199d208 100644
--- a/src/plugins/jobcomp/Makefile.am
+++ b/src/plugins/jobcomp/Makefile.am
@@ -1,3 +1,3 @@
 # Makefile for jobcomp plugins
 
-SUBDIRS = filetxt none script mysql
+SUBDIRS = elasticsearch filetxt none script mysql
diff --git a/src/plugins/jobcomp/Makefile.in b/src/plugins/jobcomp/Makefile.in
index 97c9b9040..2d3f4d2ec 100644
--- a/src/plugins/jobcomp/Makefile.in
+++ b/src/plugins/jobcomp/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -447,7 +461,7 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = filetxt none script mysql
+SUBDIRS = elasticsearch filetxt none script mysql
 all: all-recursive
 
 .SUFFIXES:
diff --git a/src/plugins/jobcomp/elasticsearch/Makefile.am b/src/plugins/jobcomp/elasticsearch/Makefile.am
new file mode 100644
index 000000000..bd085c4f2
--- /dev/null
+++ b/src/plugins/jobcomp/elasticsearch/Makefile.am
@@ -0,0 +1,22 @@
+# Makefile for jobcomp/elasticsearch plugin
+
+AUTOMAKE_OPTIONS = foreign
+
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
+
+if WITH_CURL
+ELASTICSEARCH = jobcomp_elasticsearch.la
+endif
+
+pkglib_LTLIBRARIES = $(ELASTICSEARCH)
+
+# Elasticsearch job completion logging plugin.
+jobcomp_elasticsearch_la_SOURCES = jobcomp_elasticsearch.c 
+
+jobcomp_elasticsearch_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+
+jobcomp_elasticsearch_la_LIBADD = $(LIBCURL)
+
diff --git a/src/plugins/jobcomp/elasticsearch/Makefile.in b/src/plugins/jobcomp/elasticsearch/Makefile.in
new file mode 100644
index 000000000..23b65060c
--- /dev/null
+++ b/src/plugins/jobcomp/elasticsearch/Makefile.in
@@ -0,0 +1,818 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for jobcomp/elasticsearch plugin
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/jobcomp/elasticsearch
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+	$(top_srcdir)/auxdir/depcomp
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+  test -z "$$files" \
+    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+         $(am__cd) "$$dir" && rm -f $$files; }; \
+  }
+am__installdirs = "$(DESTDIR)$(pkglibdir)"
+LTLIBRARIES = $(pkglib_LTLIBRARIES)
+am__DEPENDENCIES_1 =
+jobcomp_elasticsearch_la_DEPENDENCIES = $(am__DEPENDENCIES_1)
+am_jobcomp_elasticsearch_la_OBJECTS = jobcomp_elasticsearch.lo
+jobcomp_elasticsearch_la_OBJECTS =  \
+	$(am_jobcomp_elasticsearch_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 = 
+jobcomp_elasticsearch_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \
+	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
+	$(AM_CFLAGS) $(CFLAGS) $(jobcomp_elasticsearch_la_LDFLAGS) \
+	$(LDFLAGS) -o $@
+@WITH_CURL_TRUE@am_jobcomp_elasticsearch_la_rpath = -rpath \
+@WITH_CURL_TRUE@	$(pkglibdir)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo "  CC      " $@;
+am__v_CC_1 = 
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo "  CCLD    " $@;
+am__v_CCLD_1 = 
+SOURCES = $(jobcomp_elasticsearch_la_SOURCES)
+DIST_SOURCES = $(jobcomp_elasticsearch_la_SOURCES)
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
+@WITH_CURL_TRUE@ELASTICSEARCH = jobcomp_elasticsearch.la
+pkglib_LTLIBRARIES = $(ELASTICSEARCH)
+
+# Elasticsearch job completion logging plugin.
+jobcomp_elasticsearch_la_SOURCES = jobcomp_elasticsearch.c 
+jobcomp_elasticsearch_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+jobcomp_elasticsearch_la_LIBADD = $(LIBCURL)
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/jobcomp/elasticsearch/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/plugins/jobcomp/elasticsearch/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	list2=; for p in $$list; do \
+	  if test -f $$p; then \
+	    list2="$$list2 $$p"; \
+	  else :; fi; \
+	done; \
+	test -z "$$list2" || { \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(pkglibdir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" || exit 1; \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \
+	}
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	for p in $$list; do \
+	  $(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; \
+	locs=`for p in $$list; do echo $$p; done | \
+	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+	      sort -u`; \
+	test -z "$$locs" || { \
+	  echo rm -f $${locs}; \
+	  rm -f $${locs}; \
+	}
+
+jobcomp_elasticsearch.la: $(jobcomp_elasticsearch_la_OBJECTS) $(jobcomp_elasticsearch_la_DEPENDENCIES) $(EXTRA_jobcomp_elasticsearch_la_DEPENDENCIES) 
+	$(AM_V_CCLD)$(jobcomp_elasticsearch_la_LINK) $(am_jobcomp_elasticsearch_la_rpath) $(jobcomp_elasticsearch_la_OBJECTS) $(jobcomp_elasticsearch_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/jobcomp_elasticsearch.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+	for dir in "$(DESTDIR)$(pkglibdir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-pkglibLTLIBRARIES
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pkglibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-pkglibLTLIBRARIES cscopelist-am ctags \
+	ctags-am distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-pkglibLTLIBRARIES install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \
+	uninstall-pkglibLTLIBRARIES
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/jobcomp/elasticsearch/jobcomp_elasticsearch.c b/src/plugins/jobcomp/elasticsearch/jobcomp_elasticsearch.c
new file mode 100644
index 000000000..f4225f71b
--- /dev/null
+++ b/src/plugins/jobcomp/elasticsearch/jobcomp_elasticsearch.c
@@ -0,0 +1,983 @@
+/*****************************************************************************\
+ *  jobcomp_elasticsearch.c - elasticsearch slurm job completion logging plugin.
+ *****************************************************************************
+ *  Produced at Barcelona Supercomputing Center, in collaboration with 
+ *  Barcelona School of Informatics.
+ *  Written by Alejandro Sanchez Graells <alejandro.sanchezgraells@bsc.es>,
+ *  <asanchez1987@gmail.com>, who borrowed heavily from jobcomp/filetxt 
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#   include "config.h"
+#endif
+
+#if HAVE_STDINT_H
+#  include <stdint.h>
+#endif
+#if HAVE_INTTYPES_H
+#  include <inttypes.h>
+#endif
+
+#include <curl/curl.h>
+#include <fcntl.h>
+#include <grp.h>
+#include <pwd.h>
+#include <stdlib.h>
+#include <stddef.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include "src/common/assoc_mgr.h"
+#include "src/common/fd.h"
+#include "src/common/parse_time.h"
+#include "src/common/slurm_jobcomp.h"
+#include "src/common/slurm_protocol_defs.h"
+#include "src/common/slurm_time.h"
+#include "src/common/slurmdb_defs.h"
+#include "src/common/uid.h"
+#include "src/common/xstring.h"
+#include "src/slurmctld/slurmctld.h"
+#include "src/slurmctld/state_save.h"
+
+#define USE_ISO8601 1
+
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *	<application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "jobcomp" for SLURM job completion logging) and <method>
+ * is a description of how this plugin satisfies that application.  SLURM will
+ * only load job completion logging plugins if the plugin_type string has a
+ * prefix of "jobcomp/".
+ *
+ * plugin_version - an unsigned 32-bit integer giving the version number
+ * of the plugin.  If major and minor revisions are desired, the major
+ * version number may be multiplied by a suitable magnitude constant such
+ * as 100 or 1000.  Various SLURM versions will likely require a certain
+ * minimum version for their plugins as the job completion logging API
+ * matures.
+ */
+const char plugin_name[] = "Job completion elasticsearch logging plugin";
+const char plugin_type[] = "jobcomp/elasticsearch";
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
+
+#define JOBCOMP_DATA_FORMAT "{\"jobid\":%lu,\"username\":\"%s\","\
+	"\"user_id\":%lu,\"groupname\":\"%s\",\"group_id\":%lu,"\
+	"\"@start\":\"%s\",\"@end\":\"%s\",\"elapsed\":%ld,"\
+	"\"partition\":\"%s\",\"alloc_node\":\"%s\","\
+	"\"nodes\":\"%s\",\"total_cpus\":%lu,\"total_nodes\":%lu,"\
+	"\"derived_exitcode\":%lu,\"exitcode\":%lu,\"state\":\"%s\""
+
+/* Type for error string table entries */
+typedef struct {
+	int xe_number;
+	char *xe_message;
+} slurm_errtab_t;
+
+static slurm_errtab_t slurm_errtab[] = {
+	{0, "No error"},
+	{-1, "Unspecified error"}
+};
+
+/* Type for handling HTTP responses */
+struct http_response {
+	char *message;
+	size_t size;
+};
+
+/* Type for jobcomp data pending to be indexed */
+typedef struct {
+	uint32_t nelems;
+	char **jobs;
+} pending_jobs_t;
+
+pending_jobs_t pend_jobs;
+
+char *save_state_file = "elasticsearch_state";
+char *index_type = "/slurm/jobcomp";
+char *log_url = NULL;
+
+static pthread_mutex_t save_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t pend_jobs_lock = PTHREAD_MUTEX_INITIALIZER;
+
+/* A plugin-global errno. */
+static int plugin_errno = SLURM_SUCCESS;
+
+
+/* Get the user name for the give user_id */
+static void _get_user_name(uint32_t user_id, char *user_name, int buf_size)
+{
+	static uint32_t cache_uid = 0;
+	static char cache_name[32] = "root", *uname;
+
+	if (user_id != cache_uid) {
+		uname = uid_to_string((uid_t) user_id);
+		snprintf(cache_name, sizeof(cache_name), "%s", uname);
+		xfree(uname);
+		cache_uid = user_id;
+	}
+	snprintf(user_name, buf_size, "%s", cache_name);
+}
+
+/* Get the group name for the give group_id */
+static void _get_group_name(uint32_t group_id, char *group_name, int buf_size)
+{
+	static uint32_t cache_gid = 0;
+	static char cache_name[32] = "root", *gname;
+
+	if (group_id != cache_gid) {
+		gname = gid_to_string((gid_t) group_id);
+		snprintf(cache_name, sizeof(cache_name), "%s", gname);
+		xfree(gname);
+		cache_gid = group_id;
+	}
+	snprintf(group_name, buf_size, "%s", cache_name);
+}
+
+/*
+ * Linear search through table of errno values and strings,
+ * returns NULL on error, string on success.
+ */
+static char *_lookup_slurm_api_errtab(int errnum)
+{
+	char *res = NULL;
+	int i;
+
+	for (i = 0; i < sizeof(slurm_errtab) / sizeof(slurm_errtab_t); i++) {
+		if (slurm_errtab[i].xe_number == errnum) {
+			res = slurm_errtab[i].xe_message;
+			break;
+		}
+	}
+	return res;
+}
+
+/* Read file to data variable */
+static uint32_t _read_file(const char *file, char **data)
+{
+	uint32_t data_size = 0;
+	int data_allocated, data_read, fd, fsize = 0;
+	struct stat f_stat;
+
+	fd = open(file, O_RDONLY);
+	if (fd < 0) {
+		debug("Could not open jobcomp state file %s", file);
+		return data_size;
+	}
+	if (fstat(fd, &f_stat)) {
+		debug("Could not stat jobcomp state file %s", file);
+		close(fd);
+		return data_size;
+	}
+
+	fsize = f_stat.st_size;
+	data_allocated = BUF_SIZE;
+	*data = xmalloc(data_allocated);
+	while (1) {
+		data_read = read(fd, &(*data)[data_size], BUF_SIZE);
+		if (data_read < 0) {
+			if (errno == EINTR)
+				continue;
+			else {
+				debug("Read error on %s: %m", file);
+				break;
+			}
+		} else if (data_read == 0)	/* EOF */
+			break;
+		data_size += data_read;
+		data_allocated += data_read;
+		*data = xrealloc(*data, data_allocated);
+	}
+	close(fd);
+	if (data_size != fsize) {
+		debug("Could not read entire jobcomp state file %s (%d of %d)",
+		      file, data_size, fsize);
+	}
+	return data_size;
+}
+
+/* Load jobcomp data from save state file */
+static int _load_pending_jobs(void)
+{
+	int rc = SLURM_SUCCESS;
+	char *saved_data = NULL;
+	char *state_file;
+	uint32_t data_size;
+	Buf buffer;
+	pend_jobs.nelems = 0;
+	pend_jobs.jobs = NULL;
+
+	state_file = slurm_get_state_save_location();
+	if (state_file == NULL) {
+		debug("Could not retrieve StateSaveLocation from conf");
+		return SLURM_ERROR;
+	}
+
+	if (state_file[strlen(state_file) - 1] != '/')
+		xstrcat(state_file, "/");
+	xstrcat(state_file, save_state_file);
+
+	slurm_mutex_lock(&save_lock);
+	data_size = _read_file(state_file, &saved_data);
+	if (data_size <= 0 || saved_data == NULL) {
+		slurm_mutex_unlock(&save_lock);
+		xfree(saved_data);
+		xfree(state_file);
+		return rc;
+	}
+	slurm_mutex_unlock(&save_lock);
+
+	buffer = create_buf(saved_data, data_size);
+	safe_unpackstr_array(&pend_jobs.jobs, &pend_jobs.nelems, buffer);
+	if (pend_jobs.nelems > 0) {
+		debug("Loaded jobcomp pending data about %d jobs",
+		      pend_jobs.nelems);
+	}
+	free_buf(buffer);
+	xfree(state_file);
+
+	return rc;
+
+      unpack_error:
+	error("Error unpacking file %s", state_file);
+	free_buf(buffer);
+	return SLURM_FAILURE;
+}
+
+/* Callback to handle the HTTP response */
+static size_t _write_callback(void *contents, size_t size, size_t nmemb,
+			      void *userp)
+{
+	size_t realsize = size * nmemb;
+	struct http_response *mem = (struct http_response *) userp;
+
+	mem->message = xrealloc(mem->message, mem->size + realsize + 1);
+
+	memcpy(&(mem->message[mem->size]), contents, realsize);
+	mem->size += realsize;
+	mem->message[mem->size] = 0;
+
+	return realsize;
+}
+
+/* Try to index job into elasticsearch */
+static int _index_job(const char *jobcomp)
+{
+	CURL *curl_handle = NULL;
+	CURLcode res;
+	struct http_response chunk;
+	int rc = SLURM_SUCCESS;
+	static int error_cnt = 0;
+
+	if (log_url == NULL) {
+		if (((error_cnt++) % 100) == 0) {
+	                /* Periodically log errors */
+                        error("%s: Unable to save job state for %d "
+                               "jobs, caching data",
+                               plugin_type, error_cnt);
+                }
+		debug("JobCompLoc parameter not configured");
+		return SLURM_ERROR;
+	}
+
+	if (curl_global_init(CURL_GLOBAL_ALL) != 0) {
+		debug("curl_global_init: %m");
+		rc = SLURM_ERROR;
+	} else if ((curl_handle = curl_easy_init()) == NULL) {
+		debug("curl_easy_init: %m");
+		rc = SLURM_ERROR;
+	}
+
+	if (curl_handle) {
+		char *url = xstrdup(log_url);
+		xstrcat(url, index_type);
+
+		chunk.message = xmalloc(1);
+		chunk.size = 0;
+
+		curl_easy_setopt(curl_handle, CURLOPT_URL, url);
+		curl_easy_setopt(curl_handle, CURLOPT_POST, 1);
+		curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDS, jobcomp);
+		curl_easy_setopt(curl_handle, CURLOPT_POSTFIELDSIZE,
+				 strlen(jobcomp));
+		curl_easy_setopt(curl_handle, CURLOPT_HEADER, 1);
+		curl_easy_setopt(curl_handle, CURLOPT_WRITEFUNCTION,
+				 _write_callback);
+		curl_easy_setopt(curl_handle, CURLOPT_WRITEDATA,
+				 (void *) &chunk);
+
+		res = curl_easy_perform(curl_handle);
+		if (res != CURLE_OK) {
+			debug2("Could not connect to: %s , reason: %s", url,
+			       curl_easy_strerror(res));
+			rc = SLURM_ERROR;
+		} else {
+			char *token, *response;
+			response = xstrdup(chunk.message);
+			token = strtok(chunk.message, " ");
+			if (token == NULL) {
+				debug("Could not receive the HTTP response "
+				      "status code from %s", url);
+				rc = SLURM_ERROR;
+			} else {
+				token = strtok(NULL, " ");
+				if ((xstrcmp(token, "100") == 0)) {
+					(void)  strtok(NULL, " ");
+					token = strtok(NULL, " ");
+				}
+				if ((xstrcmp(token, "200") != 0)
+				    && (xstrcmp(token, "201") != 0)) {
+					debug("HTTP status code %s received "
+					      "from %s", token, url);
+					debug("Check whether index writes and "
+					      "metadata changes are enabled on"
+					      " %s", url);
+					debug3("HTTP Response:\n%s", response);
+					rc = SLURM_ERROR;
+				} else {
+					token = strtok((char *)jobcomp, ",");
+					(void)  strtok(token, ":");
+					token = strtok(NULL, ":");
+					debug("Jobcomp data related to jobid %s"
+					      " indexed into elasticsearch",
+					      token);
+				}
+				xfree(response);
+			}
+		}
+		xfree(chunk.message);
+		curl_easy_cleanup(curl_handle);
+		xfree(url);
+	}
+	curl_global_cleanup();
+
+	if (rc == SLURM_ERROR) {
+		if (((error_cnt++) % 100) == 0) {
+			/* Periodically log errors */
+			error("%s: Unable to save job state for %d "
+			      "jobs, caching data",
+			      plugin_type, error_cnt);
+		}
+	}
+
+	return rc;
+}
+
+/* Escape characters according to RFC7159 */
+static char *_json_escape(const char *str)
+{
+	char *ret = NULL;
+	int i;
+	for (i = 0; i < strlen(str); ++i) {
+		switch (str[i]) {
+		case '\\':
+			xstrcat(ret, "\\\\");
+			break;
+		case '"':
+			xstrcat(ret, "\\\"");
+			break;
+		case '\n':
+			xstrcat(ret, "\\n");
+			break;
+		case '\b':
+			xstrcat(ret, "\\b");
+			break;
+		case '\f':
+			xstrcat(ret, "\\f");
+			break;
+		case '\r':
+			xstrcat(ret, "\\r");
+			break;
+		case '\t':
+			xstrcat(ret, "\\t");
+			break;
+		case '<':
+			xstrcat(ret, "\\u003C");
+			break;
+		default:
+			xstrcatchar(ret, str[i]);
+		}
+	}
+	return ret;
+}
+
+/* Saves the state of all jobcomp data for further indexing retries */
+static int _save_state(void)
+{
+	int fd, rc = SLURM_SUCCESS;
+	char *state_file, *new_file, *old_file;
+	static int high_buffer_size = (1024 * 1024);
+	Buf buffer = init_buf(high_buffer_size);
+
+	slurm_mutex_lock(&pend_jobs_lock);
+	packstr_array(pend_jobs.jobs, pend_jobs.nelems, buffer);
+	slurm_mutex_unlock(&pend_jobs_lock);
+
+	state_file = slurm_get_state_save_location();
+	if (state_file == NULL || strlen(state_file) == 0) {
+		debug("Could not retrieve StateSaveLocation from conf");
+		return SLURM_ERROR;
+	}
+
+	if (state_file[strlen(state_file) - 1] != '/')
+		xstrcat(state_file, "/");
+
+	xstrcat(state_file, save_state_file);
+	old_file = xstrdup(state_file);
+	new_file = xstrdup(state_file);
+	xstrcat(new_file, ".new");
+	xstrcat(old_file, ".old");
+
+	slurm_mutex_lock(&save_lock);
+	fd = open(new_file, O_CREAT | O_WRONLY | O_TRUNC, S_IRUSR | S_IWUSR);
+	if (fd < 0) {
+		debug("Can't save jobcomp state, open file %s error %m",
+		      new_file);
+		rc = SLURM_ERROR;
+	} else {
+		int pos = 0, nwrite, amount, rc2;
+		char *data;
+		fd_set_close_on_exec(fd);
+		nwrite = get_buf_offset(buffer);
+		data = (char *) get_buf_data(buffer);
+		high_buffer_size = MAX(nwrite, high_buffer_size);
+		while (nwrite > 0) {
+			amount = write(fd, &data[pos], nwrite);
+			if ((amount < 0) && (errno != EINTR)) {
+				debug("Error writing file %s, %m", new_file);
+				rc = SLURM_ERROR;
+				break;
+			}
+			nwrite -= amount;
+			pos += amount;
+		}
+		if ((rc2 = fsync_and_close(fd, save_state_file)))
+			rc = rc2;
+	}
+
+	if (rc == SLURM_ERROR)
+		(void) unlink(new_file);
+	else {
+		(void) unlink(old_file);
+		if (link(state_file, old_file)) {
+			debug("Unable to create link for %s -> %s: %m",
+			      state_file, old_file);
+			rc = SLURM_ERROR;
+		}
+		(void) unlink(state_file);
+		if (link(new_file, state_file)) {
+			debug("Unable to create link for %s -> %s: %m",
+			      new_file, state_file);
+			rc = SLURM_ERROR;
+		}
+		(void) unlink(new_file);
+	}
+
+	xfree(old_file);
+	xfree(state_file);
+	xfree(new_file);
+	slurm_mutex_unlock(&save_lock);
+
+	free_buf(buffer);
+
+	return rc;
+}
+
+/* Add jobcomp data to the pending jobs structure */
+static void _push_pending_job(char *j)
+{
+	pend_jobs.jobs = xrealloc(pend_jobs.jobs,
+				  sizeof(char *) * (pend_jobs.nelems + 1));
+	pend_jobs.jobs[pend_jobs.nelems] = xstrdup(j);
+	pend_jobs.nelems++;
+}
+
+/* Updates pending jobs structure with the jobs that
+ * failed to be indexed */
+static void _update_pending_jobs(int *m)
+{
+	int i;
+	pending_jobs_t aux;
+	aux.jobs = NULL;
+	aux.nelems = 0;
+
+	for (i = 0; i < pend_jobs.nelems; i++) {
+		if (!m[i]) {
+			aux.jobs = xrealloc(aux.jobs,
+					    sizeof(char *) * (aux.nelems + 1));
+			aux.jobs[aux.nelems] = xstrdup(pend_jobs.jobs[i]);
+			aux.nelems++;
+			xfree(pend_jobs.jobs[i]);
+		}
+	}
+
+	xfree(pend_jobs.jobs);
+	//pend_jobs.jobs = xmalloc(1);
+	//pend_jobs.jobs = xrealloc(pend_jobs.jobs, sizeof(char *) * (aux.nelems));
+	pend_jobs = aux;
+}
+
+/* Try to index all the jobcomp data for pending jobs */
+static int _index_retry(void)
+{
+	int i, rc = SLURM_SUCCESS, marks = 0;
+	int *pop_marks;
+
+	slurm_mutex_lock(&pend_jobs_lock);
+	pop_marks = xmalloc(sizeof(int) * pend_jobs.nelems);
+
+	for (i = 0; i < pend_jobs.nelems; i++) {
+		pop_marks[i] = 0;
+		if (_index_job(pend_jobs.jobs[i]) == SLURM_ERROR)
+			rc = SLURM_ERROR;
+		else {
+			marks = 1;
+			pop_marks[i] = 1;
+			xfree(pend_jobs.jobs[i]);
+		}
+	}
+
+	if (marks)
+		_update_pending_jobs(pop_marks);
+	xfree(pop_marks);
+
+	slurm_mutex_unlock(&pend_jobs_lock);
+	if (_save_state() == SLURM_ERROR)
+		rc = SLURM_ERROR;
+
+	return rc;
+}
+
+/* This is a variation of slurm_make_time_str() in src/common/parse_time.h
+ * This version uses ISO8601 format by default. */
+static void _make_time_str(time_t * time, char *string, int size)
+{
+	struct tm time_tm;
+
+	slurm_gmtime_r(time, &time_tm);
+	if (*time == (time_t) 0) {
+		snprintf(string, size, "Unknown");
+	} else {
+#if USE_ISO8601
+		/* Format YYYY-MM-DDTHH:MM:SS, ISO8601 standard format,
+		 * NOTE: This is expected to break Maui, Moab and LSF
+		 * schedulers management of SLURM. */
+		snprintf(string, size,
+			 "%4.4u-%2.2u-%2.2uT%2.2u:%2.2u:%2.2u",
+			 (time_tm.tm_year + 1900), (time_tm.tm_mon + 1),
+			 time_tm.tm_mday, time_tm.tm_hour, time_tm.tm_min,
+			 time_tm.tm_sec);
+#else
+		/* Format MM/DD-HH:MM:SS */
+		snprintf(string, size,
+			 "%2.2u/%2.2u-%2.2u:%2.2u:%2.2u",
+			 (time_tm.tm_mon + 1), time_tm.tm_mday,
+			 time_tm.tm_hour, time_tm.tm_min, time_tm.tm_sec);
+
+#endif
+	}
+}
+
+/*
+ * init() is called when the plugin is loaded, before any other functions
+ * are called. Put global initialization here.
+ */
+extern int init(void)
+{
+	int rc;
+
+	slurm_mutex_lock(&pend_jobs_lock);
+	rc = _load_pending_jobs();
+	slurm_mutex_unlock(&pend_jobs_lock);
+
+	return rc;
+}
+
+extern int fini(void)
+{
+	xfree(log_url);
+	xfree(save_state_file);
+	xfree(index_type);
+	return SLURM_SUCCESS;
+}
+
+/*
+ * The remainder of this file implements the standard SLURM job completion
+ * logging API.
+ */
+extern int slurm_jobcomp_set_location(char *location)
+{
+	int rc = SLURM_SUCCESS;
+	CURL *curl_handle;
+	CURLcode res;
+
+	if (location == NULL) {
+		debug("JobCompLoc parameter not configured");
+		return SLURM_ERROR;
+	}
+
+	log_url = xstrdup(location);
+
+	curl_global_init(CURL_GLOBAL_ALL);
+	curl_handle = curl_easy_init();
+	if (curl_handle) {
+		curl_easy_setopt(curl_handle, CURLOPT_URL, log_url);
+		curl_easy_setopt(curl_handle, CURLOPT_NOBODY, 1);
+		res = curl_easy_perform(curl_handle);
+		if (res != CURLE_OK) {
+			debug("Could not connect to: %s", log_url);
+			rc = SLURM_ERROR;
+		}
+		curl_easy_cleanup(curl_handle);
+	}
+	curl_global_cleanup();
+
+	if (rc == SLURM_SUCCESS && pend_jobs.nelems > 0) {
+		if (_index_retry() == SLURM_ERROR) {
+			debug("Could not index all jobcomp saved data");
+		}
+	}
+
+	return rc;
+}
+
+extern int slurm_jobcomp_log_record(struct job_record *job_ptr)
+{
+	int nwritten, nparents, B_SIZE = 1024, rc = SLURM_SUCCESS;
+	char usr_str[32], grp_str[32], start_str[32], end_str[32];
+	char submit_str[32], *script, *cluster = NULL, *qos, *state_string;
+	char *script_str;
+	char *parent_accounts;
+	char **acc_aux;
+	time_t elapsed_time, submit_time, eligible_time;
+	enum job_states job_state;
+	uint32_t time_limit;
+	uint16_t ntasks_per_node;
+	int i, tmp_size;
+	char *buffer, *tmp;
+
+	_get_user_name(job_ptr->user_id, usr_str, sizeof(usr_str));
+	_get_group_name(job_ptr->group_id, grp_str, sizeof(grp_str));
+
+	if ((job_ptr->time_limit == NO_VAL) && job_ptr->part_ptr)
+		time_limit = job_ptr->part_ptr->max_time;
+	else
+		time_limit = job_ptr->time_limit;
+
+	if (job_ptr->job_state & JOB_RESIZING) {
+		time_t now = time(NULL);
+		state_string = job_state_string(job_ptr->job_state);
+		if (job_ptr->resize_time) {
+			_make_time_str(&job_ptr->resize_time, start_str,
+				       sizeof(start_str));
+		} else {
+			_make_time_str(&job_ptr->start_time, start_str,
+				       sizeof(start_str));
+		}
+		_make_time_str(&now, end_str, sizeof(end_str));
+	} else {
+		/* Job state will typically have JOB_COMPLETING or JOB_RESIZING
+		 * flag set when called. We remove the flags to get the eventual
+		 * completion state: JOB_FAILED, JOB_TIMEOUT, etc. */
+		job_state = job_ptr->job_state & JOB_STATE_BASE;
+		state_string = job_state_string(job_state);
+		if (job_ptr->resize_time) {
+			_make_time_str(&job_ptr->resize_time, start_str,
+				       sizeof(start_str));
+		} else if (job_ptr->start_time > job_ptr->end_time) {
+			/* Job cancelled while pending and
+			 * expected start time is in the future. */
+			snprintf(start_str, sizeof(start_str), "Unknown");
+		} else {
+			_make_time_str(&job_ptr->start_time, start_str,
+				       sizeof(start_str));
+		}
+		_make_time_str(&job_ptr->end_time, end_str, sizeof(end_str));
+	}
+
+	elapsed_time = job_ptr->end_time - job_ptr->start_time;
+
+	buffer = xmalloc(B_SIZE);
+
+	nwritten = snprintf(buffer, B_SIZE, JOBCOMP_DATA_FORMAT,
+			    (unsigned long) job_ptr->job_id, usr_str,
+			    (unsigned long) job_ptr->user_id, grp_str,
+			    (unsigned long) job_ptr->group_id, start_str,
+			    end_str, (long) elapsed_time,
+			    job_ptr->partition, job_ptr->alloc_node,
+			    job_ptr->nodes, (unsigned long) job_ptr->total_cpus,
+			    (unsigned long) job_ptr->total_nodes,
+			    (unsigned long) job_ptr->derived_ec,
+			    (unsigned long) job_ptr->exit_code, state_string);
+
+	if (nwritten >= B_SIZE) {
+		B_SIZE += nwritten + 1;
+		buffer = xrealloc(buffer, B_SIZE);
+
+		nwritten = snprintf(buffer, B_SIZE, JOBCOMP_DATA_FORMAT,
+				    (unsigned long) job_ptr->job_id, usr_str,
+				    (unsigned long) job_ptr->user_id, grp_str,
+				    (unsigned long) job_ptr->group_id,
+				    start_str, end_str, (long) elapsed_time,
+				    job_ptr->partition, job_ptr->alloc_node,
+				    job_ptr->nodes,
+				    (unsigned long) job_ptr->total_cpus,
+				    (unsigned long) job_ptr->total_nodes,
+				    (unsigned long) job_ptr->derived_ec,
+				    (unsigned long) job_ptr->exit_code,
+				    state_string);
+
+		if (nwritten >= B_SIZE) {
+			debug("Job completion data truncated and lost");
+			rc = SLURM_ERROR;
+		}
+	}
+
+	tmp_size = 256;
+	tmp = xmalloc(tmp_size * sizeof(char));
+
+	sprintf(tmp, ",\"cpu_hours\":%.6f",
+		((float) elapsed_time * (float) job_ptr->total_cpus) /
+		(float) 3600);
+	xstrcat(buffer, tmp);
+
+	if (job_ptr->details && (job_ptr->details->submit_time != NO_VAL)) {
+		submit_time = job_ptr->details->submit_time;
+		_make_time_str(&submit_time, submit_str, sizeof(submit_str));
+		xstrfmtcat(buffer, ",\"@submit\":\"%s\"", submit_str);
+	}
+
+	if (job_ptr->details && (job_ptr->details->begin_time != NO_VAL)) {
+		eligible_time =
+		    job_ptr->start_time - job_ptr->details->begin_time;
+		xstrfmtcat(buffer, ",\"eligible_time\":%lu", eligible_time);
+	}
+
+	if (job_ptr->details
+	    && (job_ptr->details->work_dir && job_ptr->details->work_dir[0])) {
+		xstrfmtcat(buffer, ",\"work_dir\":\"%s\"",
+			   job_ptr->details->work_dir);
+	}
+
+	if (job_ptr->details
+	    && (job_ptr->details->std_err && job_ptr->details->std_err[0])) {
+		xstrfmtcat(buffer, ",\"std_err\":\"%s\"",
+			   job_ptr->details->std_err);
+	}
+
+	if (job_ptr->details
+	    && (job_ptr->details->std_in && job_ptr->details->std_in[0])) {
+		xstrfmtcat(buffer, ",\"std_in\":\"%s\"",
+			   job_ptr->details->std_in);
+	}
+
+	if (job_ptr->details
+	    && (job_ptr->details->std_out && job_ptr->details->std_out[0])) {
+		xstrfmtcat(buffer, ",\"std_out\":\"%s\"",
+			   job_ptr->details->std_out);
+	}
+
+	if (job_ptr->assoc_ptr != NULL) {
+		cluster = ((slurmdb_assoc_rec_t *) job_ptr->assoc_ptr)->cluster;
+		xstrfmtcat(buffer, ",\"cluster\":\"%s\"", cluster);
+	}
+
+	if (job_ptr->qos_ptr != NULL) {
+		slurmdb_qos_rec_t *assoc =
+		    (slurmdb_qos_rec_t *) job_ptr->qos_ptr;
+		qos = assoc->name;
+		xstrfmtcat(buffer, ",\"qos\":\"%s\"", qos);
+	}
+
+	if (job_ptr->details && (job_ptr->details->num_tasks != NO_VAL)) {
+		xstrfmtcat(buffer, ",\"ntasks\":%hu",
+			   job_ptr->details->num_tasks);
+	}
+
+	if (job_ptr->details && (job_ptr->details->ntasks_per_node != NO_VAL)) {
+		ntasks_per_node = job_ptr->details->ntasks_per_node;
+		xstrfmtcat(buffer, ",\"ntasks_per_node\":%hu", ntasks_per_node);
+	}
+
+	if (job_ptr->details && (job_ptr->details->cpus_per_task != NO_VAL)) {
+		xstrfmtcat(buffer, ",\"cpus_per_task\":%hu",
+			   job_ptr->details->cpus_per_task);
+	}
+
+	if (job_ptr->details
+	    && (job_ptr->details->orig_dependency
+		&& job_ptr->details->orig_dependency[0])) {
+		xstrfmtcat(buffer, ",\"orig_dependency\":\"%s\"",
+			   job_ptr->details->orig_dependency);
+	}
+
+	if (job_ptr->details
+	    && (job_ptr->details->exc_nodes
+		&& job_ptr->details->exc_nodes[0])) {
+		xstrfmtcat(buffer, ",\"excluded_nodes\":\"%s\"",
+			   job_ptr->details->exc_nodes);
+	}
+
+	if (time_limit != INFINITE) {
+		xstrfmtcat(buffer, ",\"time_limit\":%lu",
+			(unsigned long) time_limit * 60);
+	}
+
+	if (job_ptr->resv_name && job_ptr->resv_name[0]) {
+		xstrfmtcat(buffer, ",\"reservation_name\":\"%s\"",
+			   job_ptr->resv_name);
+	}
+
+	if (job_ptr->gres_req && job_ptr->gres_req[0]) {
+		xstrfmtcat(buffer, ",\"gres_req\":\"%s\"", job_ptr->gres_req);
+	}
+
+	if (job_ptr->gres_alloc && job_ptr->gres_alloc[0]) {
+		xstrfmtcat(buffer, ",\"gres_alloc\":\"%s\"",
+			   job_ptr->gres_alloc);
+	}
+
+	if (job_ptr->account && job_ptr->account[0]) {
+		xstrfmtcat(buffer, ",\"account\":\"%s\"", job_ptr->account);
+	}
+
+	script = get_job_script(job_ptr);
+	if (script && script[0]) {
+		script_str = _json_escape(script);
+		xstrfmtcat(buffer, ",\"script\":\"%s\"", script_str);
+		xfree(script_str);
+		xfree(script);
+	}
+
+	if (job_ptr->assoc_ptr) {
+		slurmdb_assoc_rec_t assoc_rec, *assoc_ptr;
+
+		parent_accounts = NULL;
+		acc_aux = NULL;
+		nparents = 0;
+
+		memset(&assoc_rec, 0, sizeof(slurmdb_assoc_rec_t));
+		assoc_rec.cluster = xstrdup(cluster);
+		assoc_rec.id =
+		    ((slurmdb_assoc_rec_t *) job_ptr->assoc_ptr)->parent_id;
+
+		do {
+			assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
+						accounting_enforce, &assoc_ptr,
+						false);
+			acc_aux = xrealloc(acc_aux,
+					   sizeof(char *) * (nparents + 1));
+			acc_aux[nparents] = xstrdup(assoc_ptr->acct);
+			nparents++;
+			assoc_rec.id = assoc_ptr->parent_id;
+			xfree(assoc_rec.cluster);
+			memset(&assoc_rec, 0, sizeof(slurmdb_assoc_rec_t));
+			assoc_rec.cluster = xstrdup(cluster);
+			assoc_rec.id = assoc_ptr->parent_id;
+
+		} while (xstrcmp(assoc_ptr->acct, "root") != 0);
+
+		for (i = nparents - 1; i >= 0; i--) {
+			xstrcat(parent_accounts, "/");
+			xstrcat(parent_accounts, acc_aux[i]);
+			xfree(acc_aux[i]);
+		}
+
+		xstrfmtcat(buffer, ",\"parent_accounts\":\"%s\"",
+			   parent_accounts);
+		xfree(acc_aux);
+		xfree(assoc_rec.cluster);
+		xfree(parent_accounts);
+	}
+
+	xstrcat(buffer, "}");
+
+	if (rc == SLURM_SUCCESS) {
+		if (_index_job(buffer) == SLURM_ERROR) {
+			slurm_mutex_lock(&pend_jobs_lock);
+			_push_pending_job(buffer);
+			slurm_mutex_unlock(&pend_jobs_lock);
+			rc = _save_state();
+		} else {
+			rc = _index_retry();
+		}
+	}
+
+	xfree(tmp);
+	xfree(buffer);
+
+	return rc;
+}
+
+extern int slurm_jobcomp_get_errno(void)
+{
+	return plugin_errno;
+}
+
+extern char *slurm_jobcomp_strerror(int errnum)
+{
+	char *res = _lookup_slurm_api_errtab(errnum);
+	return (res ? res : strerror(errnum));
+}
+
+/*
+ * get info from the database
+ * in/out job_list List of job_rec_t *
+ * note: the returned List must be freed by the caller when no longer needed
+ */
+extern List slurm_jobcomp_get_jobs(slurmdb_job_cond_t * job_cond)
+{
+	info("This function is not implemented.");
+	return NULL;
+}
+
+/*
+ * expire old info from the database
+ */
+extern int slurm_jobcomp_archive(slurmdb_archive_cond_t * arch_cond)
+{
+	info("This function is not implemented.");
+	return SLURM_SUCCESS;
+}
diff --git a/src/plugins/jobcomp/filetxt/Makefile.in b/src/plugins/jobcomp/filetxt/Makefile.in
index a735f3133..ef6d1924e 100644
--- a/src/plugins/jobcomp/filetxt/Makefile.in
+++ b/src/plugins/jobcomp/filetxt/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c b/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c
index 56073f341..05dee5b9c 100644
--- a/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c
+++ b/src/plugins/jobcomp/filetxt/filetxt_jobcomp_process.c
@@ -194,8 +194,7 @@ extern List filetxt_jobcomp_process_get_jobs(slurmdb_job_cond_t *job_cond)
 		lc++;
 		fptr = line;	/* break the record into NULL-
 				   terminated strings */
-		if (job_info_list)
-			list_destroy(job_info_list);
+		FREE_NULL_LIST(job_info_list);
 		jobid = 0;
 		partition = NULL;
 		job_info_list = list_create(_destroy_filetxt_jobcomp_info);
@@ -268,8 +267,7 @@ extern List filetxt_jobcomp_process_get_jobs(slurmdb_job_cond_t *job_cond)
 			list_append(job_list, job);
 	}
 
-	if (job_info_list)
-		list_destroy(job_info_list);
+	FREE_NULL_LIST(job_info_list);
 
 	if (ferror(fd)) {
 		perror(filein);
diff --git a/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c b/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c
index 0d13abdee..633478dce 100644
--- a/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c
+++ b/src/plugins/jobcomp/filetxt/jobcomp_filetxt.c
@@ -54,6 +54,7 @@
 #include "src/common/slurm_protocol_defs.h"
 #include "src/common/slurm_jobcomp.h"
 #include "src/common/parse_time.h"
+#include "src/common/slurm_time.h"
 #include "src/common/uid.h"
 #include "filetxt_jobcomp_process.h"
 
@@ -81,16 +82,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobcomp/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job completion logging API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Job completion text file logging plugin";
 const char plugin_type[]       	= "jobcomp/filetxt";
-const uint32_t plugin_version	= 100;
+const uint32_t plugin_version	= SLURM_VERSION_NUMBER;
 
 #define JOB_FORMAT "JobId=%lu UserId=%s(%lu) GroupId=%s(%lu) Name=%s JobState=%s Partition=%s "\
 		"TimeLimit=%s StartTime=%s EndTime=%s NodeList=%s NodeCnt=%u ProcCnt=%u "\
@@ -218,7 +215,7 @@ static void _make_time_str (time_t *time, char *string, int size)
 {
 	struct tm time_tm;
 
-	localtime_r(time, &time_tm);
+	slurm_localtime_r(time, &time_tm);
 	if ( *time == (time_t) 0 ) {
 		snprintf(string, size, "Unknown");
 	} else {
@@ -249,7 +246,7 @@ extern int slurm_jobcomp_log_record ( struct job_record *job_ptr )
 	char usr_str[32], grp_str[32], start_str[32], end_str[32], lim_str[32];
 	char select_buf[128], *state_string, *work_dir;
 	size_t offset = 0, tot_size, wrote;
-	enum job_states job_state;
+	uint32_t job_state;
 	uint32_t time_limit;
 
 	if ((log_name == NULL) || (job_comp_fd < 0)) {
diff --git a/src/plugins/jobcomp/mysql/Makefile.in b/src/plugins/jobcomp/mysql/Makefile.in
index f593266b6..47e1bdc74 100644
--- a/src/plugins/jobcomp/mysql/Makefile.in
+++ b/src/plugins/jobcomp/mysql/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -284,6 +287,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -333,8 +338,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -353,6 +362,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -396,6 +408,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -419,6 +432,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/jobcomp/mysql/jobcomp_mysql.c b/src/plugins/jobcomp/mysql/jobcomp_mysql.c
index 7e46e6e52..17a55f83d 100644
--- a/src/plugins/jobcomp/mysql/jobcomp_mysql.c
+++ b/src/plugins/jobcomp/mysql/jobcomp_mysql.c
@@ -68,16 +68,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobacct/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job accounting API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "Job completion MYSQL plugin";
 const char plugin_type[] = "jobcomp/mysql";
-const uint32_t plugin_version = 100;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 mysql_conn_t *jobcomp_mysql_conn = NULL;
 
@@ -89,7 +85,7 @@ storage_field_t jobcomp_table_fields[] = {
 	{ "gid", "int unsigned not null" },
 	{ "group_name", "tinytext not null" },
 	{ "name", "tinytext not null" },
-	{ "state", "smallint not null" },
+	{ "state", "int unsigned not null" },
 	{ "partition", "tinytext not null" },
 	{ "timelimit", "tinytext not null" },
 	{ "starttime", "int unsigned default 0 not null" },
@@ -129,7 +125,9 @@ static pthread_mutex_t  jobcomp_lock = PTHREAD_MUTEX_INITIALIZER;
 static int _mysql_jobcomp_check_tables()
 {
 	if (mysql_db_create_table(jobcomp_mysql_conn, jobcomp_table,
-				 jobcomp_table_fields, ")") == SLURM_ERROR)
+				  jobcomp_table_fields,
+				  ", primary key (jobid, starttime, endtime))")
+	    == SLURM_ERROR)
 		return SLURM_ERROR;
 
 	return SLURM_SUCCESS;
@@ -277,7 +275,7 @@ extern int slurm_jobcomp_log_record(struct job_record *job_ptr)
 	char *connect_type = NULL, *reboot = NULL, *rotate = NULL,
 		*geometry = NULL, *start = NULL,
 		*blockid = NULL;
-	enum job_states job_state;
+	uint32_t job_state;
 	char *query = NULL;
 	uint32_t time_limit, start_time, end_time;
 
@@ -371,7 +369,7 @@ extern int slurm_jobcomp_log_record(struct job_record *job_ptr)
 		xstrcat(query, ", start");
 	if (blockid)
 		xstrcat(query, ", blockid");
-	xstrfmtcat(query, ") values (%u, %u, '%s', %u, '%s', '%s', %d, %u, "
+	xstrfmtcat(query, ") values (%u, %u, '%s', %u, '%s', '%s', %u, %u, "
 		   "'%s', '%s', %u, %u, %u",
 		   job_ptr->job_id, job_ptr->user_id, usr_str,
 		   job_ptr->group_id, grp_str, jname,
diff --git a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c
index d7207ae94..a6a322ec8 100644
--- a/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c
+++ b/src/plugins/jobcomp/mysql/mysql_jobcomp_process.c
@@ -123,7 +123,7 @@ extern List mysql_jobcomp_process_get_jobs(slurmdb_job_cond_t *job_cond)
 	if (!(result =
 	     mysql_db_query_ret(jobcomp_mysql_conn, query, 0))) {
 		xfree(query);
-		list_destroy(job_list);
+		FREE_NULL_LIST(job_list);
 		return NULL;
 	}
 	xfree(query);
diff --git a/src/plugins/jobcomp/none/Makefile.in b/src/plugins/jobcomp/none/Makefile.in
index 31634729e..f1f8417d1 100644
--- a/src/plugins/jobcomp/none/Makefile.in
+++ b/src/plugins/jobcomp/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/jobcomp/none/jobcomp_none.c b/src/plugins/jobcomp/none/jobcomp_none.c
index 44f52a313..0f8c97802 100644
--- a/src/plugins/jobcomp/none/jobcomp_none.c
+++ b/src/plugins/jobcomp/none/jobcomp_none.c
@@ -76,16 +76,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobcomp/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job completion logging API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Job completion logging NONE plugin";
 const char plugin_type[]       	= "jobcomp/none";
-const uint32_t plugin_version	= 100;
+const uint32_t plugin_version	= SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
diff --git a/src/plugins/jobcomp/script/Makefile.in b/src/plugins/jobcomp/script/Makefile.in
index cf2dd7bcb..6e08305a2 100644
--- a/src/plugins/jobcomp/script/Makefile.in
+++ b/src/plugins/jobcomp/script/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/jobcomp/script/jobcomp_script.c b/src/plugins/jobcomp/script/jobcomp_script.c
index 6db9bbd6c..0687ed271 100644
--- a/src/plugins/jobcomp/script/jobcomp_script.c
+++ b/src/plugins/jobcomp/script/jobcomp_script.c
@@ -123,16 +123,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobcomp/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job completion logging API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Job completion logging script plugin";
 const char plugin_type[]       	= "jobcomp/script";
-const uint32_t plugin_version	= 100;
+const uint32_t plugin_version	= SLURM_VERSION_NUMBER;
 
 static char * script = NULL;
 static List comp_list = NULL;
@@ -178,6 +174,8 @@ static const char * _jobcomp_script_strerror (int errnum)
  */
 struct jobcomp_info {
 	uint32_t jobid;
+	uint32_t array_job_id;
+	uint32_t array_task_id;
 	uint32_t uid;
 	uint32_t gid;
 	uint32_t limit;
@@ -212,6 +210,8 @@ static struct jobcomp_info * _jobcomp_info_create (struct job_record *job)
 	j->uid = job->user_id;
 	j->gid = job->group_id;
 	j->name = xstrdup (job->name);
+	j->array_job_id = job->array_job_id;
+	j->array_task_id = job->array_task_id;
 
 	if (IS_JOB_RESIZING(job)) {
 		state = JOB_RESIZING;
@@ -383,6 +383,8 @@ static char ** _create_environment (struct jobcomp_info *job)
 	env[0] = NULL;
 
 	_env_append_fmt (&env, "JOBID", "%u",  job->jobid);
+	_env_append_fmt (&env, "ARRAYJOBID", "%u", job->array_job_id);
+	_env_append_fmt (&env, "ARRAYTASKID", "%u", job->array_task_id);
 	_env_append_fmt (&env, "UID",   "%u",  job->uid);
 	_env_append_fmt (&env, "GID",   "%u",  job->gid);
 	_env_append_fmt (&env, "START", "%ld", (long)job->start);
@@ -660,8 +662,7 @@ extern int fini ( void )
 	xfree(script);
 	if (rc == SLURM_SUCCESS) {
 		pthread_mutex_lock(&comp_list_mutex);
-		list_destroy(comp_list);
-		comp_list = NULL;
+		FREE_NULL_LIST(comp_list);
 		pthread_mutex_unlock(&comp_list_mutex);
 	}
 
diff --git a/src/plugins/launch/Makefile.in b/src/plugins/launch/Makefile.in
index f5c05a2ad..2130e91a1 100644
--- a/src/plugins/launch/Makefile.in
+++ b/src/plugins/launch/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -253,6 +256,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -302,8 +307,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -322,6 +331,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -365,6 +377,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -388,6 +401,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/launch/aprun/Makefile.in b/src/plugins/launch/aprun/Makefile.in
index 0c4130e85..65c510132 100644
--- a/src/plugins/launch/aprun/Makefile.in
+++ b/src/plugins/launch/aprun/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/launch/aprun/launch_aprun.c b/src/plugins/launch/aprun/launch_aprun.c
index 5c3a12b5c..4187961cc 100644
--- a/src/plugins/launch/aprun/launch_aprun.c
+++ b/src/plugins/launch/aprun/launch_aprun.c
@@ -83,15 +83,12 @@ resource_allocation_response_msg_t *global_resp = NULL;
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "launch aprun plugin";
 const char plugin_type[]        = "launch/aprun";
-const uint32_t plugin_version   = 101;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 static pid_t aprun_pid = 0;
 
@@ -319,7 +316,8 @@ static void _handle_timeout(srun_timeout_msg_t *timeout_msg)
 static void _handle_msg(slurm_msg_t *msg)
 {
 	static uint32_t slurm_uid = NO_VAL;
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 	uid_t uid = getuid();
 	job_step_kill_msg_t *ss;
 	srun_user_msg_t *um;
@@ -389,12 +387,12 @@ static void *_msg_thr_internal(void *arg)
 		if (slurm_receive_msg(newsockfd, msg, 0) != 0) {
 			error("slurm_receive_msg: %m");
 			/* close the new socket */
-			slurm_close_accepted_conn(newsockfd);
+			slurm_close(newsockfd);
 			continue;
 		}
 		_handle_msg(msg);
 		slurm_free_msg(msg);
-		slurm_close_accepted_conn(newsockfd);
+		slurm_close(newsockfd);
 	}
 	return NULL;
 }
@@ -473,16 +471,23 @@ extern int launch_p_setup_srun_opt(char **rest)
 			"%u", opt.cpus_per_task);
 	}
 
-	if (opt.shared != (uint16_t)NO_VAL) {
+	if (opt.exclusive) {
 		opt.argc += 2;
 		xrealloc(opt.argv, opt.argc * sizeof(char *));
 		opt.argv[command_pos++] = xstrdup("-F");
-		opt.argv[command_pos++] = xstrdup("share");
-	} else if (opt.exclusive) {
+		opt.argv[command_pos++] = xstrdup("exclusive");
+	} else if (opt.shared == 1) {
 		opt.argc += 2;
 		xrealloc(opt.argv, opt.argc * sizeof(char *));
 		opt.argv[command_pos++] = xstrdup("-F");
-		opt.argv[command_pos++] = xstrdup("exclusive");
+		opt.argv[command_pos++] = xstrdup("share");
+	}
+
+	if (opt.cpu_bind_type & CPU_BIND_ONE_THREAD_PER_CORE) {
+		opt.argc += 2;
+		xrealloc(opt.argv, opt.argc * sizeof(char *));
+		opt.argv[command_pos++] = xstrdup("-j");
+		opt.argv[command_pos++] = xstrdup("1");
 	}
 
 	if (opt.nodelist) {
diff --git a/src/plugins/launch/poe/Makefile.in b/src/plugins/launch/poe/Makefile.in
index d66a9d01e..39e62c084 100644
--- a/src/plugins/launch/poe/Makefile.in
+++ b/src/plugins/launch/poe/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -272,6 +275,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -321,8 +326,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -341,6 +350,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -384,6 +396,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -407,6 +420,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/launch/poe/launch_poe.c b/src/plugins/launch/poe/launch_poe.c
index f235e2923..31fe13216 100644
--- a/src/plugins/launch/poe/launch_poe.c
+++ b/src/plugins/launch/poe/launch_poe.c
@@ -76,15 +76,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "launch poe plugin";
 const char plugin_type[]        = "launch/poe";
-const uint32_t plugin_version   = 101;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 static char *cmd_fname = NULL;
 static char *poe_cmd_line = NULL;
@@ -201,7 +198,7 @@ static void _propagate_srun_opts(uint32_t nnodes, uint32_t ntasks)
 	if (opt.dependency)
 		setenv("SLURM_DEPENDENCY", opt.dependency, 1);
 	if (opt.distribution != SLURM_DIST_UNKNOWN) {
-		snprintf(value, sizeof(value), "%d", opt.distribution);
+		snprintf(value, sizeof(value), "%u", opt.distribution);
 		setenv("SLURM_DISTRIBUTION", value, 1);
 	}
 	if (opt.exc_nodes)
@@ -600,7 +597,8 @@ extern int launch_p_create_job_step(srun_job_t *job, bool use_all_cpus,
 		}
 	}
 
-	if (opt.nodelist && (opt.distribution == SLURM_DIST_ARBITRARY)) {
+	if (opt.nodelist &&
+	    ((opt.distribution & SLURM_DIST_STATE_BASE)==SLURM_DIST_ARBITRARY)) {
 		bool destroy_hostfile = 0;
 		if (!opt.hostfile) {
 			char *host_name, *host_line;
diff --git a/src/plugins/launch/runjob/Makefile.in b/src/plugins/launch/runjob/Makefile.in
index aba24f93f..fe0ee9f08 100644
--- a/src/plugins/launch/runjob/Makefile.in
+++ b/src/plugins/launch/runjob/Makefile.in
@@ -101,6 +101,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -109,10 +110,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -125,7 +128,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -295,6 +298,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -344,8 +349,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -364,6 +373,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -407,6 +419,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -430,6 +443,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/launch/runjob/launch_runjob.c b/src/plugins/launch/runjob/launch_runjob.c
index 8c73e3085..d7a4a59da 100644
--- a/src/plugins/launch/runjob/launch_runjob.c
+++ b/src/plugins/launch/runjob/launch_runjob.c
@@ -75,15 +75,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "launch runjob plugin";
 const char plugin_type[]        = "launch/runjob";
-const uint32_t plugin_version   = 101;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 static srun_job_t *local_srun_job = NULL;
 
@@ -138,7 +135,8 @@ static void
 _handle_msg(slurm_msg_t *msg)
 {
 	static uint32_t slurm_uid = NO_VAL;
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 	uid_t uid = getuid();
 	job_step_kill_msg_t *ss;
 	srun_user_msg_t *um;
@@ -208,12 +206,12 @@ static void *_msg_thr_internal(void *arg)
 		if (slurm_receive_msg(newsockfd, msg, 0) != 0) {
 			error("slurm_receive_msg: %m");
 			/* close the new socket */
-			slurm_close_accepted_conn(newsockfd);
+			slurm_close(newsockfd);
 			continue;
 		}
 		_handle_msg(msg);
 		slurm_free_msg(msg);
-		slurm_close_accepted_conn(newsockfd);
+		slurm_close(newsockfd);
 	}
 	return NULL;
 }
diff --git a/src/plugins/launch/slurm/Makefile.am b/src/plugins/launch/slurm/Makefile.am
index cd585bdc4..66711415d 100644
--- a/src/plugins/launch/slurm/Makefile.am
+++ b/src/plugins/launch/slurm/Makefile.am
@@ -8,6 +8,6 @@ AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
 
 pkglib_LTLIBRARIES = launch_slurm.la
 
-launch_slurm_la_SOURCES = launch_slurm.c task_state.c
+launch_slurm_la_SOURCES = launch_slurm.c task_state.c task_state.h
 
 launch_slurm_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
diff --git a/src/plugins/launch/slurm/Makefile.in b/src/plugins/launch/slurm/Makefile.in
index 7e9004be3..49016a092 100644
--- a/src/plugins/launch/slurm/Makefile.in
+++ b/src/plugins/launch/slurm/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -473,7 +487,7 @@ AUTOMAKE_OPTIONS = foreign
 PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
 pkglib_LTLIBRARIES = launch_slurm.la
-launch_slurm_la_SOURCES = launch_slurm.c task_state.c
+launch_slurm_la_SOURCES = launch_slurm.c task_state.c task_state.h
 launch_slurm_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
 all: all-am
 
diff --git a/src/plugins/launch/slurm/launch_slurm.c b/src/plugins/launch/slurm/launch_slurm.c
index 59abde164..dc5731be5 100644
--- a/src/plugins/launch/slurm/launch_slurm.c
+++ b/src/plugins/launch/slurm/launch_slurm.c
@@ -83,15 +83,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "launch SLURM plugin";
 const char plugin_type[]        = "launch/slurm";
-const uint32_t plugin_version   = 101;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 static srun_job_t *local_srun_job = NULL;
 static uint32_t *local_global_rc = NULL;
@@ -558,6 +555,7 @@ extern int launch_p_step_launch(
 	launch_params.cpu_bind_type = opt.cpu_bind_type;
 	launch_params.mem_bind = opt.mem_bind;
 	launch_params.mem_bind_type = opt.mem_bind_type;
+	launch_params.accel_bind_type = opt.accel_bind_type;
 	launch_params.open_mode = opt.open_mode;
 	if (opt.acctg_freq >= 0)
 		launch_params.acctg_freq = opt.acctg_freq;
@@ -566,7 +564,9 @@ extern int launch_p_step_launch(
 		launch_params.cpus_per_task	= opt.cpus_per_task;
 	else
 		launch_params.cpus_per_task	= 1;
-	launch_params.cpu_freq          = opt.cpu_freq;
+	launch_params.cpu_freq_min      = opt.cpu_freq_min;
+	launch_params.cpu_freq_max      = opt.cpu_freq_max;
+	launch_params.cpu_freq_gov      = opt.cpu_freq_gov;
 	launch_params.task_dist         = opt.distribution;
 	launch_params.ckpt_dir		= opt.ckpt_dir;
 	launch_params.restart_dir       = opt.restart_dir;
diff --git a/src/plugins/mpi/Makefile.in b/src/plugins/mpi/Makefile.in
index feafa029e..e724307a4 100644
--- a/src/plugins/mpi/Makefile.in
+++ b/src/plugins/mpi/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -252,6 +255,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -301,8 +306,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -321,6 +330,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -364,6 +376,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -387,6 +400,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/mpi/lam/Makefile.in b/src/plugins/mpi/lam/Makefile.in
index dccc6ee7b..618033f32 100644
--- a/src/plugins/mpi/lam/Makefile.in
+++ b/src/plugins/mpi/lam/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -272,6 +275,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -321,8 +326,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -341,6 +350,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -384,6 +396,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -407,6 +420,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/mpi/lam/mpi_lam.c b/src/plugins/mpi/lam/mpi_lam.c
index ff6363deb..0aa60a21d 100644
--- a/src/plugins/mpi/lam/mpi_lam.c
+++ b/src/plugins/mpi/lam/mpi_lam.c
@@ -71,15 +71,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a switch plugin if the plugin_type string has a prefix of "switch/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "mpi LAM plugin";
 const char plugin_type[]        = "mpi/lam";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 int p_mpi_hook_slurmstepd_prefork(const stepd_step_rec_t *job, char ***env)
 {
diff --git a/src/plugins/mpi/mpich1_p4/Makefile.in b/src/plugins/mpi/mpich1_p4/Makefile.in
index e1fcb627f..e3e303454 100644
--- a/src/plugins/mpi/mpich1_p4/Makefile.in
+++ b/src/plugins/mpi/mpich1_p4/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/mpi/mpich1_p4/mpich1_p4.c b/src/plugins/mpi/mpich1_p4/mpich1_p4.c
index 733dce8b3..443190797 100644
--- a/src/plugins/mpi/mpich1_p4/mpich1_p4.c
+++ b/src/plugins/mpi/mpich1_p4/mpich1_p4.c
@@ -78,15 +78,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a switch plugin if the plugin_type string has a prefix of "switch/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "mpi MPICH1_P4 plugin";
 const char plugin_type[]        = "mpi/mpich1_p4";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 /* communication for master port info */
 pthread_t p4_tid = (pthread_t) -1;
diff --git a/src/plugins/mpi/mpich1_shmem/Makefile.in b/src/plugins/mpi/mpich1_shmem/Makefile.in
index 73a1c8452..11a5848c0 100644
--- a/src/plugins/mpi/mpich1_shmem/Makefile.in
+++ b/src/plugins/mpi/mpich1_shmem/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c b/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c
index bbf6ba94f..5874b1831 100644
--- a/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c
+++ b/src/plugins/mpi/mpich1_shmem/mpich1_shmem.c
@@ -71,15 +71,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a switch plugin if the plugin_type string has a prefix of "switch/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "mpich1_shmem plugin";
 const char plugin_type[]        = "mpi/mpich1_shmem";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 int p_mpi_hook_slurmstepd_prefork(const stepd_step_rec_t *job, char ***env)
 {
diff --git a/src/plugins/mpi/mpichgm/Makefile.in b/src/plugins/mpi/mpichgm/Makefile.in
index 11da87b26..6e13afc6d 100644
--- a/src/plugins/mpi/mpichgm/Makefile.in
+++ b/src/plugins/mpi/mpichgm/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/mpi/mpichgm/mpi_mpichgm.c b/src/plugins/mpi/mpichgm/mpi_mpichgm.c
index eb87e4aba..003d6c5f1 100644
--- a/src/plugins/mpi/mpichgm/mpi_mpichgm.c
+++ b/src/plugins/mpi/mpichgm/mpi_mpichgm.c
@@ -72,15 +72,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a switch plugin if the plugin_type string has a prefix of "switch/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "mpi MPICH-GM plugin";
 const char plugin_type[]        = "mpi/mpichgm";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 int p_mpi_hook_slurmstepd_prefork(const stepd_step_rec_t *job, char ***env)
 {
diff --git a/src/plugins/mpi/mpichmx/Makefile.in b/src/plugins/mpi/mpichmx/Makefile.in
index a337cc8d3..8bf9010e8 100644
--- a/src/plugins/mpi/mpichmx/Makefile.in
+++ b/src/plugins/mpi/mpichmx/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/mpi/mpichmx/mpi_mpichmx.c b/src/plugins/mpi/mpichmx/mpi_mpichmx.c
index 33714e142..d4ff5cce8 100644
--- a/src/plugins/mpi/mpichmx/mpi_mpichmx.c
+++ b/src/plugins/mpi/mpichmx/mpi_mpichmx.c
@@ -71,15 +71,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a switch plugin if the plugin_type string has a prefix of "switch/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "mpi MPICH-MX plugin";
 const char plugin_type[]        = "mpi/mpichmx";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 int p_mpi_hook_slurmstepd_prefork(const stepd_step_rec_t *job, char ***env)
 {
diff --git a/src/plugins/mpi/mvapich/Makefile.am b/src/plugins/mpi/mvapich/Makefile.am
index 1a0ab1c84..65f2dcd9d 100644
--- a/src/plugins/mpi/mvapich/Makefile.am
+++ b/src/plugins/mpi/mvapich/Makefile.am
@@ -4,11 +4,15 @@ AUTOMAKE_OPTIONS = foreign
 
 PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 
-AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common $(MUNGE_CPPFLAGS)
 
-pkglib_LTLIBRARIES = mpi_mvapich.la
+if WITH_MUNGE
+MVAPICH = mpi_mvapich.la
+endif
 
-mpi_mvapich_la_SOURCES = mpi_mvapich.c mvapich.c mvapich.h\
+pkglib_LTLIBRARIES = $(MVAPICH)
+
+mpi_mvapich_la_SOURCES = mpi_mvapich.c mvapich.c mvapich.h \
 	$(top_srcdir)/src/common/mpi.h
 
-mpi_mvapich_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+mpi_mvapich_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) $(MUNGE_LDFLAGS) $(MUNGE_LIBS)
diff --git a/src/plugins/mpi/mvapich/Makefile.in b/src/plugins/mpi/mvapich/Makefile.in
index 569020c06..9f7efd477 100644
--- a/src/plugins/mpi/mvapich/Makefile.in
+++ b/src/plugins/mpi/mvapich/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -171,6 +174,7 @@ mpi_mvapich_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
 	$(AM_CFLAGS) $(CFLAGS) $(mpi_mvapich_la_LDFLAGS) $(LDFLAGS) -o \
 	$@
+@WITH_MUNGE_TRUE@am_mpi_mvapich_la_rpath = -rpath $(pkglibdir)
 AM_V_P = $(am__v_P_@AM_V@)
 am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
 am__v_P_0 = false
@@ -273,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -471,12 +486,13 @@ top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 PLUGIN_FLAGS = -module -avoid-version --export-dynamic
-AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
-pkglib_LTLIBRARIES = mpi_mvapich.la
-mpi_mvapich_la_SOURCES = mpi_mvapich.c mvapich.c mvapich.h\
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common $(MUNGE_CPPFLAGS)
+@WITH_MUNGE_TRUE@MVAPICH = mpi_mvapich.la
+pkglib_LTLIBRARIES = $(MVAPICH)
+mpi_mvapich_la_SOURCES = mpi_mvapich.c mvapich.c mvapich.h \
 	$(top_srcdir)/src/common/mpi.h
 
-mpi_mvapich_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+mpi_mvapich_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) $(MUNGE_LDFLAGS) $(MUNGE_LIBS)
 all: all-am
 
 .SUFFIXES:
@@ -548,7 +564,7 @@ clean-pkglibLTLIBRARIES:
 	}
 
 mpi_mvapich.la: $(mpi_mvapich_la_OBJECTS) $(mpi_mvapich_la_DEPENDENCIES) $(EXTRA_mpi_mvapich_la_DEPENDENCIES) 
-	$(AM_V_CCLD)$(mpi_mvapich_la_LINK) -rpath $(pkglibdir) $(mpi_mvapich_la_OBJECTS) $(mpi_mvapich_la_LIBADD) $(LIBS)
+	$(AM_V_CCLD)$(mpi_mvapich_la_LINK) $(am_mpi_mvapich_la_rpath) $(mpi_mvapich_la_OBJECTS) $(mpi_mvapich_la_LIBADD) $(LIBS)
 
 mostlyclean-compile:
 	-rm -f *.$(OBJEXT)
diff --git a/src/plugins/mpi/mvapich/mpi_mvapich.c b/src/plugins/mpi/mvapich/mpi_mvapich.c
index 402cf2487..d49abfe1c 100644
--- a/src/plugins/mpi/mvapich/mpi_mvapich.c
+++ b/src/plugins/mpi/mvapich/mpi_mvapich.c
@@ -72,15 +72,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a switch plugin if the plugin_type string has a prefix of "switch/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "mpi MVAPICH plugin";
 const char plugin_type[]        = "mpi/mvapich";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 int p_mpi_hook_slurmstepd_prefork(const stepd_step_rec_t *job, char ***env)
 {
diff --git a/src/plugins/mpi/mvapich/mvapich.c b/src/plugins/mpi/mvapich/mvapich.c
index 9113ecc05..586d2f751 100644
--- a/src/plugins/mpi/mvapich/mvapich.c
+++ b/src/plugins/mpi/mvapich/mvapich.c
@@ -53,6 +53,13 @@
 #include <sys/poll.h>
 #include <sys/time.h>
 
+#include <arpa/inet.h>
+#include <errno.h>
+
+/* include munge and syslog to log authentication failures */
+#include <munge.h>
+#include <syslog.h>
+
 #include "src/common/slurm_xlator.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
@@ -112,6 +119,8 @@ enum mv_init_state
 	MV_READ_ADDRS,
 	MV_READ_PIDLEN,
 	MV_READ_PID,
+	MV_READ_MUNGELEN,
+	MV_READ_MUNGE,
 	MV_INIT_DONE,
 };
 
@@ -144,6 +153,8 @@ struct mvapich_info
 	                     *  and the hostid is tacked onto the end
 	                     *  of the array (for protocol version 3)
 	                     */
+	uint32_t mungelen;  /* Length of munge packet (v9+ only) */
+	char* munge;        /* Buffer to hold munge packet */
 };
 
 /*  Globals for the mvapich thread.
@@ -254,7 +265,7 @@ static int startup_timeout (mvapich_state_t *st)
 		return (0);
 }
 
-char * vmsg (const char *msg, va_list ap)
+static char * vmsg (const char *msg, va_list ap)
 {
 	int n = -1;
 	int size = BUFSIZ;
@@ -360,12 +371,15 @@ static struct mvapich_info * mvapich_info_create (void)
 	mvi->rank = -1;
 	mvi->state = MV_READ_VERSION;
 	mvi->nread = 0;
+	mvi->mungelen = 0;
+	mvi->munge = NULL;
 
 	return (mvi);
 }
 
 static void mvapich_info_destroy (struct mvapich_info *mvi)
 {
+	/* don't free mvi->munge here because it's freed elsewhere */
 	xfree (mvi->addr);
 	xfree (mvi->pid);
 	xfree (mvi);
@@ -1224,6 +1238,7 @@ mvapich_print_abort_message (mvapich_state_t *st, int rank,
 			     int dest, char *msg, int msglen)
 {
 	slurm_step_layout_t *sl = st->job->step_layout;
+	int i;
 	char *host;
 	char *msgstr;
 	char time_stamp[256];
@@ -1241,6 +1256,16 @@ mvapich_print_abort_message (mvapich_state_t *st, int rank,
 		if (msg [msglen - 1] == '\n')
 			msg [msglen - 1] = '\0';
 
+		/*
+		 *  Replace internal newlines with periods.  We want
+		 *  the full message to be written as a single line
+		 *  to the syslog.
+		 */
+		for (i = 0; i < msglen; i++) {
+			if (msg [i] == '\n')
+				msg [i] = '.';
+		}
+
 		msgstr = msg;
 	}
 	else {
@@ -1394,8 +1419,31 @@ static void mvapich_wait_for_abort(mvapich_state_t *st)
 			dst = ranks[0];
 			src = ranks[1];
 			fd_read_n (newfd, &msglen, sizeof (int));
-			if (msglen)
+			if (msglen > 0) {
+				/*
+				 * Ensure that we don't overrun our buffer.
+				 */
+				if (msglen > sizeof(msg) - 1)
+					msglen = sizeof(msg) - 1;
+				
 				fd_read_n (newfd, msg, msglen);
+				
+				/*
+				 * Ensure that msg ends with a NULL.
+				 * Note that msglen is at most sizeof(msg)-1
+				 * due to code above.
+				 */
+				msg [ msglen ] = '\0';
+			} else {
+				/*
+				 * We read in a zero or negative message length.
+				 * Set msglen to 0 to indicate that we didn't
+				 * read any message string and ensure msg is
+				 * the empty string.
+				 */
+				msglen = 0;
+				msg [ msglen ] = '\0';
+			}
 		} else {
 			src = ranks[0];
 			dst = -1;
@@ -1475,9 +1523,11 @@ static int mvapich_read_item (struct mvapich_info *mvi, void *buf, size_t size)
 	nleft = size - mvi->nread;
 
 	if ((n = read (mvi->fd, p, nleft)) < 0) {
-		if (errno == EAGAIN)
-			return (EAGAIN);
-		else {
+		if (errno == EAGAIN) {
+			/* we return 0 on EAGAIN, outer layers will
+			 * call mvapich_read_item again */
+			return (0);
+		} else {
 			error ("mvapich: %d: nread=%d, read (%d, %zx, "
 			       "size=%zd, nleft=%zd): %m",
 			       mvi->rank, mvi->nread,
@@ -1486,6 +1536,8 @@ static int mvapich_read_item (struct mvapich_info *mvi, void *buf, size_t size)
 		}
 	}
 
+	/* add number of bytes read to our running count,
+	 * advance the state if we've read all that we should */
 	mvi->nread += n;
 	if (mvi->nread == size) {
 		mvi->nread = 0;
@@ -1495,6 +1547,254 @@ static int mvapich_read_item (struct mvapich_info *mvi, void *buf, size_t size)
 	return (0);
 }
 
+/*
+ *  Create a unique MPIRUN_ID for jobid/stepid pairs.
+ *   Combine the least significant bits of the jobid and stepid
+ *
+ *  The MPIRUN_ID is used by MVAPICH to create shmem files in /tmp,
+ *   so we have to make sure multiple jobs and job steps on the
+ *   same node have different MPIRUN_IDs.
+ */
+static int mpirun_id_create(const mpi_plugin_client_info_t *job)
+{
+	return (int) ((job->jobid << 16) | (job->stepid & 0xffff));
+}
+
+/* constructs a string for given socket which includes IP address and port
+ * of remote and local ends, returns name in newly allocated string,
+ * which looks like "IP:port --> IP:port", returns NULL on failure */
+static char * pmgr_conn_name(int fd, int local_first)
+{
+	/* variable to hold socket info */
+	struct sockaddr_in sin;
+	socklen_t len;
+
+	/* lookup info for local end */
+	memset (&sin, 0, sizeof(sin));
+	len = sizeof (sin);
+	if (getsockname (fd, (struct sockaddr *) &sin, &len) != 0) {
+		fatal ("Extracting local IP and port (getsockname() errno=%d %m)",
+			errno
+		);
+		return NULL;
+	}
+
+	/* extract local IP and port */
+	struct in_addr ip_local = sin.sin_addr;
+	unsigned short port_local = (unsigned short) ntohs (sin.sin_port);
+
+	/* lookup info for remote end */
+	memset (&sin, 0, sizeof(sin));
+	len = sizeof(sin);
+	if (getpeername (fd, (struct sockaddr *) &sin, &len) != 0) {
+		fatal ("Extracting remote IP and port (getpeername() errno=%d %m)",
+			errno
+		);
+		return NULL;
+	}
+
+	/* extract remote IP and port */
+	struct in_addr ip_remote = sin.sin_addr;
+	unsigned short port_remote = (unsigned short) ntohs (sin.sin_port);
+
+	/* convert addresses to strings in IP:port format,
+	 * we're careful to copy inet_ntoa output before calling it again */
+	char *addr_local  = xstrdup_printf ("%s:%hu", inet_ntoa(ip_local),  port_local);
+	char *addr_remote = xstrdup_printf ("%s:%hu", inet_ntoa(ip_remote), port_remote);
+
+	/* construct our connection string, list local info first,
+	 * then remote */
+	char *str;
+	if (local_first) {
+		str = xstrdup_printf ("%s --> %s", addr_local,  addr_remote);
+	} else {
+		str = xstrdup_printf ("%s --> %s", addr_remote, addr_local);
+	}
+
+	/* free local string */
+	xfree (addr_local);
+
+	/* free remote string */
+	xfree (addr_remote);
+
+	/* return connection string */
+	return str;
+}
+
+/* log an authentication failure to syslog if we receive a
+ * munge packet that doesn't check out */
+static void pmgr_munge_failure(const mvapich_state_t* st, int fd, const char *err)
+{
+	/* get our connection name (with remote IP:port first) */
+	int local_first = 0;
+	char *name = pmgr_conn_name (fd, local_first);
+
+	/* TODO: are the types correct for jobid/stepid? */
+
+	/* include the SLURM_JOBID if we have one */
+	char *msg = xstrdup_printf ("JOBID=%d STEPID=%d (remote) %s (local) ERROR: %s",
+		(int) st->job->jobid, (int) st->job->stepid, name, err
+	);
+
+	/* write message to syslog */
+	openlog ("srunmvapich", LOG_CONS | LOG_PID, LOG_USER);
+	syslog (LOG_AUTHPRIV | LOG_ERR, "%s", msg);
+	closelog ();
+
+	/* free the message */
+	xfree (msg);
+
+	/* free the connection name string */
+	xfree (name);
+
+	return;
+}
+
+/* reads munge packet from process and verifies its authentication,
+ * checks user and group ids as well as payload, where the payload
+ * contains the IP:port of the remote and local ends of the socket
+ * followed by the jobid passed to the process in MPIRUN_ID */
+static int mvapich_authenticate_munge(mvapich_state_t *st, struct mvapich_info *mvi)
+{
+	/* we encode the connection name (IP:port of both ends)
+	 * and check that so that it can only be used for this socket,
+	 * we also encode the jobid, which should be unique within the
+	 * munge credential TTL so that an app does not reuse the
+	 * same value within the time limit */
+
+	/* get file descriptor */
+	int fd = mvi->fd;
+
+	/* define maximum length of credential that we're willing
+	 * to accept, this is so we don't blow up allocating memory
+	 * if remote end sends us a really large (fake) credential
+	 * size */
+	uint32_t mungelen_max = 4096;
+
+	/* check that incoming credential is not too big */
+	if (mvi->mungelen > mungelen_max) {
+		pmgr_munge_failure (st, fd, "Remote side sent a credential size that is too large");
+		error ("Remote side sent a credential size that is too large");
+		return -1;
+	}
+
+	/* check that credential length is positive */
+	if (mvi->mungelen == 0) {
+		/* consider a zero-length credential to be a failure */
+		error ("Remote side sent a zero-length credential");
+		return -1;
+	}
+
+	/* allocate memory to hold munge packet if we haven't already */
+	if (mvi->munge == NULL) {
+		mvi->munge = (char *) xmalloc ((size_t) mvi->mungelen);
+	}
+
+	/* receive incoming credential */
+	int rc = mvapich_read_item (mvi, mvi->munge, (size_t) mvi->mungelen);
+	if (rc != 0) {
+		error ("Failed to read credential");
+		return rc;
+	}
+
+	/* when we read the whole munge packet, our state will change,
+	 * so if we're still in the READ_MUNGE state, we have more to
+	 * read */
+	if (mvi->state == MV_READ_MUNGE) {
+		return 0;
+	}
+
+	/* if we get to here, we have the whole munge packet,
+	 * assume that it authenticates successfully */
+	int failed = 0;
+
+	/* get our connection name (with remote IP:port first,
+	 * followed by local IP:port) */
+	int local_first = 0;
+	char *name = pmgr_conn_name (fd, local_first);
+
+	/* create expected payload in following format:
+	 * remote IP:port --> local IP:port :: MPIRUN_ID,
+	 * note that we need to be sure to generate the
+	 * MPIRUN_ID string the same here as we set the
+	 * variable for the process */
+	int jobid = mpirun_id_create(st->job);
+	char *payload = xstrdup_printf ("%s :: %d", name, jobid);
+
+	/* get length of payload, munge_decode tacks on trailing NUL
+	 * to payload_remote string, but it doesn't count it in the
+	 * length, so we don't count the trailing NUL here either */
+	int payload_len = (int) strlen (payload);
+
+	/* decode the munge packet and authenticate */
+	char *payload_remote = NULL;
+	int payload_len_remote;
+	uid_t uid_remote;
+	gid_t gid_remote;
+	munge_err_t err = munge_decode (mvi->munge, NULL, (void**)&payload_remote, &payload_len_remote, &uid_remote, &gid_remote);
+	if (err == EMUNGE_SUCCESS) {
+		/* we decoded successfully, check that user and group id match */
+		uid_t uid = getuid ();
+		gid_t gid = getgid ();
+		if (uid != uid_remote || gid != gid_remote) {
+			pmgr_munge_failure (st, fd, "Got credential with bad uid or gid");
+			error ("Got credential with bad uid or gid");
+			failed = 1;
+		}
+
+		/* check that received payload is the right size */
+		if (!failed && payload_len != payload_len_remote) {
+			pmgr_munge_failure (st, fd, "Got credential with bad payload length");
+			error ("Got credential with bad payload length");
+			failed = 1;
+		}
+
+		/* check that the payload is valid */
+		if (!failed && strcmp (payload, payload_remote) != 0) {
+			pmgr_munge_failure (st, fd, "Got credential with bad payload");
+			error ("Got credential with bad payload");
+			failed = 1;
+		}
+	} else {
+		/* the decode failed */
+		char *tmp = xstrdup_printf ("Failed to decode munge credential: %s", munge_strerror (err));
+		pmgr_munge_failure (st, fd, tmp);
+		xfree (tmp);
+
+		error ("Failed to decode munge credential: %s", munge_strerror (err));
+		failed = 1;
+	}
+
+	/* free the remote payload */
+	if (payload_remote != NULL) {
+		free (payload_remote);
+		payload_remote = NULL;
+	}
+
+	/* free the payload */
+	xfree (payload);
+
+	/* free the connection name string */
+	xfree (name);
+
+	/* free memory holding credential received from remote side,
+	 * we no longer need it at this point and we free it here to
+	 * avoid accumulating memory while handling all processes,
+	 * it's attached as a field of the info object (rather
+	 * than allocated as a local variable) because we may exit
+	 * this function early after having received only part of
+	 * the munge message, when more data comes in on the
+	 * socket, this function is called again until it reads the
+	 * whole message */
+	xfree (mvi->munge);
+	mvi->munge = NULL;
+	mvi->mungelen = 0;
+
+	/* return -1 if we failed to authenticate, 0 otherwise */
+	rc = failed ? -1 : 0;
+	return rc;
+}
+
 /*
  *  Process initial mvapich states to read items such as
  *   version, rank, hostidlen, hostids... and so on.
@@ -1508,9 +1808,12 @@ static int mvapich_read_item (struct mvapich_info *mvi, void *buf, size_t size)
  *
  *  State processing is considered complete when state == MV_INIT_DONE.
  *
+ *  Protocol info:
+ *  v8: version, rank
+ *  v9: version, rank, munge
  */
 static int mvapich_info_process_init (mvapich_state_t *st,
-		                              struct mvapich_info *mvi)
+					      struct mvapich_info *mvi)
 {
 	int rc = 0;
 
@@ -1540,7 +1843,13 @@ again:
 			goto again;
 		}
 
-		if (mvi->version >= 8 || mvi->state != MV_READ_HOSTIDLEN)
+		/* after the rank, protocol 9 reads a munge packet */
+		if (mvi->version == 9 && mvi->state == MV_READ_HOSTIDLEN) {
+			mvi->state = MV_READ_MUNGELEN;
+			goto again;
+		}
+
+		if (mvi->version == 8 || mvi->state != MV_READ_HOSTIDLEN)
 			break;
 
 	case MV_READ_HOSTIDLEN:
@@ -1602,6 +1911,33 @@ again:
 
 		break;
 
+	case MV_READ_MUNGELEN:
+		/* only protocols v9 and higher process a munge packet,
+		 * so if we make it to this state in another version,
+		 * we're done */
+		if (mvi->version < 9) {
+			mvi->state = MV_INIT_DONE;
+			break;
+		}
+
+		mvapich_debug2 ("rank %d: read munge packet length. version = %d",
+				mvi->rank, mvi->version);
+
+		/* read the length of the incoming munge packet */
+		rc = mvapich_read_item (mvi, &mvi->mungelen, sizeof (mvi->mungelen));
+
+		if (mvi->state != MV_READ_MUNGE)
+			break;
+
+	case MV_READ_MUNGE:
+		mvapich_debug2 ("rank %d: read munge packet. version = %d",
+				mvi->rank, mvi->version);
+
+		rc = mvapich_authenticate_munge (st, mvi);
+
+		if (mvi->state != MV_INIT_DONE)
+			break;
+
 	case MV_INIT_DONE:
 		break;
 	}
@@ -1981,7 +2317,7 @@ static void *mvapich_thr(void *arg)
 	/*
 	 *  Process subsequent phases of various protocol versions.
 	 */
-	if (st->protocol_version == 8) {
+	if (st->protocol_version >= 8) {
 		if (mvapich_processops (st) < 0)
 			mvapich_terminate_job (st, "mvapich_processops failed.");
 	}
@@ -2080,19 +2416,6 @@ static void mvapich_state_destroy(mvapich_state_t *st)
 	xfree(st);
 }
 
-/*
- *  Create a unique MPIRUN_ID for jobid/stepid pairs.
- *   Combine the least significant bits of the jobid and stepid
- *
- *  The MPIRUN_ID is used by MVAPICH to create shmem files in /tmp,
- *   so we have to make sure multiple jobs and job steps on the
- *   same node have different MPIRUN_IDs.
- */
-int mpirun_id_create(const mpi_plugin_client_info_t *job)
-{
-	return (int) ((job->jobid << 16) | (job->stepid & 0xffff));
-}
-
 /*
  * Returns the port number in host byte order.
  */
@@ -2101,10 +2424,7 @@ static short _sock_bind_wild(int sockfd)
 	socklen_t len;
 	struct sockaddr_in sin;
 
-	memset(&sin, 0, sizeof(sin));
-	sin.sin_family = AF_INET;
-	sin.sin_addr.s_addr = htonl(INADDR_ANY);
-	sin.sin_port = htons(0);    /* bind ephemeral port */
+	slurm_setup_sockaddr(&sin, 0); /* bind ephemeral port */
 
 	if (bind(sockfd, (struct sockaddr *) &sin, sizeof(sin)) < 0)
 		return (-1);
diff --git a/src/plugins/mpi/none/Makefile.in b/src/plugins/mpi/none/Makefile.in
index a033000e6..7b0e0799e 100644
--- a/src/plugins/mpi/none/Makefile.in
+++ b/src/plugins/mpi/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -272,6 +275,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -321,8 +326,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -341,6 +350,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -384,6 +396,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -407,6 +420,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/mpi/none/mpi_none.c b/src/plugins/mpi/none/mpi_none.c
index 3f67f4e09..572cf0e97 100644
--- a/src/plugins/mpi/none/mpi_none.c
+++ b/src/plugins/mpi/none/mpi_none.c
@@ -73,15 +73,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a switch plugin if the plugin_type string has a prefix of "switch/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "mpi none plugin";
 const char plugin_type[]        = "mpi/none";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 int p_mpi_hook_slurmstepd_prefork(const stepd_step_rec_t *job, char ***env)
 {
diff --git a/src/plugins/mpi/openmpi/Makefile.in b/src/plugins/mpi/openmpi/Makefile.in
index e4aa6000e..53388ae6c 100644
--- a/src/plugins/mpi/openmpi/Makefile.in
+++ b/src/plugins/mpi/openmpi/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/mpi/openmpi/mpi_openmpi.c b/src/plugins/mpi/openmpi/mpi_openmpi.c
index e14b36b1d..917a3bf16 100644
--- a/src/plugins/mpi/openmpi/mpi_openmpi.c
+++ b/src/plugins/mpi/openmpi/mpi_openmpi.c
@@ -73,15 +73,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a switch plugin if the plugin_type string has a prefix of "switch/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "OpenMPI plugin";
 const char plugin_type[]        = "mpi/openmpi";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 int p_mpi_hook_slurmstepd_prefork(const stepd_step_rec_t *job, char ***env)
 {
diff --git a/src/plugins/mpi/pmi2/Makefile.am b/src/plugins/mpi/pmi2/Makefile.am
index 1b1fbd992..a7df063a3 100644
--- a/src/plugins/mpi/pmi2/Makefile.am
+++ b/src/plugins/mpi/pmi2/Makefile.am
@@ -17,7 +17,8 @@ mpi_pmi2_la_SOURCES = mpi_pmi2.c \
 	setup.c setup.h \
 	spawn.c spawn.h \
 	tree.c tree.h \
-	nameserv.c nameserv.h
+	nameserv.c nameserv.h \
+	ring.c ring.h
 
 mpi_pmi2_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
 
diff --git a/src/plugins/mpi/pmi2/Makefile.in b/src/plugins/mpi/pmi2/Makefile.in
index b7e83902b..84d750a52 100644
--- a/src/plugins/mpi/pmi2/Makefile.in
+++ b/src/plugins/mpi/pmi2/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -162,7 +165,7 @@ am__installdirs = "$(DESTDIR)$(pkglibdir)"
 LTLIBRARIES = $(pkglib_LTLIBRARIES)
 mpi_pmi2_la_DEPENDENCIES = $(top_builddir)/src/slurmd/common/libslurmd_reverse_tree_math.la
 am_mpi_pmi2_la_OBJECTS = mpi_pmi2.lo agent.lo client.lo kvs.lo info.lo \
-	pmi1.lo pmi2.lo setup.lo spawn.lo tree.lo nameserv.lo
+	pmi1.lo pmi2.lo setup.lo spawn.lo tree.lo nameserv.lo ring.lo
 mpi_pmi2_la_OBJECTS = $(am_mpi_pmi2_la_OBJECTS)
 AM_V_lt = $(am__v_lt_@AM_V@)
 am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -482,7 +496,8 @@ mpi_pmi2_la_SOURCES = mpi_pmi2.c \
 	setup.c setup.h \
 	spawn.c spawn.h \
 	tree.c tree.h \
-	nameserv.c nameserv.h
+	nameserv.c nameserv.h \
+	ring.c ring.h
 
 mpi_pmi2_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
 mpi_pmi2_la_LIBADD = \
@@ -575,6 +590,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/nameserv.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pmi1.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/pmi2.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ring.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/setup.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/spawn.Plo@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tree.Plo@am__quote@
diff --git a/src/plugins/mpi/pmi2/agent.c b/src/plugins/mpi/pmi2/agent.c
index 02bf5c96a..1929ca9f1 100644
--- a/src/plugins/mpi/pmi2/agent.c
+++ b/src/plugins/mpi/pmi2/agent.c
@@ -301,7 +301,7 @@ _agent(void * unused)
 	eio_obj_t *tree_listen_obj, *task_obj;
 	int i;
 
-	pmi2_handle = eio_handle_create();
+	pmi2_handle = eio_handle_create(0);
 
 	//fd_set_nonblocking(tree_sock);
 	tree_listen_obj = eio_obj_create(tree_sock, &tree_listen_ops,
diff --git a/src/plugins/mpi/pmi2/kvs.c b/src/plugins/mpi/pmi2/kvs.c
index c7d4e426c..0a0950cd1 100644
--- a/src/plugins/mpi/pmi2/kvs.c
+++ b/src/plugins/mpi/pmi2/kvs.c
@@ -190,6 +190,16 @@ temp_kvs_send(void)
 {
 	int rc = SLURM_ERROR, retry = 0;
 	unsigned int delay = 1;
+	hostlist_t hl = NULL;
+	char free_hl = 0;
+
+	if (! in_stepd()) {	/* srun */
+		hl = hostlist_create(job_info.step_nodelist);
+		free_hl = 1;
+	} else if (tree_info.parent_node != NULL) {
+		hl = hostlist_create(tree_info.parent_node);
+		free_hl = 1;
+	}
 
 	/* cmd included in temp_kvs_buf */
 	kvs_seq ++; /* expecting new kvs after now */
@@ -198,13 +208,14 @@ temp_kvs_send(void)
 		if (retry == 1) {
 			verbose("failed to send temp kvs, rc=%d, retrying", rc);
 		}
+
 		if (! in_stepd()) {	/* srun */
-			rc = tree_msg_to_stepds(job_info.step_nodelist,
+			rc = tree_msg_to_stepds(hl,
 						temp_kvs_cnt,
 						temp_kvs_buf);
 		} else if (tree_info.parent_node != NULL) {
 			/* non-first-level stepds */
-			rc = tree_msg_to_stepds(tree_info.parent_node,
+			rc = tree_msg_to_stepds(hl,
 						temp_kvs_cnt,
 						temp_kvs_buf);
 		} else {		/* first level stepds */
@@ -212,6 +223,7 @@ temp_kvs_send(void)
 		}
 		if (rc == SLURM_SUCCESS)
 			break;
+
 		retry ++;
 		if (retry >= MAX_RETRIES)
 			break;
@@ -220,6 +232,9 @@ temp_kvs_send(void)
 		delay *= 2;
 	}
 	temp_kvs_init();	/* clear old temp kvs */
+	if( free_hl ){
+		hostlist_destroy(hl);
+	}
 	return rc;
 }
 
diff --git a/src/plugins/mpi/pmi2/mpi_pmi2.c b/src/plugins/mpi/pmi2/mpi_pmi2.c
index 400a32b99..25f78407b 100644
--- a/src/plugins/mpi/pmi2/mpi_pmi2.c
+++ b/src/plugins/mpi/pmi2/mpi_pmi2.c
@@ -72,15 +72,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a switch plugin if the plugin_type string has a prefix of "switch/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum versions for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "mpi PMI2 plugin";
 const char plugin_type[]        = "mpi/pmi2";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 /*
  * The following is executed in slurmstepd.
diff --git a/src/plugins/mpi/pmi2/pmi.h b/src/plugins/mpi/pmi2/pmi.h
index 3703870fc..b60592b64 100644
--- a/src/plugins/mpi/pmi2/pmi.h
+++ b/src/plugins/mpi/pmi2/pmi.h
@@ -152,6 +152,8 @@
 #define NAMELOOKUPRESP_CMD     "name-lookup-response"
 #define SPAWN_CMD              "spawn"
 #define SPAWNRESP_CMD          "spawn-response"
+#define RING_CMD               "ring"
+#define RINGRESP_CMD           "ring-response"
 
 #define GETMYKVSNAME_CMD       "get_my_kvsname"
 #define GETMYKVSNAMERESP_CMD   "my_kvsname"
@@ -208,6 +210,9 @@
 #define ERRCODES_KEY      "errcodes"
 #define SERVICE_KEY       "service"
 #define INFO_KEY          "info"
+#define RING_COUNT_KEY    "ring-count"
+#define RING_LEFT_KEY     "ring-left"
+#define RING_RIGHT_KEY    "ring-right"
 
 #define TRUE_VAL          "TRUE"
 #define FALSE_VAL         "FALSE"
@@ -232,6 +237,7 @@
 #define PMI2_PPKEY_ENV          "SLURM_PMI2_PPKEY"
 #define PMI2_PPVAL_ENV          "SLURM_PMI2_PPVAL"
 #define SLURM_STEP_RESV_PORTS   "SLURM_STEP_RESV_PORTS"
+#define PMIX_RING_TREE_WIDTH_ENV "SLURM_PMIX_RING_WIDTH"
 /* old PMIv1 envs */
 #define PMI2_PMI_DEBUGGED_ENV   "PMI_DEBUG"
 #define PMI2_KVS_NO_DUP_KEYS_ENV "SLURM_PMI_KVS_NO_DUP_KEYS"
diff --git a/src/plugins/mpi/pmi2/pmi2.c b/src/plugins/mpi/pmi2/pmi2.c
index d960853de..84234507b 100644
--- a/src/plugins/mpi/pmi2/pmi2.c
+++ b/src/plugins/mpi/pmi2/pmi2.c
@@ -60,6 +60,7 @@
 #include "setup.h"
 #include "agent.h"
 #include "nameserv.h"
+#include "ring.h"
 
 /* PMI2 command handlers */
 static int _handle_fullinit(int fd, int lrank, client_req_t *req);
@@ -68,6 +69,7 @@ static int _handle_abort(int fd, int lrank, client_req_t *req);
 static int _handle_job_getid(int fd, int lrank, client_req_t *req);
 static int _handle_job_connect(int fd, int lrank, client_req_t *req);
 static int _handle_job_disconnect(int fd, int lrank, client_req_t *req);
+static int _handle_ring(int fd, int lrank, client_req_t *req);
 static int _handle_kvs_put(int fd, int lrank, client_req_t *req);
 static int _handle_kvs_fence(int fd, int lrank, client_req_t *req);
 static int _handle_kvs_get(int fd, int lrank, client_req_t *req);
@@ -90,6 +92,7 @@ static struct {
 	{ JOBGETID_CMD,          _handle_job_getid },
 	{ JOBCONNECT_CMD,        _handle_job_connect },
 	{ JOBDISCONNECT_CMD,     _handle_job_disconnect },
+	{ RING_CMD,              _handle_ring },
 	{ KVSPUT_CMD,            _handle_kvs_put },
 	{ KVSFENCE_CMD,          _handle_kvs_fence },
 	{ KVSGET_CMD,            _handle_kvs_get },
@@ -103,7 +106,6 @@ static struct {
 	{ NULL, NULL},
 };
 
-
 static int
 _handle_fullinit(int fd, int lrank, client_req_t *req)
 {
@@ -227,6 +229,35 @@ _handle_job_disconnect(int fd, int lrank, client_req_t *req)
 	return rc;
 }
 
+static int
+_handle_ring(int fd, int lrank, client_req_t *req)
+{
+	int rc = SLURM_SUCCESS;
+        int count   = 0;
+	char *left  = NULL;
+        char *right = NULL;
+
+	debug3("mpi/pmi2: in _handle_ring");
+
+	/* extract left, right, and count values from ring payload */
+	client_req_parse_body(req);
+	client_req_get_int(req, RING_COUNT_KEY, &count);
+	client_req_get_str(req, RING_LEFT_KEY,  &left);
+	client_req_get_str(req, RING_RIGHT_KEY, &right);
+
+	/* compute ring_id, we list all application tasks first,
+         * followed by stepds, so here we just use the application
+         * process rank */
+	int ring_id = lrank;
+
+        rc = pmix_ring_in(ring_id, count, left, right);
+
+        /* the response is sent back to client from the pmix_ring_out call */
+
+	debug3("mpi/pmi2: out _handle_ring");
+	return rc;
+}
+
 static int
 _handle_kvs_put(int fd, int lrank, client_req_t *req)
 {
diff --git a/src/plugins/mpi/pmi2/ring.c b/src/plugins/mpi/pmi2/ring.c
new file mode 100644
index 000000000..7a3118cd3
--- /dev/null
+++ b/src/plugins/mpi/pmi2/ring.c
@@ -0,0 +1,586 @@
+/*****************************************************************************\
+ **  ring.c - Implements logic for PMIX_Ring
+ *****************************************************************************
+ * Copyright (c) 2015, Lawrence Livermore National Security, LLC.
+ * Produced at the Lawrence Livermore National Laboratory.
+ * Written by Adam Moody <moody20@llnl.gov>.
+ * LLNL-CODE-670614
+ * All rights reserved.
+ *
+ * This file is part of SLURM, a resource management program.
+ * For details, see <http://slurm.schedmd.com/>.
+ * Please also read the included file: DISCLAIMER.
+ * 
+ * LLNL Preamble Notice
+ *
+ * A. This notice is required to be provided under our contract with
+ * the U.S. Department of Energy (DOE). This work was produced at the
+ * Lawrence Livermore National Laboratory under Contract No.
+ * DE-AC52-07NA27344 with the DOE.
+ *
+ * B. Neither the United States Government nor Lawrence Livermore
+ * National Security, LLC nor any of their employees, makes any
+ * warranty, express or implied, or assumes any liability or
+ * responsibility for the accuracy, completeness, or usefulness of
+ * any information, apparatus, product, or process disclosed, or
+ * represents that its use would not infringe privately-owned rights.
+ *
+ * C. Also, reference herein to any specific commercial products,
+ * process, or services by trade name, trademark, manufacturer or
+ * otherwise does not necessarily constitute or imply its endorsement,
+ * recommendation, or favoring by the United States Government or
+ * Lawrence Livermore National Security, LLC. The views and opinions
+ * of authors expressed herein do not necessarily state or reflect
+ * those of the United States Government or Lawrence Livermore
+ * National Security, LLC, and shall not be used for advertising or
+ * product endorsement purposes.
+\*****************************************************************************/
+
+/*
+ * -----------------------------------------------------------
+ * PMIX_Ring - execute ring exchange over processes in group
+ *
+ * Input Parameters:
+ * + value    - input string
+ * - maxvalue - max size of input and output strings
+ *
+ *  Output Parameters:
+ *  + rank  - returns caller's rank within ring
+ *  - ranks - returns number of procs within ring
+ *  - left  - buffer to receive value provided by (rank - 1) % ranks
+ *  - right - buffer to receive value provided by (rank + 1) % ranks
+ *
+ *  Return values:
+ *  Returns 'MPI_SUCCESS' on success and an MPI error code on failure.
+ *
+ *  Notes:
+ *  This function is collective, but not necessarily synchronous,
+ *  across all processes in the process group to which the calling
+ *  process belongs.  All processes in the group must call this
+ *  function, but a process may return before all processes have called
+ *  the function.
+ *
+ * int PMIX_Ring(const char value[], int *rank, int *ranks, char left[], char right[], int maxvalue);
+ * -----------------------------------------------------------
+ *
+ * For details on why this function is useful, see:
+ *
+ *   "PMI Extensions for Scalable MPI Startup",
+ *   S. Chakraborty, H. Subramoni, J. Perkins, A. Moody,
+ *   M. Arnold, and D. K. Panda, EuroMPI/ASIA 2014
+ *
+ * Here, PMIX_Ring is implemented as scan over the stepd tree.
+ * Each application process sends a RING_IN message containing count,
+ * left, and right values to its host stepd.  For this initial message,
+ * count = 1 and left = right = input value provided by the app process.
+ * After a stepd has received messages from all local tasks and all of
+ * its stepd children (if any), it summarizes data received from all
+ * procs and sends a RING_IN message up to its parent.
+ *
+ * When the root of the tree receives RING_IN messages from all
+ * children, it computes and sends a custom RING_OUT message back to
+ * each child.
+ *
+ * Upon receiving a RING_OUT message from its parent, a stepd computes
+ * and sends a custom RING_OUT message to each of its children stepds
+ * (if any) as well as responses to each application process.
+ *
+ * Each stepd process records the message received from each child
+ * during the RING_IN phase, and it uses this data along with the
+ * RING_OUT message from its parent to compute messages to send to its
+ * children during the RING_OUT phase.
+ *
+ * With this algorithm, application processes on the same node are
+ * assigned as consecutive ranks in the ring, and all processes within
+ * a subtree are assigned as consecutive ranks within the ring.
+ *
+ * Going up the tree, the RING_IN message specifies the following:
+ *   count - sum of app processes in subtree
+ *   left  - left value from leftmost app process in subtree
+ *   right - right value from rightmost app process in subtree
+ *
+ * Coming down the tree, the RING_OUT message specifies the following:
+ *   count - rank to assign to leftmost app process in subtree
+ *   left  - left value for leftmost app process in subtree
+ *   right - right value for rightmost app process in subtree
+ */
+
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "src/common/slurm_xlator.h"
+#include "src/common/slurm_protocol_interface.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/xmalloc.h"
+
+#include "pmi.h"
+#include "client.h"
+#include "setup.h"
+#include "tree.h"
+#include "ring.h"
+
+/* max number of times to retry sending to stepd before giving up */
+#define MAX_RETRIES 5
+
+/* tracks values received from child in pmix_ring_in message */
+typedef struct {
+    int count;   /* count received from child */
+    char* left;  /* left value from child (strdup'd) */
+    char* right; /* right value from child (strdup'd) */
+} pmix_ring_msg;
+
+/* we record one pmix_ring_msg structure for each child */
+static pmix_ring_msg* pmix_ring_msgs = NULL;
+
+/* tracks number of pmix_ring_in messages we've received,
+ * we increment this count on each pmix_ring_in message,
+ * and compose a message to our parent when it reaches
+ * pmix_ring_children */
+static int pmix_ring_count = 0;
+
+/* tracks number of children we have for pmix_ring operation
+ * (sum of application children and stepd children) */
+static int pmix_ring_children = 0;
+
+/* number of application processes */
+static int pmix_app_children = 0;
+
+/* our rank within stepd tree */
+static int pmix_stepd_rank = -1;
+
+/* number of procs in stepd tree */
+static int pmix_stepd_ranks = 0;
+
+/* degree k of k-ary stepd tree */
+static int pmix_stepd_width = 16;
+
+/* number of stepd children for this proc */
+static int pmix_stepd_children = 0;
+
+/* we allocate a hostlist in init and destroy it in finalize */
+static hostlist_t pmix_stepd_hostlist = NULL;
+
+/* return rank of our parent in stepd tree,
+ * returns -1 if we're the root */
+static int pmix_stepd_rank_parent()
+{
+	int rank = -1;
+	if (pmix_stepd_rank > 0) {
+        	rank = (pmix_stepd_rank - 1) / pmix_stepd_width;
+        }
+	return rank;
+}
+
+/* given a child index from 0..(pmix_stepd_children-1)
+ * return rank of child in stepd tree */
+static int pmix_stepd_rank_child(int i)
+{
+	int rank = pmix_stepd_rank * pmix_stepd_width + (i + 1);
+	return rank;
+}
+
+/* given a global rank in stepd tree for message received
+ * from one of our stepd children, compute its corresponding
+ * ring_id, returns -1 if rank is not a child */
+int pmix_ring_id_by_rank(int rank)
+{
+	/* compute the rank of our first child */
+	int min_child = pmix_stepd_rank * pmix_stepd_width + 1;
+
+	/* compute offset from this first child */
+	int ring_id = rank - min_child;
+
+	/* check that child is within range */
+        if (rank >= min_child && ring_id < pmix_stepd_children) {
+        	/* child is in range, add in local tasks */
+        	ring_id += pmix_app_children;
+        } else {
+        	/* child is out of range */
+        	ring_id = -1;
+        }
+	return ring_id;
+}
+
+/* send message defined by buf and size to given rank stepd */
+static int pmix_stepd_send(const char* buf, uint32_t size, int rank)
+{
+	int rc = SLURM_SUCCESS;
+
+	/* map rank to host name */
+	char* host = hostlist_nth(pmix_stepd_hostlist, rank); /* strdup-ed */
+
+	/* delay to sleep between retries in seconds,
+	 * if there are multiple retries, we'll grow this delay
+          * using exponential backoff, doubling it each time */
+	unsigned int delay = 1;
+
+	/* we'll try multiple times to send message to stepd,
+	 * we retry in case stepd is just slow to get started */
+	int retries = 0;
+	while (1) {
+		/* attempt to send message */
+		rc = slurm_forward_data(host, tree_sock_addr, size, (char*) buf);
+		if (rc == SLURM_SUCCESS) {
+			/* message sent successfully, we're done */
+			break;
+		}
+
+		/* check whether we've exceeded our retry count */
+		retries++;
+		if (retries >= MAX_RETRIES) {
+			/* cancel the step to avoid tasks hanging */
+			slurm_kill_job_step(job_info.jobid, job_info.stepid,
+					    SIGKILL);
+		}
+
+		/* didn't succeed, but we'll retry again,
+		 * sleep for a bit first */
+		sleep(delay);
+		delay *= 2;
+	}
+
+	/* free host name */
+	free(host); /* strdup-ed */
+
+	return rc;
+} 
+
+/* allocate resources to track PMIX_Ring state */
+int pmix_ring_init(const pmi2_job_info_t* job, char*** env)
+{
+	int i;
+	int rc = SLURM_SUCCESS;
+
+	/* this is called by each stepd process, and each stepd has
+	 * at least one application process, so
+	 * pmix_app_children > 0 and pmix_ring_children > 0 */
+
+	/* allow user to override default tree width via variable */
+	char* p = getenvp(*env, PMIX_RING_TREE_WIDTH_ENV);
+	if (p) {
+		int width = atoi(p);
+		if (width >= 2) {
+			pmix_stepd_width = width;
+                } else {
+			info("Invalid %s value detected (%d), using (%d).",
+			     PMIX_RING_TREE_WIDTH_ENV, width, pmix_stepd_width);
+		}
+	}
+
+	/* allocate hostlist so we can map a stepd rank to a hostname */
+	pmix_stepd_hostlist = hostlist_create(job->step_nodelist);
+
+	/* record our rank in the stepd tree */
+	pmix_stepd_rank = job->nodeid;
+
+        /* record number of ranks in stepd tree */
+        pmix_stepd_ranks = job->nnodes;
+
+        /* record number of application children we serve */
+        pmix_app_children = job->ltasks;
+
+	/* compute number of stepd children */
+	int min_child = pmix_stepd_rank * pmix_stepd_width + 1;
+	int max_child = pmix_stepd_rank * pmix_stepd_width + pmix_stepd_width;
+	if (min_child >= pmix_stepd_ranks) {
+		min_child = pmix_stepd_ranks;
+	}
+	if (max_child >= pmix_stepd_ranks) {
+		max_child = pmix_stepd_ranks - 1;
+	}
+	pmix_stepd_children = max_child - min_child + 1;
+
+	/* record number of children we have (includes app procs and stepds) */
+	pmix_ring_children = pmix_app_children + pmix_stepd_children;
+
+	/* allocate a structure to record ring_in message from each child */
+	pmix_ring_msgs = (pmix_ring_msg*) xmalloc(pmix_ring_children * sizeof(pmix_ring_msg));
+
+	/* initialize messages */
+	for (i = 0; i < pmix_ring_children; i++) {
+        	pmix_ring_msgs[i].count = 0;
+        	pmix_ring_msgs[i].left  = NULL;
+        	pmix_ring_msgs[i].right = NULL;
+        }
+
+	/* initialize count */
+	pmix_ring_count = 0;
+
+	return rc;
+}
+
+/* free resources allocated to track PMIX_Ring state */
+int pmix_ring_finalize()
+{
+	int rc = SLURM_SUCCESS;
+
+	/* clear the pmix_ring_in messages for next ring operation */
+        if (pmix_ring_msgs != NULL) {
+		int i;
+		for (i = 0; i < pmix_ring_children; i++) {
+			/* free any memory allocated for each message */
+			pmix_ring_msg* msg = &pmix_ring_msgs[i];
+			msg->count = 0;
+			if (msg->left != NULL) {
+				xfree(msg->left);
+				msg->left = NULL;
+			}
+			if (msg->right != NULL) {
+				xfree(msg->right);
+				msg->right = NULL;
+			}
+		}
+
+		/* free array of messages */
+		xfree(pmix_ring_msgs);
+		pmix_ring_msgs = NULL;
+	}
+
+	/* free host list */
+	if (pmix_stepd_hostlist != NULL) {
+		hostlist_destroy(pmix_stepd_hostlist);
+        }
+
+	return rc;
+}
+
+/* ring_out messages come in from our parent,
+ * we process this and send ring_out messages to each of our children:
+ *   count - starting rank for our leftmost application process
+ *   left  - left value for leftmost application process in our subtree
+ *   right - right value for rightmost application process in our subtree */
+int pmix_ring_out(int count, char* left, char* right)
+{
+	int rc = SLURM_SUCCESS;
+
+	debug3("mpi/pmi2: in pmix_ring_out rank=%d count=%d left=%s right=%s",
+		pmix_stepd_rank, count, left, right);
+
+	/* our parent will send us a pmix_ring_out message, the count value
+	 * contained in this message will be the rank of the first process
+	 * in our subtree, the left value will be the left value for the
+	 * first process in the subtree, and the right value will be the
+	 * right value for the last process in our subtree */
+
+	/* allocate a structure to compute values to send to each child */
+	pmix_ring_msg* outmsgs = (pmix_ring_msg*) xmalloc(pmix_ring_children * sizeof(pmix_ring_msg));
+
+        /* initialize messages to all children */
+	int i;
+	for (i = 0; i < pmix_ring_children; i++) {
+		outmsgs[i].count = 0;
+		outmsgs[i].left  = NULL;
+		outmsgs[i].right = NULL;
+	}
+
+	/* iterate over all msgs and set count and left neighbor */
+	for (i = 0; i < pmix_ring_children; i++) {
+		/* store current count in output message */
+		outmsgs[i].count = count;
+
+		/* add count for this child to our running total */
+		count += pmix_ring_msgs[i].count;
+
+		/* set left value for this child */
+		outmsgs[i].left = left;
+
+		/* get right value from child, if it exists,
+		 * it will be the left neighbor of the next child,
+		 * otherwise, reuse the current left value */
+		char* next = pmix_ring_msgs[i].right;
+		if (next != NULL) {
+			left = next;
+		}
+	}
+
+	/* now set all right values (iterate backwards through children) */
+	for (i = (pmix_ring_children - 1); i >= 0; i--) {
+		/* set right value for this child */
+		outmsgs[i].right = right;
+
+		/* get left value from child, if it exists,
+		 * it will be the right neighbor of the next child,
+		 * otherwise, reuse the current right value */
+		char* next = pmix_ring_msgs[i].left;
+		if (next != NULL) {
+			right = next;
+		}
+	}
+
+	/* send messages to children in stepd tree,
+	 * we do this first to get the message down the tree quickly */
+	for (i = 0; i < pmix_stepd_children; i++) {
+		/* get pointer to message data for this child */
+		int ring_id = pmix_app_children + i;
+		pmix_ring_msg* msg = &outmsgs[ring_id];
+
+		/* TODO: do we need hton translation? */
+
+		/* construct message */
+		Buf buf = init_buf(1024);
+		pack16(TREE_CMD_RING_RESP,    buf); /* specify message type (RING_OUT) */
+		pack32((uint32_t) msg->count, buf); /* send count value */
+		packstr(msg->left,            buf); /* send left value */
+		packstr(msg->right,           buf); /* send right value */
+
+		/* get global rank of our i-th child stepd */
+		int rank = pmix_stepd_rank_child(i);
+
+		debug3("mpi/pmi2: rank=%d sending RING_OUT to rank=%d count=%d left=%s right=%s",
+			pmix_stepd_rank, rank, msg->count, msg->left, msg->right);
+
+		/* send message to child */
+		rc = pmix_stepd_send(get_buf_data(buf), (uint32_t) size_buf(buf), rank);
+
+		/* TODO: use tmp_rc here to catch any failure */
+
+		/* free message */
+		free_buf(buf);
+	}
+
+	/* now send messages to children app procs,
+	 * and set their state back to normal */
+	for (i = 0; i < pmix_app_children; i++) {
+		/* get pointer to message data for this child */
+		pmix_ring_msg* msg = &outmsgs[i];
+
+		/* TODO: want to catch send failure here? */
+
+		/* construct message and send to client */
+		client_resp_t *resp = client_resp_new();
+		client_resp_append(resp, "%s=%s;%s=%d;%s=%d;%s=%s;%s=%s;",
+			CMD_KEY, RINGRESP_CMD,
+			RC_KEY, 0,
+			RING_COUNT_KEY, msg->count,
+			RING_LEFT_KEY,  msg->left,
+			RING_RIGHT_KEY, msg->right);
+		client_resp_send(resp, STEPD_PMI_SOCK(i));
+		client_resp_free(resp);
+	}
+
+	/* delete messages, note that we don't need to free
+         * left and right strings in each message since they
+         * are pointers to strings allocated in pmix_ring_msgs */
+	xfree(outmsgs);
+
+	/* clear the pmix_ring_in messages for next ring operation */
+	for (i = 0; i < pmix_ring_children; i++) {
+		pmix_ring_msg* msg = &pmix_ring_msgs[i];
+		msg->count = 0;
+		if (msg->left != NULL) {
+			xfree(msg->left);
+			msg->left = NULL;
+		}
+		if (msg->right != NULL) {
+			xfree(msg->right);
+			msg->right = NULL;
+		}
+	}
+
+	/* reset our ring count */
+	pmix_ring_count = 0;
+
+	debug3("mpi/pmi2: out pmix_ring_out");
+	return rc;
+}
+
+/* we get a ring_in message from each child (stepd and application tasks),
+ * once we've gotten a message from each child, we send a ring_in message
+ * to our parent
+ *   ring_id - index of child (all app procs first, followed by stepds)
+ *   count   - count value from child
+ *   left    - left value from child
+ *   right   - right value from child
+ *
+ * upon receiving ring_in messages from all children, we send a ring_in
+ * message to our parent consisting of:
+ *   rank  = our rank in stepd tree (so parent knows which child msg is from)
+ *   count = sum of counts from all children
+ *   left  = left value from leftmost child
+ *   right = right value from rightmost child */
+int pmix_ring_in(int ring_id, int count, char* left, char* right)
+{
+	int i;
+	int rc = SLURM_SUCCESS;
+
+	debug3("mpi/pmi2: in pmix_ring_in rank=%d ring_id=%d count=%d left=%s right=%s",
+		pmix_stepd_rank, ring_id, count, left, right);
+
+	/* record values from child's ring_in message */
+	pmix_ring_msg* msg = &pmix_ring_msgs[ring_id];
+	msg->count = count;
+	msg->left  = xstrdup(left);
+	msg->right = xstrdup(right);
+
+	/* update our running count of received ring_in messages */
+	pmix_ring_count++;
+
+	/* if we have received a ring_in message from each app process
+         * and each stepd child, forward a ring_in message to our
+         * parent in the stepd tree */
+	if (pmix_ring_count == pmix_ring_children) {
+		/* each stepd has at least one application process
+		 * so each has at least one child */
+
+		/* lookup leftmost value from all children,
+		 * take left value from leftmost process */
+		char* leftmost = pmix_ring_msgs[0].left;
+
+		/* lookup rightmost value from all children,
+		 * take right value from rightmost process */
+		int right_id = pmix_ring_children - 1;
+		char* rightmost = pmix_ring_msgs[right_id].right;
+
+		/* total count values across all children */
+		uint32_t sum = 0;
+		for (i = 0; i < pmix_ring_children; i++) {
+			sum += (uint32_t) pmix_ring_msgs[i].count;
+		}
+
+		/* send to parent if we have one, otherwise create ring output
+		 * message and start the broadcast */
+		if (pmix_stepd_rank > 0) {
+			/* include our global rank in message so parent can
+                         * determine which child we are */
+			uint32_t my_rank = (uint32_t) pmix_stepd_rank;
+
+			/* TODO: do we need hton translation? */
+
+			/* construct message */
+			Buf buf = init_buf(1024);
+			pack16(TREE_CMD_RING, buf); /* specify message type (RING_IN) */
+			pack32(my_rank,       buf); /* send our rank */
+			pack32(sum,           buf); /* send count value */
+			packstr(leftmost,     buf); /* send left value */
+			packstr(rightmost,    buf); /* send right value */
+
+			/* get global rank of our parent stepd */
+			int rank = pmix_stepd_rank_parent();
+
+			debug3("mpi/pmi2: rank=%d sending RING_IN to rank=%d count=%d left=%s right=%s",
+				my_rank, rank, count, leftmost, rightmost);
+
+			/* send message to parent */
+                        rc = pmix_stepd_send(get_buf_data(buf), (uint32_t) size_buf(buf), rank);
+
+			/* TODO: use tmp_rc here to catch any failure */
+
+			/* free message */
+			free_buf(buf);
+		} else {
+			/* we're the root of the tree, send values back down */
+
+			/* at the top level, we wrap the ends to create a ring,
+			 * setting the rightmost process to be the left neighbor
+			 * of the leftmost process */
+
+			/* we start the top of the tree at offset 0 */
+
+			/* simulate reception of a ring output msg */
+			pmix_ring_out(0, rightmost, leftmost);
+		}
+	}
+
+	debug3("mpi/pmi2: out pmix_ring_in");
+	return rc;
+}
diff --git a/src/plugins/mpi/pmi2/ring.h b/src/plugins/mpi/pmi2/ring.h
new file mode 100644
index 000000000..945c0e00c
--- /dev/null
+++ b/src/plugins/mpi/pmi2/ring.h
@@ -0,0 +1,34 @@
+/* for pmi2_job_info_t definition */
+#include "setup.h"
+
+/* allocate resources to track PMIX_Ring state */
+int pmix_ring_init(const pmi2_job_info_t* job, char*** env);
+
+/* free resources allocated to track PMIX_Ring state */
+int pmix_ring_finalize();
+
+/* given a global rank in stepd/srun tree for message received
+ * from one of our stepd children, compute corresponding child index */
+int pmix_ring_id_by_rank(int rank);
+
+/* ring_out messages come in from our parent nodes,
+ * we process this and send ring_out messages to each of our children:
+ *   count - starting rank for our leftmost application process
+ *   left  - left value for leftmost application process in our subtree
+ *   right - right value for rightmost application process in our subtree */
+int pmix_ring_out(int count, char* left, char* right);
+
+/* we get a ring_in message from each child (stepd and application tasks),
+ * once we've gotten a message from each child, we send a ring_in message
+ * to our parent
+ *   ring_id - index of child (all app procs first, followed by stepds)
+ *   count   - count value from child
+ *   left    - left value from child
+ *   right   - right value from child
+ *
+ * upon receiving ring_in messages from all children, sends message to
+ * parent consisting of:
+ *   count = sum of counts from all children
+ *   left  = left value from leftmost child that specified a left value
+ *   right = right value from rightmost child that specified a right value */
+int pmix_ring_in(int ring_id, int count, char* left, char* right);
diff --git a/src/plugins/mpi/pmi2/setup.c b/src/plugins/mpi/pmi2/setup.c
index 68a6a8e8c..6829a404a 100644
--- a/src/plugins/mpi/pmi2/setup.c
+++ b/src/plugins/mpi/pmi2/setup.c
@@ -66,6 +66,7 @@
 #include "pmi.h"
 #include "spawn.h"
 #include "kvs.h"
+#include "ring.h"
 
 #define PMI2_SOCK_ADDR_FMT "/tmp/sock.pmi2.%u.%u"
 
@@ -370,6 +371,12 @@ pmi2_setup_stepd(const stepd_step_rec_t *job, char ***env)
 	if (rc != SLURM_SUCCESS)
 		return rc;
 
+	/* TODO: finalize pmix_ring state somewhere */
+	/* initialize pmix_ring state */
+	rc = pmix_ring_init(&job_info, env);
+	if (rc != SLURM_SUCCESS)
+		return rc;
+
 	return SLURM_SUCCESS;
 }
 
@@ -379,24 +386,21 @@ pmi2_setup_stepd(const stepd_step_rec_t *job, char ***env)
 static char *
 _get_proc_mapping(const mpi_plugin_client_info_t *job)
 {
-	uint32_t node_cnt, task_cnt, task_mapped, node_task_cnt, **tids,
-		block;
-	uint16_t task_dist, *tasks, *rounds;
+	uint32_t node_cnt, task_cnt, task_mapped, node_task_cnt, **tids;
+	uint32_t task_dist, block;
+	uint16_t *tasks, *rounds;
 	int i, start_id, end_id;
 	char *mapping = NULL;
 
 	node_cnt = job->step_layout->node_cnt;
 	task_cnt = job->step_layout->task_cnt;
-	task_dist = job->step_layout->task_dist;
+	task_dist = job->step_layout->task_dist & SLURM_DIST_STATE_BASE;
 	tasks = job->step_layout->tasks;
 	tids = job->step_layout->tids;
 
 	/* for now, PMI2 only supports vector processor mapping */
 
-	if (task_dist == SLURM_DIST_CYCLIC ||
-	    task_dist == SLURM_DIST_CYCLIC_CFULL ||
-	    task_dist == SLURM_DIST_CYCLIC_CYCLIC ||
-	    task_dist == SLURM_DIST_CYCLIC_BLOCK) {
+	if ((task_dist & SLURM_DIST_NODEMASK) == SLURM_DIST_NODECYCLIC) {
 		mapping = xstrdup("(vector");
 
 		rounds = xmalloc (node_cnt * sizeof(uint16_t));
diff --git a/src/plugins/mpi/pmi2/spawn.c b/src/plugins/mpi/pmi2/spawn.c
index 7553b02c8..ca8c83861 100644
--- a/src/plugins/mpi/pmi2/spawn.c
+++ b/src/plugins/mpi/pmi2/spawn.c
@@ -154,7 +154,7 @@ spawn_req_pack(spawn_req_t *req, Buf buf)
 	spawn_subcmd_t *subcmd;
 	void *auth_cred;
 
-	auth_cred = g_slurm_auth_create(NULL, 2, NULL);
+	auth_cred = g_slurm_auth_create(NULL, 2, slurm_get_auth_info());
 	if (auth_cred == NULL) {
 		error("authentication: %s",
 		      g_slurm_auth_errstr(g_slurm_auth_errno(NULL)) );
@@ -204,7 +204,7 @@ spawn_req_unpack(spawn_req_t **req_ptr, Buf buf)
 		      g_slurm_auth_errstr(g_slurm_auth_errno(NULL)) );
 		return SLURM_ERROR;
 	}
-	auth_uid = g_slurm_auth_get_uid(auth_cred, NULL);
+	auth_uid = g_slurm_auth_get_uid(auth_cred, slurm_get_auth_info());
 	(void) g_slurm_auth_destroy(auth_cred);
 	my_uid = getuid();
 	if ((auth_uid != 0) && (auth_uid != my_uid)) {
@@ -356,6 +356,7 @@ spawn_resp_send_to_stepd(spawn_resp_t *resp, char *node)
 	Buf buf;
 	int rc;
 	uint16_t cmd;
+	hostlist_t hl;
 
 	buf = init_buf(1024);
 
@@ -363,7 +364,11 @@ spawn_resp_send_to_stepd(spawn_resp_t *resp, char *node)
 	pack16(cmd, buf);
 	spawn_resp_pack(resp, buf);
 
-	rc = tree_msg_to_stepds(node, get_buf_offset(buf), get_buf_data(buf));
+	hl = hostlist_create(node);
+	rc = tree_msg_to_stepds(hl, 
+				get_buf_offset(buf),
+				get_buf_data(buf));
+	hostlist_destroy(hl);
 	free_buf(buf);
 	return rc;
 }
@@ -398,8 +403,8 @@ spawn_resp_send_to_fd(spawn_resp_t *resp, int fd)
 /* 	cmd = TREE_CMD_SPAWN_RESP; */
 /* 	pack16(cmd, buf); */
 	spawn_resp_pack(resp, buf);
-	rc = _slurm_msg_sendto(fd, get_buf_data(buf), get_buf_offset(buf),
-			       SLURM_PROTOCOL_NO_SEND_RECV_FLAGS);
+	rc = slurm_msg_sendto(fd, get_buf_data(buf), get_buf_offset(buf),
+			      SLURM_PROTOCOL_NO_SEND_RECV_FLAGS);
 	free_buf(buf);
 
 	return rc;
diff --git a/src/plugins/mpi/pmi2/tree.c b/src/plugins/mpi/pmi2/tree.c
index 88e41476c..ecac5dc51 100644
--- a/src/plugins/mpi/pmi2/tree.c
+++ b/src/plugins/mpi/pmi2/tree.c
@@ -57,6 +57,7 @@
 #include "setup.h"
 #include "pmi.h"
 #include "nameserv.h"
+#include "ring.h"
 
 static int _handle_kvs_fence(int fd, Buf buf);
 static int _handle_kvs_fence_resp(int fd, Buf buf);
@@ -65,6 +66,8 @@ static int _handle_spawn_resp(int fd, Buf buf);
 static int _handle_name_publish(int fd, Buf buf);
 static int _handle_name_unpublish(int fd, Buf buf);
 static int _handle_name_lookup(int fd, Buf buf);
+static int _handle_ring(int fd, Buf buf);
+static int _handle_ring_resp(int fd, Buf buf);
 
 static uint32_t  spawned_srun_ports_size = 0;
 static uint16_t *spawned_srun_ports = NULL;
@@ -78,6 +81,8 @@ static int (*tree_cmd_handlers[]) (int fd, Buf buf) = {
 	_handle_name_publish,
 	_handle_name_unpublish,
 	_handle_name_lookup,
+	_handle_ring,
+	_handle_ring_resp,
 	NULL
 };
 
@@ -89,6 +94,8 @@ static char *tree_cmd_names[] = {
 	"TREE_CMD_NAME_PUBLISH",
 	"TREE_CMD_NAME_UNPUBLISH",
 	"TREE_CMD_NAME_LOOKUP",
+	"TREE_CMD_RING",
+	"TREE_CMD_RING_RESP",
 	NULL,
 };
 
@@ -173,7 +180,8 @@ _handle_kvs_fence_resp(int fd, Buf buf)
 
 	safe_unpack32(&seq, buf);
 	if( seq == kvs_seq - 2) {
-		debug("mpi/pmi2: duplicate KVS_FENCE_RESP from srun ignored");
+		debug("mpi/pmi2: duplicate KVS_FENCE_RESP "
+		      "seq %d kvs_seq %d from srun ignored", seq, kvs_seq);
 		return rc;
 	} else if (seq != kvs_seq - 1) {
 		error("mpi/pmi2: invalid kvs seq from srun, expect %u"
@@ -419,9 +427,9 @@ out:
 	xfree(port);
 	resp_buf = init_buf(32);
 	pack32((uint32_t) rc, resp_buf);
-	rc = _slurm_msg_sendto(fd, get_buf_data(resp_buf),
-			       get_buf_offset(resp_buf),
-			       SLURM_PROTOCOL_NO_SEND_RECV_FLAGS);
+	rc = slurm_msg_sendto(fd, get_buf_data(resp_buf),
+			      get_buf_offset(resp_buf),
+			      SLURM_PROTOCOL_NO_SEND_RECV_FLAGS);
 	free_buf(resp_buf);
 
 	debug3("mpi/pmi2: out _handle_name_publish");
@@ -452,9 +460,9 @@ out:
 	xfree(name);
 	resp_buf = init_buf(32);
 	pack32((uint32_t) rc, resp_buf);
-	rc = _slurm_msg_sendto(fd, get_buf_data(resp_buf),
-			       get_buf_offset(resp_buf),
-			       SLURM_PROTOCOL_NO_SEND_RECV_FLAGS);
+	rc = slurm_msg_sendto(fd, get_buf_data(resp_buf),
+			      get_buf_offset(resp_buf),
+			      SLURM_PROTOCOL_NO_SEND_RECV_FLAGS);
 	free_buf(resp_buf);
 
 	debug3("mpi/pmi2: out _handle_name_unpublish");
@@ -484,9 +492,9 @@ _handle_name_lookup(int fd, Buf buf)
 out:
 	resp_buf = init_buf(1024);
 	packstr(port, resp_buf);
-	rc2 = _slurm_msg_sendto(fd, get_buf_data(resp_buf),
-				get_buf_offset(resp_buf),
-				SLURM_PROTOCOL_NO_SEND_RECV_FLAGS);
+	rc2 = slurm_msg_sendto(fd, get_buf_data(resp_buf),
+			       get_buf_offset(resp_buf),
+			       SLURM_PROTOCOL_NO_SEND_RECV_FLAGS);
 	rc = MAX(rc, rc2);
 	free_buf(resp_buf);
 	xfree(name);
@@ -500,6 +508,92 @@ unpack_error:
 	goto out;
 }
 
+/* handles ring_in message from one of our stepd children */
+static int
+_handle_ring(int fd, Buf buf)
+{
+	uint32_t rank, count, temp32;
+	char *left  = NULL;
+	char *right = NULL;
+	int ring_id;
+	int rc = SLURM_SUCCESS;
+
+        debug3("mpi/pmi2: in _handle_ring");
+
+	/* TODO: do we need ntoh translation? */
+
+	/* data consists of:
+         *   uint32_t rank  - tree rank of stepd process that sent message
+         *   uint32_t count - ring in count value
+         *   string   left  - ring in left value
+         *   string   right - ring in right value */
+	safe_unpack32(&rank,  buf);
+	safe_unpack32(&count, buf);
+	safe_unpackstr_xmalloc(&left,  &temp32, buf);
+	safe_unpackstr_xmalloc(&right, &temp32, buf);
+
+	/* lookup ring_id for this child */
+	ring_id = pmix_ring_id_by_rank(rank);
+
+	/* check that we got a valid child id */
+	if (ring_id == -1) {
+		error("mpi/pmi2: received ring_in message from unknown child %d", rank);
+		rc = SLURM_ERROR;
+		goto out;
+	}
+
+	/* execute ring in operation */
+	rc = pmix_ring_in(ring_id, count, left, right);
+
+out:
+	/* free strings unpacked from message */
+	xfree(left);
+	xfree(right);
+        debug3("mpi/pmi2: out _handle_ring");
+	return rc;
+
+unpack_error:
+	error("mpi/pmi2: failed to unpack ring in message");
+	rc = SLURM_ERROR;
+	goto out;
+}
+
+/* handles ring_out messages coming in from parent in stepd tree */
+static int
+_handle_ring_resp(int fd, Buf buf)
+{
+	uint32_t count, temp32;
+	char *left  = NULL;
+	char *right = NULL;
+	int rc = SLURM_SUCCESS;
+
+        debug3("mpi/pmi2: in _handle_ring_resp");
+
+	/* TODO: need ntoh translation? */
+	/* data consists of:
+         *   uint32_t count - ring out count value
+         *   string   left  - ring out left value
+         *   string   right - ring out right value */
+	safe_unpack32(&count, buf);
+	safe_unpackstr_xmalloc(&left,  &temp32, buf);
+	safe_unpackstr_xmalloc(&right, &temp32, buf);
+
+	/* execute ring out operation */
+	rc = pmix_ring_out(count, left, right);
+
+out:
+	/* free strings unpacked from message */
+	xfree(left);
+	xfree(right);
+        debug3("mpi/pmi2: out _handle_ring_resp");
+	return rc;
+
+unpack_error:
+	error("mpi/pmi2: failed to unpack ring out message");
+	rc = SLURM_ERROR;
+	goto out;
+}
+
 /**************************************************************/
 extern int
 handle_tree_cmd(int fd)
@@ -546,7 +640,7 @@ tree_msg_to_srun(uint32_t len, char *msg)
 	fd = slurm_open_stream(tree_info.srun_addr, true);
 	if (fd < 0)
 		return SLURM_ERROR;
-	rc = _slurm_msg_sendto(fd, msg, len, SLURM_PROTOCOL_NO_SEND_RECV_FLAGS);
+	rc = slurm_msg_sendto(fd, msg, len, SLURM_PROTOCOL_NO_SEND_RECV_FLAGS);
 	if (rc == len) /* all data sent */
 		rc = SLURM_SUCCESS;
 	else
@@ -567,7 +661,7 @@ tree_msg_to_srun_with_resp(uint32_t len, char *msg, Buf *resp_ptr)
 	fd = slurm_open_stream(tree_info.srun_addr, true);
 	if (fd < 0)
 		return SLURM_ERROR;
-	rc = _slurm_msg_sendto(fd, msg, len, SLURM_PROTOCOL_NO_SEND_RECV_FLAGS);
+	rc = slurm_msg_sendto(fd, msg, len, SLURM_PROTOCOL_NO_SEND_RECV_FLAGS);
 	if (rc == len) { 	/* all data sent */
 		safe_read(fd, &len, sizeof(len));
 		len = ntohl(len);
@@ -589,13 +683,43 @@ rwfail:
 }
 
 extern int
-tree_msg_to_stepds(char *nodelist, uint32_t len, char *msg)
+tree_msg_to_stepds(hostlist_t hl, uint32_t len, char *data)
 {
-	int rc;
-	rc = slurm_forward_data(nodelist,
-				tree_sock_addr,
-				len,
-				msg);
+	List ret_list = NULL;
+	int temp_rc = 0, rc = 0;
+	ret_data_info_t *ret_data_info = NULL;
+	slurm_msg_t *msg = xmalloc(sizeof(slurm_msg_t));
+	forward_data_msg_t req;
+	char *nodelist = NULL;
+
+	slurm_msg_t_init(msg);
+	req.address = tree_sock_addr;
+	req.len = len;
+	req.data = data;
+
+	msg->msg_type = REQUEST_FORWARD_DATA;
+	msg->data = &req;
+
+	nodelist = hostlist_ranged_string_xmalloc(hl);
+
+	if ((ret_list = slurm_send_recv_msgs(nodelist, msg, 0, false))) {
+		while ((ret_data_info = list_pop(ret_list))) {
+			temp_rc = slurm_get_return_code(ret_data_info->type,
+							ret_data_info->data);
+			if (temp_rc){
+				rc = temp_rc;
+			} else {
+				hostlist_delete_host(hl, 
+							ret_data_info->node_name);
+			}
+		}
+	} else {
+		error("tree_msg_to_stepds: no list was returned");
+		rc = SLURM_ERROR;
+	}
+
+	slurm_free_msg(msg);
+	xfree(nodelist);
 	return rc;
 }
 
@@ -613,8 +737,8 @@ tree_msg_to_spawned_sruns(uint32_t len, char *msg)
 		fd = slurm_open_stream(&srun_addr, true);
 		if (fd < 0)
 			return SLURM_ERROR;
-		sent = _slurm_msg_sendto(fd, msg, len,
-					 SLURM_PROTOCOL_NO_SEND_RECV_FLAGS);
+		sent = slurm_msg_sendto(fd, msg, len,
+					SLURM_PROTOCOL_NO_SEND_RECV_FLAGS);
 		if (sent != len)
 			rc = SLURM_ERROR;
 		close(fd);
diff --git a/src/plugins/mpi/pmi2/tree.h b/src/plugins/mpi/pmi2/tree.h
index e58ec8b2d..ccc254c23 100644
--- a/src/plugins/mpi/pmi2/tree.h
+++ b/src/plugins/mpi/pmi2/tree.h
@@ -50,6 +50,8 @@ enum {
 	TREE_CMD_NAME_PUBLISH,
 	TREE_CMD_NAME_UNPUBLISH,
 	TREE_CMD_NAME_LOOKUP,
+	TREE_CMD_RING,
+	TREE_CMD_RING_RESP,
 	TREE_CMD_COUNT
 };
 
@@ -57,7 +59,7 @@ enum {
 extern int handle_tree_cmd(int fd);
 extern int tree_msg_to_srun(uint32_t len, char *msg);
 extern int tree_msg_to_srun_with_resp(uint32_t len, char *msg, Buf *resp_ptr);
-extern int tree_msg_to_stepds(char *nodelist, uint32_t len, char *msg);
+extern int tree_msg_to_stepds(hostlist_t hl, uint32_t len, char *data);
 extern int tree_msg_to_spawned_sruns(uint32_t len, char *msg);
 
 
diff --git a/src/plugins/power/Makefile.am b/src/plugins/power/Makefile.am
new file mode 100644
index 000000000..ce2145e2a
--- /dev/null
+++ b/src/plugins/power/Makefile.am
@@ -0,0 +1,3 @@
+# Makefile for power management plugins
+
+SUBDIRS = common cray none
diff --git a/src/plugins/power/Makefile.in b/src/plugins/power/Makefile.in
new file mode 100644
index 000000000..fa7a17325
--- /dev/null
+++ b/src/plugins/power/Makefile.in
@@ -0,0 +1,778 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for power management plugins
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/power
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+SOURCES =
+DIST_SOURCES =
+RECURSIVE_TARGETS = all-recursive check-recursive cscopelist-recursive \
+	ctags-recursive dvi-recursive html-recursive info-recursive \
+	install-data-recursive install-dvi-recursive \
+	install-exec-recursive install-html-recursive \
+	install-info-recursive install-pdf-recursive \
+	install-ps-recursive install-recursive installcheck-recursive \
+	installdirs-recursive pdf-recursive ps-recursive \
+	tags-recursive uninstall-recursive
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+RECURSIVE_CLEAN_TARGETS = mostlyclean-recursive clean-recursive	\
+  distclean-recursive maintainer-clean-recursive
+am__recursive_targets = \
+  $(RECURSIVE_TARGETS) \
+  $(RECURSIVE_CLEAN_TARGETS) \
+  $(am__extra_recursive_targets)
+AM_RECURSIVE_TARGETS = $(am__recursive_targets:-recursive=) TAGS CTAGS \
+	distdir
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DIST_SUBDIRS = $(SUBDIRS)
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+am__relativize = \
+  dir0=`pwd`; \
+  sed_first='s,^\([^/]*\)/.*$$,\1,'; \
+  sed_rest='s,^[^/]*/*,,'; \
+  sed_last='s,^.*/\([^/]*\)$$,\1,'; \
+  sed_butlast='s,/*[^/]*$$,,'; \
+  while test -n "$$dir1"; do \
+    first=`echo "$$dir1" | sed -e "$$sed_first"`; \
+    if test "$$first" != "."; then \
+      if test "$$first" = ".."; then \
+        dir2=`echo "$$dir0" | sed -e "$$sed_last"`/"$$dir2"; \
+        dir0=`echo "$$dir0" | sed -e "$$sed_butlast"`; \
+      else \
+        first2=`echo "$$dir2" | sed -e "$$sed_first"`; \
+        if test "$$first2" = "$$first"; then \
+          dir2=`echo "$$dir2" | sed -e "$$sed_rest"`; \
+        else \
+          dir2="../$$dir2"; \
+        fi; \
+        dir0="$$dir0"/"$$first"; \
+      fi; \
+    fi; \
+    dir1=`echo "$$dir1" | sed -e "$$sed_rest"`; \
+  done; \
+  reldir="$$dir2"
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+SUBDIRS = common cray none
+all: all-recursive
+
+.SUFFIXES:
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --gnu src/plugins/power/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --gnu src/plugins/power/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+# This directory's subdirectories are mostly independent; you can cd
+# into them and run 'make' without going through this Makefile.
+# To change the values of 'make' variables: instead of editing Makefiles,
+# (1) if the variable is set in 'config.status', edit 'config.status'
+#     (which will cause the Makefiles to be regenerated when you run 'make');
+# (2) otherwise, pass the desired values on the 'make' command line.
+$(am__recursive_targets):
+	@fail=; \
+	if $(am__make_keepgoing); then \
+	  failcom='fail=yes'; \
+	else \
+	  failcom='exit 1'; \
+	fi; \
+	dot_seen=no; \
+	target=`echo $@ | sed s/-recursive//`; \
+	case "$@" in \
+	  distclean-* | maintainer-clean-*) list='$(DIST_SUBDIRS)' ;; \
+	  *) list='$(SUBDIRS)' ;; \
+	esac; \
+	for subdir in $$list; do \
+	  echo "Making $$target in $$subdir"; \
+	  if test "$$subdir" = "."; then \
+	    dot_seen=yes; \
+	    local_target="$$target-am"; \
+	  else \
+	    local_target="$$target"; \
+	  fi; \
+	  ($(am__cd) $$subdir && $(MAKE) $(AM_MAKEFLAGS) $$local_target) \
+	  || eval $$failcom; \
+	done; \
+	if test "$$dot_seen" = "no"; then \
+	  $(MAKE) $(AM_MAKEFLAGS) "$$target-am" || exit 1; \
+	fi; test -z "$$fail"
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-recursive
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	if ($(ETAGS) --etags-include --version) >/dev/null 2>&1; then \
+	  include_option=--etags-include; \
+	  empty_fix=.; \
+	else \
+	  include_option=--include; \
+	  empty_fix=; \
+	fi; \
+	list='$(SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    test ! -f $$subdir/TAGS || \
+	      set "$$@" "$$include_option=$$here/$$subdir/TAGS"; \
+	  fi; \
+	done; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-recursive
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-recursive
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+	@list='$(DIST_SUBDIRS)'; for subdir in $$list; do \
+	  if test "$$subdir" = .; then :; else \
+	    $(am__make_dryrun) \
+	      || test -d "$(distdir)/$$subdir" \
+	      || $(MKDIR_P) "$(distdir)/$$subdir" \
+	      || exit 1; \
+	    dir1=$$subdir; dir2="$(distdir)/$$subdir"; \
+	    $(am__relativize); \
+	    new_distdir=$$reldir; \
+	    dir1=$$subdir; dir2="$(top_distdir)"; \
+	    $(am__relativize); \
+	    new_top_distdir=$$reldir; \
+	    echo " (cd $$subdir && $(MAKE) $(AM_MAKEFLAGS) top_distdir="$$new_top_distdir" distdir="$$new_distdir" \\"; \
+	    echo "     am__remove_distdir=: am__skip_length_check=: am__skip_mode_fix=: distdir)"; \
+	    ($(am__cd) $$subdir && \
+	      $(MAKE) $(AM_MAKEFLAGS) \
+	        top_distdir="$$new_top_distdir" \
+	        distdir="$$new_distdir" \
+		am__remove_distdir=: \
+		am__skip_length_check=: \
+		am__skip_mode_fix=: \
+	        distdir) \
+	      || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-recursive
+all-am: Makefile
+installdirs: installdirs-recursive
+installdirs-am:
+install: install-recursive
+install-exec: install-exec-recursive
+install-data: install-data-recursive
+uninstall: uninstall-recursive
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-recursive
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-recursive
+
+clean-am: clean-generic clean-libtool mostlyclean-am
+
+distclean: distclean-recursive
+	-rm -f Makefile
+distclean-am: clean-am distclean-generic distclean-tags
+
+dvi: dvi-recursive
+
+dvi-am:
+
+html: html-recursive
+
+html-am:
+
+info: info-recursive
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-recursive
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-recursive
+
+install-html-am:
+
+install-info: install-info-recursive
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-recursive
+
+install-pdf-am:
+
+install-ps: install-ps-recursive
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-recursive
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-recursive
+
+mostlyclean-am: mostlyclean-generic mostlyclean-libtool
+
+pdf: pdf-recursive
+
+pdf-am:
+
+ps: ps-recursive
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: $(am__recursive_targets) install-am install-strip
+
+.PHONY: $(am__recursive_targets) CTAGS GTAGS TAGS all all-am check \
+	check-am clean clean-generic clean-libtool cscopelist-am ctags \
+	ctags-am distclean distclean-generic distclean-libtool \
+	distclean-tags distdir dvi dvi-am html html-am info info-am \
+	install install-am install-data install-data-am install-dvi \
+	install-dvi-am install-exec install-exec-am install-html \
+	install-html-am install-info install-info-am install-man \
+	install-pdf install-pdf-am install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	installdirs-am maintainer-clean maintainer-clean-generic \
+	mostlyclean mostlyclean-generic mostlyclean-libtool pdf pdf-am \
+	ps ps-am tags tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/power/common/Makefile.am b/src/plugins/power/common/Makefile.am
new file mode 100644
index 000000000..f482af483
--- /dev/null
+++ b/src/plugins/power/common/Makefile.am
@@ -0,0 +1,13 @@
+# Makefile.am for power/common
+
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+
+AM_CPPFLAGS = -I$(top_srcdir)
+
+# making a .la
+
+noinst_LTLIBRARIES = libpower_common.la
+libpower_common_la_SOURCES =	\
+	power_common.c		\
+	power_common.h
diff --git a/src/plugins/power/common/Makefile.in b/src/plugins/power/common/Makefile.in
new file mode 100644
index 000000000..8a8594f66
--- /dev/null
+++ b/src/plugins/power/common/Makefile.in
@@ -0,0 +1,755 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile.am for power/common
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/power/common
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+	$(top_srcdir)/auxdir/depcomp
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+LTLIBRARIES = $(noinst_LTLIBRARIES)
+libpower_common_la_LIBADD =
+am_libpower_common_la_OBJECTS = power_common.lo
+libpower_common_la_OBJECTS = $(am_libpower_common_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 = 
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo "  CC      " $@;
+am__v_CC_1 = 
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo "  CCLD    " $@;
+am__v_CCLD_1 = 
+SOURCES = $(libpower_common_la_SOURCES)
+DIST_SOURCES = $(libpower_common_la_SOURCES)
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+CLEANFILES = core.*
+AM_CPPFLAGS = -I$(top_srcdir)
+
+# making a .la
+noinst_LTLIBRARIES = libpower_common.la
+libpower_common_la_SOURCES = \
+	power_common.c		\
+	power_common.h
+
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/power/common/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/plugins/power/common/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+clean-noinstLTLIBRARIES:
+	-test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES)
+	@list='$(noinst_LTLIBRARIES)'; \
+	locs=`for p in $$list; do echo $$p; done | \
+	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+	      sort -u`; \
+	test -z "$$locs" || { \
+	  echo rm -f $${locs}; \
+	  rm -f $${locs}; \
+	}
+
+libpower_common.la: $(libpower_common_la_OBJECTS) $(libpower_common_la_DEPENDENCIES) $(EXTRA_libpower_common_la_DEPENDENCIES) 
+	$(AM_V_CCLD)$(LINK)  $(libpower_common_la_OBJECTS) $(libpower_common_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/power_common.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+	-test -z "$(CLEANFILES)" || rm -f $(CLEANFILES)
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-noinstLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am:
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am:
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-noinstLTLIBRARIES cscopelist-am ctags \
+	ctags-am distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-ps install-ps-am install-strip installcheck \
+	installcheck-am installdirs maintainer-clean \
+	maintainer-clean-generic mostlyclean mostlyclean-compile \
+	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
+	tags tags-am uninstall uninstall-am
+
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/power/common/power_common.c b/src/plugins/power/common/power_common.c
new file mode 100644
index 000000000..acdde02dd
--- /dev/null
+++ b/src/plugins/power/common/power_common.c
@@ -0,0 +1,414 @@
+/*****************************************************************************\
+ *  power_common.c - Common logic for power management
+ *
+ *  NOTE: These functions are designed so they can be used by multiple power
+ *  management plugins at the same time, so the state information is largely in
+ *  the individual plugin and passed as a pointer argument to these functions.
+ *****************************************************************************
+ *  Copyright (C) 2014-2015 SchedMD LLC.
+ *  Written by Morris Jette <jette@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#define _GNU_SOURCE	/* For POLLRDHUP */
+#include <fcntl.h>
+#include <poll.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "slurm/slurm.h"
+
+#include "src/common/list.h"
+#include "src/common/pack.h"
+#include "src/common/parse_config.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/timers.h"
+#include "src/common/uid.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/slurmctld/locks.h"
+#include "src/slurmctld/slurmctld.h"
+
+#include "power_common.h"
+
+static void _job_power_del(void *x)
+{
+	xfree(x);
+}
+
+/* For all nodes in a cluster
+ * 1) set default values and
+ * 2) return global power allocation/consumption information */
+extern void get_cluster_power(struct node_record *node_record_table_ptr,
+			      int node_record_count,
+			      uint32_t *alloc_watts, uint32_t *used_watts)
+{
+	uint64_t debug_flag = slurm_get_debug_flags();
+	int i;
+	struct node_record *node_ptr;
+
+	*alloc_watts = 0;
+	*used_watts  = 0;
+	if ((debug_flag & DEBUG_FLAG_POWER) == 0)
+		return;
+
+	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
+		if (node_ptr->power) {
+			if (!node_ptr->power->cap_watts) {	/* No limit */
+				if (!node_ptr->power->max_watts)
+					continue;	/* No node data */
+				node_ptr->power->cap_watts =
+					node_ptr->power->max_watts;
+			}
+			if (!node_ptr->power->current_watts) { /* No data yet */
+				if (node_ptr->energy &&
+				    node_ptr->energy->current_watts) {
+					node_ptr->power->current_watts +=
+						node_ptr->energy->current_watts;
+				} else {
+					node_ptr->power->current_watts =
+						node_ptr->power->cap_watts;
+				}
+			}
+			*alloc_watts += node_ptr->power->cap_watts;
+			*used_watts += node_ptr->power->current_watts;
+		}	
+	}
+}
+
+/* For each running job, return power allocation/use information in a List
+ * containing elements of type power_by_job_t.
+ * NOTE: Job data structure must be locked on function entry
+ * NOTE: Call list_delete() to free return value
+ * NOTE: This function is currently unused. */
+extern List get_job_power(List job_list,
+			  struct node_record *node_record_table_ptr)
+{
+	struct node_record *node_ptr;
+	struct job_record *job_ptr;
+	ListIterator job_iterator;
+	power_by_job_t *power_ptr;
+	char jobid_buf[64] = "";
+	int i, i_first, i_last;
+	uint64_t debug_flag = slurm_get_debug_flags();
+	List job_power_list = list_create(_job_power_del);
+	time_t now = time(NULL);
+
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		if (!IS_JOB_RUNNING(job_ptr))
+			continue;
+		power_ptr = xmalloc(sizeof(power_by_job_t));
+		power_ptr->job_id = job_ptr->job_id;
+		power_ptr->start_time = job_ptr->start_time;
+		list_append(job_power_list, power_ptr);
+		if (!job_ptr->node_bitmap) {
+			error("%s: %s node_bitmap is NULL", __func__,
+			      jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+			continue;
+		}
+		i_first = bit_ffs(job_ptr->node_bitmap);
+		if (i_first < 0)
+			continue;
+		i_last = bit_fls(job_ptr->node_bitmap);
+		for (i = i_first; i <= i_last; i++) {
+			if (!bit_test(job_ptr->node_bitmap, i))
+				continue;
+			node_ptr = node_record_table_ptr + i;
+			if (node_ptr->power) {
+				power_ptr->alloc_watts +=
+					node_ptr->power->cap_watts;
+			}
+			if (node_ptr->energy) {
+				power_ptr->used_watts +=
+					node_ptr->energy->current_watts;
+			}
+		}
+		if (debug_flag & DEBUG_FLAG_POWER) {
+			info("%s: %s Age=%ld(sec) AllocWatts=%u UsedWatts=%u",
+			     __func__,
+			     jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)),
+			     (long int) difftime(now, power_ptr->start_time),
+			     power_ptr->alloc_watts, power_ptr->used_watts);
+		}
+	}
+	list_iterator_destroy(job_iterator);
+
+	return job_power_list;
+}
+
+/* Execute a script, wait for termination and return its stdout.
+ * script_name IN - Name of program being run (used in log messages)
+ * script_path IN - Fully qualified pathname of the program to execute
+ * script_argv IN - Arguments to the script
+ * max_wait IN - Maximum time to wait in milliseconds,
+ *		 -1 for no limit (asynchronous)
+ * data_in IN - data to use as program STDIN (NULL if not STDIN)
+ * status OUT - Job exit code
+ * Return stdout+stderr of spawned program, value must be xfreed. */
+extern char *power_run_script(char *script_name, char *script_path,
+			      char **script_argv, int max_wait, char *data_in,
+			      int *status)
+{
+	int i, new_wait, resp_size = 0, resp_offset = 0;
+	int send_size = 0, send_offset = 0;
+	pid_t cpid;
+	char *resp = NULL;
+	int fd_stdout[2] = { -1, -1 };
+	int fd_stdin[2] = { -1, -1 };
+
+	if ((script_path == NULL) || (script_path[0] == '\0')) {
+		error("%s: no script specified", __func__);
+		*status = 127;
+		/* NOTE(review): "burst buffer" in these user-visible
+		 * messages looks copied from bb_common.c; should likely
+		 * say "power" - confirm before changing */
+		resp = xstrdup("Slurm burst buffer configuration error");
+		return resp;
+	}
+	/* With the POWER debug flag set, log the command line being run:
+	 * script_name followed by up to 7 arguments (script_argv[0] is
+	 * skipped in favor of script_name) */
+	if (slurm_get_debug_flags() & DEBUG_FLAG_POWER) {
+		for (i = 0; i < 10; i++) {
+			if (!script_argv[i])
+				break;
+		}
+		if (i == 0) {
+			info("%s:", __func__);
+		} else if (i == 1) {
+			info("%s: %s", __func__, script_name);
+		} else if (i == 2) {
+			info("%s: %s %s", __func__, script_name,
+			     script_argv[1]);
+		} else if (i == 3) {
+			info("%s: %s %s %s", __func__, script_name,
+			     script_argv[1], script_argv[2]);
+		} else if (i == 4) {
+			info("%s: %s %s %s %s", __func__, script_name,
+			     script_argv[1], script_argv[2], script_argv[3]);
+		} else if (i == 5) {
+			info("%s: %s %s %s %s %s", __func__, script_name,
+			     script_argv[1], script_argv[2], script_argv[3],
+			     script_argv[4]);
+		} else if (i == 6) {
+			info("%s: %s %s %s %s %s %s", __func__, script_name,
+			     script_argv[1], script_argv[2], script_argv[3],
+			     script_argv[4], script_argv[5]);
+		} else if (i == 7) {
+			info("%s: %s %s %s %s %s %s %s", __func__,
+			     script_name, script_argv[1], script_argv[2],
+			     script_argv[3], script_argv[4], script_argv[5],
+			     script_argv[6]);
+		} else {	/* 8 or more args here, truncate as needed */
+			info("%s: %s %s %s %s %s %s %s %s", __func__,
+			     script_name, script_argv[1], script_argv[2],
+			     script_argv[3], script_argv[4], script_argv[5],
+			     script_argv[6], script_argv[7]);
+		}
+	}
+	if (script_path[0] != '/') {
+		error("%s: %s is not fully qualified pathname (%s)",
+		      __func__, script_name, script_path);
+		*status = 127;
+		resp = xstrdup("Slurm burst buffer configuration error");
+		return resp;
+	}
+	if (access(script_path, R_OK | X_OK) < 0) {
+		error("%s: %s can not be executed (%s) %m",
+		      __func__, script_name, script_path);
+		*status = 127;
+		resp = xstrdup("Slurm burst buffer configuration error");
+		return resp;
+	}
+	/* stdin pipe only needed when the caller supplies input data */
+	if (data_in) {
+		if (pipe(fd_stdin) != 0) {
+			error("%s: pipe(): %m", __func__);
+			*status = 127;
+			resp = xstrdup("System error");
+			return resp;
+		}
+	}
+	/* stdout pipe only needed when running synchronously */
+	if (max_wait != -1) {
+		if (pipe(fd_stdout) != 0) {
+			error("%s: pipe(): %m", __func__);
+			*status = 127;
+			resp = xstrdup("System error");
+			return resp;
+		}
+	}
+	if ((cpid = fork()) == 0) {
+		/* Child: wire up the pipes, close all other descriptors,
+		 * then exec the script in its own process group (so the
+		 * parent's killpg() reaps the script and its children) */
+		int cc;
+
+		cc = sysconf(_SC_OPEN_MAX);
+		if (data_in)
+			dup2(fd_stdin[0], STDIN_FILENO);
+		if (max_wait != -1) {
+			dup2(fd_stdout[1], STDERR_FILENO);
+			dup2(fd_stdout[1], STDOUT_FILENO);
+			for (i = 0; i < cc; i++) {
+				if ((i != STDERR_FILENO) &&
+				    (i != STDIN_FILENO)  &&
+				    (i != STDOUT_FILENO))
+					close(i);
+			}
+		} else {
+			for (i = 0; i < cc; i++) {
+				if (!data_in || (i != STDERR_FILENO))
+					close(i);
+			}
+			/* Asynchronous mode: double-fork so the script is
+			 * reparented (the parent only waits for the short-
+			 * lived intermediate child) */
+			if ((cpid = fork()) < 0)
+				exit(127);
+			else if (cpid > 0)
+				exit(0);
+		}
+#ifdef SETPGRP_TWO_ARGS
+		setpgrp(0, 0);
+#else
+		setpgrp();
+#endif
+		execv(script_path, script_argv);
+		error("%s: execv(%s): %m", __func__, script_path);
+		exit(127);
+	} else if (cpid < 0) {
+		if (data_in) {
+			close(fd_stdin[0]);
+			close(fd_stdin[1]);
+		}
+		if (max_wait != -1) {
+			close(fd_stdout[0]);
+			close(fd_stdout[1]);
+		}
+		error("%s: fork(): %m", __func__);
+	} else if (max_wait != -1) {
+		/* Parent, synchronous mode: feed stdin (if any), then
+		 * collect stdout+stderr until EOF or timeout */
+		struct pollfd fds;
+		time_t start_time = time(NULL);
+		if (data_in) {
+			close(fd_stdin[0]);
+			send_size = strlen(data_in);
+			while (send_size > send_offset) {
+				i = write(fd_stdin[1], data_in + send_offset,
+					 send_size - send_offset);
+				if (i == 0) {
+					break;
+				} else if (i < 0) {
+					if (errno == EAGAIN)
+						continue;
+					error("%s: write(%s): %m", __func__,
+					      script_path);
+					break;
+				} else {
+					send_offset += i;
+				}
+			}
+			close(fd_stdin[1]);
+		}
+		resp_size = 1024;
+		resp = xmalloc(resp_size);
+		close(fd_stdout[1]);
+		while (1) {
+			fds.fd = fd_stdout[0];
+			fds.events = POLLIN | POLLHUP | POLLRDHUP;
+			fds.revents = 0;
+			if (max_wait <= 0) {
+				new_wait = -1;	/* poll() indefinitely */
+			} else {
+				/* Remaining time is max_wait minus elapsed
+				 * milliseconds.  (BUG FIX: the original
+				 * added the elapsed time to max_wait, so
+				 * new_wait grew forever and the timeout
+				 * could never expire.) */
+				new_wait = (start_time - time(NULL)) * 1000
+					   + max_wait;
+				if (new_wait <= 0)
+					break;
+			}
+			i = poll(&fds, 1, new_wait);
+			if (i == 0) {
+				error("%s: %s poll timeout",
+				      __func__, script_name);
+				break;
+			} else if (i < 0) {
+				error("%s: %s poll:%m", __func__, script_name);
+				break;
+			}
+			if ((fds.revents & POLLIN) == 0)
+				break;
+			i = read(fd_stdout[0], resp + resp_offset,
+				 resp_size - resp_offset);
+			if (i == 0) {
+				break;
+			} else if (i < 0) {
+				if (errno == EAGAIN)
+					continue;
+				error("%s: read(%s): %m", __func__,
+				      script_path);
+				break;
+			} else {
+				resp_offset += i;
+				/* Keep >= 1KB of headroom so the output
+				 * remains NUL-terminated (xmalloc/xrealloc
+				 * zero-fill the new space) */
+				if (resp_offset + 1024 >= resp_size) {
+					resp_size *= 2;
+					resp = xrealloc(resp, resp_size);
+				}
+			}
+		}
+		/* Kill the script's whole process group, then reap it */
+		killpg(cpid, SIGKILL);
+		waitpid(cpid, status, 0);
+		close(fd_stdout[0]);
+	} else {
+		/* Parent, asynchronous mode: reap only the intermediate
+		 * child; the double-forked script runs detached */
+		waitpid(cpid, status, 0);
+	}
+	return resp;
+}
+
+/* For a newly starting job, set "new_job_time" in each of its nodes
+ * NOTE: The job and node data structures must be locked on function entry */
+extern void set_node_new_job(struct job_record *job_ptr,
+			     struct node_record *node_record_table_ptr)
+{
+	int inx, first_inx, last_inx;
+	struct node_record *node_ptr;
+	time_t start_stamp = time(NULL);
+
+	/* A job without a node bitmap has no nodes to stamp */
+	if (!job_ptr || !job_ptr->node_bitmap) {
+		error("%s: job_ptr node_bitmap is NULL", __func__);
+		return;
+	}
+
+	first_inx = bit_ffs(job_ptr->node_bitmap);
+	if (first_inx < 0)	/* empty bitmap, nothing to do */
+		return;
+	last_inx = bit_fls(job_ptr->node_bitmap);
+	for (inx = first_inx; inx <= last_inx; inx++) {
+		if (!bit_test(job_ptr->node_bitmap, inx))
+			continue;
+		node_ptr = node_record_table_ptr + inx;
+		if (node_ptr->power)
+			node_ptr->power->new_job_time = start_stamp;
+	}
+}
diff --git a/src/plugins/power/common/power_common.h b/src/plugins/power/common/power_common.h
new file mode 100644
index 000000000..56c7a2290
--- /dev/null
+++ b/src/plugins/power/common/power_common.h
@@ -0,0 +1,94 @@
+/*****************************************************************************\
+ *  power_common.h - Common header for power management
+ *
+ *  NOTE: These functions are designed so they can be used by multiple power
+ *  management plugins at the same time, so the state information is largely in
+ *  the individual plugin and passed as a pointer argument to these functions.
+ *****************************************************************************
+ *  Copyright (C) 2014-2015 SchedMD LLC.
+ *  Written by Morris Jette <jette@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef __POWER_COMMON_H__
+#define __POWER_COMMON_H__
+
+#include "slurm/slurm.h"
+#include "src/common/list.h"
+#include "src/common/pack.h"
+#include "src/slurmctld/slurmctld.h"
+
+typedef struct power_by_job {
+	uint32_t job_id;	/* Running Job ID */
+	time_t   start_time;	/* When job allocation started */
+	uint32_t alloc_watts;	/* Currently allocated power, in watts */
+	uint32_t used_watts;	/* Recent power use rate, in watts */
+} power_by_job_t;
+
+typedef struct power_by_nodes {
+	uint32_t alloc_watts;	/* Currently allocated power, in watts */
+	bool increase_power;	/* Set if node's power allocation increasing */
+	char *nodes;		/* Node names (nid range list values on Cray) */
+} power_by_nodes_t;
+
+/* For all nodes in a cluster
+ * 1) set default values and
+ * 2) return global power allocation/consumption information */
+extern void get_cluster_power(struct node_record *node_record_table_ptr,
+			      int node_record_count,
+			      uint32_t *alloc_watts, uint32_t *used_watts);
+
+/* For each running job, return power allocation/use information in a List
+ * containing elements of type power_by_job_t.
+ * NOTE: Job data structure must be locked on function entry
+ * NOTE: Call list_delete() to free return value (NOTE(review): freeing a
+ * whole List is normally done with list_destroy() - confirm) */
+extern List get_job_power(List job_list,
+			  struct node_record *node_record_table_ptr);
+
+/* Execute a script, wait for termination and return its stdout.
+ * script_name IN - Name of program being run (used in log messages)
+ * script_path IN - Fully qualified pathname of the program to execute
+ * script_argv IN - Arguments to the script
+ * max_wait IN - Maximum time to wait in milliseconds,
+ *		 -1 for no limit (asynchronous)
+ * data_in IN - data to use as program STDIN (NULL if not STDIN)
+ * status OUT - Job exit code
+ * Return stdout+stderr of spawned program, value must be xfreed. */
+extern char *power_run_script(char *script_name, char *script_path,
+			      char **script_argv, int max_wait, char *data_in,
+			      int *status);
+
+/* For a newly starting job, set "new_job_time" in each of its nodes
+ * NOTE: The job and node data structures must be locked on function entry */
+extern void set_node_new_job(struct job_record *job_ptr,
+			     struct node_record *node_record_table_ptr);
+
+#endif	/* __POWER_COMMON_H__ */
diff --git a/src/plugins/power/cray/Makefile.am b/src/plugins/power/cray/Makefile.am
new file mode 100644
index 000000000..5a84d0dd1
--- /dev/null
+++ b/src/plugins/power/cray/Makefile.am
@@ -0,0 +1,22 @@
+# Makefile for power/cray plugin - built only when configure found a JSON
+
+AUTOMAKE_OPTIONS = foreign
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+
+if WITH_JSON_PARSER
+
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common $(JSON_CPPFLAGS)
+
+pkglib_LTLIBRARIES = power_cray.la
+power_cray_la_SOURCES = power_cray.c
+power_cray_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) $(JSON_LDFLAGS)
+power_cray_la_LIBADD = ../common/libpower_common.la
+
+force:
+$(power_cray_la_LIBADD) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
+
+else
+EXTRA_power_cray_la_SOURCES = power_cray.c
+endif
diff --git a/src/plugins/power/cray/Makefile.in b/src/plugins/power/cray/Makefile.in
new file mode 100644
index 000000000..65ba381e0
--- /dev/null
+++ b/src/plugins/power/cray/Makefile.in
@@ -0,0 +1,820 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for power/cray plugin
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/power/cray
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+	$(top_srcdir)/auxdir/depcomp
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+  test -z "$$files" \
+    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+         $(am__cd) "$$dir" && rm -f $$files; }; \
+  }
+am__installdirs = "$(DESTDIR)$(pkglibdir)"
+LTLIBRARIES = $(pkglib_LTLIBRARIES)
+@WITH_JSON_PARSER_TRUE@power_cray_la_DEPENDENCIES =  \
+@WITH_JSON_PARSER_TRUE@	../common/libpower_common.la
+am__power_cray_la_SOURCES_DIST = power_cray.c
+@WITH_JSON_PARSER_TRUE@am_power_cray_la_OBJECTS = power_cray.lo
+am__EXTRA_power_cray_la_SOURCES_DIST = power_cray.c
+power_cray_la_OBJECTS = $(am_power_cray_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 = 
+power_cray_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(power_cray_la_LDFLAGS) $(LDFLAGS) -o $@
+@WITH_JSON_PARSER_TRUE@am_power_cray_la_rpath = -rpath $(pkglibdir)
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo "  CC      " $@;
+am__v_CC_1 = 
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo "  CCLD    " $@;
+am__v_CCLD_1 = 
+SOURCES = $(power_cray_la_SOURCES) $(EXTRA_power_cray_la_SOURCES)
+DIST_SOURCES = $(am__power_cray_la_SOURCES_DIST) \
+	$(am__EXTRA_power_cray_la_SOURCES_DIST)
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+@WITH_JSON_PARSER_TRUE@AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common $(JSON_CPPFLAGS)
+@WITH_JSON_PARSER_TRUE@pkglib_LTLIBRARIES = power_cray.la
+@WITH_JSON_PARSER_TRUE@power_cray_la_SOURCES = power_cray.c
+@WITH_JSON_PARSER_TRUE@power_cray_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS) $(JSON_LDFLAGS)
+@WITH_JSON_PARSER_TRUE@power_cray_la_LIBADD = ../common/libpower_common.la
+@WITH_JSON_PARSER_FALSE@EXTRA_power_cray_la_SOURCES = power_cray.c
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/power/cray/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/plugins/power/cray/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	list2=; for p in $$list; do \
+	  if test -f $$p; then \
+	    list2="$$list2 $$p"; \
+	  else :; fi; \
+	done; \
+	test -z "$$list2" || { \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(pkglibdir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" || exit 1; \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \
+	}
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	for p in $$list; do \
+	  $(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; \
+	locs=`for p in $$list; do echo $$p; done | \
+	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+	      sort -u`; \
+	test -z "$$locs" || { \
+	  echo rm -f $${locs}; \
+	  rm -f $${locs}; \
+	}
+
+power_cray.la: $(power_cray_la_OBJECTS) $(power_cray_la_DEPENDENCIES) $(EXTRA_power_cray_la_DEPENDENCIES) 
+	$(AM_V_CCLD)$(power_cray_la_LINK) $(am_power_cray_la_rpath) $(power_cray_la_OBJECTS) $(power_cray_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/power_cray.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+	for dir in "$(DESTDIR)$(pkglibdir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-pkglibLTLIBRARIES
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pkglibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-pkglibLTLIBRARIES cscopelist-am ctags \
+	ctags-am distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-pkglibLTLIBRARIES install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \
+	uninstall-pkglibLTLIBRARIES
+
+
+@WITH_JSON_PARSER_TRUE@force:
+@WITH_JSON_PARSER_TRUE@$(power_cray_la_LIBADD) : force
+@WITH_JSON_PARSER_TRUE@	@cd `dirname $@` && $(MAKE) `basename $@`
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/power/cray/power_cray.c b/src/plugins/power/cray/power_cray.c
new file mode 100644
index 000000000..974307324
--- /dev/null
+++ b/src/plugins/power/cray/power_cray.c
@@ -0,0 +1,1742 @@
+/*****************************************************************************\
+ *  power_cray.c - Plugin for Cray power management.
+ *****************************************************************************
+ *  Copyright (C) 2014-2015 SchedMD LLC.
+ *  Written by Morris Jette <jette@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#define _GNU_SOURCE	/* For POLLRDHUP */
+#include <ctype.h>
+#include <poll.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#if HAVE_JSON
+#include <json-c/json.h>
+#endif
+
+#include "slurm/slurm.h"
+
+#include "src/common/list.h"
+#include "src/common/log.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/timers.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/plugins/power/common/power_common.h"
+#include "src/slurmctld/locks.h"
+
+#define DEFAULT_BALANCE_INTERVAL  30
+#define DEFAULT_CAPMC_PATH        "/opt/cray/capmc/default/bin/capmc"
+#define DEFAULT_CAP_WATTS         0
+#define DEFAULT_DECREASE_RATE     50
+#define DEFAULT_INCREASE_RATE     20
+#define DEFAULT_LOWER_THRESHOLD   90
+#define DEFAULT_UPPER_THRESHOLD   95
+#define DEFAULT_RECENT_JOB        300
+
+/* These are defined here so when we link with something other than
+ * the slurmctld we will have these symbols defined.  They will get
+ * overwritten when linking with the slurmctld.
+ */
+#if defined (__APPLE__)
+struct node_record *node_record_table_ptr __attribute__((weak_import)) = NULL;
+List job_list __attribute__((weak_import)) = NULL;
+int node_record_count __attribute__((weak_import)) = 0;
+#else
+struct node_record *node_record_table_ptr = NULL;
+List job_list = NULL;
+int node_record_count = 0;
+#endif
+
+typedef struct power_config_nodes {
+	uint32_t accel_max_watts; /* maximum power consumption by accel, in watts */
+	uint32_t accel_min_watts; /* minimum power consumption by accel, in watts */
+	uint32_t cap_watts;       /* cap on power consumption by node, in watts */
+	uint64_t joule_counter;	  /* total energy consumption by node, in joules */
+	uint32_t node_max_watts;  /* maximum power consumption by node, in watts */
+	uint32_t node_min_watts;  /* minimum power consumption by node, in watts */
+	int node_cnt;		  /* length of node_name array */
+	char **node_name;	  /* Node names (nid range list values on Cray) */
+	uint16_t state;           /* State 1=ready, 0=other */
+	uint64_t time_usec;       /* number of microseconds since start of the day */
+} power_config_nodes_t;
+
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *      <application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "burst_buffer" for SLURM burst_buffer) and <method> is a
+ * description of how this plugin satisfies that application.  SLURM will only
+ * load a burst_buffer plugin if the plugin_type string has a prefix of
+ * "burst_buffer/".
+ *
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
+ */
+const char plugin_name[]        = "power cray plugin";
+const char plugin_type[]        = "power/cray";
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
+
+/*********************** local variables *********************/
+static int balance_interval = DEFAULT_BALANCE_INTERVAL;
+static char *capmc_path = NULL;
+static uint32_t cap_watts = DEFAULT_CAP_WATTS;
+static uint32_t set_watts = 0;
+static uint64_t debug_flag = 0;
+static char *full_nid_string = NULL;
+static uint32_t decrease_rate = DEFAULT_DECREASE_RATE;
+static uint32_t increase_rate = DEFAULT_INCREASE_RATE;
+static uint32_t job_level = NO_VAL;
+static time_t last_cap_read = 0;
+static uint32_t lower_threshold = DEFAULT_LOWER_THRESHOLD;
+static uint32_t recent_job = DEFAULT_RECENT_JOB;
+static uint32_t upper_threshold = DEFAULT_UPPER_THRESHOLD;
+static bool stop_power = false;
+static pthread_t power_thread = 0;
+static pthread_mutex_t thread_flag_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_mutex_t term_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t  term_cond = PTHREAD_COND_INITIALIZER;
+
+/*********************** local functions *********************/
+static void _build_full_nid_string(void);
+static void _clear_node_caps(void);
+static void _get_capabilities(void);
+static void _get_caps(void);
+static void _get_node_energy_counter(void);
+static void _get_nodes_ready(void);
+static power_config_nodes_t *
+            _json_parse_array_capabilities(json_object *jobj,
+					   char *key, int *num);
+static power_config_nodes_t *
+		_json_parse_array_caps(json_object *jobj, char *key, int *num);
+static power_config_nodes_t *
+            _json_parse_array_energy(json_object *jobj, char *key, int *num);
+static void _json_parse_capabilities(json_object *jobj,
+				     power_config_nodes_t *ent);
+static void _json_parse_energy(json_object *jobj, power_config_nodes_t *ent);
+static void _json_parse_nid(json_object *jobj, power_config_nodes_t *ent);
+static power_config_nodes_t *
+            _json_parse_ready(json_object *jobj, char *key, int *num);
+static void _load_config(void);
+static void _log_node_power(void);
+static void _parse_capable_control(json_object *j_control,
+				   power_config_nodes_t *ent);
+static void _parse_capable_controls(json_object *j_control,
+				    power_config_nodes_t *ent);
+static void _parse_caps_control(json_object *j_control,
+				power_config_nodes_t *ent);
+static void _parse_caps_controls(json_object *j_control,
+				 power_config_nodes_t *ent);
+extern void *_power_agent(void *args);
+static void _rebalance_node_power(void);
+static void _set_node_caps(void);
+static void _set_power_caps(void);
+static void _stop_power_agent(void);
+static uint64_t _time_str2num(char *time_str);
+
+/* Convert a time in the format "2015-02-19 15:50:00.581552-06" to the
+ * equivalent to the number of micro-seconds since the start of this day */
+static uint64_t _time_str2num(char *time_str)
+{
+	uint64_t total_usecs = 0;
+	int year = 0, month = 0, day = 0;
+	int hour = 0, min = 0, sec = 0;
+	int u_sec = 0, unk = 0;
+	int args;
+
+	args = sscanf(time_str, "%d-%d-%d %d:%d:%d.%d-%d",
+		      &year, &month, &day, &hour, &min, &sec, &u_sec, &unk);
+	if (args >= 6) {
+		total_usecs  = (((hour * 60) + min) * 60) + sec;
+		total_usecs *= 1000000;
+		total_usecs += u_sec;
+	}
+
+	return total_usecs;
+}
+
+/* Return a pointer to the numeric value of a node name starting with "nid",
+ * also skip over leading zeros in the numeric portion. Returns a pointer
+ * into the node_name argument. No data is copied. */
+static char *_node_name2nid(char *node_name)
+{
+	int j;
+
+	if ((node_name[0] != 'n') || (node_name[1] != 'i') ||
+	    (node_name[2] != 'd')) {
+		error("%s: Invalid node name (%s)", __func__, node_name);
+		return (node_name);
+	}
+
+	for (j = 3; j < 7; j++) {
+		if (node_name[j] != '0')
+			break;
+	}
+	return (node_name + j);
+}
+
+/* Parse PowerParameters configuration */
+static void _load_config(void)
+{
+	char *end_ptr = NULL, *sched_params, *tmp_ptr;
+
+	debug_flag = slurm_get_debug_flags();
+	sched_params = slurm_get_power_parameters();
+	if (!sched_params)
+		sched_params = xmalloc(1);	/* Set defaults below */
+
+	/*                                   12345678901234567890 */
+	if ((tmp_ptr = strstr(sched_params, "balance_interval="))) {
+		balance_interval = atoi(tmp_ptr + 17);
+		if (balance_interval < 1) {
+			error("PowerParameters: balance_interval=%d invalid",
+			      balance_interval);
+			balance_interval = DEFAULT_BALANCE_INTERVAL;
+		}
+	} else {
+		balance_interval = DEFAULT_BALANCE_INTERVAL;
+	}
+
+	xfree(capmc_path);
+	if ((tmp_ptr = strstr(sched_params, "capmc_path="))) {
+		capmc_path = xstrdup(tmp_ptr + 11);
+		tmp_ptr = strchr(capmc_path, ',');
+		if (tmp_ptr)
+			tmp_ptr[0] = '\0';
+	} else {
+		capmc_path = xstrdup(DEFAULT_CAPMC_PATH);
+	}
+
+	/*                                   12345678901234567890 */
+	if ((tmp_ptr = strstr(sched_params, "cap_watts="))) {
+		cap_watts = strtol(tmp_ptr + 10, &end_ptr, 10);
+		if ((end_ptr[0] == 'k') || (end_ptr[0] == 'K')) {
+			cap_watts *= 1000;
+		} else if ((end_ptr[0] == 'm') || (end_ptr[0] == 'M')) {
+			cap_watts *= 1000000;
+		}
+	} else {
+		cap_watts = DEFAULT_CAP_WATTS;
+	}
+
+	if ((tmp_ptr = strstr(sched_params, "decrease_rate="))) {
+		decrease_rate = atoi(tmp_ptr + 14);
+		if (decrease_rate < 1) {
+			error("PowerParameters: decrease_rate=%u invalid",
+			      balance_interval);
+			lower_threshold = DEFAULT_DECREASE_RATE;
+		}
+	} else {
+		decrease_rate = DEFAULT_DECREASE_RATE;
+	}
+
+	if ((tmp_ptr = strstr(sched_params, "increase_rate="))) {
+		increase_rate = atoi(tmp_ptr + 14);
+		if (increase_rate < 1) {
+			error("PowerParameters: increase_rate=%u invalid",
+			      balance_interval);
+			lower_threshold = DEFAULT_INCREASE_RATE;
+		}
+	} else {
+		increase_rate = DEFAULT_INCREASE_RATE;
+	}
+
+	if (strstr(sched_params, "job_level"))
+		job_level = 1;
+	else if (strstr(sched_params, "job_no_level"))
+		job_level = 0;
+	else
+		job_level = NO_VAL;
+
+	if ((tmp_ptr = strstr(sched_params, "lower_threshold="))) {
+		lower_threshold = atoi(tmp_ptr + 16);
+		if (lower_threshold < 1) {
+			error("PowerParameters: lower_threshold=%u invalid",
+			      lower_threshold);
+			lower_threshold = DEFAULT_LOWER_THRESHOLD;
+		}
+	} else {
+		lower_threshold = DEFAULT_LOWER_THRESHOLD;
+	}
+
+	if ((tmp_ptr = strstr(sched_params, "recent_job="))) {
+		recent_job = atoi(tmp_ptr + 11);
+		if (recent_job < 1) {
+			error("PowerParameters: recent_job=%u invalid",
+			      recent_job);
+			recent_job = DEFAULT_RECENT_JOB;
+		}
+	} else {
+		recent_job = DEFAULT_RECENT_JOB;
+	}
+
+	if ((tmp_ptr = strstr(sched_params, "set_watts="))) {
+		set_watts = strtol(tmp_ptr + 10, &end_ptr, 10);
+		if ((end_ptr[0] == 'k') || (end_ptr[0] == 'K')) {
+			set_watts *= 1000;
+		} else if ((end_ptr[0] == 'm') || (end_ptr[0] == 'M')) {
+			set_watts *= 1000000;
+		}
+	} else {
+		set_watts = 0;
+	}
+
+	if ((tmp_ptr = strstr(sched_params, "upper_threshold="))) {
+		upper_threshold = atoi(tmp_ptr + 16);
+		if (upper_threshold < 1) {
+			error("PowerParameters: upper_threshold=%u invalid",
+			      upper_threshold);
+			upper_threshold = DEFAULT_UPPER_THRESHOLD;
+		}
+	} else {
+		upper_threshold = DEFAULT_UPPER_THRESHOLD;
+	}
+
+	xfree(sched_params);
+	xfree(full_nid_string);
+	if (debug_flag & DEBUG_FLAG_POWER) {
+		char *level_str = "";
+		if (job_level == 0)
+			level_str = "job_no_level,";
+		else if (job_level == 1)
+			level_str = "job_level,";
+		info("PowerParameters=balance_interval=%d,capmc_path=%s,"
+		     "cap_watts=%u,decrease_rate=%u,increase_rate=%u,%s"
+		     "lower_threashold=%u,recent_job=%u,set_watts=%u,"
+		     "upper_threshold=%u",
+		     balance_interval, capmc_path, cap_watts, decrease_rate,
+		     increase_rate, level_str, lower_threshold, recent_job,
+		     set_watts, upper_threshold);
+	}
+
+	last_cap_read = 0;	/* Read node power limits again */
+}
+
+static void _get_capabilities(void)
+{
+	/* Write nodes */
+	slurmctld_lock_t write_node_lock = {
+		NO_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
+	char *cmd_resp, *script_argv[3], node_names[128];
+	power_config_nodes_t *ents = NULL;
+	int i, j, num_ent = 0, status = 0;
+	json_object *j_obj;
+	json_object_iter iter;
+	struct node_record *node_ptr;
+	hostlist_t hl = NULL;
+	DEF_TIMERS;
+
+	script_argv[0] = capmc_path;
+	script_argv[1] = "get_power_cap_capabilities";
+	script_argv[2] = NULL;
+
+	START_TIMER;
+	cmd_resp = power_run_script("capmc", capmc_path, script_argv, 5000,
+				    NULL, &status);
+	END_TIMER;
+	if (status != 0) {
+		error("%s: capmc %s: %s",
+		      __func__, script_argv[1], cmd_resp);
+		xfree(cmd_resp);
+		return;
+	} else if (debug_flag & DEBUG_FLAG_POWER) {
+		info("%s: capmc %s %s", __func__, script_argv[1], TIME_STR);
+	}
+	if ((cmd_resp == NULL) || (cmd_resp[0] == '\0')) {
+		xfree(cmd_resp);
+		return;
+	}
+
+	j_obj = json_tokener_parse(cmd_resp);
+	if (j_obj == NULL) {
+		error("%s: json parser failed on %s", __func__, cmd_resp);
+		xfree(cmd_resp);
+		return;
+	}
+	json_object_object_foreachC(j_obj, iter) {
+		/* NOTE: The error number "e" and message "err_msg" fields
+		 * are currently ignored. */
+		if (!strcmp(iter.key, "groups")) {
+			ents = _json_parse_array_capabilities(j_obj, iter.key,
+							      &num_ent);
+			break;
+		}
+	}
+	json_object_put(j_obj);	/* Frees json memory */
+
+	lock_slurmctld(write_node_lock);
+	for (i = 0; i < num_ent; i++) {
+		if (debug_flag & DEBUG_FLAG_POWER)
+			hl = hostlist_create(NULL);
+		for (j = 0; j < ents[i].node_cnt; j++) {
+			if (debug_flag & DEBUG_FLAG_POWER)
+				hostlist_push_host(hl, ents[i].node_name[j]);
+			node_ptr = find_node_record2(ents[i].node_name[j]);
+			if (!node_ptr) {
+				debug("%s: Node %s not in Slurm config",
+				      __func__, ents[i].node_name[j]);
+			} else {
+				if (!node_ptr->power) {
+					node_ptr->power =
+						xmalloc(sizeof(power_mgmt_data_t));
+				}
+				node_ptr->power->max_watts =
+					ents[i].node_max_watts;
+				node_ptr->power->min_watts =
+					ents[i].node_min_watts;
+			}
+			xfree(ents[i].node_name[j]);
+		}
+		xfree(ents[i].node_name);
+		if (debug_flag & DEBUG_FLAG_POWER) {
+			hostlist_ranged_string(hl, sizeof(node_names),
+					       node_names);
+			info("AccelWattsAvail:%3.3u-%3.3u "
+			     "NodeWattsAvail:%3.3u-%3.3u Nodes=%s",
+			     ents[i].accel_min_watts, ents[i].accel_max_watts,
+			     ents[i].node_min_watts, ents[i].node_max_watts,
+			     node_names);
+			hostlist_destroy(hl);
+		}
+	}
+	xfree(ents);
+	unlock_slurmctld(write_node_lock);
+	xfree(cmd_resp);
+}
+
+static power_config_nodes_t *
+_json_parse_array_capabilities(json_object *jobj, char *key, int *num)
+{
+	json_object *j_array;
+	json_object *j_value;
+	int i;
+	power_config_nodes_t *ents;
+
+	j_array = jobj;
+	json_object_object_get_ex(jobj, key, &j_array);
+
+	*num = json_object_array_length(j_array);
+	ents = xmalloc(*num * sizeof(power_config_nodes_t));
+
+	for (i = 0; i < *num; i++) {
+		j_value = json_object_array_get_idx(j_array, i);
+		_json_parse_capabilities(j_value, &ents[i]);
+	}
+
+	return ents;
+}
+
+/* Parse a "controls" array element from the "capmc get_power_cap_capabilities"
+ * command. Identifies node and accelerator power ranges. */
+static void _parse_capable_control(json_object *j_control,
+				   power_config_nodes_t *ent)
+{
+	enum json_type type;
+	struct json_object_iter iter;
+	const char *p = NULL;
+	int min_watts = 0, max_watts = 0, x;
+
+	json_object_object_foreachC(j_control, iter) {
+		type = json_object_get_type(iter.val);
+		switch (type) {
+			case json_type_boolean:
+//				info("%s: Key boolean %s", __func__, iter.key);
+				break;
+			case json_type_double:
+//				info("%s: Key double %s", __func__, iter.key);
+				break;
+			case json_type_null:
+//				info("%s: Key null %s", __func__, iter.key);
+				break;
+			case json_type_object:
+//				info("%s: Key object %s", __func__, iter.key);
+				break;
+			case json_type_array:
+//				info("%s: Key array %s", __func__, iter.key);
+				break;
+			case json_type_string:
+//				info("%s: Key string %s", __func__, iter.key);
+				if (!strcmp(iter.key, "name"))
+					p = json_object_get_string(iter.val);
+				break;
+			case json_type_int:
+//				info("%s: Key int %s", __func__, iter.key);
+				x = json_object_get_int64(iter.val);
+				if (!strcmp(iter.key, "max"))
+					max_watts = x;
+				else if (!strcmp(iter.key, "min"))
+					min_watts = x;
+				break;
+			default:
+				break;
+		}
+	}
+
+	if (p) {
+		if (!strcmp(p, "accel")) {
+			ent->accel_max_watts = max_watts;
+			ent->accel_min_watts = min_watts;
+		} else if (!strcmp(p, "node")) {
+			ent->node_max_watts = max_watts;
+			ent->node_min_watts = min_watts;
+		}
+	}
+}
+
+/* Parse the "controls" array from the "capmc get_power_cap_capabilities"
+ * command. Use _parse_capable_control() to get node and accelerator power
+ * ranges. */
+static void _parse_capable_controls(json_object *j_control,
+				    power_config_nodes_t *ent)
+{
+	json_object *j_array = NULL;
+	json_object *j_value;
+	enum json_type j_type;
+	int control_cnt, i;
+
+        json_object_object_get_ex(j_control, "controls", &j_array);
+	if (!j_array) {
+		error("%s: Unable to parse controls specification", __func__);
+		return;
+	}
+	control_cnt = json_object_array_length(j_array);
+	for (i = 0; i < control_cnt; i++) {
+		j_value = json_object_array_get_idx(j_array, i);
+		j_type = json_object_get_type(j_value);
+		if (j_type == json_type_object) {
+			_parse_capable_control(j_value, ent);
+		} else {
+			error("%s: Unexpected data type: %d", __func__, j_type);
+		}
+	}
+}
+
+/* Parse the "nids" array from the "capmc get_power_cap_capabilities"
+ * command. Identifies each node ID with identical power specifications. */
+static void _parse_nids(json_object *jobj, power_config_nodes_t *ent, char *key)
+{
+	json_object *j_array = NULL;
+	json_object *j_value;
+	enum json_type j_type;
+	int i, nid;
+
+        json_object_object_get_ex(jobj, key, &j_array);
+	if (!j_array) {
+		error("%s: Unable to parse nid specification", __func__);
+		return;
+	}
+	ent->node_cnt = json_object_array_length(j_array);
+	ent->node_name = xmalloc(sizeof(char *) * ent->node_cnt);
+	for (i = 0; i < ent->node_cnt; i++) {
+		j_value = json_object_array_get_idx(j_array, i);
+		j_type = json_object_get_type(j_value);
+		if (j_type != json_type_int) {
+			error("%s: Unable to parse nid specification",__func__);
+		} else {
+			nid = json_object_get_int64(j_value);
+			xstrfmtcat(ent->node_name[i], "nid%5.5d", nid);
+		}
+	}
+}
+
+/* Parse a "groups" array element from the "capmc get_power_cap_capabilities"
+ * command. Use _parse_capable_controls() and _parse_nids() to get node and
+ * accelerator power ranges for each node. */
+static void _json_parse_capabilities(json_object *jobj,
+				     power_config_nodes_t *ent)
+{
+	enum json_type type;
+	struct json_object_iter iter;
+
+	json_object_object_foreachC(jobj, iter) {
+		type = json_object_get_type(iter.val);
+		switch (type) {
+			case json_type_boolean:
+//				info("%s: Key boolean %s", __func__, iter.key);
+				break;
+			case json_type_double:
+//				info("%s: Key double %s", __func__, iter.key);
+				break;
+			case json_type_null:
+//				info("%s: Key null %s", __func__, iter.key);
+				break;
+			case json_type_object:
+//				info("%s: Key object %s", __func__, iter.key);
+				break;
+			case json_type_string:
+//				info("%s: Key string %s", __func__, iter.key);
+				break;
+			case json_type_array:
+//				info("%s: Key array %s", __func__, iter.key);
+				if (!strcmp(iter.key, "controls")) {
+					_parse_capable_controls(jobj, ent);
+				} else if (!strcmp(iter.key, "nids")) {
+					_parse_nids(jobj, ent, "nids");
+				}
+				break;
+			case json_type_int:
+//				info("%s: Key int %s", __func__, iter.key);
+				break;
+			default:
+				break;
+		}
+	}
+}
+
+static void _build_full_nid_string(void)
+{
+	/* Read nodes */
+	slurmctld_lock_t read_node_lock = {
+		NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
+	struct node_record *node_ptr;
+	hostset_t hs = NULL;
+	char *sep, *tmp_str;
+	int i, num_ent = 0;
+
+	if (full_nid_string)
+		return;
+
+	lock_slurmctld(read_node_lock);
+	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
+		if (!hs)
+			hs = hostset_create(_node_name2nid(node_ptr->name));
+		else
+			hostset_insert(hs, _node_name2nid(node_ptr->name));
+		num_ent++;
+	}
+	unlock_slurmctld(read_node_lock);
+	if (!hs) {
+		error("%s: No nodes found", __func__);
+		return;
+	}
+	tmp_str = xmalloc(node_record_count * 6 + 2);
+	(void) hostset_ranged_string(hs, num_ent * 6, tmp_str);
+	hostset_destroy(hs);
+	if ((sep = strrchr(tmp_str, ']')))
+		sep[0] = '\0';
+	if (tmp_str[0] == '[')
+		full_nid_string = xstrdup(tmp_str + 1);
+	else
+		full_nid_string = xstrdup(tmp_str);
+	xfree(tmp_str);
+}
+
+static void _get_caps(void)
+{
+	/* Write nodes */
+	slurmctld_lock_t write_node_lock = {
+		NO_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
+	char *cmd_resp, *script_argv[5];
+	power_config_nodes_t *ents = NULL;
+	int i, num_ent = 0, status = 0;
+	json_object *j_obj;
+	json_object_iter iter;
+	struct node_record *node_ptr;
+	DEF_TIMERS;
+
+	_build_full_nid_string();
+	if (!full_nid_string)
+		return;
+
+	script_argv[0] = capmc_path;
+	script_argv[1] = "get_power_cap";
+	script_argv[2] = "--nids";
+	script_argv[3] = full_nid_string;
+	script_argv[4] = NULL;
+
+	START_TIMER;
+	cmd_resp = power_run_script("capmc", capmc_path, script_argv, 5000,
+				    NULL, &status);
+	END_TIMER;
+	if (status != 0) {
+		error("%s: capmc %s: %s",
+		      __func__, script_argv[1], cmd_resp);
+		xfree(cmd_resp);
+		return;
+	} else if (debug_flag & DEBUG_FLAG_POWER) {
+		info("%s: capmc %s %s", __func__, script_argv[1], TIME_STR);
+	}
+	if ((cmd_resp == NULL) || (cmd_resp[0] == '\0')) {
+		xfree(cmd_resp);
+		return;
+	}
+
+	j_obj = json_tokener_parse(cmd_resp);
+	if (j_obj == NULL) {
+		error("%s: json parser failed on %s", __func__, cmd_resp);
+		xfree(cmd_resp);
+		return;
+	}
+	json_object_object_foreachC(j_obj, iter) {
+		/* NOTE: The error number "e" and message "err_msg" fields
+		 * are currently ignored. */
+		if (!strcmp(iter.key, "nids")) {
+			ents = _json_parse_array_caps(j_obj, iter.key,
+						      &num_ent);
+			break;
+		}
+	}
+	json_object_put(j_obj);	/* Frees json memory */
+
+	lock_slurmctld(write_node_lock);
+	for (i = 0; i < num_ent; i++) {
+		node_ptr = find_node_record2(ents[i].node_name[0]);
+		if (!node_ptr) {
+			debug("%s: Node %s not in Slurm config",
+			      __func__, ents[i].node_name[0]);
+		} else {
+			if (!node_ptr->power) {
+				node_ptr->power =
+					xmalloc(sizeof(power_mgmt_data_t));
+			}
+			node_ptr->power->cap_watts = ents[i].cap_watts;
+		}
+		xfree(ents[i].node_name[0]);
+		xfree(ents[i].node_name);
+	}
+	xfree(ents);
+	unlock_slurmctld(write_node_lock);
+	xfree(cmd_resp);
+}
+
+/* json_parse_array()
+ */
+static power_config_nodes_t *
+_json_parse_array_caps(json_object *jobj, char *key, int *num)
+{
+	json_object *j_array;
+	json_object *j_value;
+	int i;
+	power_config_nodes_t *ents;
+
+	j_array = jobj;
+	json_object_object_get_ex(jobj, key, &j_array);
+
+	*num = json_object_array_length(j_array);
+	ents = xmalloc(*num * sizeof(power_config_nodes_t));
+
+	for (i = 0; i < *num; i++) {
+		j_value = json_object_array_get_idx(j_array, i);
+		_json_parse_nid(j_value, &ents[i]);
+	}
+
+	return ents;
+}
+
+static void _parse_caps_control(json_object *j_control,
+				power_config_nodes_t *ent)
+{
+	enum json_type type;
+	struct json_object_iter iter;
+	const char *p = NULL;
+	int cap_watts = 0, x;
+
+	json_object_object_foreachC(j_control, iter) {
+		type = json_object_get_type(iter.val);
+		switch (type) {
+			case json_type_boolean:
+//				info("%s: Key boolean %s", __func__, iter.key);
+				break;
+			case json_type_double:
+//				info("%s: Key double %s", __func__, iter.key);
+				break;
+			case json_type_null:
+//				info("%s: Key null %s", __func__, iter.key);
+				break;
+			case json_type_object:
+//				info("%s: Key object %s", __func__, iter.key);
+				break;
+			case json_type_array:
+//				info("%s: Key array %s", __func__, iter.key);
+				break;
+			case json_type_string:
+//				info("%s: Key string %s", __func__, iter.key);
+				if (!strcmp(iter.key, "name"))
+					p = json_object_get_string(iter.val);
+				break;
+			case json_type_int:
+//				info("%s: Key int %s", __func__, iter.key);
+				x = json_object_get_int64(iter.val);
+				if (!strcmp(iter.key, "val"))
+					cap_watts = x;
+				break;
+			default:
+				break;
+		}
+	}
+
+	if (p) {
+		if (!strcmp(p, "node")) {
+			ent->cap_watts = cap_watts;
+		}
+	}
+}
+
+/* Parse the "controls" array from the "capmc get_power_caps" command.
+ * Use _parse_caps_control() to get node and accelerator power ranges. */
+static void _parse_caps_controls(json_object *j_control,
+				 power_config_nodes_t *ent)
+{
+	json_object *j_array = NULL;
+	json_object *j_value;
+	enum json_type j_type;
+	int control_cnt, i;
+
+        json_object_object_get_ex(j_control, "controls", &j_array);
+	if (!j_array) {
+		error("%s: Unable to parse controls specification", __func__);
+		return;
+	}
+	control_cnt = json_object_array_length(j_array);
+	for (i = 0; i < control_cnt; i++) {
+		j_value = json_object_array_get_idx(j_array, i);
+		j_type = json_object_get_type(j_value);
+		if (j_type == json_type_object) {
+			_parse_caps_control(j_value, ent);
+		} else {
+			error("%s: Unexpected data type: %d", __func__, j_type);
+		}
+	}
+}
+
+/* Parse a "nids" array element from the "capmc get_power_cap" command. */
+static void _json_parse_nid(json_object *jobj, power_config_nodes_t *ent)
+{
+	enum json_type type;
+	struct json_object_iter iter;
+	int x;
+
+	json_object_object_foreachC(jobj, iter) {
+		type = json_object_get_type(iter.val);
+		switch (type) {
+			case json_type_boolean:
+//				info("%s: Key boolean %s", __func__, iter.key);
+				break;
+			case json_type_double:
+//				info("%s: Key double %s", __func__, iter.key);
+				break;
+			case json_type_null:
+//				info("%s: Key null %s", __func__, iter.key);
+				break;
+			case json_type_object:
+//				info("%s: Key object %s", __func__, iter.key);
+				break;
+			case json_type_string:
+//				info("%s: Key string %s", __func__, iter.key);
+				break;
+			case json_type_array:
+//				info("%s: Key array %s", __func__, iter.key);
+				if (!strcmp(iter.key, "controls")) {
+					_parse_caps_controls(jobj, ent);
+				}
+				break;
+			case json_type_int:
+//				info("%s: Key int %s", __func__, iter.key);
+				x = json_object_get_int64(iter.val);
+				if (!strcmp(iter.key, "nid")) {
+					ent->node_name = xmalloc(sizeof(char *));
+					xstrfmtcat(ent->node_name[0],
+						   "nid%5.5d", x);
+				}
+				break;
+			default:
+				break;
+		}
+	}
+}
+
+/* Identify nodes which are in a state of "ready". Only nodes in a "ready"
+ * state can have their power cap modified. */
+static void _get_nodes_ready(void)
+{
+	/* Write nodes */
+	slurmctld_lock_t write_node_lock = {
+		NO_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
+	char *cmd_resp, *script_argv[5];
+	struct node_record *node_ptr;
+	power_config_nodes_t *ents = NULL;
+	int i, j, num_ent, status = 0;
+	json_object *j_obj;
+	json_object_iter iter;
+	DEF_TIMERS;
+
+	script_argv[0] = capmc_path;
+	script_argv[1] = "node_status";
+//	script_argv[2] = "--filter";
+//	script_argv[3] = "show_ready";
+	script_argv[2] = NULL;
+
+	START_TIMER;
+	cmd_resp = power_run_script("capmc", capmc_path, script_argv, 5000,
+				    NULL, &status);
+	END_TIMER;
+	if (status != 0) {
+		error("%s: capmc %s: %s",  __func__, script_argv[1], cmd_resp);
+		xfree(cmd_resp);
+		return;
+	} else if (debug_flag & DEBUG_FLAG_POWER) {
+		info("%s: capmc %s %s",  __func__, script_argv[1], TIME_STR);
+	}
+	if ((cmd_resp == NULL) || (cmd_resp[0] == '\0')) {
+		xfree(cmd_resp);
+		return;
+	}
+
+	j_obj = json_tokener_parse(cmd_resp);
+	if (j_obj == NULL) {
+		error("%s: json parser failed on %s", __func__, cmd_resp);
+		xfree(cmd_resp);
+		return;
+	}
+	num_ent = 0;
+	json_object_object_foreachC(j_obj, iter) {
+		/* NOTE: The error number "e", message "err_msg", "off", and
+		 * "on" fields are currently ignored. */
+		if (!strcmp(iter.key, "ready")) {
+			ents = _json_parse_ready(j_obj, iter.key, &num_ent);
+			break;
+		}
+	}
+	json_object_put(j_obj);	/* Frees json memory */
+
+	lock_slurmctld(write_node_lock);
+	for (i = 0, node_ptr = node_record_table_ptr;
+	     i < node_record_count; i++, node_ptr++) {
+		if (!node_ptr->power)
+			node_ptr->power = xmalloc(sizeof(power_mgmt_data_t));
+		else
+			node_ptr->power->state = 0;
+	}
+	for (i = 0; i < num_ent; i++) {
+		for (j = 0; j < ents[i].node_cnt; j++) {
+			node_ptr = find_node_record2(ents[i].node_name[j]);
+			if (!node_ptr) {
+				debug("%s: Node %s not in Slurm config",
+				      __func__, ents[i].node_name[j]);
+			} else {
+				node_ptr->power->state = ents[i].state;
+			}
+			xfree(ents[i].node_name[j]);
+		}
+		xfree(ents[i].node_name);
+	}
+	xfree(ents);
+	unlock_slurmctld(write_node_lock);
+	xfree(cmd_resp);
+}
+
+static power_config_nodes_t *
+_json_parse_ready(json_object *jobj, char *key, int *num)
+{
+	power_config_nodes_t *ents;
+	enum json_type type;
+	struct json_object_iter iter;
+
+	*num = 1;
+	ents = xmalloc(*num * sizeof(power_config_nodes_t));
+
+	json_object_object_foreachC(jobj, iter) {
+		type = json_object_get_type(iter.val);
+		switch (type) {
+			case json_type_boolean:
+//				info("%s: Key boolean %s", __func__, iter.key);
+				break;
+			case json_type_double:
+//				info("%s: Key double %s", __func__, iter.key);
+				break;
+			case json_type_null:
+//				info("%s: Key null %s", __func__, iter.key);
+				break;
+			case json_type_object:
+//				info("%s: Key object %s", __func__, iter.key);
+				break;
+			case json_type_array:
+//				info("%s: Key array %s", __func__, iter.key);
+				if (!strcmp(iter.key, "ready")) {
+					ents->state = 1;	/* 1=ready */
+					_parse_nids(jobj, ents, "ready");
+				}
+				break;
+			case json_type_int:
+//				info("%s: Key int %s", __func__, iter.key);
+				break;
+			case json_type_string:
+//				info("%s: Key string %s", __func__, iter.key);
+				break;
+		}
+	}
+
+	return ents;
+}
+
+/* Gather current node power consumption rate. This logic gathers the
+ * information using Cray's capmc command. An alternative would be to use
+ * Slurm's energy plugin, but that would require additional synchronization
+ * logic be developed. Specifically we would operate on the node's energy
+ * data after current data is collected, which happens across all compute
+ * nodes with a frequency of AcctGatherNodeFreq. */
+static void _get_node_energy_counter(void)
+{
+	/* Write nodes */
+	slurmctld_lock_t write_node_lock = {
+		NO_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
+	char *cmd_resp, *script_argv[5];
+	power_config_nodes_t *ents = NULL;
+	int i, j, num_ent = 0, status = 0;
+	uint64_t delta_joules, delta_time, usecs_day;
+	json_object *j_obj;
+	json_object_iter iter;
+	struct node_record *node_ptr;
+	DEF_TIMERS;
+
+	_build_full_nid_string();
+	if (!full_nid_string)
+		return;
+
+	script_argv[0] = capmc_path;
+	script_argv[1] = "get_node_energy_counter";
+	script_argv[2] = "--nids";
+	script_argv[3] = full_nid_string;
+	script_argv[4] = NULL;
+
+	START_TIMER;
+	cmd_resp = power_run_script("capmc", capmc_path, script_argv, 5000,
+				    NULL, &status);
+	END_TIMER;
+	if (status != 0) {
+		error("%s: capmc %s %s %s: %s",  __func__,
+		      script_argv[1], script_argv[2], script_argv[3], cmd_resp);
+		xfree(cmd_resp);
+		return;
+	} else if (debug_flag & DEBUG_FLAG_POWER) {
+		info("%s: capmc %s %s %s %s",  __func__,
+		     script_argv[1], script_argv[2], script_argv[3], TIME_STR);
+	}
+	if ((cmd_resp == NULL) || (cmd_resp[0] == '\0')) {
+		xfree(cmd_resp);
+		return;
+	}
+
+	j_obj = json_tokener_parse(cmd_resp);
+	if (j_obj == NULL) {
+		error("%s: json parser failed on %s", __func__, cmd_resp);
+		xfree(cmd_resp);
+		return;
+	}
+	num_ent = 0;
+	json_object_object_foreachC(j_obj, iter) {
+		/* NOTE: The error number "e", message "err_msg", and
+		 * "nid_count" fields are currently ignored. */
+		if (!strcmp(iter.key, "nodes")) {
+			ents = _json_parse_array_energy(j_obj, iter.key,
+							&num_ent);
+			break;
+		}
+	}
+	json_object_put(j_obj);	/* Frees json memory */
+
+	lock_slurmctld(write_node_lock);
+	for (i = 0, node_ptr = node_record_table_ptr;
+	     i < node_record_count; i++, node_ptr++) {
+		if (!node_ptr->power)
+			node_ptr->power = xmalloc(sizeof(power_mgmt_data_t));
+		else
+			node_ptr->power->current_watts = 0;
+	}
+	usecs_day  = 24 * 60 * 60;
+	usecs_day *= 1000000;
+	for (i = 0; i < num_ent; i++) {
+		for (j = 0; j < ents[i].node_cnt; j++) {
+			node_ptr = find_node_record2(ents[i].node_name[j]);
+			if (!node_ptr) {
+				debug("%s: Node %s not in Slurm config",
+				      __func__, ents[i].node_name[j]);
+			} else {
+				delta_time   = 0;
+				if ((ents[i].time_usec == 0) ||
+				    (node_ptr->power->time_usec == 0)) {
+					;
+				} else if (ents[i].time_usec >
+					   node_ptr->power->time_usec) {
+					delta_time =
+						ents[i].time_usec -
+						node_ptr->power->time_usec;
+				} else if ((ents[i].time_usec <
+					    node_ptr->power->time_usec) &&
+					   ((ents[i].time_usec + usecs_day) >
+					    node_ptr->power->time_usec)) {
+					delta_time =
+						(ents[i].time_usec +
+						 usecs_day) -
+						node_ptr->power->time_usec;
+				}	
+				if (delta_time &&
+				    (node_ptr->power->joule_counter <
+				     ents[i].joule_counter)) {
+					delta_joules =
+						ents[i].joule_counter -
+						node_ptr->power->joule_counter;
+					delta_joules *= 1000000;
+					node_ptr->power->current_watts =
+						delta_joules / delta_time;
+				}
+				node_ptr->power->joule_counter =
+					ents[i].joule_counter;
+				node_ptr->power->time_usec =
+					ents[i].time_usec;
+			}
+			xfree(ents[i].node_name[j]);
+		}
+		xfree(ents[i].node_name);
+	}
+	xfree(ents);
+	unlock_slurmctld(write_node_lock);
+	xfree(cmd_resp);
+}
+
+static power_config_nodes_t *
+_json_parse_array_energy(json_object *jobj, char *key, int *num)
+{
+	json_object *jarray;
+	int i;
+	json_object *jvalue;
+	power_config_nodes_t *ents;
+
+	jarray = jobj;
+	json_object_object_get_ex(jobj, key, &jarray);
+
+	*num = json_object_array_length(jarray);
+	ents = xmalloc(*num * sizeof(power_config_nodes_t));
+
+	for (i = 0; i < *num; i++) {
+		jvalue = json_object_array_get_idx(jarray, i);
+		_json_parse_energy(jvalue, &ents[i]);
+	}
+
+	return ents;
+}
+
+static void _json_parse_energy(json_object *jobj, power_config_nodes_t *ent)
+{
+	enum json_type type;
+	struct json_object_iter iter;
+	int64_t x;
+	const char *p = NULL;
+
+	json_object_object_foreachC(jobj, iter) {
+		type = json_object_get_type(iter.val);
+		switch (type) {
+			case json_type_boolean:
+//				info("%s: Key boolean %s", __func__, iter.key);
+				break;
+			case json_type_double:
+//				info("%s: Key double %s", __func__, iter.key);
+				break;
+			case json_type_null:
+//				info("%s: Key null %s", __func__, iter.key);
+				break;
+			case json_type_object:
+//				info("%s: Key object %s", __func__, iter.key);
+				break;
+			case json_type_array:
+//				info("%s: Key array %s", __func__, iter.key);
+				break;
+			case json_type_int:
+//				info("%s: Key int %s", __func__, iter.key);
+				x = json_object_get_int64(iter.val);
+				if (!strcmp(iter.key, "energy_ctr")) {
+					ent->joule_counter = x;
+				} else if (!strcmp(iter.key, "nid")) {
+					ent->node_cnt = 1;
+					ent->node_name = xmalloc(sizeof(char*));
+					ent->node_name[0] = xmalloc(10);
+					snprintf(ent->node_name[0], 10,
+						 "nid%5.5"PRId64"", x);
+				}
+				break;
+			case json_type_string:
+//				info("%s: Key string %s", __func__, iter.key);
+				p = json_object_get_string(iter.val);
+				if (!strcmp(iter.key, "time")) {
+					ent->time_usec =
+						_time_str2num((char *) p);
+				}
+				break;
+		}
+	}
+}
+
+static void _my_sleep(int add_secs)
+{
+	struct timespec ts = {0, 0};
+	struct timeval  tv = {0, 0};
+
+	if (gettimeofday(&tv, NULL)) {		/* Some error */
+		sleep(1);
+		return;
+	}
+
+	ts.tv_sec  = tv.tv_sec + add_secs;
+	ts.tv_nsec = tv.tv_usec * 1000;
+	pthread_mutex_lock(&term_lock);
+	if (!stop_power)
+		pthread_cond_timedwait(&term_cond, &term_lock, &ts);
+	pthread_mutex_unlock(&term_lock);
+}
+
+/* Periodically attempt to re-balance power caps across nodes */
+extern void *_power_agent(void *args)
+{
+	time_t now;
+	double wait_time;
+	static time_t last_balance_time = 0;
+	/* Read jobs and nodes */
+	slurmctld_lock_t read_locks = {
+		NO_LOCK, READ_LOCK, READ_LOCK, NO_LOCK };
+
+	last_balance_time = time(NULL);
+	while (!stop_power) {
+		_my_sleep(1);
+		if (stop_power)
+			break;
+
+		now = time(NULL);
+		wait_time = difftime(now, last_balance_time);
+		if (wait_time < balance_interval)
+			continue;
+
+		if (last_cap_read == 0) {	/* On first pass only */
+			/* Read initial power caps for every node */
+			_get_caps();		/* Has node write lock */
+		}
+
+		wait_time = difftime(now, last_cap_read);
+		if (wait_time > 600) {		/* Every 10 minutes */
+			/* Read min/max power for every node */
+			_get_capabilities();	/* Has node write lock */
+			last_cap_read = time(NULL);
+		}
+		_get_node_energy_counter();	/* Has node write lock */
+		_get_nodes_ready();		/* Has node write lock */
+		lock_slurmctld(read_locks);
+		if (set_watts)
+			_set_node_caps();
+		else if (cap_watts == 0)
+			_clear_node_caps();
+		else
+			_rebalance_node_power();
+		unlock_slurmctld(read_locks);
+		if (debug_flag & DEBUG_FLAG_POWER)
+			_log_node_power();
+		_set_power_caps();
+		last_balance_time = time(NULL);
+	}
+	return NULL;
+}
+
+/* Set power cap on all nodes to zero */
+static void _clear_node_caps(void)
+{
+	struct node_record *node_ptr;
+	int i;
+
+	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
+		if (!node_ptr->power)
+			continue;
+		node_ptr->power->new_cap_watts = 0;
+	}
+}
+
+/* Set power cap on all nodes to the same value "set_watts" */
+static void _set_node_caps(void)
+{
+	struct node_record *node_ptr;
+	int i;
+
+	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
+		if (!node_ptr->power)
+			continue;
+		if (node_ptr->power->state != 1)  /* Not ready, no change */
+			continue;
+		node_ptr->power->new_cap_watts =
+			MAX(node_ptr->power->min_watts, set_watts);
+		node_ptr->power->new_cap_watts =
+			MIN(node_ptr->power->max_watts,
+			    node_ptr->power->new_cap_watts);
+	}
+}
+
+/* For every job needing level power caps across it's nodes, set each of its
+ * node's power cap to the average cap based upon the global cap and recent
+ * usage. */ 
+static void _level_power_by_job(void)
+{
+	int i, i_first, i_last;
+	struct job_record *job_ptr;
+	ListIterator job_iterator;
+	struct node_record *node_ptr;
+	uint32_t ave_watts, total_watts, total_nodes;
+	uint32_t max_watts, min_watts;
+
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		if (!IS_JOB_RUNNING(job_ptr) || !job_ptr->node_bitmap)
+			continue;
+		if ((job_level == NO_VAL) &&
+		    ((job_ptr->power_flags & SLURM_POWER_FLAGS_LEVEL) == 0))
+			continue;
+
+		max_watts = 0;
+		min_watts = INFINITE;
+		total_watts = 0;
+		total_nodes = 0;
+		i_first = bit_ffs(job_ptr->node_bitmap);
+		if (i_first < 0)
+			continue;
+		i_last = bit_fls(job_ptr->node_bitmap);
+		for (i = i_first; i <= i_last; i++) {
+			if (!bit_test(job_ptr->node_bitmap, i))
+				continue;
+			node_ptr = node_record_table_ptr + i;
+			if (!node_ptr->power)
+				continue;
+			if (node_ptr->power->state != 1)/*Not ready, no change*/
+				continue;
+			total_watts += node_ptr->power->new_cap_watts;
+			total_nodes++;
+			if (max_watts < node_ptr->power->new_cap_watts)
+				max_watts = node_ptr->power->new_cap_watts;
+			if (min_watts > node_ptr->power->new_cap_watts)
+				min_watts = node_ptr->power->new_cap_watts;
+		}
+
+		if (total_nodes < 2)
+			continue;
+		if (min_watts == max_watts)
+			continue;
+		ave_watts = total_watts / total_nodes;
+		if (debug_flag & DEBUG_FLAG_POWER) {
+			debug("%s: leveling power caps for job %u "
+			      "(node_cnt:%u min:%u max:%u ave:%u)",
+			      __func__, job_ptr->job_id, total_nodes,
+			      min_watts, max_watts, ave_watts);
+		}
+		for (i = i_first; i <= i_last; i++) {
+			if (!bit_test(job_ptr->node_bitmap, i))
+				continue;
+			node_ptr = node_record_table_ptr + i;
+			if (!node_ptr->power)
+				continue;
+			if (node_ptr->power->state != 1)/*Not ready, no change*/
+				continue;
+			node_ptr->power->new_cap_watts = ave_watts;
+		}
+	}
+	list_iterator_destroy(job_iterator);
+}
+
+/* Determine the new power cap required on each node based upon recent usage
+ * and any power leveling by job */
+static void _rebalance_node_power(void)
+{
+	struct node_record *node_ptr;
+	uint32_t alloc_power = 0, avail_power = 0, ave_power, new_cap, tmp_u32;
+	uint32_t node_power_raise_cnt = 0, node_power_needed = 0;
+	uint32_t node_power_same_cnt = 0, node_power_lower_cnt = 0;
+	time_t recent = time(NULL) - recent_job;
+	int i;
+
+	/* Lower caps on under used nodes */
+	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
+		if (!node_ptr->power)
+			continue;
+		if (node_ptr->power->state != 1) {  /* Not ready -> no change */
+			if (node_ptr->power->cap_watts == 0) {
+				node_ptr->power->new_cap_watts =
+					node_ptr->power->max_watts;
+			} else {
+				node_ptr->power->new_cap_watts =
+					node_ptr->power->cap_watts;
+			}
+			alloc_power += node_ptr->power->new_cap_watts;
+			continue;
+		}
+		node_ptr->power->new_cap_watts = 0;
+		if (node_ptr->power->new_job_time >= recent) {
+			node_power_raise_cnt++;	/* Reset for new job below */
+			continue;
+		}
+		if ((node_ptr->power->cap_watts == 0) ||   /* Not initialized */
+		     (node_ptr->power->current_watts == 0)) {
+			node_power_raise_cnt++;	/* Reset below */
+			continue;
+		}
+		if (node_ptr->power->current_watts <
+		    (node_ptr->power->cap_watts * lower_threshold/100)) {
+			/* Lower cap by lower of
+			 * 1) decrease_rate OR
+			 * 2) half the excess power in the cap */
+			ave_power = (node_ptr->power->cap_watts -
+				     node_ptr->power->current_watts) / 2;
+			tmp_u32 = node_ptr->power->max_watts -
+				  node_ptr->power->min_watts;
+			tmp_u32 = (tmp_u32 * decrease_rate) / 100;
+			new_cap = node_ptr->power->cap_watts -
+				  MIN(tmp_u32, ave_power);
+			node_ptr->power->new_cap_watts =
+				MAX(new_cap, node_ptr->power->min_watts);
+			alloc_power += node_ptr->power->new_cap_watts;
+			node_power_lower_cnt++;
+		} else if (node_ptr->power->current_watts <=
+			   (node_ptr->power->cap_watts * upper_threshold/100)) {
+			/* In desired range. Retain previous cap */
+			node_ptr->power->new_cap_watts =
+				MAX(node_ptr->power->cap_watts,
+				    node_ptr->power->min_watts);
+			alloc_power += node_ptr->power->new_cap_watts;
+			node_power_same_cnt++;
+		} else {
+			/* Node should get more power */
+			node_power_raise_cnt++;
+			node_power_needed += node_ptr->power->min_watts;
+		}
+	}
+
+	if (cap_watts > alloc_power)
+		avail_power = cap_watts - alloc_power;
+	if ((alloc_power > cap_watts) || (node_power_needed > avail_power)) {
+		/* When CapWatts changes, we might need to lower nodes more
+		 * than the configured change rate specifications */
+		uint32_t red1 = 0, red2 = 0;
+		if (alloc_power > cap_watts)
+			red1 = alloc_power - cap_watts;
+		if (node_power_needed > avail_power)
+			red2 = node_power_needed - avail_power;
+		red1 = MAX(red1, red2);
+		red1 /= (node_power_lower_cnt + node_power_same_cnt);
+		for (i = 0, node_ptr = node_record_table_ptr;
+		     i < node_record_count; i++, node_ptr++) {
+			if (!node_ptr->power || !node_ptr->power->new_cap_watts)
+				continue;
+			tmp_u32 = node_ptr->power->new_cap_watts -
+				  node_ptr->power->min_watts;
+			tmp_u32 = MIN(tmp_u32, red1);
+			node_ptr->power->new_cap_watts -= tmp_u32;
+			alloc_power -= tmp_u32;
+		}
+		avail_power = cap_watts - alloc_power;
+	}
+	if (debug_flag & DEBUG_FLAG_POWER) {
+		info("%s: distributing %u watts over %d nodes",
+		     __func__, avail_power, node_power_raise_cnt);
+	}
+
+	/* Distribute rest of power cap on remaining nodes. */
+	if (node_power_raise_cnt) {
+		ave_power = avail_power / node_power_raise_cnt;
+		for (i = 0, node_ptr = node_record_table_ptr;
+		     i < node_record_count; i++, node_ptr++) {
+			if (!node_ptr->power || (node_ptr->power->state != 1))
+				continue;
+			if (node_ptr->power->new_cap_watts)    /* Already set */
+				continue;
+			if (node_ptr->power->new_job_time >= recent) {
+				/* Recent change in workload, do full reset */
+				new_cap = ave_power;
+			} else {
+				/* No recent change in workload, do partial
+				 * power cap reset (add up to increase_rate) */
+				tmp_u32 = node_ptr->power->max_watts -
+					  node_ptr->power->min_watts;
+				tmp_u32 = (tmp_u32 * increase_rate) / 100;
+				new_cap = node_ptr->power->cap_watts + tmp_u32;
+				new_cap = MIN(new_cap, ave_power);
+			}
+			node_ptr->power->new_cap_watts =
+				MAX(new_cap, node_ptr->power->min_watts);
+			node_ptr->power->new_cap_watts =
+				MIN(node_ptr->power->new_cap_watts,
+				    node_ptr->power->max_watts);
+			if (avail_power > node_ptr->power->new_cap_watts)
+				avail_power -= node_ptr->power->new_cap_watts;
+			else
+				avail_power = 0;
+			node_power_raise_cnt--;
+			if (node_power_raise_cnt == 0)
+				break;	/* No more nodes to modify */
+			if (node_ptr->power->new_cap_watts != ave_power) {
+				/* Re-normalize */
+				ave_power = avail_power / node_power_raise_cnt;
+			}
+		}
+	}
+
+	if (job_level != 0)
+		_level_power_by_job();
+}
+
+static void _log_node_power(void)
+{
+	struct node_record *node_ptr;
+	uint32_t total_current_watts = 0, total_min_watts = 0;
+	uint32_t total_max_watts = 0, total_cap_watts = 0;
+	uint32_t total_new_cap_watts = 0, total_ready_cnt = 0;
+	int i;
+
+	/* Build and log summary table of required updates to power caps */
+	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
+		char *ready_str;
+		if (!node_ptr->power)
+			continue;
+		if (node_ptr->power->state == 1) {
+			ready_str = "YES";
+			total_ready_cnt++;
+		} else
+			ready_str = "NO";
+		info("Node:%s CurWatts:%3u MinWatts:%3u "
+		     "MaxWatts:%3u OldCap:%3u NewCap:%3u Ready:%s",
+		     node_ptr->name, node_ptr->power->current_watts,
+		     node_ptr->power->min_watts,
+		     node_ptr->power->max_watts,
+		     node_ptr->power->cap_watts,
+		     node_ptr->power->new_cap_watts, ready_str);
+		total_current_watts += node_ptr->power->current_watts;
+		total_min_watts     += node_ptr->power->min_watts;
+		total_max_watts     += node_ptr->power->max_watts;
+		if (node_ptr->power->cap_watts)
+			total_cap_watts     += node_ptr->power->cap_watts;
+		else
+			total_cap_watts     += node_ptr->power->max_watts;
+		if (node_ptr->power->new_cap_watts)
+			total_new_cap_watts += node_ptr->power->new_cap_watts;
+		else if (node_ptr->power->cap_watts)
+			total_new_cap_watts += node_ptr->power->cap_watts;
+		else
+			total_new_cap_watts += node_ptr->power->max_watts;
+	}
+	info("TOTALS CurWatts:%u MinWatts:%u MaxWatts:%u OldCap:%u "
+	     "NewCap:%u ReadyCnt:%u",
+	     total_current_watts, total_min_watts, total_max_watts,
+	     total_cap_watts, total_new_cap_watts, total_ready_cnt);
+}
+
+static void _set_power_caps(void)
+{
+	struct node_record *node_ptr;
+	char *cmd_resp, *json = NULL, *script_argv[4];
+	int i, status = 0;
+	DEF_TIMERS;
+
+	script_argv[0] = capmc_path;
+	script_argv[1] = "json";
+	script_argv[2] = "--resource=/capmc/set_power_cap";
+	script_argv[3] = NULL;
+
+	/* Pass 1, decrease power for select nodes */
+	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
+		if (!node_ptr->power ||
+		    (node_ptr->power->state != 1) ||
+		    (node_ptr->power->cap_watts <=
+		     node_ptr->power->new_cap_watts))
+			continue;
+		node_ptr->power->cap_watts = node_ptr->power->new_cap_watts;
+		if (json)
+			xstrcat(json, ",\n ");
+		else
+			xstrcat(json, "{ \"nids\":[\n ");
+		xstrfmtcat(json,
+			   "{ \"nid\":%s, \"controls\":[ "
+			   "{ \"name\":\"node\", \"val\":%u } ] }",
+			   _node_name2nid(node_ptr->name),
+			   node_ptr->power->new_cap_watts);
+	}
+	if (json) {
+		xstrcat(json, "\n ]\n}\n");
+		START_TIMER;
+		cmd_resp = power_run_script("capmc", capmc_path, script_argv,
+					    5000, json, &status);
+		END_TIMER;
+		if (status != 0) {
+			error("%s: capmc %s %s: %s",
+			      __func__, script_argv[1], script_argv[2],
+			      cmd_resp);
+			xfree(cmd_resp);
+			return;
+		} else if (debug_flag & DEBUG_FLAG_POWER) {
+			info("%s: capmc %s %s %s",
+			     __func__, script_argv[1], script_argv[2],
+			     TIME_STR);
+		}
+		xfree(cmd_resp);
+		xfree(json);
+	}
+
+	/* Pass 2, increase power for select nodes */
+	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
+		if (!node_ptr->power ||
+		    (node_ptr->power->state != 1) ||
+		    (node_ptr->power->cap_watts >=
+		     node_ptr->power->new_cap_watts))
+			continue;
+		node_ptr->power->cap_watts = node_ptr->power->new_cap_watts;
+		if (json)
+			xstrcat(json, ",\n ");
+		else
+			xstrcat(json, "{ \"nids\":[\n ");
+		xstrfmtcat(json,
+			   "{ \"nid\":%s, \"controls\":[ "
+			   "{ \"name\":\"node\", \"val\":%u } ] }",
+			   _node_name2nid(node_ptr->name),
+			   node_ptr->power->new_cap_watts);
+	}
+	if (json) {
+		xstrcat(json, "\n ]\n}\n");
+		START_TIMER;
+		cmd_resp = power_run_script("capmc", capmc_path, script_argv,
+					    5000, json, &status);
+		END_TIMER;
+		if (status != 0) {
+			error("%s: capmc %s %s: %s",
+			      __func__, script_argv[1], script_argv[2],
+			      cmd_resp);
+			xfree(cmd_resp);
+			return;
+		} else if (debug_flag & DEBUG_FLAG_POWER) {
+			info("%s: capmc %s %s %s",
+			     __func__, script_argv[1], script_argv[2],
+			     TIME_STR);
+		}
+		xfree(cmd_resp);
+		xfree(json);
+	}
+}
+
+/* Terminate power thread */
+static void _stop_power_agent(void)
+{
+	pthread_mutex_lock(&term_lock);
+	stop_power = true;
+	pthread_cond_signal(&term_cond);
+	pthread_mutex_unlock(&term_lock);
+}
+
+/*
+ * init() is called when the plugin is loaded, before any other functions
+ * are called.  Put global initialization here.
+ */
+extern int init(void)
+{
+	pthread_attr_t attr;
+
+	if (!run_in_daemon("slurmctld"))
+		return SLURM_SUCCESS;
+
+	pthread_mutex_lock(&thread_flag_mutex);
+	if (power_thread) {
+		debug2("Power thread already running, not starting another");
+		pthread_mutex_unlock(&thread_flag_mutex);
+		return SLURM_ERROR;
+	}
+
+	_load_config();
+	slurm_attr_init(&attr);
+	/* Since we do a join on thread later, don't make it detached */
+	if (pthread_create(&power_thread, &attr, _power_agent, NULL))
+		error("Unable to start power thread: %m");
+	slurm_attr_destroy(&attr);
+	pthread_mutex_unlock(&thread_flag_mutex);
+
+	return SLURM_SUCCESS;
+}
+
+/*
+ * fini() is called when the plugin is unloaded. Free all memory.
+ */
+extern void fini(void)
+{
+	pthread_mutex_lock(&thread_flag_mutex);
+	if (power_thread) {
+		_stop_power_agent();
+		pthread_join(power_thread, NULL);
+		power_thread = 0;
+		xfree(capmc_path);
+		xfree(full_nid_string);
+	}
+	pthread_mutex_unlock(&thread_flag_mutex);
+}
+
+/* Read the configuration file */
+extern void power_p_reconfig(void)
+{
+	pthread_mutex_lock(&thread_flag_mutex);
+	_load_config();
+	pthread_mutex_unlock(&thread_flag_mutex);
+}
+
+/* Note that a suspended job has been resumed */
+extern void power_p_job_resume(struct job_record *job_ptr)
+{
+	set_node_new_job(job_ptr, node_record_table_ptr);
+}
+
+/* Note that a job has been allocated resources and is ready to start */
+extern void power_p_job_start(struct job_record *job_ptr)
+{
+	set_node_new_job(job_ptr, node_record_table_ptr);
+}
diff --git a/src/plugins/power/none/Makefile.am b/src/plugins/power/none/Makefile.am
new file mode 100644
index 000000000..d78a755ec
--- /dev/null
+++ b/src/plugins/power/none/Makefile.am
@@ -0,0 +1,16 @@
+# Makefile for power/none plugin
+
+AUTOMAKE_OPTIONS = foreign
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
+
+pkglib_LTLIBRARIES = power_none.la
+power_none_la_SOURCES = power_none.c
+power_none_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+power_none_la_LIBADD = ../common/libpower_common.la
+
+force:
+$(power_none_la_LIBADD) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
diff --git a/src/plugins/power/none/Makefile.in b/src/plugins/power/none/Makefile.in
new file mode 100644
index 000000000..5a9452f7b
--- /dev/null
+++ b/src/plugins/power/none/Makefile.in
@@ -0,0 +1,814 @@
+# Makefile.in generated by automake 1.14.1 from Makefile.am.
+# @configure_input@
+
+# Copyright (C) 1994-2013 Free Software Foundation, Inc.
+
+# This Makefile.in is free software; the Free Software Foundation
+# gives unlimited permission to copy and/or distribute it,
+# with or without modifications, as long as this notice is preserved.
+
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
+# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
+# PARTICULAR PURPOSE.
+
+@SET_MAKE@
+
+# Makefile for power/none plugin
+
+VPATH = @srcdir@
+am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
+am__make_running_with_option = \
+  case $${target_option-} in \
+      ?) ;; \
+      *) echo "am__make_running_with_option: internal error: invalid" \
+              "target option '$${target_option-}' specified" >&2; \
+         exit 1;; \
+  esac; \
+  has_opt=no; \
+  sane_makeflags=$$MAKEFLAGS; \
+  if $(am__is_gnu_make); then \
+    sane_makeflags=$$MFLAGS; \
+  else \
+    case $$MAKEFLAGS in \
+      *\\[\ \	]*) \
+        bs=\\; \
+        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
+          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
+    esac; \
+  fi; \
+  skip_next=no; \
+  strip_trailopt () \
+  { \
+    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
+  }; \
+  for flg in $$sane_makeflags; do \
+    test $$skip_next = yes && { skip_next=no; continue; }; \
+    case $$flg in \
+      *=*|--*) continue;; \
+        -*I) strip_trailopt 'I'; skip_next=yes;; \
+      -*I?*) strip_trailopt 'I';; \
+        -*O) strip_trailopt 'O'; skip_next=yes;; \
+      -*O?*) strip_trailopt 'O';; \
+        -*l) strip_trailopt 'l'; skip_next=yes;; \
+      -*l?*) strip_trailopt 'l';; \
+      -[dEDm]) skip_next=yes;; \
+      -[JT]) skip_next=yes;; \
+    esac; \
+    case $$flg in \
+      *$$target_option*) has_opt=yes; break;; \
+    esac; \
+  done; \
+  test $$has_opt = yes
+am__make_dryrun = (target_option=n; $(am__make_running_with_option))
+am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
+pkgdatadir = $(datadir)/@PACKAGE@
+pkgincludedir = $(includedir)/@PACKAGE@
+pkglibdir = $(libdir)/@PACKAGE@
+pkglibexecdir = $(libexecdir)/@PACKAGE@
+am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
+install_sh_DATA = $(install_sh) -c -m 644
+install_sh_PROGRAM = $(install_sh) -c
+install_sh_SCRIPT = $(install_sh) -c
+INSTALL_HEADER = $(INSTALL_DATA)
+transform = $(program_transform_name)
+NORMAL_INSTALL = :
+PRE_INSTALL = :
+POST_INSTALL = :
+NORMAL_UNINSTALL = :
+PRE_UNINSTALL = :
+POST_UNINSTALL = :
+build_triplet = @build@
+host_triplet = @host@
+target_triplet = @target@
+subdir = src/plugins/power/none
+DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
+	$(top_srcdir)/auxdir/depcomp
+ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
+	$(top_srcdir)/auxdir/ax_pthread.m4 \
+	$(top_srcdir)/auxdir/libtool.m4 \
+	$(top_srcdir)/auxdir/ltoptions.m4 \
+	$(top_srcdir)/auxdir/ltsugar.m4 \
+	$(top_srcdir)/auxdir/ltversion.m4 \
+	$(top_srcdir)/auxdir/lt~obsolete.m4 \
+	$(top_srcdir)/auxdir/slurm.m4 \
+	$(top_srcdir)/auxdir/x_ac__system_configuration.m4 \
+	$(top_srcdir)/auxdir/x_ac_affinity.m4 \
+	$(top_srcdir)/auxdir/x_ac_aix.m4 \
+	$(top_srcdir)/auxdir/x_ac_blcr.m4 \
+	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
+	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
+	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
+	$(top_srcdir)/auxdir/x_ac_databases.m4 \
+	$(top_srcdir)/auxdir/x_ac_debug.m4 \
+	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
+	$(top_srcdir)/auxdir/x_ac_env.m4 \
+	$(top_srcdir)/auxdir/x_ac_freeipmi.m4 \
+	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
+	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
+	$(top_srcdir)/auxdir/x_ac_lua.m4 \
+	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
+	$(top_srcdir)/auxdir/x_ac_munge.m4 \
+	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
+	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
+	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
+	$(top_srcdir)/auxdir/x_ac_pam.m4 \
+	$(top_srcdir)/auxdir/x_ac_printf_null.m4 \
+	$(top_srcdir)/auxdir/x_ac_ptrace.m4 \
+	$(top_srcdir)/auxdir/x_ac_readline.m4 \
+	$(top_srcdir)/auxdir/x_ac_rrdtool.m4 \
+	$(top_srcdir)/auxdir/x_ac_setpgrp.m4 \
+	$(top_srcdir)/auxdir/x_ac_setproctitle.m4 \
+	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
+	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
+	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
+	$(top_srcdir)/configure.ac
+am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+	$(ACLOCAL_M4)
+mkinstalldirs = $(install_sh) -d
+CONFIG_HEADER = $(top_builddir)/config.h $(top_builddir)/slurm/slurm.h
+CONFIG_CLEAN_FILES =
+CONFIG_CLEAN_VPATH_FILES =
+am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
+am__vpath_adj = case $$p in \
+    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
+    *) f=$$p;; \
+  esac;
+am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
+am__install_max = 40
+am__nobase_strip_setup = \
+  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
+am__nobase_strip = \
+  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
+am__nobase_list = $(am__nobase_strip_setup); \
+  for p in $$list; do echo "$$p $$p"; done | \
+  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
+  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
+    if (++n[$$2] == $(am__install_max)) \
+      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
+    END { for (dir in files) print dir, files[dir] }'
+am__base_list = \
+  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
+  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
+am__uninstall_files_from_dir = { \
+  test -z "$$files" \
+    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
+    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
+         $(am__cd) "$$dir" && rm -f $$files; }; \
+  }
+am__installdirs = "$(DESTDIR)$(pkglibdir)"
+LTLIBRARIES = $(pkglib_LTLIBRARIES)
+power_none_la_DEPENDENCIES = ../common/libpower_common.la
+am_power_none_la_OBJECTS = power_none.lo
+power_none_la_OBJECTS = $(am_power_none_la_OBJECTS)
+AM_V_lt = $(am__v_lt_@AM_V@)
+am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
+am__v_lt_0 = --silent
+am__v_lt_1 = 
+power_none_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(power_none_la_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_P = $(am__v_P_@AM_V@)
+am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
+am__v_P_0 = false
+am__v_P_1 = :
+AM_V_GEN = $(am__v_GEN_@AM_V@)
+am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 = 
+AM_V_at = $(am__v_at_@AM_V@)
+am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
+am__v_at_0 = @
+am__v_at_1 = 
+DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir) -I$(top_builddir)/slurm
+depcomp = $(SHELL) $(top_srcdir)/auxdir/depcomp
+am__depfiles_maybe = depfiles
+am__mv = mv -f
+COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
+	$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
+LTCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=compile $(CC) $(DEFS) \
+	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS)
+AM_V_CC = $(am__v_CC_@AM_V@)
+am__v_CC_ = $(am__v_CC_@AM_DEFAULT_V@)
+am__v_CC_0 = @echo "  CC      " $@;
+am__v_CC_1 = 
+CCLD = $(CC)
+LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC $(AM_LIBTOOLFLAGS) \
+	$(LIBTOOLFLAGS) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
+	$(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_@AM_V@)
+am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
+am__v_CCLD_0 = @echo "  CCLD    " $@;
+am__v_CCLD_1 = 
+SOURCES = $(power_none_la_SOURCES)
+DIST_SOURCES = $(power_none_la_SOURCES)
+am__can_run_installinfo = \
+  case $$AM_UPDATE_INFO_DIR in \
+    n|no|NO) false;; \
+    *) (install-info --version) >/dev/null 2>&1;; \
+  esac
+am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
+# Read a list of newline-separated strings from the standard input,
+# and print each of them once, without duplicates.  Input order is
+# *not* preserved.
+am__uniquify_input = $(AWK) '\
+  BEGIN { nonempty = 0; } \
+  { items[$$0] = 1; nonempty = 1; } \
+  END { if (nonempty) { for (i in items) print i; }; } \
+'
+# Make sure the list of sources is unique.  This is necessary because,
+# e.g., the same source file might be shared among _SOURCES variables
+# for different programs/libraries.
+am__define_uniq_tagged_files = \
+  list='$(am__tagged_files)'; \
+  unique=`for i in $$list; do \
+    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+  done | $(am__uniquify_input)`
+ETAGS = etags
+CTAGS = ctags
+DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
+ACLOCAL = @ACLOCAL@
+AMTAR = @AMTAR@
+AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
+AR = @AR@
+AUTHD_CFLAGS = @AUTHD_CFLAGS@
+AUTHD_LIBS = @AUTHD_LIBS@
+AUTOCONF = @AUTOCONF@
+AUTOHEADER = @AUTOHEADER@
+AUTOMAKE = @AUTOMAKE@
+AWK = @AWK@
+BGL_LOADED = @BGL_LOADED@
+BGQ_LOADED = @BGQ_LOADED@
+BG_INCLUDES = @BG_INCLUDES@
+BG_LDFLAGS = @BG_LDFLAGS@
+BG_L_P_LOADED = @BG_L_P_LOADED@
+BLCR_CPPFLAGS = @BLCR_CPPFLAGS@
+BLCR_HOME = @BLCR_HOME@
+BLCR_LDFLAGS = @BLCR_LDFLAGS@
+BLCR_LIBS = @BLCR_LIBS@
+BLUEGENE_LOADED = @BLUEGENE_LOADED@
+CC = @CC@
+CCDEPMODE = @CCDEPMODE@
+CFLAGS = @CFLAGS@
+CHECK_CFLAGS = @CHECK_CFLAGS@
+CHECK_LIBS = @CHECK_LIBS@
+CMD_LDFLAGS = @CMD_LDFLAGS@
+CPP = @CPP@
+CPPFLAGS = @CPPFLAGS@
+CRAY_JOB_CPPFLAGS = @CRAY_JOB_CPPFLAGS@
+CRAY_JOB_LDFLAGS = @CRAY_JOB_LDFLAGS@
+CRAY_SELECT_CPPFLAGS = @CRAY_SELECT_CPPFLAGS@
+CRAY_SELECT_LDFLAGS = @CRAY_SELECT_LDFLAGS@
+CRAY_SWITCH_CPPFLAGS = @CRAY_SWITCH_CPPFLAGS@
+CRAY_SWITCH_LDFLAGS = @CRAY_SWITCH_LDFLAGS@
+CRAY_TASK_CPPFLAGS = @CRAY_TASK_CPPFLAGS@
+CRAY_TASK_LDFLAGS = @CRAY_TASK_LDFLAGS@
+CXX = @CXX@
+CXXCPP = @CXXCPP@
+CXXDEPMODE = @CXXDEPMODE@
+CXXFLAGS = @CXXFLAGS@
+CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
+DEFS = @DEFS@
+DEPDIR = @DEPDIR@
+DLLTOOL = @DLLTOOL@
+DL_LIBS = @DL_LIBS@
+DSYMUTIL = @DSYMUTIL@
+DUMPBIN = @DUMPBIN@
+ECHO_C = @ECHO_C@
+ECHO_N = @ECHO_N@
+ECHO_T = @ECHO_T@
+EGREP = @EGREP@
+EXEEXT = @EXEEXT@
+FGREP = @FGREP@
+FREEIPMI_CPPFLAGS = @FREEIPMI_CPPFLAGS@
+FREEIPMI_LDFLAGS = @FREEIPMI_LDFLAGS@
+FREEIPMI_LIBS = @FREEIPMI_LIBS@
+GLIB_CFLAGS = @GLIB_CFLAGS@
+GLIB_COMPILE_RESOURCES = @GLIB_COMPILE_RESOURCES@
+GLIB_GENMARSHAL = @GLIB_GENMARSHAL@
+GLIB_LIBS = @GLIB_LIBS@
+GLIB_MKENUMS = @GLIB_MKENUMS@
+GOBJECT_QUERY = @GOBJECT_QUERY@
+GREP = @GREP@
+GTK_CFLAGS = @GTK_CFLAGS@
+GTK_LIBS = @GTK_LIBS@
+H5CC = @H5CC@
+H5FC = @H5FC@
+HAVEMYSQLCONFIG = @HAVEMYSQLCONFIG@
+HAVE_AIX = @HAVE_AIX@
+HAVE_MAN2HTML = @HAVE_MAN2HTML@
+HAVE_NRT = @HAVE_NRT@
+HAVE_OPENSSL = @HAVE_OPENSSL@
+HAVE_SOME_CURSES = @HAVE_SOME_CURSES@
+HDF5_CC = @HDF5_CC@
+HDF5_CFLAGS = @HDF5_CFLAGS@
+HDF5_CPPFLAGS = @HDF5_CPPFLAGS@
+HDF5_FC = @HDF5_FC@
+HDF5_FFLAGS = @HDF5_FFLAGS@
+HDF5_FLIBS = @HDF5_FLIBS@
+HDF5_LDFLAGS = @HDF5_LDFLAGS@
+HDF5_LIBS = @HDF5_LIBS@
+HDF5_VERSION = @HDF5_VERSION@
+HWLOC_CPPFLAGS = @HWLOC_CPPFLAGS@
+HWLOC_LDFLAGS = @HWLOC_LDFLAGS@
+HWLOC_LIBS = @HWLOC_LIBS@
+INSTALL = @INSTALL@
+INSTALL_DATA = @INSTALL_DATA@
+INSTALL_PROGRAM = @INSTALL_PROGRAM@
+INSTALL_SCRIPT = @INSTALL_SCRIPT@
+INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
+LD = @LD@
+LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
+LIBOBJS = @LIBOBJS@
+LIBS = @LIBS@
+LIBTOOL = @LIBTOOL@
+LIB_LDFLAGS = @LIB_LDFLAGS@
+LIPO = @LIPO@
+LN_S = @LN_S@
+LTLIBOBJS = @LTLIBOBJS@
+MAINT = @MAINT@
+MAKEINFO = @MAKEINFO@
+MANIFEST_TOOL = @MANIFEST_TOOL@
+MKDIR_P = @MKDIR_P@
+MUNGE_CPPFLAGS = @MUNGE_CPPFLAGS@
+MUNGE_DIR = @MUNGE_DIR@
+MUNGE_LDFLAGS = @MUNGE_LDFLAGS@
+MUNGE_LIBS = @MUNGE_LIBS@
+MYSQL_CFLAGS = @MYSQL_CFLAGS@
+MYSQL_LIBS = @MYSQL_LIBS@
+NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
+NM = @NM@
+NMEDIT = @NMEDIT@
+NRT_CPPFLAGS = @NRT_CPPFLAGS@
+NUMA_LIBS = @NUMA_LIBS@
+OBJDUMP = @OBJDUMP@
+OBJEXT = @OBJEXT@
+OFED_CPPFLAGS = @OFED_CPPFLAGS@
+OFED_LDFLAGS = @OFED_LDFLAGS@
+OFED_LIBS = @OFED_LIBS@
+OTOOL = @OTOOL@
+OTOOL64 = @OTOOL64@
+PACKAGE = @PACKAGE@
+PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
+PACKAGE_NAME = @PACKAGE_NAME@
+PACKAGE_STRING = @PACKAGE_STRING@
+PACKAGE_TARNAME = @PACKAGE_TARNAME@
+PACKAGE_URL = @PACKAGE_URL@
+PACKAGE_VERSION = @PACKAGE_VERSION@
+PAM_DIR = @PAM_DIR@
+PAM_LIBS = @PAM_LIBS@
+PATH_SEPARATOR = @PATH_SEPARATOR@
+PKG_CONFIG = @PKG_CONFIG@
+PKG_CONFIG_LIBDIR = @PKG_CONFIG_LIBDIR@
+PKG_CONFIG_PATH = @PKG_CONFIG_PATH@
+PROCTRACKDIR = @PROCTRACKDIR@
+PROJECT = @PROJECT@
+PTHREAD_CC = @PTHREAD_CC@
+PTHREAD_CFLAGS = @PTHREAD_CFLAGS@
+PTHREAD_LIBS = @PTHREAD_LIBS@
+RANLIB = @RANLIB@
+READLINE_LIBS = @READLINE_LIBS@
+REAL_BGQ_LOADED = @REAL_BGQ_LOADED@
+REAL_BG_L_P_LOADED = @REAL_BG_L_P_LOADED@
+RELEASE = @RELEASE@
+RRDTOOL_CPPFLAGS = @RRDTOOL_CPPFLAGS@
+RRDTOOL_LDFLAGS = @RRDTOOL_LDFLAGS@
+RRDTOOL_LIBS = @RRDTOOL_LIBS@
+RUNJOB_LDFLAGS = @RUNJOB_LDFLAGS@
+SED = @SED@
+SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
+SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
+SET_MAKE = @SET_MAKE@
+SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
+SLURMCTLD_PORT = @SLURMCTLD_PORT@
+SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
+SLURMDBD_PORT = @SLURMDBD_PORT@
+SLURMD_PORT = @SLURMD_PORT@
+SLURM_API_AGE = @SLURM_API_AGE@
+SLURM_API_CURRENT = @SLURM_API_CURRENT@
+SLURM_API_MAJOR = @SLURM_API_MAJOR@
+SLURM_API_REVISION = @SLURM_API_REVISION@
+SLURM_API_VERSION = @SLURM_API_VERSION@
+SLURM_MAJOR = @SLURM_MAJOR@
+SLURM_MICRO = @SLURM_MICRO@
+SLURM_MINOR = @SLURM_MINOR@
+SLURM_PREFIX = @SLURM_PREFIX@
+SLURM_VERSION_NUMBER = @SLURM_VERSION_NUMBER@
+SLURM_VERSION_STRING = @SLURM_VERSION_STRING@
+SO_LDFLAGS = @SO_LDFLAGS@
+SSL_CPPFLAGS = @SSL_CPPFLAGS@
+SSL_LDFLAGS = @SSL_LDFLAGS@
+SSL_LIBS = @SSL_LIBS@
+STRIP = @STRIP@
+SUCMD = @SUCMD@
+UTIL_LIBS = @UTIL_LIBS@
+VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
+abs_builddir = @abs_builddir@
+abs_srcdir = @abs_srcdir@
+abs_top_builddir = @abs_top_builddir@
+abs_top_srcdir = @abs_top_srcdir@
+ac_ct_AR = @ac_ct_AR@
+ac_ct_CC = @ac_ct_CC@
+ac_ct_CXX = @ac_ct_CXX@
+ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+ac_have_man2html = @ac_have_man2html@
+am__include = @am__include@
+am__leading_dot = @am__leading_dot@
+am__quote = @am__quote@
+am__tar = @am__tar@
+am__untar = @am__untar@
+ax_pthread_config = @ax_pthread_config@
+bindir = @bindir@
+build = @build@
+build_alias = @build_alias@
+build_cpu = @build_cpu@
+build_os = @build_os@
+build_vendor = @build_vendor@
+builddir = @builddir@
+datadir = @datadir@
+datarootdir = @datarootdir@
+docdir = @docdir@
+dvidir = @dvidir@
+exec_prefix = @exec_prefix@
+host = @host@
+host_alias = @host_alias@
+host_cpu = @host_cpu@
+host_os = @host_os@
+host_vendor = @host_vendor@
+htmldir = @htmldir@
+includedir = @includedir@
+infodir = @infodir@
+install_sh = @install_sh@
+libdir = @libdir@
+libexecdir = @libexecdir@
+localedir = @localedir@
+localstatedir = @localstatedir@
+lua_CFLAGS = @lua_CFLAGS@
+lua_LIBS = @lua_LIBS@
+mandir = @mandir@
+mkdir_p = @mkdir_p@
+oldincludedir = @oldincludedir@
+pdfdir = @pdfdir@
+prefix = @prefix@
+program_transform_name = @program_transform_name@
+psdir = @psdir@
+sbindir = @sbindir@
+sharedstatedir = @sharedstatedir@
+srcdir = @srcdir@
+sysconfdir = @sysconfdir@
+target = @target@
+target_alias = @target_alias@
+target_cpu = @target_cpu@
+target_os = @target_os@
+target_vendor = @target_vendor@
+top_build_prefix = @top_build_prefix@
+top_builddir = @top_builddir@
+top_srcdir = @top_srcdir@
+AUTOMAKE_OPTIONS = foreign
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
+pkglib_LTLIBRARIES = power_none.la
+power_none_la_SOURCES = power_none.c
+power_none_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+power_none_la_LIBADD = ../common/libpower_common.la
+all: all-am
+
+.SUFFIXES:
+.SUFFIXES: .c .lo .o .obj
+$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
+	@for dep in $?; do \
+	  case '$(am__configure_deps)' in \
+	    *$$dep*) \
+	      ( cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh ) \
+	        && { if test -f $@; then exit 0; else break; fi; }; \
+	      exit 1;; \
+	  esac; \
+	done; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/power/none/Makefile'; \
+	$(am__cd) $(top_srcdir) && \
+	  $(AUTOMAKE) --foreign src/plugins/power/none/Makefile
+.PRECIOUS: Makefile
+Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
+	@case '$?' in \
+	  *config.status*) \
+	    cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh;; \
+	  *) \
+	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe)'; \
+	    cd $(top_builddir) && $(SHELL) ./config.status $(subdir)/$@ $(am__depfiles_maybe);; \
+	esac;
+
+$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+
+$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
+	cd $(top_builddir) && $(MAKE) $(AM_MAKEFLAGS) am--refresh
+$(am__aclocal_m4_deps):
+
+install-pkglibLTLIBRARIES: $(pkglib_LTLIBRARIES)
+	@$(NORMAL_INSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	list2=; for p in $$list; do \
+	  if test -f $$p; then \
+	    list2="$$list2 $$p"; \
+	  else :; fi; \
+	done; \
+	test -z "$$list2" || { \
+	  echo " $(MKDIR_P) '$(DESTDIR)$(pkglibdir)'"; \
+	  $(MKDIR_P) "$(DESTDIR)$(pkglibdir)" || exit 1; \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(pkglibdir)'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(pkglibdir)"; \
+	}
+
+uninstall-pkglibLTLIBRARIES:
+	@$(NORMAL_UNINSTALL)
+	@list='$(pkglib_LTLIBRARIES)'; test -n "$(pkglibdir)" || list=; \
+	for p in $$list; do \
+	  $(am__strip_dir) \
+	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(pkglibdir)/$$f'"; \
+	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(pkglibdir)/$$f"; \
+	done
+
+clean-pkglibLTLIBRARIES:
+	-test -z "$(pkglib_LTLIBRARIES)" || rm -f $(pkglib_LTLIBRARIES)
+	@list='$(pkglib_LTLIBRARIES)'; \
+	locs=`for p in $$list; do echo $$p; done | \
+	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
+	      sort -u`; \
+	test -z "$$locs" || { \
+	  echo rm -f $${locs}; \
+	  rm -f $${locs}; \
+	}
+
+power_none.la: $(power_none_la_OBJECTS) $(power_none_la_DEPENDENCIES) $(EXTRA_power_none_la_DEPENDENCIES) 
+	$(AM_V_CCLD)$(power_none_la_LINK) -rpath $(pkglibdir) $(power_none_la_OBJECTS) $(power_none_la_LIBADD) $(LIBS)
+
+mostlyclean-compile:
+	-rm -f *.$(OBJEXT)
+
+distclean-compile:
+	-rm -f *.tab.c
+
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/power_none.Plo@am__quote@
+
+.c.o:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ $<
+
+.c.obj:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ `$(CYGPATH_W) '$<'`
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(COMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
+
+.c.lo:
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(LTCOMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/$*.Tpo $(DEPDIR)/$*.Plo
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(LTCOMPILE) -c -o $@ $<
+
+mostlyclean-libtool:
+	-rm -f *.lo
+
+clean-libtool:
+	-rm -rf .libs _libs
+
+ID: $(am__tagged_files)
+	$(am__define_uniq_tagged_files); mkid -fID $$unique
+tags: tags-am
+TAGS: tags
+
+tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	set x; \
+	here=`pwd`; \
+	$(am__define_uniq_tagged_files); \
+	shift; \
+	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
+	  test -n "$$unique" || unique=$$empty_fix; \
+	  if test $$# -gt 0; then \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      "$$@" $$unique; \
+	  else \
+	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
+	      $$unique; \
+	  fi; \
+	fi
+ctags: ctags-am
+
+CTAGS: ctags
+ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
+	$(am__define_uniq_tagged_files); \
+	test -z "$(CTAGS_ARGS)$$unique" \
+	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
+	     $$unique
+
+GTAGS:
+	here=`$(am__cd) $(top_builddir) && pwd` \
+	  && $(am__cd) $(top_srcdir) \
+	  && gtags -i $(GTAGS_ARGS) "$$here"
+cscopelist: cscopelist-am
+
+cscopelist-am: $(am__tagged_files)
+	list='$(am__tagged_files)'; \
+	case "$(srcdir)" in \
+	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
+	  *) sdir=$(subdir)/$(srcdir) ;; \
+	esac; \
+	for i in $$list; do \
+	  if test -f "$$i"; then \
+	    echo "$(subdir)/$$i"; \
+	  else \
+	    echo "$$sdir/$$i"; \
+	  fi; \
+	done >> $(top_builddir)/cscope.files
+
+distclean-tags:
+	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
+
+distdir: $(DISTFILES)
+	@srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \
+	list='$(DISTFILES)'; \
+	  dist_files=`for file in $$list; do echo $$file; done | \
+	  sed -e "s|^$$srcdirstrip/||;t" \
+	      -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \
+	case $$dist_files in \
+	  */*) $(MKDIR_P) `echo "$$dist_files" | \
+			   sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \
+			   sort -u` ;; \
+	esac; \
+	for file in $$dist_files; do \
+	  if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
+	  if test -d $$d/$$file; then \
+	    dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \
+	    if test -d "$(distdir)/$$file"; then \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
+	      cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \
+	      find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \
+	    fi; \
+	    cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \
+	  else \
+	    test -f "$(distdir)/$$file" \
+	    || cp -p $$d/$$file "$(distdir)/$$file" \
+	    || exit 1; \
+	  fi; \
+	done
+check-am: all-am
+check: check-am
+all-am: Makefile $(LTLIBRARIES)
+installdirs:
+	for dir in "$(DESTDIR)$(pkglibdir)"; do \
+	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
+	done
+install: install-am
+install-exec: install-exec-am
+install-data: install-data-am
+uninstall: uninstall-am
+
+install-am: all-am
+	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
+
+installcheck: installcheck-am
+install-strip:
+	if test -z '$(STRIP)'; then \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	      install; \
+	else \
+	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
+	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
+	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
+	fi
+mostlyclean-generic:
+
+clean-generic:
+
+distclean-generic:
+	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
+	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
+
+maintainer-clean-generic:
+	@echo "This command is intended for maintainers to use"
+	@echo "it deletes files that may require special tools to rebuild."
+clean: clean-am
+
+clean-am: clean-generic clean-libtool clean-pkglibLTLIBRARIES \
+	mostlyclean-am
+
+distclean: distclean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+distclean-am: clean-am distclean-compile distclean-generic \
+	distclean-tags
+
+dvi: dvi-am
+
+dvi-am:
+
+html: html-am
+
+html-am:
+
+info: info-am
+
+info-am:
+
+install-data-am:
+
+install-dvi: install-dvi-am
+
+install-dvi-am:
+
+install-exec-am: install-pkglibLTLIBRARIES
+
+install-html: install-html-am
+
+install-html-am:
+
+install-info: install-info-am
+
+install-info-am:
+
+install-man:
+
+install-pdf: install-pdf-am
+
+install-pdf-am:
+
+install-ps: install-ps-am
+
+install-ps-am:
+
+installcheck-am:
+
+maintainer-clean: maintainer-clean-am
+	-rm -rf ./$(DEPDIR)
+	-rm -f Makefile
+maintainer-clean-am: distclean-am maintainer-clean-generic
+
+mostlyclean: mostlyclean-am
+
+mostlyclean-am: mostlyclean-compile mostlyclean-generic \
+	mostlyclean-libtool
+
+pdf: pdf-am
+
+pdf-am:
+
+ps: ps-am
+
+ps-am:
+
+uninstall-am: uninstall-pkglibLTLIBRARIES
+
+.MAKE: install-am install-strip
+
+.PHONY: CTAGS GTAGS TAGS all all-am check check-am clean clean-generic \
+	clean-libtool clean-pkglibLTLIBRARIES cscopelist-am ctags \
+	ctags-am distclean distclean-compile distclean-generic \
+	distclean-libtool distclean-tags distdir dvi dvi-am html \
+	html-am info info-am install install-am install-data \
+	install-data-am install-dvi install-dvi-am install-exec \
+	install-exec-am install-html install-html-am install-info \
+	install-info-am install-man install-pdf install-pdf-am \
+	install-pkglibLTLIBRARIES install-ps install-ps-am \
+	install-strip installcheck installcheck-am installdirs \
+	maintainer-clean maintainer-clean-generic mostlyclean \
+	mostlyclean-compile mostlyclean-generic mostlyclean-libtool \
+	pdf pdf-am ps ps-am tags tags-am uninstall uninstall-am \
+	uninstall-pkglibLTLIBRARIES
+
+
+force:
+$(power_none_la_LIBADD) : force
+	@cd `dirname $@` && $(MAKE) `basename $@`
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/src/plugins/power/none/power_none.c b/src/plugins/power/none/power_none.c
new file mode 100644
index 000000000..566d79a51
--- /dev/null
+++ b/src/plugins/power/none/power_none.c
@@ -0,0 +1,120 @@
+/*****************************************************************************\
+ *  power_none.c - Plugin for "None" power management.
+ *****************************************************************************
+ *  Copyright (C) 2014-2015 SchedMD LLC.
+ *  Written by Morris Jette <jette@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#define _GNU_SOURCE	/* For POLLRDHUP */
+#include <ctype.h>
+#include <poll.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+#include "slurm/slurm.h"
+
+#include "src/plugins/power/common/power_common.h"
+
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *      <application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "power" for SLURM power management) and <method> is a
+ * description of how this plugin satisfies that application.  SLURM will only
+ * load a power plugin if the plugin_type string has a prefix of
+ * "power/".
+ *
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
+ */
+const char plugin_name[]        = "power none plugin";
+const char plugin_type[]        = "power/none";
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
+
+/*
+ * init() is called when the plugin is loaded, before any other functions
+ * are called.  Put global initialization here.
+ */
+extern int init(void)
+{
+	debug("%s: %s", plugin_name, __func__);
+	return SLURM_SUCCESS;
+}
+
+/*
+ * fini() is called when the plugin is unloaded. Free all memory.
+ */
+extern void fini(void)
+{
+	debug("%s: %s", plugin_name, __func__);
+	return;
+}
+
+/* Read the configuration file */
+extern void power_p_reconfig(void)
+{
+	debug("%s: %s", plugin_name, __func__);
+	return;
+}
+
+/* Note that a suspended job has been resumed */
+extern void power_p_job_resume(struct job_record *job_ptr)
+{
+	debug("%s: %s", plugin_name, __func__);
+	return;
+}
+
+/* Note that a job has been allocated resources and is ready to start */
+extern void power_p_job_start(struct job_record *job_ptr)
+{
+	debug("%s: %s", plugin_name, __func__);
+	return;
+}
diff --git a/src/plugins/preempt/Makefile.in b/src/plugins/preempt/Makefile.in
index aa7c0b18d..dcca0a4e2 100644
--- a/src/plugins/preempt/Makefile.in
+++ b/src/plugins/preempt/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/preempt/job_prio/Makefile.in b/src/plugins/preempt/job_prio/Makefile.in
index 23725c38c..666c72acc 100644
--- a/src/plugins/preempt/job_prio/Makefile.in
+++ b/src/plugins/preempt/job_prio/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/preempt/job_prio/preempt_job_prio.c b/src/plugins/preempt/job_prio/preempt_job_prio.c
index 68a39d019..06a87db27 100644
--- a/src/plugins/preempt/job_prio/preempt_job_prio.c
+++ b/src/plugins/preempt/job_prio/preempt_job_prio.c
@@ -79,7 +79,7 @@
 
 const char  plugin_name[]   = "Preempt by Job Priority and Runtime";
 const char  plugin_type[]   = "preempt/job_prio";
-const uint32_t  plugin_version  = 100;
+const uint32_t  plugin_version  = SLURM_VERSION_NUMBER;
 
 /* The acct_usage_element data structure holds informaiton about
  * an association's current usage and current CPU count*/
@@ -102,15 +102,15 @@ typedef struct acct_usage_element
 static bool _account_preemptable(struct job_record *preemptor_job_ptr,
 				 struct job_record *preemptee_job_ptr)
 {
-	slurmdb_association_rec_t *preemptor_assoc, *preemptee_assoc;
+	slurmdb_assoc_rec_t *preemptor_assoc, *preemptee_assoc;
 	slurmdb_qos_rec_t *preemptor_qos, *preemptee_qos;
 	bool is_from_same_account = false;
 	int i;
 
 	preemptor_assoc =
-		(slurmdb_association_rec_t *)preemptor_job_ptr->assoc_ptr;
+		(slurmdb_assoc_rec_t *)preemptor_job_ptr->assoc_ptr;
 	preemptee_assoc =
-		(slurmdb_association_rec_t *)preemptee_job_ptr->assoc_ptr;
+		(slurmdb_assoc_rec_t *)preemptee_job_ptr->assoc_ptr;
 	if (!preemptor_assoc || !preemptee_assoc)
 		return false;
 
@@ -184,17 +184,17 @@ static bool _account_preemptable(struct job_record *preemptor_job_ptr,
 	 * its share. If the account is using more than its share, then this
 	 * job is a candidate. If not, it is NOT a candidate. */
 	if (slurm_get_debug_flags() & DEBUG_FLAG_PRIO) {
-		info("%s: Preemptor(%u) UsedCPUs:%u Shares: %f Tot_CPU %u "
-		     "TOT: %f",
+		info("%s: Preemptor(%u) UsedCPUs:%"PRIu64
+		     " Shares: %f Tot_CPU %u TOT: %f",
 		     plugin_type, preemptor_job_ptr->job_id,
-		     preemptee_assoc->usage->grp_used_cpus,
+		     preemptee_assoc->usage->grp_used_tres[TRES_ARRAY_CPU],
 		     preemptee_assoc->usage->shares_norm,
 		     preemptor_job_ptr->part_ptr->total_cpus,
 		     (preemptor_job_ptr->part_ptr->total_cpus *
 		      preemptee_assoc->usage->shares_norm));
 	}
 
-	if ((preemptee_assoc->usage->grp_used_cpus >
+	if ((preemptee_assoc->usage->grp_used_tres[TRES_ARRAY_CPU] >
 	     preemptee_assoc->usage->shares_norm *
 	     preemptee_job_ptr->part_ptr->total_cpus) || is_from_same_account) {
 		if (slurm_get_debug_flags() & DEBUG_FLAG_PRIO) {
@@ -315,7 +315,8 @@ static int _get_nb_cpus(struct job_record *job_ptr)
 	}
 	max_nodes = MIN(max_nodes, 500000);	/* prevent overflows */
 
-	if (!job_ptr->limit_set_max_nodes && job_ptr->details->max_nodes)
+	if (!job_ptr->limit_set.tres[TRES_ARRAY_NODE] &&
+	    job_ptr->details->max_nodes)
 		req_nodes = max_nodes;
 	else
 		req_nodes = min_nodes;
@@ -343,18 +344,18 @@ static int _get_nb_cpus(struct job_record *job_ptr)
 }
 
 /* Determine fair share assocation to use for some job */
-static slurmdb_association_rec_t *
+static slurmdb_assoc_rec_t *
 _get_job_fs_ass(char *job_type, struct job_record *job_ptr)
 {
-	slurmdb_association_rec_t *temp_fs_ass =
-		(slurmdb_association_rec_t *)job_ptr->assoc_ptr;
+	slurmdb_assoc_rec_t *temp_fs_ass =
+		(slurmdb_assoc_rec_t *)job_ptr->assoc_ptr;
 
 	if (slurm_get_debug_flags() & DEBUG_FLAG_PRIO) {
 		info("%s: Pre %s JobID:%u ParentAcct:%s MyAcct:%s "
 		     "UsageParent:%s",
 		     plugin_type, job_type,job_ptr->job_id,
 		     temp_fs_ass->parent_acct, temp_fs_ass->acct,
-		     ((slurmdb_association_rec_t*)
+		     ((slurmdb_assoc_rec_t*)
 		      temp_fs_ass->usage->parent_assoc_ptr)->acct);
 	}
 
@@ -369,7 +370,7 @@ _get_job_fs_ass(char *job_type, struct job_record *job_ptr)
 			     plugin_type, job_type, job_ptr->job_id,
 			     temp_fs_ass->parent_acct,
 			     temp_fs_ass->acct,
-			     ((slurmdb_association_rec_t*)
+			     ((slurmdb_assoc_rec_t*)
 			      temp_fs_ass->usage->parent_assoc_ptr)->acct);
 		temp_fs_ass = temp_fs_ass->usage->parent_assoc_ptr;
 	}
@@ -379,7 +380,7 @@ _get_job_fs_ass(char *job_type, struct job_record *job_ptr)
 		     "UsageParent:%s",
 		     plugin_type, job_type, job_ptr->job_id,
 		     temp_fs_ass->parent_acct, temp_fs_ass->acct,
-		     ((slurmdb_association_rec_t*)temp_fs_ass->
+		     ((slurmdb_assoc_rec_t*)temp_fs_ass->
 		      usage->parent_assoc_ptr)->acct);
 	}
 	return temp_fs_ass;
@@ -388,14 +389,14 @@ _get_job_fs_ass(char *job_type, struct job_record *job_ptr)
 static void _account_under_alloc(struct job_record *preemptor_job_ptr,
 				 List preemptee_job_list)
 {
-	slurmdb_association_rec_t *preemptor_assoc;
+	slurmdb_assoc_rec_t *preemptor_assoc;
 	List acct_usage_list = list_create(_destroy_acct_usage_element);
 	uint32_t preemptor_cpu_cnt = _get_nb_cpus(preemptor_job_ptr);
 	uint32_t preemptee_cpu_cnt;
 	uint32_t preemptor_grp_used_cpu, preemptee_grp_used_cpu;
 	uint32_t preemptee_assoc_id;
-	slurmdb_association_rec_t *preemptor_temp_fs_ass;
-	slurmdb_association_rec_t *preemptee_temp_fs_ass, *preemptee_assoc;
+	slurmdb_assoc_rec_t *preemptor_temp_fs_ass;
+	slurmdb_assoc_rec_t *preemptee_temp_fs_ass, *preemptee_assoc;
 	acct_usage_element_t *new_acct_usage_ptr = NULL;
 	acct_usage_element_t *preemptee_acct_usage_ptr = NULL;
 	acct_usage_element_t *preemptor_acct_usage_ptr = NULL;
@@ -406,10 +407,11 @@ static void _account_under_alloc(struct job_record *preemptor_job_ptr,
 	acct_usage_element_t *found_acct_usage_ptr = NULL;
 	char *share_type;
 
-	preemptor_assoc = (slurmdb_association_rec_t *)
+	preemptor_assoc = (slurmdb_assoc_rec_t *)
 			  preemptor_job_ptr->assoc_ptr;
 	preemptor_temp_fs_ass = _get_job_fs_ass("preemptor", preemptor_job_ptr);
-	preemptor_grp_used_cpu = preemptor_temp_fs_ass->usage->grp_used_cpus;
+	preemptor_grp_used_cpu = preemptor_temp_fs_ass->usage->
+		grp_used_tres[TRES_ARRAY_CPU];
 
 	it = list_iterator_create(preemptee_job_list);
 	if (slurm_get_debug_flags() & DEBUG_FLAG_PRIO) {
@@ -418,13 +420,16 @@ static void _account_under_alloc(struct job_record *preemptor_job_ptr,
 	}
 
 	while ((preemptee_job_ptr = (struct job_record *) list_next(it))) {
-		preemptee_assoc = ((slurmdb_association_rec_t *)
+		preemptee_assoc = ((slurmdb_assoc_rec_t *)
 				   preemptee_job_ptr->assoc_ptr);
 		preemptee_assoc_id = preemptee_assoc->id;
 		preemptee_temp_fs_ass = _get_job_fs_ass("preemptee",
 							preemptee_job_ptr);
+		/* FIXME: This appears to only work off cpus at the
+		 * moment, probably should work off TRES.
+		 */
 		preemptee_grp_used_cpu = preemptee_temp_fs_ass->usage->
-					 grp_used_cpus;
+					 grp_used_tres[TRES_ARRAY_CPU];
 		preemptee_cpu_cnt = _get_nb_cpus(preemptee_job_ptr);
 		if (slurm_get_debug_flags() & DEBUG_FLAG_PRIO) {
 			info("%s: Preemptee (%u %s) grp_used_cpu:%u",
@@ -515,7 +520,7 @@ static void _account_under_alloc(struct job_record *preemptor_job_ptr,
 		preemptee_current_usage =
 			(long)(preemptee_acct_usage_ptr->current_usage*EPSILON);
 		if (((strcmp(preemptor_assoc->acct,
-			     ((slurmdb_association_rec_t *)
+			     ((slurmdb_assoc_rec_t *)
 			      preemptee_job_ptr->assoc_ptr)->acct) != 0) &&
 		    (preemptor_new_usage >= preemptee_acct_usage_ptr->current_usage ||
 		     preemptee_acct_usage_ptr->current_cpu_count <= 0)) &&
@@ -534,9 +539,9 @@ static void _account_under_alloc(struct job_record *preemptor_job_ptr,
 				      preemptor_new_usage,
 				      preemptee_current_usage,
 				      preemptor_new_usage_long,
-				      ((slurmdb_association_rec_t*)
+				      ((slurmdb_assoc_rec_t*)
 				       preemptee_job_ptr->assoc_ptr)->acct,
-				      ((slurmdb_association_rec_t*)
+				      ((slurmdb_assoc_rec_t*)
 				       preemptee_job_ptr->assoc_ptr)->
 				       parent_acct);
 			}
@@ -557,8 +562,7 @@ static void _account_under_alloc(struct job_record *preemptor_job_ptr,
 	}
 	list_iterator_destroy(it);
 
-	if (acct_usage_list)
-		list_destroy(acct_usage_list);
+	FREE_NULL_LIST(acct_usage_list);
 }
 
 /* Test if preemptor request will overallocate the account */
@@ -566,7 +570,7 @@ static int _overalloc_test(struct job_record *preemptor,
 			   struct job_record *preemptee)
 {
 	uint32_t cpu_cnt_preemptee, cpu_cnt_preemptor;
-	slurmdb_association_rec_t *assoc_preemptee, *assoc_preemptor;
+	slurmdb_assoc_rec_t *assoc_preemptee, *assoc_preemptor;
 	double shares_preemptee, shares_preemptor;
 	uint32_t new_usage_preemptee, new_usage_preemptor;
 	double allotment_preemptee, allotment_preemptor;
@@ -578,8 +582,8 @@ static int _overalloc_test(struct job_record *preemptor,
 	cpu_cnt_preemptee = _get_nb_cpus(preemptee);
 	cpu_cnt_preemptor = _get_nb_cpus(preemptor);
 
-	assoc_preemptee = (slurmdb_association_rec_t *)preemptee->assoc_ptr;
-	assoc_preemptor = (slurmdb_association_rec_t *)preemptor->assoc_ptr;
+	assoc_preemptee = (slurmdb_assoc_rec_t *)preemptee->assoc_ptr;
+	assoc_preemptor = (slurmdb_assoc_rec_t *)preemptor->assoc_ptr;
 
 	if (!assoc_preemptee || !assoc_preemptee->usage ||
 	    !assoc_preemptor || !assoc_preemptor->usage) {
@@ -589,12 +593,20 @@ static int _overalloc_test(struct job_record *preemptor,
 
 	shares_preemptee = assoc_preemptee->usage->shares_norm;
 	shares_preemptor = assoc_preemptor->usage->shares_norm;
-	new_usage_preemptee = assoc_preemptee->usage->grp_used_cpus;
-	new_usage_preemptor = assoc_preemptor->usage->grp_used_cpus +
-			      cpu_cnt_preemptor;
 
-	allotment_preemptee = shares_preemptee * preemptee->part_ptr->total_cpus;
-	allotment_preemptor = shares_preemptor * preemptor->part_ptr->total_cpus;
+	/* FIXME: this appears to only work for CPUS at the moment, it
+	 * probably should work for other TRES as well.
+	 */
+	new_usage_preemptee = assoc_preemptee->usage->
+		grp_used_tres[TRES_ARRAY_CPU];
+	new_usage_preemptor = assoc_preemptor->usage->
+		grp_used_tres[TRES_ARRAY_CPU] +
+		cpu_cnt_preemptor;
+
+	allotment_preemptee =
+		shares_preemptee * preemptee->part_ptr->total_cpus;
+	allotment_preemptor =
+		shares_preemptor * preemptor->part_ptr->total_cpus;
 
 	/* Fairshare will be less than 1 if running the job will not overrun
 	 * the share allocation */
@@ -636,11 +648,11 @@ static int _overalloc_test(struct job_record *preemptor,
 		     assoc_preemptor->acct, relation, preemptee->job_id,
 		     preemptee->name, assoc_preemptee->acct,
 		     new_fairshare_preemptor, new_fairshare_preemptor);
-		info("%s:   CPUs Needed: %u and %u  Used CPUS: %u and %u  "
-		     "Shares: %f and %f  CPUsTotal: %u and %u",
+		info("%s:   CPUs Needed: %u and %u  Used CPUS: %"PRIu64
+		     " and %"PRIu64" Shares: %f and %f  CPUsTotal: %u and %u",
 		     plugin_type, cpu_cnt_preemptor, cpu_cnt_preemptee,
-		     assoc_preemptor->usage->grp_used_cpus,
-		     assoc_preemptee->usage->grp_used_cpus,
+		     assoc_preemptor->usage->grp_used_tres[TRES_ARRAY_CPU],
+		     assoc_preemptee->usage->grp_used_tres[TRES_ARRAY_CPU],
 		     shares_preemptor, shares_preemptee,
 		     preemptor->part_ptr->total_cpus,
 		     preemptee->part_ptr->total_cpus);
diff --git a/src/plugins/preempt/none/Makefile.in b/src/plugins/preempt/none/Makefile.in
index 1d14ceb21..38c726223 100644
--- a/src/plugins/preempt/none/Makefile.in
+++ b/src/plugins/preempt/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/preempt/none/preempt_none.c b/src/plugins/preempt/none/preempt_none.c
index 23c0e69db..22041efbc 100644
--- a/src/plugins/preempt/none/preempt_none.c
+++ b/src/plugins/preempt/none/preempt_none.c
@@ -50,7 +50,7 @@
 
 const char	plugin_name[]	= "Preemption disabled";
 const char	plugin_type[]	= "preempt/none";
-const uint32_t	plugin_version	= 100;
+const uint32_t	plugin_version	= SLURM_VERSION_NUMBER;
 
 /**************************************************************************/
 /*  TAG(                              init                              ) */
diff --git a/src/plugins/preempt/partition_prio/Makefile.in b/src/plugins/preempt/partition_prio/Makefile.in
index c3d535cd7..27d4f350b 100644
--- a/src/plugins/preempt/partition_prio/Makefile.in
+++ b/src/plugins/preempt/partition_prio/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/preempt/partition_prio/preempt_partition_prio.c b/src/plugins/preempt/partition_prio/preempt_partition_prio.c
index 22a7f1381..7a4cb652a 100644
--- a/src/plugins/preempt/partition_prio/preempt_partition_prio.c
+++ b/src/plugins/preempt/partition_prio/preempt_partition_prio.c
@@ -51,7 +51,7 @@
 
 const char	plugin_name[]	= "Preempt by partition priority plugin";
 const char	plugin_type[]	= "preempt/partition_prio";
-const uint32_t	plugin_version	= 100;
+const uint32_t	plugin_version	= SLURM_VERSION_NUMBER;
 
 static uint32_t _gen_job_prio(struct job_record *job_ptr);
 static int  _sort_by_prio (void *x, void *y);
@@ -179,9 +179,13 @@ static int _sort_by_prio (void *x, void *y)
 /**************************************************************************/
 extern uint16_t job_preempt_mode(struct job_record *job_ptr)
 {
-	if (job_ptr->part_ptr &&
-	    (job_ptr->part_ptr->preempt_mode != (uint16_t) NO_VAL))
-		return job_ptr->part_ptr->preempt_mode;
+	struct part_record *part_ptr = job_ptr->part_ptr;
+	if (part_ptr && (part_ptr->preempt_mode != (uint16_t) NO_VAL)) {
+		if (part_ptr->preempt_mode & PREEMPT_MODE_GANG)
+			verbose("Partition '%s' preempt mode 'gang' has no "
+				"sense. Filtered out.\n", part_ptr->name);
+		return (part_ptr->preempt_mode & (~PREEMPT_MODE_GANG));
+	}
 
 	return (slurm_get_preempt_mode() & (~PREEMPT_MODE_GANG));
 }
diff --git a/src/plugins/preempt/qos/Makefile.in b/src/plugins/preempt/qos/Makefile.in
index 4b94056d5..a8a63c5e7 100644
--- a/src/plugins/preempt/qos/Makefile.in
+++ b/src/plugins/preempt/qos/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/preempt/qos/preempt_qos.c b/src/plugins/preempt/qos/preempt_qos.c
index 3f22e57ca..63b49e9e1 100644
--- a/src/plugins/preempt/qos/preempt_qos.c
+++ b/src/plugins/preempt/qos/preempt_qos.c
@@ -52,7 +52,7 @@
 
 const char	plugin_name[]	= "Preempt by Quality Of Service (QOS)";
 const char	plugin_type[]	= "preempt/qos";
-const uint32_t	plugin_version	= 100;
+const uint32_t	plugin_version	= SLURM_VERSION_NUMBER;
 
 static uint32_t _gen_job_prio(struct job_record *job_ptr);
 static bool _qos_preemptable(struct job_record *preemptee,
diff --git a/src/plugins/priority/Makefile.in b/src/plugins/priority/Makefile.in
index 59463f4ab..80d69c52d 100644
--- a/src/plugins/priority/Makefile.in
+++ b/src/plugins/priority/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -249,6 +252,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -298,8 +303,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -318,6 +327,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -361,6 +373,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -384,6 +397,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/priority/basic/Makefile.am b/src/plugins/priority/basic/Makefile.am
index a90057717..f5c60818f 100644
--- a/src/plugins/priority/basic/Makefile.am
+++ b/src/plugins/priority/basic/Makefile.am
@@ -11,3 +11,4 @@ pkglib_LTLIBRARIES = priority_basic.la
 # basic priority logging plugin.
 priority_basic_la_SOURCES = priority_basic.c
 priority_basic_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+priority_basic_la_LIBADD  = -lm
diff --git a/src/plugins/priority/basic/Makefile.in b/src/plugins/priority/basic/Makefile.in
index 9ed1a34a0..406d9a91b 100644
--- a/src/plugins/priority/basic/Makefile.in
+++ b/src/plugins/priority/basic/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -160,7 +163,7 @@ am__uninstall_files_from_dir = { \
   }
 am__installdirs = "$(DESTDIR)$(pkglibdir)"
 LTLIBRARIES = $(pkglib_LTLIBRARIES)
-priority_basic_la_LIBADD =
+priority_basic_la_DEPENDENCIES =
 am_priority_basic_la_OBJECTS = priority_basic.lo
 priority_basic_la_OBJECTS = $(am_priority_basic_la_OBJECTS)
 AM_V_lt = $(am__v_lt_@AM_V@)
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -477,6 +491,7 @@ pkglib_LTLIBRARIES = priority_basic.la
 # basic priority logging plugin.
 priority_basic_la_SOURCES = priority_basic.c
 priority_basic_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
+priority_basic_la_LIBADD = -lm
 all: all-am
 
 .SUFFIXES:
diff --git a/src/plugins/priority/basic/priority_basic.c b/src/plugins/priority/basic/priority_basic.c
index 27d53802e..95af79418 100644
--- a/src/plugins/priority/basic/priority_basic.c
+++ b/src/plugins/priority/basic/priority_basic.c
@@ -55,6 +55,16 @@
 #include "src/common/slurm_priority.h"
 #include "src/common/assoc_mgr.h"
 
+/* These are defined here so when we link with something other than
+ * the slurmctld we will have these symbols defined.  They will get
+ * overwritten when linking with the slurmctld.
+ */
+#if defined (__APPLE__)
+int slurmctld_tres_cnt __attribute__((weak_import)) = 0;
+#else
+int slurmctld_tres_cnt = 0;
+#endif
+
 /*
  * These variables are required by the generic plugin interface.  If they
  * are not found in the plugin, the plugin loader will ignore it.
@@ -77,16 +87,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobcomp/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job completion logging API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Priority BASIC plugin";
 const char plugin_type[]       	= "priority/basic";
-const uint32_t plugin_version	= 100;
+const uint32_t plugin_version	= SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
@@ -136,7 +142,7 @@ extern void priority_p_reconfig(bool assoc_clear)
 	return;
 }
 
-extern void priority_p_set_assoc_usage(slurmdb_association_rec_t *assoc)
+extern void priority_p_set_assoc_usage(slurmdb_assoc_rec_t *assoc)
 {
 	return;
 }
@@ -168,58 +174,75 @@ extern List priority_p_get_priority_factors_list(
 
 extern void priority_p_job_end(struct job_record *job_ptr)
 {
-	uint64_t unused_cpu_run_secs = 0;
 	uint64_t time_limit_secs = (uint64_t)job_ptr->time_limit * 60;
-	slurmdb_association_rec_t *assoc_ptr;
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
+	slurmdb_assoc_rec_t *assoc_ptr;
+	int i;
+	uint64_t *unused_tres_run_secs;
+	assoc_mgr_lock_t locks = { NO_LOCK, WRITE_LOCK, NO_LOCK,
 				   WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
 
 	/* No unused cpu_run_secs if job ran past its time limit */
 	if (job_ptr->end_time >= job_ptr->start_time + time_limit_secs)
 		return;
 
-	unused_cpu_run_secs = job_ptr->total_cpus *
-		(job_ptr->start_time + time_limit_secs - job_ptr->end_time);
+	unused_tres_run_secs = xmalloc(sizeof(uint64_t) * slurmctld_tres_cnt);
+	for (i=0; i<slurmctld_tres_cnt; i++) {
+		unused_tres_run_secs[i] =
+			(uint64_t)(job_ptr->start_time +
+				   time_limit_secs - job_ptr->end_time) *
+			job_ptr->tres_req_cnt[i];
+	}
 
 	assoc_mgr_lock(&locks);
 	if (job_ptr->qos_ptr) {
 		slurmdb_qos_rec_t *qos_ptr =
 			(slurmdb_qos_rec_t *)job_ptr->qos_ptr;
-		if (unused_cpu_run_secs >
-		    qos_ptr->usage->grp_used_cpu_run_secs) {
-			qos_ptr->usage->grp_used_cpu_run_secs = 0;
+		for (i=0; i<slurmctld_tres_cnt; i++) {
+			if (unused_tres_run_secs[i] >
+			    qos_ptr->usage->grp_used_tres_run_secs[i]) {
+			qos_ptr->usage->grp_used_tres_run_secs[i] = 0;
 			debug2("acct_policy_job_fini: "
-			       "grp_used_cpu_run_secs "
-			       "underflow for qos %s", qos_ptr->name);
-		} else
-			qos_ptr->usage->grp_used_cpu_run_secs -=
-				unused_cpu_run_secs;
+			       "grp_used_tres_run_secs "
+			       "underflow for qos %s tres %s",
+			       qos_ptr->name,
+			       assoc_mgr_tres_name_array[i]);
+			} else
+				qos_ptr->usage->grp_used_tres_run_secs[i] -=
+					unused_tres_run_secs[i];
+		}
 	}
-	assoc_ptr = (slurmdb_association_rec_t *)job_ptr->assoc_ptr;
+	assoc_ptr = (slurmdb_assoc_rec_t *)job_ptr->assoc_ptr;
 	while (assoc_ptr) {
 		/* If the job finished early remove the extra time now. */
-		if (unused_cpu_run_secs >
-		    assoc_ptr->usage->grp_used_cpu_run_secs) {
-			assoc_ptr->usage->grp_used_cpu_run_secs = 0;
-			debug2("acct_policy_job_fini: "
-			       "grp_used_cpu_run_secs "
-			       "underflow for account %s",
-			       assoc_ptr->acct);
-		} else {
-			assoc_ptr->usage->grp_used_cpu_run_secs -=
-				unused_cpu_run_secs;
-			debug4("acct_policy_job_fini: job %u. "
-			       "Removed %"PRIu64" unused seconds "
-			       "from assoc %s "
-			       "grp_used_cpu_run_secs = %"PRIu64"",
-			       job_ptr->job_id, unused_cpu_run_secs,
-			       assoc_ptr->acct,
-			       assoc_ptr->usage->grp_used_cpu_run_secs);
+		for (i=0; i<slurmctld_tres_cnt; i++) {
+			if (unused_tres_run_secs[i] >
+			    assoc_ptr->usage->grp_used_tres_run_secs[i]) {
+				assoc_ptr->usage->grp_used_tres_run_secs[i] = 0;
+				debug2("acct_policy_job_fini: "
+				       "grp_used_tres_run_secs "
+				       "underflow for account %s tres %s",
+				       assoc_ptr->acct,
+				       assoc_mgr_tres_name_array[i]);
+
+			} else {
+				assoc_ptr->usage->grp_used_tres_run_secs[i] -=
+					unused_tres_run_secs[i];
+				debug4("acct_policy_job_fini: job %u. "
+				       "Removed %"PRIu64" unused seconds "
+				       "from acct %s tres %s "
+				       "grp_used_tres_run_secs = %"PRIu64"",
+				       job_ptr->job_id, unused_tres_run_secs[i],
+				       assoc_ptr->acct,
+				       assoc_mgr_tres_name_array[i],
+				       assoc_ptr->usage->
+				       grp_used_tres_run_secs[i]);
+			}
 		}
 		/* now handle all the group limits of the parents */
 		assoc_ptr = assoc_ptr->usage->parent_assoc_ptr;
 	}
 	assoc_mgr_unlock(&locks);
+	xfree(unused_tres_run_secs);
 
 	return;
 }
diff --git a/src/plugins/priority/multifactor/Makefile.in b/src/plugins/priority/multifactor/Makefile.in
index b2faad574..a17785965 100644
--- a/src/plugins/priority/multifactor/Makefile.in
+++ b/src/plugins/priority/multifactor/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -275,6 +278,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -324,8 +329,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -344,6 +353,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -387,6 +399,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -410,6 +423,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/priority/multifactor/fair_tree.c b/src/plugins/priority/multifactor/fair_tree.c
index 967e79dcc..68899dcfa 100644
--- a/src/plugins/priority/multifactor/fair_tree.c
+++ b/src/plugins/priority/multifactor/fair_tree.c
@@ -53,7 +53,8 @@ extern void fair_tree_decay(List jobs, time_t start)
 	slurmctld_lock_t job_write_lock =
 		{ NO_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK };
 	assoc_mgr_lock_t locks =
-		{ WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+		{ WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+		  NO_LOCK, NO_LOCK, NO_LOCK };
 
 	/* apply decayed usage */
 	lock_slurmctld(job_write_lock);
@@ -73,9 +74,9 @@ extern void fair_tree_decay(List jobs, time_t start)
 
 
 /* In Fair Tree, usage_efctv is the normalized usage within the account */
-static void _ft_set_assoc_usage_efctv(slurmdb_association_rec_t *assoc)
+static void _ft_set_assoc_usage_efctv(slurmdb_assoc_rec_t *assoc)
 {
-	slurmdb_association_rec_t *parent = assoc->usage->fs_assoc_ptr;
+	slurmdb_assoc_rec_t *parent = assoc->usage->fs_assoc_ptr;
 
 	if (!parent || !parent->usage->usage_raw) {
 		assoc->usage->usage_efctv = 0L;
@@ -99,7 +100,7 @@ static int _ft_decay_apply_new_usage(struct job_record *job, time_t *start)
 }
 
 
-static void _ft_debug(slurmdb_association_rec_t *assoc,
+static void _ft_debug(slurmdb_assoc_rec_t *assoc,
 		      uint16_t assoc_level, bool tied)
 {
 	int spaces;
@@ -140,8 +141,8 @@ static int _cmp_level_fs(const void *x,
 	 *  2. Prioritize users over accounts (required for tie breakers when
 	 *     comparing users and accounts)
 	 */
-	slurmdb_association_rec_t **a = (slurmdb_association_rec_t **)x;
-	slurmdb_association_rec_t **b = (slurmdb_association_rec_t **)y;
+	slurmdb_assoc_rec_t **a = (slurmdb_assoc_rec_t **)x;
+	slurmdb_assoc_rec_t **b = (slurmdb_assoc_rec_t **)y;
 
 	/* 1. level_fs value */
 	if ((*a)->usage->level_fs != (*b)->usage->level_fs)
@@ -167,7 +168,7 @@ static int _cmp_level_fs(const void *x,
  * If LF > 1.0, the association is under-served.
  * If LF < 1.0, the association is over-served.
  */
-static void _calc_assoc_fs(slurmdb_association_rec_t *assoc)
+static void _calc_assoc_fs(slurmdb_assoc_rec_t *assoc)
 {
 	long double U; /* long double U != long W */
 	long double S;
@@ -192,7 +193,8 @@ static void _calc_assoc_fs(slurmdb_association_rec_t *assoc)
 	}
 
 	/* If S is 0, the assoc is assigned the lowest possible LF value. If
-	 * U==0 && S!=0, assoc is assigned the highest possible value, infinity.
+	 * U==0 && S!=0, assoc is assigned the highest possible value,
+	 * infinity.
 	 * Checking for U==0 then setting level_fs=INFINITY is not the same
 	 * since you would still have to check for S==0 then set level_fs=0.
 	 *
@@ -203,25 +205,24 @@ static void _calc_assoc_fs(slurmdb_association_rec_t *assoc)
 		assoc->usage->level_fs = S / U;
 }
 
-
 /* Append list of associations to array
  * IN list - list of associations
  * IN merged - array of associations to append to
  * IN/OUT merged_size - number of associations in merged array
  * RET - New array. Must be freed.
  */
-static slurmdb_association_rec_t** _append_list_to_array(
-	List list, slurmdb_association_rec_t** merged,
+static slurmdb_assoc_rec_t** _append_list_to_array(
+	List list, slurmdb_assoc_rec_t** merged,
 	size_t *merged_size)
 {
 	ListIterator itr;
-	slurmdb_association_rec_t *next;
+	slurmdb_assoc_rec_t *next;
 	size_t bytes;
 	size_t i = *merged_size;
 	*merged_size += list_count(list);
 
 	/* must be null-terminated, so add one extra slot */
-	bytes = sizeof(slurmdb_association_rec_t*) * (*merged_size + 1);
+	bytes = sizeof(slurmdb_assoc_rec_t*) * (*merged_size + 1);
 	merged = xrealloc(merged, bytes);
 
 	itr = list_iterator_create(list);
@@ -234,17 +235,16 @@ static slurmdb_association_rec_t** _append_list_to_array(
 	return merged;
 }
 
-
 /* Returns number of tied sibling accounts.
  * IN assocs - array of siblings, sorted by level_fs
  * IN begin_ndx - begin looking for ties at this index
  * RET - number of sibling accounts with equal level_fs values
  */
-static size_t _count_tied_accounts(slurmdb_association_rec_t** assocs,
+static size_t _count_tied_accounts(slurmdb_assoc_rec_t** assocs,
 				   size_t begin_ndx)
 {
-	slurmdb_association_rec_t* next_assoc;
-	slurmdb_association_rec_t* assoc = assocs[begin_ndx];
+	slurmdb_assoc_rec_t* next_assoc;
+	slurmdb_assoc_rec_t* assoc = assocs[begin_ndx];
 	size_t i = begin_ndx;
 	size_t tied_accounts = 0;
 	while ((next_assoc = assocs[++i])) {
@@ -267,16 +267,16 @@ static size_t _count_tied_accounts(slurmdb_association_rec_t** assocs,
  * IN assoc_level - depth in the tree (root is 0)
  * RET - Array of the children. Must be freed.
  */
-static slurmdb_association_rec_t** _merge_accounts(
-	slurmdb_association_rec_t** siblings,
+static slurmdb_assoc_rec_t** _merge_accounts(
+	slurmdb_assoc_rec_t** siblings,
 	size_t begin, size_t end, uint16_t assoc_level)
 {
 	size_t i;
 	/* number of associations in merged array */
 	size_t merged_size = 0;
 	/* merged is a null terminated array */
-	slurmdb_association_rec_t** merged = (slurmdb_association_rec_t **)
-		xmalloc(sizeof(slurmdb_association_rec_t *));
+	slurmdb_assoc_rec_t** merged = (slurmdb_assoc_rec_t **)
+		xmalloc(sizeof(slurmdb_assoc_rec_t *));
 	merged[0] = NULL;
 
 	for (i = begin; i <= end; i++) {
@@ -300,8 +300,8 @@ static slurmdb_association_rec_t** _merge_accounts(
  * (level_fs). Once they are sorted, operate on each child in sorted order.
  * This portion of the tree is now sorted and users are given a fairshare value
  * based on the order they are operated on. The basic equation is
- * (rank / g_user_assoc_count), though ties are allowed. The rank is decremented
- * for each user that is encountered except when ties occur.
+ * (rank / g_user_assoc_count), though ties are allowed. The rank is
+ * decremented for each user that is encountered except when ties occur.
  *
  * Tie Handling Rules:
  * 	1) Sibling users with the same level_fs receive the same rank
@@ -316,11 +316,11 @@ static slurmdb_association_rec_t** _merge_accounts(
  * IN/OUT rnt - rank, no ties (what rank would be if no tie exists)
  * IN account_tied - is this account tied with the previous user
  */
-static void _calc_tree_fs(slurmdb_association_rec_t** siblings,
+static void _calc_tree_fs(slurmdb_assoc_rec_t** siblings,
 			  uint16_t assoc_level, uint32_t *rank,
 			  uint32_t *rnt, bool account_tied)
 {
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	long double prev_level_fs = (long double) NO_VAL;
 	bool tied = false;
 	size_t i;
@@ -330,7 +330,7 @@ static void _calc_tree_fs(slurmdb_association_rec_t** siblings,
 		_calc_assoc_fs(assoc);
 
 	/* Sort children by level_fs */
-	qsort(siblings, i, sizeof(slurmdb_association_rec_t *), _cmp_level_fs);
+	qsort(siblings, i, sizeof(slurmdb_assoc_rec_t *), _cmp_level_fs);
 
 	/* Iterate through children in sorted order. If it's a user, calculate
 	 * fs_factor, otherwise recurse. */
@@ -347,7 +347,8 @@ static void _calc_tree_fs(slurmdb_association_rec_t** siblings,
 		if (priority_debug)
 			_ft_debug(assoc, assoc_level, tied);
 
-		/* If user, set their final fairshare factor and handle ranking.
+		/* If user, set their final fairshare factor and
+		 * handle ranking.
 		 * If account, merge any tied accounts then recurse with the
 		 * merged children array. */
 		if (assoc->user) {
@@ -359,16 +360,18 @@ static void _calc_tree_fs(slurmdb_association_rec_t** siblings,
 
 			(*rnt)--;
 		} else {
-			slurmdb_association_rec_t** children;
+			slurmdb_assoc_rec_t** children;
 			size_t merge_count = _count_tied_accounts(siblings, i);
 
 			/* Merging does not affect child level_fs calculations
 			 * since the necessary information is stored on each
 			 * assoc's usage struct */
-			children = _merge_accounts(siblings, i, i + merge_count,
+			children = _merge_accounts(siblings, i,
+						   i + merge_count,
 						   assoc_level);
 
-			_calc_tree_fs(children, assoc_level+1, rank, rnt, tied);
+			_calc_tree_fs(children, assoc_level+1,
+				      rank, rnt, tied);
 
 			/* Skip over any merged accounts */
 			i += merge_count;
@@ -384,7 +387,7 @@ static void _calc_tree_fs(slurmdb_association_rec_t** siblings,
 /* Start fairshare calculations at root. Call assoc_mgr_lock before this. */
 static void _apply_priority_fs(void)
 {
-	slurmdb_association_rec_t** children = NULL;
+	slurmdb_assoc_rec_t** children = NULL;
 	uint32_t rank = g_user_assoc_count;
 	uint32_t rnt = rank;
 	size_t child_count = 0;
diff --git a/src/plugins/priority/multifactor/priority_multifactor.c b/src/plugins/priority/multifactor/priority_multifactor.c
index 8accf4df3..8b51cc49c 100644
--- a/src/plugins/priority/multifactor/priority_multifactor.c
+++ b/src/plugins/priority/multifactor/priority_multifactor.c
@@ -73,8 +73,13 @@
 #include <math.h>
 #include "slurm/slurm_errno.h"
 
-#include "src/common/xstring.h"
 #include "src/common/parse_time.h"
+#include "src/common/slurm_time.h"
+#include "src/common/xstring.h"
+#include "src/common/gres.h"
+
+#include "src/slurmctld/licenses.h"
+#include "src/slurmctld/read_config.h"
 
 #include "fair_tree.h"
 
@@ -94,6 +99,7 @@ List job_list  __attribute__((weak_import)) = NULL;
 time_t last_job_update __attribute__((weak_import)) = (time_t) 0;
 uint16_t part_max_priority __attribute__((weak_import)) = 0;
 slurm_ctl_conf_t slurmctld_conf __attribute__((weak_import));
+int slurmctld_tres_cnt __attribute__((weak_import)) = 0;
 #else
 void *acct_db_conn = NULL;
 uint32_t cluster_cpus = NO_VAL;
@@ -101,6 +107,7 @@ List job_list = NULL;
 time_t last_job_update = (time_t) 0;
 uint16_t part_max_priority = 0;
 slurm_ctl_conf_t slurmctld_conf;
+int slurmctld_tres_cnt = 0;
 #endif
 
 /*
@@ -125,16 +132,12 @@ slurm_ctl_conf_t slurmctld_conf;
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobcomp/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job completion logging API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]	= "Priority MULTIFACTOR plugin";
 const char plugin_type[]	= "priority/multifactor";
-const uint32_t plugin_version	= 100;
+const uint32_t plugin_version	= SLURM_VERSION_NUMBER;
 
 static pthread_t decay_handler_thread;
 static pthread_t cleanup_handler_thread;
@@ -150,19 +153,18 @@ static uint32_t weight_fs;   /* weight for Fairshare factor */
 static uint32_t weight_js;   /* weight for Job Size factor */
 static uint32_t weight_part; /* weight for Partition factor */
 static uint32_t weight_qos;  /* weight for QOS factor */
+static double  *weight_tres; /* tres weights */
 static uint32_t flags;       /* Priority Flags */
 static uint32_t prevflags;    /* Priority Flags before _internal_setup() resets
 			       * flags after a reconfigure */
-static uint32_t max_tickets; /* Maximum number of tickets given to a
-			      * user. Protected by assoc_mgr lock. */
 static time_t g_last_ran = 0; /* when the last poll ran */
 static double decay_factor = 1; /* The decay factor when decaying time. */
 
 /* variables defined in prirority_multifactor.h */
 bool priority_debug = 0;
 
-static void _priority_p_set_assoc_usage_debug(slurmdb_association_rec_t *assoc);
-static void _set_assoc_usage_efctv(slurmdb_association_rec_t *assoc);
+static void _priority_p_set_assoc_usage_debug(slurmdb_assoc_rec_t *assoc);
+static void _set_assoc_usage_efctv(slurmdb_assoc_rec_t *assoc);
 
 /*
  * apply decay factor to all associations usage_raw
@@ -173,11 +175,12 @@ static void _set_assoc_usage_efctv(slurmdb_association_rec_t *assoc);
  */
 static int _apply_decay(double real_decay)
 {
+	int i;
 	ListIterator itr = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	slurmdb_qos_rec_t *qos = NULL;
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 	/* continue if real_decay is 0 or 1 since that doesn't help
 	   us at all. 1 means no decay and 0 will just zero
@@ -189,15 +192,17 @@ static int _apply_decay(double real_decay)
 
 	assoc_mgr_lock(&locks);
 
-	xassert(assoc_mgr_association_list);
+	xassert(assoc_mgr_assoc_list);
 	xassert(assoc_mgr_qos_list);
 
-	itr = list_iterator_create(assoc_mgr_association_list);
+	itr = list_iterator_create(assoc_mgr_assoc_list);
 	/* We want to do this to all associations including root.
 	   All usage_raws are calculated from the bottom up.
 	*/
 	while ((assoc = list_next(itr))) {
 		assoc->usage->usage_raw *= real_decay;
+		for (i=0; i<slurmctld_tres_cnt; i++)
+			assoc->usage->usage_tres_raw[i] *= real_decay;
 		assoc->usage->grp_used_wall *= real_decay;
 	}
 	list_iterator_destroy(itr);
@@ -205,6 +210,8 @@ static int _apply_decay(double real_decay)
 	itr = list_iterator_create(assoc_mgr_qos_list);
 	while ((qos = list_next(itr))) {
 		qos->usage->usage_raw *= real_decay;
+		for (i=0; i<slurmctld_tres_cnt; i++)
+			qos->usage->usage_tres_raw[i] *= real_decay;
 		qos->usage->grp_used_wall *= real_decay;
 	}
 	list_iterator_destroy(itr);
@@ -214,31 +221,34 @@ static int _apply_decay(double real_decay)
 }
 
 /*
- * reset usage_raw, and grp_used_wall on all associations
+ * reset usage_raw, and grp_used_wall on all assocs
  * This should be called every PriorityUsageResetPeriod
  * RET: SLURM_SUCCESS on SUCCESS, SLURM_ERROR else.
  */
 static int _reset_usage(void)
 {
 	ListIterator itr = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	slurmdb_qos_rec_t *qos = NULL;
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	int i;
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, WRITE_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
 
 	if (!calc_fairshare)
 		return SLURM_SUCCESS;
 
 	assoc_mgr_lock(&locks);
 
-	xassert(assoc_mgr_association_list);
+	xassert(assoc_mgr_assoc_list);
 
-	itr = list_iterator_create(assoc_mgr_association_list);
+	itr = list_iterator_create(assoc_mgr_assoc_list);
 	/* We want to do this to all associations including root.
 	 * All usage_raws are calculated from the bottom up.
 	 */
 	while ((assoc = list_next(itr))) {
 		assoc->usage->usage_raw = 0;
+		for (i=0; i<slurmctld_tres_cnt; i++)
+			assoc->usage->usage_tres_raw[i] = 0;
 		assoc->usage->grp_used_wall = 0;
 	}
 	list_iterator_destroy(itr);
@@ -246,6 +256,8 @@ static int _reset_usage(void)
 	itr = list_iterator_create(assoc_mgr_qos_list);
 	while ((qos = list_next(itr))) {
 		qos->usage->usage_raw = 0;
+		for (i=0; i<slurmctld_tres_cnt; i++)
+			qos->usage->usage_tres_raw[i] = 0;
 		qos->usage->grp_used_wall = 0;
 	}
 	list_iterator_destroy(itr);
@@ -394,34 +406,6 @@ static int _write_last_decay_ran(time_t last_ran, time_t last_reset)
 }
 
 
-/* Set the effective usage of a node. */
-static void _ticket_based_set_usage_efctv(slurmdb_association_rec_t *assoc)
-{
-	long double min_shares_norm;
-	slurmdb_association_rec_t *fs_assoc = assoc;
-
-	if ((assoc->shares_raw == SLURMDB_FS_USE_PARENT)
-	    && assoc->usage->fs_assoc_ptr) {
-		/* This function needs to find the fairshare parent because
-		 * shares_raw needs to be a useful value, not
-		 * SLURMDB_FS_USE_PARENT */
-		fs_assoc = assoc->usage->fs_assoc_ptr;
-		assoc->usage->shares_norm = fs_assoc->usage->shares_norm;
-		assoc->usage->usage_norm = fs_assoc->usage->usage_norm;
-	}
-
-	if (fs_assoc->usage->level_shares) {
-		min_shares_norm = (long double) MIN_USAGE_FACTOR
-			* fs_assoc->shares_raw / fs_assoc->usage->level_shares;
-		if (fs_assoc->usage->usage_norm > min_shares_norm)
-			assoc->usage->usage_efctv = fs_assoc->usage->usage_norm;
-		else
-			assoc->usage->usage_efctv = min_shares_norm;
-	} else
-		assoc->usage->usage_efctv = fs_assoc->usage->usage_norm;
-}
-
-
 /* This should initially get the children list from assoc_mgr_root_assoc.
  * Since our algorithm goes from top down we calculate all the non-user
  * associations now.  When a user submits a job, that norm_fairshare is
@@ -429,11 +413,11 @@ static void _ticket_based_set_usage_efctv(slurmdb_association_rec_t *assoc)
  * to calculate a bunch of things that will never be used. (Fair Tree calls a
  * different function.)
  *
- * NOTE: acct_mgr_association_lock must be locked before this is called.
+ * NOTE: acct_mgr_assoc_lock must be locked before this is called.
  */
 static int _set_children_usage_efctv(List children_list)
 {
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	ListIterator itr = NULL;
 
 	if (!children_list || !list_count(children_list))
@@ -453,72 +437,17 @@ static int _set_children_usage_efctv(List children_list)
 }
 
 
-/* Distribute the tickets to child nodes recursively.
- *
- * NOTE: acct_mgr_association_lock must be locked before this is called.
- */
-static int _distribute_tickets(List children_list, uint32_t tickets)
-{
-	ListIterator itr;
-	slurmdb_association_rec_t *assoc;
-	double sfsum = 0, fs;
-
-	if (!children_list || !list_count(children_list))
-		return SLURM_SUCCESS;
-
-	itr = list_iterator_create(children_list);
-	while ((assoc = list_next(itr))) {
-		if (assoc->usage->active_seqno
-		    != assoc_mgr_root_assoc->usage->active_seqno)
-			continue;
-		if (fuzzy_equal(assoc->usage->usage_efctv, NO_VAL))
-			priority_p_set_assoc_usage(assoc);
-		fs = priority_p_calc_fs_factor(assoc->usage->usage_efctv,
-					       assoc->usage->shares_norm);
-		sfsum += assoc->usage->shares_norm * fs;
-	}
-	list_iterator_destroy(itr);
-
-	itr = list_iterator_create(children_list);
-	while ((assoc = list_next(itr))) {
-		if (assoc->usage->active_seqno
-		    != assoc_mgr_root_assoc->usage->active_seqno)
-			continue;
-		fs = priority_p_calc_fs_factor(assoc->usage->usage_efctv,
-					       assoc->usage->shares_norm);
-		assoc->usage->tickets = tickets * assoc->usage->shares_norm
-			* fs / sfsum;
-		if (priority_debug) {
-			if (assoc->user)
-				info("User %s in account %s gets %u tickets",
-				     assoc->user, assoc->acct,
-				     assoc->usage->tickets);
-			else
-				info("Account %s gets %u tickets",
-				     assoc->acct, assoc->usage->tickets);
-		}
-		if (assoc->user && assoc->usage->tickets > max_tickets)
-			max_tickets = assoc->usage->tickets;
-		_distribute_tickets(assoc->usage->children_list,
-				    assoc->usage->tickets);
-	}
-	list_iterator_destroy(itr);
-
-	return SLURM_SUCCESS;
-}
-
-
 /* job_ptr should already have the partition priority and such added here
  * before had we will be adding to it
  */
 static double _get_fairshare_priority(struct job_record *job_ptr)
 {
-	slurmdb_association_rec_t *job_assoc =
-		(slurmdb_association_rec_t *)job_ptr->assoc_ptr;
-	slurmdb_association_rec_t *fs_assoc = NULL;
+	slurmdb_assoc_rec_t *job_assoc =
+		(slurmdb_assoc_rec_t *)job_ptr->assoc_ptr;
+	slurmdb_assoc_rec_t *fs_assoc = NULL;
 	double priority_fs = 0.0;
-	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 	if (!calc_fairshare)
 		return 0;
@@ -541,20 +470,7 @@ static double _get_fairshare_priority(struct job_record *job_ptr)
 		priority_p_set_assoc_usage(fs_assoc);
 
 	/* Priority is 0 -> 1 */
-	priority_fs = 0;
-	if (flags & PRIORITY_FLAGS_TICKET_BASED) {
-		if (fs_assoc->usage->active_seqno ==
-		    assoc_mgr_root_assoc->usage->active_seqno && max_tickets) {
-			priority_fs = (double) fs_assoc->usage->tickets /
-				max_tickets;
-		}
-		if (priority_debug) {
-			info("Fairshare priority of job %u for user %s in acct"
-			     " %s is %f",
-			     job_ptr->job_id, job_assoc->user, job_assoc->acct,
-			     priority_fs);
-		}
-	} else if (flags & PRIORITY_FLAGS_FAIR_TREE) {
+	if (flags & PRIORITY_FLAGS_FAIR_TREE) {
 		priority_fs = job_assoc->usage->fs_factor;
 		if (priority_debug) {
 			info("Fairhare priority of job %u for user %s in acct"
@@ -587,11 +503,15 @@ static uint32_t _get_priority_internal(time_t start_time,
 	double priority	= 0.0;
 	priority_factors_object_t pre_factors;
 	uint64_t tmp_64;
+	double tmp_tres = 0.0;
 
 	if (job_ptr->direct_set_prio && (job_ptr->priority > 0)) {
-		if (job_ptr->prio_factors)
+		if (job_ptr->prio_factors) {
+			xfree(job_ptr->prio_factors->tres_weights);
+			xfree(job_ptr->prio_factors->priority_tres);
 			memset(job_ptr->prio_factors, 0,
 			       sizeof(priority_factors_object_t));
+		}
 		return job_ptr->priority;
 	}
 
@@ -599,15 +519,29 @@ static uint32_t _get_priority_internal(time_t start_time,
 		error("_get_priority_internal: job %u does not have a "
 		      "details symbol set, can't set priority",
 		      job_ptr->job_id);
-		if (job_ptr->prio_factors)
+		if (job_ptr->prio_factors) {
+			xfree(job_ptr->prio_factors->tres_weights);
+			xfree(job_ptr->prio_factors->priority_tres);
 			memset(job_ptr->prio_factors, 0,
 			       sizeof(priority_factors_object_t));
+		}
 		return 0;
 	}
 
 	set_priority_factors(start_time, job_ptr);
-	memcpy(&pre_factors, job_ptr->prio_factors,
-	       sizeof(priority_factors_object_t));
+
+	if (priority_debug) {
+		memcpy(&pre_factors, job_ptr->prio_factors,
+		       sizeof(priority_factors_object_t));
+		if (job_ptr->prio_factors->priority_tres) {
+			pre_factors.priority_tres = xmalloc(sizeof(double) *
+							    slurmctld_tres_cnt);
+			memcpy(pre_factors.priority_tres,
+			       job_ptr->prio_factors->priority_tres,
+			       sizeof(double) * slurmctld_tres_cnt);
+		}
+	} else	/* clang needs this memset to avoid a warning */
+		memset(&pre_factors, 0, sizeof(priority_factors_object_t));
 
 	job_ptr->prio_factors->priority_age  *= (double)weight_age;
 	job_ptr->prio_factors->priority_fs   *= (double)weight_fs;
@@ -615,11 +549,23 @@ static uint32_t _get_priority_internal(time_t start_time,
 	job_ptr->prio_factors->priority_part *= (double)weight_part;
 	job_ptr->prio_factors->priority_qos  *= (double)weight_qos;
 
+	if (weight_tres && job_ptr->prio_factors->priority_tres) {
+		int i;
+		double *tres_factors = NULL;
+		tres_factors = job_ptr->prio_factors->priority_tres;
+
+		for (i = 0; i < slurmctld_tres_cnt; i++) {
+			tres_factors[i] *= weight_tres[i];
+			tmp_tres += tres_factors[i];
+		}
+	}
+
 	priority = job_ptr->prio_factors->priority_age
 		+ job_ptr->prio_factors->priority_fs
 		+ job_ptr->prio_factors->priority_js
 		+ job_ptr->prio_factors->priority_part
 		+ job_ptr->prio_factors->priority_qos
+		+ tmp_tres
 		- (double)(job_ptr->prio_factors->nice - NICE_OFFSET);
 
 	/* Priority 0 is reserved for held jobs */
@@ -656,6 +602,7 @@ static uint32_t _get_priority_internal(time_t start_time,
 				 + job_ptr->prio_factors->priority_fs
 				 + job_ptr->prio_factors->priority_js
 				 + job_ptr->prio_factors->priority_qos
+				 + tmp_tres
 				 - (double)(job_ptr->prio_factors->nice
 					    - NICE_OFFSET));
 
@@ -681,6 +628,13 @@ static uint32_t _get_priority_internal(time_t start_time,
 	}
 
 	if (priority_debug) {
+		int i;
+		double *post_tres_factors =
+			job_ptr->prio_factors->priority_tres;
+		double *pre_tres_factors = pre_factors.priority_tres;
+		assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+					   READ_LOCK, NO_LOCK, NO_LOCK };
+
 		info("Weighted Age priority is %f * %u = %.2f",
 		     pre_factors.priority_age, weight_age,
 		     job_ptr->prio_factors->priority_age);
@@ -696,44 +650,34 @@ static uint32_t _get_priority_internal(time_t start_time,
 		info("Weighted QOS priority is %f * %u = %.2f",
 		     pre_factors.priority_qos, weight_qos,
 		     job_ptr->prio_factors->priority_qos);
-		info("Job %u priority: %.2f + %.2f + %.2f + %.2f + %.2f - %d "
-		     "= %.2f",
+
+		if (pre_tres_factors && post_tres_factors) {
+			assoc_mgr_lock(&locks);
+			for(i = 0; i < slurmctld_tres_cnt; i++) {
+				if (!post_tres_factors[i])
+					continue;
+				info("Weighted TRES:%s is %f * %.2f = %.2f",
+				     assoc_mgr_tres_name_array[i],
+				     pre_tres_factors[i], weight_tres[i],
+				     post_tres_factors[i]);
+			}
+			assoc_mgr_unlock(&locks);
+		}
+
+		info("Job %u priority: %.2f + %.2f + %.2f + %.2f + %.2f + %2.f "
+		     "- %d = %.2f",
 		     job_ptr->job_id, job_ptr->prio_factors->priority_age,
 		     job_ptr->prio_factors->priority_fs,
 		     job_ptr->prio_factors->priority_js,
 		     job_ptr->prio_factors->priority_part,
 		     job_ptr->prio_factors->priority_qos,
+		     tmp_tres,
 		     (job_ptr->prio_factors->nice - NICE_OFFSET),
 		     priority);
-	}
-	return (uint32_t)priority;
-}
-
-
-/* Mark an association and its parents as active (i.e. it may be given
- * tickets) during the current scheduling cycle.  The association
- * manager lock should be held on entry.  */
-static bool _mark_assoc_active(struct job_record *job_ptr)
-{
-	slurmdb_association_rec_t *job_assoc =
-		(slurmdb_association_rec_t *)job_ptr->assoc_ptr,
-		*assoc;
 
-	if (!job_assoc) {
-		error("Job %u has no association.  Unable to "
-		      "mark assiciation as active.", job_ptr->job_id);
-		return false;
-	}
-
-	for (assoc = job_assoc; assoc != assoc_mgr_root_assoc;
-	     assoc = assoc->usage->parent_assoc_ptr) {
-		if (assoc->usage->active_seqno
-		    == assoc_mgr_root_assoc->usage->active_seqno)
-			break;
-		assoc->usage->active_seqno
-			= assoc_mgr_root_assoc->usage->active_seqno;
+		xfree(pre_factors.priority_tres);
 	}
-	return true;
+	return (uint32_t)priority;
 }
 
 
@@ -743,7 +687,7 @@ static time_t _next_reset(uint16_t reset_period, time_t last_reset)
 	struct tm last_tm;
 	time_t tmp_time, now = time(NULL);
 
-	if (localtime_r(&last_reset, &last_tm) == NULL)
+	if (slurm_localtime_r(&last_reset, &last_tm) == NULL)
 		return (time_t) 0;
 
 	last_tm.tm_sec   = 0;
@@ -754,13 +698,13 @@ static time_t _next_reset(uint16_t reset_period, time_t last_reset)
 	last_tm.tm_isdst = -1;
 	switch (reset_period) {
 	case PRIORITY_RESET_DAILY:
-		tmp_time = mktime(&last_tm);
+		tmp_time = slurm_mktime(&last_tm);
 		tmp_time += SECS_PER_DAY;
 		while ((tmp_time + SECS_PER_DAY) < now)
 			tmp_time += SECS_PER_DAY;
 		return tmp_time;
 	case PRIORITY_RESET_WEEKLY:
-		tmp_time = mktime(&last_tm);
+		tmp_time = slurm_mktime(&last_tm);
 		tmp_time += (SECS_PER_DAY * (7 - last_tm.tm_wday));
 		while ((tmp_time + SECS_PER_WEEK) < now)
 			tmp_time += SECS_PER_WEEK;
@@ -795,7 +739,186 @@ static time_t _next_reset(uint16_t reset_period, time_t last_reset)
 	default:
 		return (time_t) 0;
 	}
-	return mktime(&last_tm);
+	return slurm_mktime(&last_tm);
+}
+
+
+/*
+ * Calculate billable TRES based on partition's defined BillingWeights. If none
+ * is defined, return total_cpus.  This is cached on job_ptr->billable_tres and
+ * is updated if the job was resized since the last iteration.
+ */
+static double _calc_billable_tres(struct job_record *job_ptr, time_t start_time)
+{
+	int    i;
+	double to_bill_node   = 0.0;
+	double to_bill_global = 0.0;
+	double *billing_weights = NULL;
+	struct part_record *part_ptr = job_ptr->part_ptr;
+
+	/* Don't recalculate unless the job is new or resized */
+	if ((!fuzzy_equal(job_ptr->billable_tres, NO_VAL)) &&
+	    difftime(job_ptr->resize_time, start_time) < 0.0)
+		return job_ptr->billable_tres;
+
+	if (priority_debug)
+		info("BillingWeight: job %d is either new or it was resized",
+		     job_ptr->job_id);
+
+	/* No billing weights defined. Return CPU count */
+	if (!part_ptr || !part_ptr->billing_weights) {
+		job_ptr->billable_tres = job_ptr->total_cpus;
+		return job_ptr->billable_tres;
+	}
+
+	if (priority_debug)
+		info("BillingWeight: job %d using \"%s\" from partition %s",
+		     job_ptr->job_id, part_ptr->billing_weights_str,
+		     job_ptr->part_ptr->name);
+
+	billing_weights = part_ptr->billing_weights;
+	for (i = 0; i < slurmctld_tres_cnt; i++) {
+		bool   is_mem      = false;
+		double tres_weight = billing_weights[i];
+		char  *tres_type   = assoc_mgr_tres_array[i]->type;
+		char  *tres_name   = assoc_mgr_tres_array[i]->name;
+		double tres_value  = job_ptr->tres_alloc_cnt[i];
+
+		if (!strcasecmp(tres_type, "mem")) {
+			is_mem = true;
+			tres_weight /= 1024; /* mem is weighted by gb. */
+		}
+
+		if (priority_debug)
+			info("BillingWeight: %s%s%s = %f * %f", tres_type,
+			     (tres_name) ? ":" : "",
+			     (tres_name) ? tres_name : "",
+			     tres_value, tres_weight);
+
+		tres_value *= tres_weight;
+
+		if ((flags & PRIORITY_FLAGS_MAX_TRES) &&
+		    ((is_mem) ||
+		     (!strcasecmp(tres_type, "cpu")) ||
+		     (!strcasecmp(tres_type, "gres"))))
+			to_bill_node = MAX(to_bill_node, tres_value);
+		else
+			to_bill_global += tres_value;
+	}
+
+	job_ptr->billable_tres = to_bill_node + to_bill_global;
+
+	if (priority_debug)
+		info("BillingWeight: Job %d %s = %f", job_ptr->job_id,
+		     (flags & PRIORITY_FLAGS_MAX_TRES) ?
+		     "MAX(node TRES) + SUM(Global TRES)" : "SUM(TRES)",
+		     job_ptr->billable_tres);
+
+	return job_ptr->billable_tres;
+}
+
+
+static void _handle_qos_tres_run_secs(long double *tres_run_decay,
+				      uint64_t *tres_run_delta,
+				      uint32_t job_id,
+				      slurmdb_qos_rec_t *qos)
+{
+	int i;
+
+	if (!qos)
+		return;
+
+	for (i=0; i<slurmctld_tres_cnt; i++) {
+		if (tres_run_decay)
+			qos->usage->usage_tres_raw[i] += tres_run_decay[i];
+
+		if (tres_run_delta[i] >
+		    qos->usage->grp_used_tres_run_secs[i]) {
+			error("_handle_qos_tres_run_secs: job %u: "
+			      "QOS %s TRES %s grp_used_tres_run_secs "
+			      "underflow, tried to remove %"PRIu64" seconds "
+			      "when only %"PRIu64" remained.",
+			      job_id,
+			      qos->name,
+			      assoc_mgr_tres_name_array[i],
+			      tres_run_delta[i],
+			      qos->usage->grp_used_tres_run_secs[i]);
+			qos->usage->grp_used_tres_run_secs[i] = 0;
+		} else
+			qos->usage->grp_used_tres_run_secs[i] -=
+				tres_run_delta[i];
+
+		if (priority_debug) {
+			info("_handle_qos_tres_run_secs: job %u: "
+			     "Removed %"PRIu64" unused seconds "
+			     "from QOS %s TRES %s "
+			     "grp_used_tres_run_secs = %"PRIu64,
+			     job_id,
+			     tres_run_delta[i],
+			     qos->name,
+			     assoc_mgr_tres_name_array[i],
+			     qos->usage->grp_used_tres_run_secs[i]);
+		}
+	}
+}
+
+static void _handle_assoc_tres_run_secs(long double *tres_run_decay,
+					uint64_t *tres_run_delta,
+					uint32_t job_id,
+					slurmdb_assoc_rec_t *assoc)
+{
+	int i;
+
+	if (!assoc)
+		return;
+
+	for (i=0; i<slurmctld_tres_cnt; i++) {
+		if (tres_run_decay)
+			assoc->usage->usage_tres_raw[i] += tres_run_decay[i];
+
+		if (tres_run_delta[i] >
+		    assoc->usage->grp_used_tres_run_secs[i]) {
+			error("_handle_assoc_tres_run_secs: job %u: "
+			      "assoc %u TRES %s grp_used_tres_run_secs "
+			      "underflow, tried to remove %"PRIu64" seconds "
+			      "when only %"PRIu64" remained.",
+			      job_id,
+			      assoc->id,
+			      assoc_mgr_tres_name_array[i],
+			      tres_run_delta[i],
+			      assoc->usage->grp_used_tres_run_secs[i]);
+			assoc->usage->grp_used_tres_run_secs[i] = 0;
+		} else
+			assoc->usage->grp_used_tres_run_secs[i] -=
+				tres_run_delta[i];
+
+		if (priority_debug) {
+			info("_handle_assoc_tres_run_secs: job %u: "
+			     "Removed %"PRIu64" unused seconds "
+			     "from assoc %d TRES %s "
+			     "grp_used_tres_run_secs = %"PRIu64,
+			     job_id,
+			     tres_run_delta[i],
+			     assoc->id,
+			     assoc_mgr_tres_name_array[i],
+			     assoc->usage->grp_used_tres_run_secs[i]);
+		}
+	}
+}
+
+static void _handle_tres_run_secs(uint64_t *tres_run_delta,
+				  struct job_record *job_ptr)
+{
+
+	slurmdb_assoc_rec_t *assoc = (slurmdb_assoc_rec_t *)job_ptr->assoc_ptr;
+
+	_handle_qos_tres_run_secs(NULL, tres_run_delta,
+				  job_ptr->job_id, job_ptr->qos_ptr);
+	while (assoc) {
+		_handle_assoc_tres_run_secs(NULL, tres_run_delta,
+					    job_ptr->job_id, assoc);
+		assoc = assoc->usage->parent_assoc_ptr;
+	}
 }
 
 /*
@@ -813,13 +936,12 @@ static void _init_grp_used_cpu_run_secs(time_t last_ran)
 {
 	struct job_record *job_ptr = NULL;
 	ListIterator itr;
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
 	slurmctld_lock_t job_read_lock =
 		{ NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
-	uint64_t delta;
-	slurmdb_qos_rec_t *qos;
-	slurmdb_association_rec_t *assoc;
+	uint64_t tres_run_delta[slurmctld_tres_cnt];
+	int i;
 
 	if (priority_debug)
 		info("Initializing grp_used_cpu_run_secs");
@@ -836,8 +958,6 @@ static void _init_grp_used_cpu_run_secs(time_t last_ran)
 	while ((job_ptr = list_next(itr))) {
 		if (priority_debug)
 			debug2("job: %u", job_ptr->job_id);
-		qos = NULL;
-		assoc = NULL;
 
 		if (!IS_JOB_RUNNING(job_ptr))
 			continue;
@@ -845,51 +965,13 @@ static void _init_grp_used_cpu_run_secs(time_t last_ran)
 		if (job_ptr->start_time > last_ran)
 			continue;
 
-		delta = job_ptr->total_cpus * (last_ran - job_ptr->start_time);
-
-		qos = (slurmdb_qos_rec_t *) job_ptr->qos_ptr;
-		assoc = (slurmdb_association_rec_t *) job_ptr->assoc_ptr;
-
-		if (qos) {
-			if (priority_debug) {
-				info("Subtracting %"PRIu64" from qos "
-				     "%s grp_used_cpu_run_secs "
-				     "%"PRIu64" = %"PRIu64"",
-				     delta,
-				     qos->name,
-				     qos->usage->grp_used_cpu_run_secs,
-				     qos->usage->grp_used_cpu_run_secs -
-				     delta);
-			}
-			if (qos->usage->grp_used_cpu_run_secs >= delta) {
-				qos->usage->grp_used_cpu_run_secs -= delta;
-			} else {
-				error("qos %s grp_used_cpu_run_secs underflow",
-				      qos->name);
-				qos->usage->grp_used_cpu_run_secs = 0;
-			}
+		for (i=0; i<slurmctld_tres_cnt; i++) {
+			tres_run_delta[i] =
+				(uint64_t)(last_ran - job_ptr->start_time) *
+				job_ptr->tres_alloc_cnt[i];
 		}
 
-		while (assoc) {
-			if (priority_debug) {
-				info("Subtracting %"PRIu64" from assoc %u "
-				     "grp_used_cpu_run_secs "
-				     "%"PRIu64" = %"PRIu64"",
-				     delta,
-				     assoc->id,
-				     assoc->usage->grp_used_cpu_run_secs,
-				     assoc->usage->grp_used_cpu_run_secs -
-				     delta);
-			}
-			if (assoc->usage->grp_used_cpu_run_secs >= delta) {
-				assoc->usage->grp_used_cpu_run_secs -= delta;
-			} else {
-				error("assoc %u grp_used_cpu_run_secs "
-				      "underflow", assoc->id);
-				assoc->usage->grp_used_cpu_run_secs = 0;
-			}
-			assoc = assoc->usage->parent_assoc_ptr;
-		}
+		_handle_tres_run_secs(tres_run_delta, job_ptr);
 	}
 	assoc_mgr_unlock(&locks);
 	list_iterator_destroy(itr);
@@ -906,12 +988,15 @@ static int _apply_new_usage(struct job_record *job_ptr,
 			    bool adjust_for_end)
 {
 	slurmdb_qos_rec_t *qos;
-	slurmdb_association_rec_t *assoc;
+	slurmdb_assoc_rec_t *assoc;
 	double run_delta = 0.0, run_decay = 0.0, real_decay = 0.0;
-	uint64_t cpu_run_delta = 0;
+	uint64_t tres_run_delta[slurmctld_tres_cnt];
+	long double tres_run_decay[slurmctld_tres_cnt];
+	uint64_t tres_time_delta = 0;
+	int i;
 	uint64_t job_time_limit_ends = 0;
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
 
 	/* If end_time_exp is NO_VAL we have already ran the end for
 	 * this job.  We don't want to do it again, so just exit.
@@ -957,46 +1042,60 @@ static int _apply_new_usage(struct job_record *job_ptr,
 		(uint64_t)job_ptr->time_limit * 60;
 
 	if ((uint64_t)start_period >= job_time_limit_ends)
-		cpu_run_delta = 0;
+		tres_time_delta = 0;
 	else if (IS_JOB_FINISHED(job_ptr) || IS_JOB_COMPLETING(job_ptr)) {
 		/* If a job is being requeued sometimes the state will
 		   be pending + completing so handle that the same as
 		   finished so we don't leave time in the mix.
 		*/
-		cpu_run_delta = job_ptr->total_cpus *
-			(job_time_limit_ends - (uint64_t)start_period);
+		tres_time_delta = (job_time_limit_ends -
+				   (uint64_t)start_period);
 	} else if (end_period > job_ptr->end_time_exp) {
 		int end_exp = difftime(job_ptr->end_time_exp, start_period);
 
-		if (end_exp <= 0)
-			cpu_run_delta = 0;
-		else
-			cpu_run_delta = job_ptr->total_cpus * end_exp;
+		if (end_exp > 0)
+			tres_time_delta = (uint64_t)end_exp;
 	} else
-		cpu_run_delta = job_ptr->total_cpus * run_delta;
+		tres_time_delta = run_delta;
 
 	/* make sure we only run through this once at the end */
 	if (adjust_for_end)
 		job_ptr->end_time_exp = (time_t)NO_VAL;
 
-	if (priority_debug)
-		info("job %u ran for %g seconds on %u cpus, cpu_delta %"PRIu64,
-		     job_ptr->job_id, run_delta, job_ptr->total_cpus,
-		     cpu_run_delta);
-
+	if (priority_debug) {
+		info("job %u ran for %g seconds with TRES counts of",
+		     job_ptr->job_id, run_delta);
+		for (i=0; i<slurmctld_tres_cnt; i++) {
+			if (!job_ptr->tres_alloc_cnt[i])
+				continue;
+			info("TRES %s: %"PRIu64,
+			     assoc_mgr_tres_name_array[i],
+			     job_ptr->tres_alloc_cnt[i]);
+		}
+	}
 	/* get the time in decayed fashion */
 	run_decay = run_delta * pow(decay_factor, run_delta);
-
-	real_decay = run_decay * (double)job_ptr->total_cpus;
+	/* clang needs these memset to avoid a warning */
+	memset(tres_run_decay, 0, sizeof(tres_run_decay));
+	memset(tres_run_delta, 0, sizeof(tres_run_delta));
+	for (i=0; i<slurmctld_tres_cnt; i++) {
+		tres_run_delta[i] = tres_time_delta *
+			job_ptr->tres_alloc_cnt[i];
+		tres_run_decay[i] = (long double)run_decay *
+			(long double)job_ptr->tres_alloc_cnt[i];
+	}
 
 	assoc_mgr_lock(&locks);
+
+	real_decay = run_decay * _calc_billable_tres(job_ptr, start_period);
+
 	/* Just to make sure we don't make a
 	   window where the qos_ptr could of
 	   changed make sure we get it again
 	   here.
 	*/
 	qos = (slurmdb_qos_rec_t *)job_ptr->qos_ptr;
-	assoc = (slurmdb_association_rec_t *)job_ptr->assoc_ptr;
+	assoc = (slurmdb_assoc_rec_t *)job_ptr->assoc_ptr;
 
 	/* now apply the usage factor for this qos */
 	if (qos) {
@@ -1006,24 +1105,9 @@ static int _apply_new_usage(struct job_record *job_ptr,
 		}
 		qos->usage->grp_used_wall += run_decay;
 		qos->usage->usage_raw += (long double)real_decay;
-		if (qos->usage->grp_used_cpu_run_secs >= cpu_run_delta) {
-			if (priority_debug)
-				info("QOS %s has grp_used_cpu_run_secs "
-				     "of %"PRIu64", will subtract %"PRIu64"",
-				     qos->name,
-				     qos->usage->grp_used_cpu_run_secs,
-				     cpu_run_delta);
-			qos->usage->grp_used_cpu_run_secs -= cpu_run_delta;
-		} else {
-			if (priority_debug)
-				info("jobid %u, qos %s: setting "
-				     "grp_used_cpu_run_secs "
-				     "to 0 because %"PRIu64" < %"PRIu64"",
-				     job_ptr->job_id, qos->name,
-				     qos->usage->grp_used_cpu_run_secs,
-				     cpu_run_delta);
-			qos->usage->grp_used_cpu_run_secs = 0;
-		}
+
+		_handle_qos_tres_run_secs(tres_run_decay, tres_run_delta,
+					  job_ptr->job_id, qos);
 	}
 
 	/* We want to do this all the way up
@@ -1032,39 +1116,19 @@ static int _apply_new_usage(struct job_record *job_ptr,
 	 * has occured on the entire system
 	 * and use that to normalize against. */
 	while (assoc) {
-		if (assoc->usage->grp_used_cpu_run_secs >= cpu_run_delta) {
-			if (priority_debug)
-				info("assoc %u (user='%s' "
-				     "acct='%s') has grp_used_cpu_run_secs "
-				     "of %"PRIu64", will subtract %"PRIu64"",
-				     assoc->id, assoc->user, assoc->acct,
-				     assoc->usage->grp_used_cpu_run_secs,
-				     cpu_run_delta);
-			assoc->usage->grp_used_cpu_run_secs -= cpu_run_delta;
-		} else {
-			if (priority_debug)
-				info("jobid %u, assoc %u: setting "
-				     "grp_used_cpu_run_secs "
-				     "to 0 because %"PRIu64" < %"PRIu64"",
-				     job_ptr->job_id, assoc->id,
-				     assoc->usage->grp_used_cpu_run_secs,
-				     cpu_run_delta);
-			assoc->usage->grp_used_cpu_run_secs = 0;
-		}
-
 		assoc->usage->grp_used_wall += run_decay;
 		assoc->usage->usage_raw += (long double)real_decay;
 		if (priority_debug)
-			info("adding %f new usage to assoc %u (user='%s' "
-			     "acct='%s') raw usage is now %Lf.  Group wall "
-			     "added %f making it %f. GrpCPURunMins is "
-			     "%"PRIu64"",
-			     real_decay, assoc->id,
-			     assoc->user, assoc->acct,
-			     assoc->usage->usage_raw,
-			     run_decay,
-			     assoc->usage->grp_used_wall,
-			     assoc->usage->grp_used_cpu_run_secs/60);
+			info("Adding %f new usage to assoc %u (%s/%s/%s) "
+			     "raw usage is now %Lf.  Group wall "
+			     "added %f making it %f.",
+			     real_decay, assoc->id, assoc->acct,
+			     assoc->user, assoc->partition,
+			     assoc->usage->usage_raw, run_decay,
+			     assoc->usage->grp_used_wall);
+		_handle_assoc_tres_run_secs(tres_run_decay, tres_run_delta,
+					    job_ptr->job_id, assoc);
+
 		assoc = assoc->usage->parent_assoc_ptr;
 	}
 	assoc_mgr_unlock(&locks);
@@ -1072,93 +1136,6 @@ static int _apply_new_usage(struct job_record *job_ptr,
 }
 
 
-static void _ticket_based_decay(List job_list, time_t start_time)
-{
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
-	slurmctld_lock_t job_write_lock =
-		{ NO_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK };
-	ListIterator itr = NULL;
-	struct job_record *job_ptr = NULL;
-
-	/* Read lock on jobs, nodes, and partitions */
-	slurmctld_lock_t job_read_lock =
-		{ NO_LOCK, READ_LOCK, READ_LOCK, READ_LOCK };
-
-	/* Multifactor Ticket Based core algo 1/3. Iterate through all jobs,
-	 * mark parent associations with the current sequence id, so that we
-	 * know which associations/users are active. At the same time as we're
-	 * looping through all the jobs anyway, apply the new usage of running
-	 * jobs too.
-	 */
-
-	lock_slurmctld(job_read_lock);
-	assoc_mgr_lock(&locks);
-	/* seqno 0 is a special invalid value. */
-	assoc_mgr_root_assoc->usage->active_seqno++;
-	if (!assoc_mgr_root_assoc->usage->active_seqno)
-		assoc_mgr_root_assoc->usage->active_seqno++;
-	assoc_mgr_unlock(&locks);
-	itr = list_iterator_create(job_list);
-	while ((job_ptr = list_next(itr))) {
-		/* Don't need to handle finished jobs. */
-		if (IS_JOB_FINISHED(job_ptr) || IS_JOB_COMPLETING(job_ptr))
-			continue;
-		/* apply new usage */
-		if (((flags & PRIORITY_FLAGS_CALCULATE_RUNNING) ||
-		     !IS_JOB_PENDING(job_ptr)) &&
-		    job_ptr->start_time && job_ptr->assoc_ptr
-		    && g_last_ran)
-			_apply_new_usage(job_ptr,
-					 g_last_ran,
-					 start_time, 0);
-
-		if (IS_JOB_PENDING(job_ptr) && job_ptr->assoc_ptr) {
-			assoc_mgr_lock(&locks);
-			_mark_assoc_active(job_ptr);
-			assoc_mgr_unlock(&locks);
-		}
-	}
-	list_iterator_destroy(itr);
-	unlock_slurmctld(job_read_lock);
-
-	/* Multifactor Ticket Based core algo 2/3. Start from the root,
-	 * distribute tickets to active child associations proportional to the
-	 * fair share (s*F). We start with UINT32_MAX tickets at the root.
-	 */
-	assoc_mgr_lock(&locks);
-	max_tickets = 0;
-	assoc_mgr_root_assoc->usage->tickets = (uint32_t) -1;
-	_distribute_tickets(
-		assoc_mgr_root_assoc->usage->children_list,
-		(uint32_t) -1);
-	assoc_mgr_unlock(&locks);
-
-	/* Multifactor Ticket Based core algo 3/3. Iterate through the job
-	 * list again, give priorities proportional to the  maximum number of
-	 * tickets given to any user.
-	 */
-	lock_slurmctld(job_write_lock);
-	itr = list_iterator_create(job_list);
-	while ((job_ptr = list_next(itr))) {
-		/*
-		 * Priority 0 is reserved for held
-		 * jobs. Also skip priority
-		 * calculation for non-pending jobs.
-		 */
-		if ((job_ptr->priority == 0) || !IS_JOB_PENDING(job_ptr))
-			continue;
-
-		job_ptr->priority = _get_priority_internal(start_time, job_ptr);
-		last_job_update = time(NULL);
-		debug2("priority for job %u is now %u",
-		       job_ptr->job_id, job_ptr->priority);
-	}
-	list_iterator_destroy(itr);
-	unlock_slurmctld(job_write_lock);
-}
-
-
 static int _decay_apply_new_usage_and_weighted_factors(
 	struct job_record *job_ptr,
 	time_t *start_time_ptr)
@@ -1190,8 +1167,8 @@ static void *_decay_thread(void *no_data)
 	/* Write lock on jobs, read lock on nodes and partitions */
 	slurmctld_lock_t job_write_lock =
 		{ NO_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK };
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 #if HAVE_SYS_PRCTL_H
 	if (prctl(PR_SET_NAME, "slurmctld_decay", NULL, NULL, NULL) < 0) {
@@ -1332,8 +1309,7 @@ static void *_decay_thread(void *no_data)
 			break;
 		}
 
-		if (!(flags & (PRIORITY_FLAGS_TICKET_BASED
-			       | PRIORITY_FLAGS_FAIR_TREE))) {
+		if (!(flags & PRIORITY_FLAGS_FAIR_TREE)) {
 			lock_slurmctld(job_write_lock);
 			list_for_each(
 				job_list,
@@ -1344,9 +1320,7 @@ static void *_decay_thread(void *no_data)
 		}
 
 	get_usage:
-		if (flags & PRIORITY_FLAGS_TICKET_BASED)
-			_ticket_based_decay(job_list, start_time);
-		else if (flags & PRIORITY_FLAGS_FAIR_TREE)
+		if (flags & PRIORITY_FLAGS_FAIR_TREE)
 			fair_tree_decay(job_list, start_time);
 
 		g_last_ran = start_time;
@@ -1420,6 +1394,7 @@ static void *_cleanup_thread(void *no_data)
 
 static void _internal_setup(void)
 {
+	char *tres_weights_str;
 	if (slurm_get_debug_flags() & DEBUG_FLAG_PRIO)
 		priority_debug = 1;
 	else
@@ -1434,6 +1409,12 @@ static void _internal_setup(void)
 	weight_js = slurm_get_priority_weight_job_size();
 	weight_part = slurm_get_priority_weight_partition();
 	weight_qos = slurm_get_priority_weight_qos();
+	xfree(weight_tres);
+	if ((tres_weights_str = slurm_get_priority_weight_tres())) {
+		weight_tres = slurm_get_tres_weight_array(tres_weights_str,
+							  slurmctld_tres_cnt);
+	}
+	xfree(tres_weights_str);
 	flags = slurmctld_conf.priority_flags;
 
 	if (priority_debug) {
@@ -1451,12 +1432,12 @@ static void _internal_setup(void)
 
 
 /* Reursively call assoc_mgr_normalize_assoc_shares from assoc_mgr.c on
- * children of an association
+ * children of an assoc
  */
 static void _set_norm_shares(List children_list)
 {
 	ListIterator itr = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 
 	if (!children_list || list_is_empty(children_list))
 		return;
@@ -1472,12 +1453,12 @@ static void _set_norm_shares(List children_list)
 }
 
 
-static void _depth_oblivious_set_usage_efctv(slurmdb_association_rec_t *assoc)
+static void _depth_oblivious_set_usage_efctv(slurmdb_assoc_rec_t *assoc)
 {
 	long double ratio_p, ratio_l, k, f, ratio_s;
-	slurmdb_association_rec_t *parent_assoc = NULL;
+	slurmdb_assoc_rec_t *parent_assoc = NULL;
 	ListIterator sib_itr = NULL;
-	slurmdb_association_rec_t *sibling = NULL;
+	slurmdb_assoc_rec_t *sibling = NULL;
 	char *child;
 	char *child_str;
 
@@ -1582,7 +1563,7 @@ static void _depth_oblivious_set_usage_efctv(slurmdb_association_rec_t *assoc)
 	}
 }
 
-static void _set_usage_efctv(slurmdb_association_rec_t *assoc)
+static void _set_usage_efctv(slurmdb_assoc_rec_t *assoc)
 {
 	/* Variable names taken from HTML documentation */
 	long double ua_child = assoc->usage->usage_norm;
@@ -1673,17 +1654,18 @@ int init ( void )
 int fini ( void )
 {
 	/* Daemon termination handled here */
+	slurm_mutex_lock(&decay_lock);
 	if (running_decay)
 		debug("Waiting for decay thread to finish.");
 
-	slurm_mutex_lock(&decay_lock);
-
 	/* cancel the decay thread and then join the cleanup thread */
 	if (decay_handler_thread)
 		pthread_cancel(decay_handler_thread);
 	if (cleanup_handler_thread)
 		pthread_join(cleanup_handler_thread, NULL);
 
+	xfree(weight_tres);
+
 	slurm_mutex_unlock(&decay_lock);
 
 	return SLURM_SUCCESS;
@@ -1700,9 +1682,8 @@ extern uint32_t priority_p_set(uint32_t last_prio, struct job_record *job_ptr)
 
 extern void priority_p_reconfig(bool assoc_clear)
 {
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
-
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 	reconfig = 1;
 	prevflags = flags;
@@ -1732,7 +1713,7 @@ extern void priority_p_reconfig(bool assoc_clear)
 }
 
 
-extern void set_assoc_usage_norm(slurmdb_association_rec_t *assoc)
+extern void set_assoc_usage_norm(slurmdb_assoc_rec_t *assoc)
 {
 	/* If root usage is 0, there is no usage anywhere. */
 	if (!assoc_mgr_root_assoc->usage->usage_raw) {
@@ -1752,7 +1733,7 @@ extern void set_assoc_usage_norm(slurmdb_association_rec_t *assoc)
 }
 
 
-extern void priority_p_set_assoc_usage(slurmdb_association_rec_t *assoc)
+extern void priority_p_set_assoc_usage(slurmdb_assoc_rec_t *assoc)
 {
 	xassert(assoc_mgr_root_assoc);
 	xassert(assoc);
@@ -1778,14 +1759,7 @@ extern double priority_p_calc_fs_factor(long double usage_efctv,
 	if (shares_norm <= 0)
 		return priority_fs;
 
-	if (flags & PRIORITY_FLAGS_TICKET_BASED) {
-		if (usage_efctv < MIN_USAGE_FACTOR * shares_norm)
-			usage_efctv = MIN_USAGE_FACTOR * shares_norm;
-		priority_fs = shares_norm / usage_efctv;
-	} else {
-		priority_fs =
-			pow(2.0, -((usage_efctv/shares_norm) / damp_factor));
-	}
+	priority_fs = pow(2.0, -((usage_efctv/shares_norm) / damp_factor));
 
 	return priority_fs;
 }
@@ -1849,16 +1823,17 @@ extern List priority_p_get_priority_factors_list(
 				continue;
 
 			obj = xmalloc(sizeof(priority_factors_object_t));
-			memcpy(obj, job_ptr->prio_factors,
-			       sizeof(priority_factors_object_t));
+
+			slurm_copy_priority_factors_object(
+				obj, job_ptr->prio_factors);
+
 			obj->job_id = job_ptr->job_id;
 			obj->user_id = job_ptr->user_id;
 			list_append(ret_list, obj);
 		}
 		list_iterator_destroy(itr);
 		if (!list_count(ret_list)) {
-			list_destroy(ret_list);
-			ret_list = NULL;
+			FREE_NULL_LIST(ret_list);
 		}
 	}
 	unlock_slurmctld(job_read_lock);
@@ -1929,9 +1904,12 @@ extern void set_priority_factors(time_t start_time, struct job_record *job_ptr)
 	if (!job_ptr->prio_factors)
 		job_ptr->prio_factors =
 			xmalloc(sizeof(priority_factors_object_t));
-	else
+	else {
+		xfree(job_ptr->prio_factors->tres_weights);
+		xfree(job_ptr->prio_factors->priority_tres);
 		memset(job_ptr->prio_factors, 0,
 		       sizeof(priority_factors_object_t));
+	}
 
 	qos_ptr = (slurmdb_qos_rec_t *)job_ptr->qos_ptr;
 
@@ -1965,6 +1943,7 @@ extern void set_priority_factors(time_t start_time, struct job_record *job_ptr)
 			_get_fairshare_priority(job_ptr);
 	}
 
+	/* FIXME: this should work off the product of TRESBillingWeights */
 	if (weight_js) {
 		uint32_t cpu_cnt = 0, min_nodes = 1;
 		/* On the initial run of this we don't have total_cpus
@@ -2044,20 +2023,51 @@ extern void set_priority_factors(time_t start_time, struct job_record *job_ptr)
 		job_ptr->prio_factors->nice = job_ptr->details->nice;
 	else
 		job_ptr->prio_factors->nice = NICE_OFFSET;
+
+	if (weight_tres) {
+		int i;
+		double *tres_factors = NULL;
+
+		if (!job_ptr->prio_factors->priority_tres) {
+			job_ptr->prio_factors->priority_tres =
+				xmalloc(sizeof(double) * slurmctld_tres_cnt);
+			job_ptr->prio_factors->tres_weights =
+				xmalloc(sizeof(double) * slurmctld_tres_cnt);
+			memcpy(job_ptr->prio_factors->tres_weights, weight_tres,
+			       sizeof(double) * slurmctld_tres_cnt);
+			job_ptr->prio_factors->tres_cnt = slurmctld_tres_cnt;
+		}
+		tres_factors = job_ptr->prio_factors->priority_tres;
+
+		/* can't memcpy because of different types
+		 * uint64_t vs. double */
+		for (i = 0; i < slurmctld_tres_cnt; i++) {
+			uint64_t value = 0;
+			if (job_ptr->tres_alloc_cnt)
+				value = job_ptr->tres_alloc_cnt[i];
+			else if (job_ptr->tres_req_cnt)
+				value = job_ptr->tres_req_cnt[i];
+
+			if (value &&
+			    job_ptr->part_ptr &&
+			    job_ptr->part_ptr->tres_cnt &&
+			    job_ptr->part_ptr->tres_cnt[i])
+				tres_factors[i] = value /
+					(double)job_ptr->part_ptr->tres_cnt[i];
+		}
+	}
 }
 
 
 /* Set usage_efctv based on algorithm-specific code. Fair Tree sets this
  * elsewhere.
  */
-static void _set_assoc_usage_efctv(slurmdb_association_rec_t *assoc)
+static void _set_assoc_usage_efctv(slurmdb_assoc_rec_t *assoc)
 {
 	if (assoc->usage->fs_assoc_ptr == assoc_mgr_root_assoc)
 		assoc->usage->usage_efctv = assoc->usage->usage_norm;
-	else if (flags & PRIORITY_FLAGS_TICKET_BASED)
-		_ticket_based_set_usage_efctv(assoc);
 	else if (assoc->shares_raw == SLURMDB_FS_USE_PARENT) {
-		slurmdb_association_rec_t *parent_assoc =
+		slurmdb_assoc_rec_t *parent_assoc =
 			assoc->usage->fs_assoc_ptr;
 
 		assoc->usage->usage_efctv =
@@ -2069,7 +2079,7 @@ static void _set_assoc_usage_efctv(slurmdb_association_rec_t *assoc)
 }
 
 
-static void _priority_p_set_assoc_usage_debug(slurmdb_association_rec_t *assoc)
+static void _priority_p_set_assoc_usage_debug(slurmdb_assoc_rec_t *assoc)
 {
 	char *child;
 	char *child_str;
@@ -2097,14 +2107,8 @@ static void _priority_p_set_assoc_usage_debug(slurmdb_association_rec_t *assoc)
 		     assoc->usage->fs_assoc_ptr->acct,
 		     assoc->usage->usage_efctv,
 		     assoc->usage->usage_norm);
-	} else if (flags & PRIORITY_FLAGS_TICKET_BASED) {
-		info("Effective usage for %s %s off %s(%s) = %Lf",
-		     child, child_str,
-		     assoc->usage->parent_assoc_ptr->acct,
-		     assoc->usage->fs_assoc_ptr->acct,
-		     assoc->usage->usage_efctv);
 	} else if (assoc->shares_raw == SLURMDB_FS_USE_PARENT) {
-		slurmdb_association_rec_t *parent_assoc =
+		slurmdb_assoc_rec_t *parent_assoc =
 			assoc->usage->fs_assoc_ptr;
 
 		info("Effective usage for %s %s off %s %Lf",
diff --git a/src/plugins/priority/multifactor/priority_multifactor.h b/src/plugins/priority/multifactor/priority_multifactor.h
index 65785e14f..281b9d013 100644
--- a/src/plugins/priority/multifactor/priority_multifactor.h
+++ b/src/plugins/priority/multifactor/priority_multifactor.h
@@ -50,14 +50,14 @@
 #include "src/common/assoc_mgr.h"
 
 #include "src/slurmctld/locks.h"
-extern void priority_p_set_assoc_usage(slurmdb_association_rec_t *assoc);
+extern void priority_p_set_assoc_usage(slurmdb_assoc_rec_t *assoc);
 extern double priority_p_calc_fs_factor(
 		long double usage_efctv, long double shares_norm);
 extern bool decay_apply_new_usage(
 		struct job_record *job_ptr, time_t *start_time_ptr);
 extern int  decay_apply_weighted_factors(
 		struct job_record *job_ptr, time_t *start_time_ptr);
-extern void set_assoc_usage_norm(slurmdb_association_rec_t *assoc);
+extern void set_assoc_usage_norm(slurmdb_assoc_rec_t *assoc);
 extern void set_priority_factors(time_t start_time, struct job_record *job_ptr);
 
 extern bool priority_debug;
diff --git a/src/plugins/proctrack/Makefile.in b/src/plugins/proctrack/Makefile.in
index 237c217c9..90f4afe13 100644
--- a/src/plugins/proctrack/Makefile.in
+++ b/src/plugins/proctrack/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/proctrack/aix/Makefile.in b/src/plugins/proctrack/aix/Makefile.in
index 513891c38..8792a3c0f 100644
--- a/src/plugins/proctrack/aix/Makefile.in
+++ b/src/plugins/proctrack/aix/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/proctrack/aix/proctrack_aix.c b/src/plugins/proctrack/aix/proctrack_aix.c
index f303d6f1e..434e533cf 100644
--- a/src/plugins/proctrack/aix/proctrack_aix.c
+++ b/src/plugins/proctrack/aix/proctrack_aix.c
@@ -81,16 +81,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobcomp/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job completion logging API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]      = "Process tracking via AIX kernel extension plugin";
 const char plugin_type[]      = "proctrack/aix";
-const uint32_t plugin_version = 91;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
diff --git a/src/plugins/proctrack/cgroup/Makefile.in b/src/plugins/proctrack/cgroup/Makefile.in
index a9725c0fd..867ee9e60 100644
--- a/src/plugins/proctrack/cgroup/Makefile.in
+++ b/src/plugins/proctrack/cgroup/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/proctrack/cgroup/proctrack_cgroup.c b/src/plugins/proctrack/cgroup/proctrack_cgroup.c
index ade115388..4dc4cea3a 100644
--- a/src/plugins/proctrack/cgroup/proctrack_cgroup.c
+++ b/src/plugins/proctrack/cgroup/proctrack_cgroup.c
@@ -45,20 +45,21 @@
 #include <inttypes.h>
 #endif
 
+#include <fcntl.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
 #include "slurm/slurm.h"
 #include "slurm/slurm_errno.h"
 #include "src/common/log.h"
 #include "src/common/xcgroup_read_config.h"
 #include "src/common/xstring.h"
+#include "src/slurmd/common/xcpuinfo.h"
+#include "src/slurmd/common/xcgroup.h"
 #include "src/slurmd/slurmd/slurmd.h"
 #include "src/slurmd/slurmstepd/slurmstepd_job.h"
-#include "src/slurmd/common/xcgroup.h"
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <signal.h>
-#include <stdlib.h>
 
 /*
  * These variables are required by the generic plugin interface.  If they
@@ -82,17 +83,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobcomp/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job completion logging API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
-const char plugin_name[]      = "Process tracking via linux "
-	"cgroup freezer subsystem";
+const char plugin_name[]      = "Process tracking via linux cgroup freezer subsystem";
 const char plugin_type[]      = "proctrack/cgroup";
-const uint32_t plugin_version = 91;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 #ifndef PATH_MAX
 #define PATH_MAX 256
@@ -176,8 +172,8 @@ int _slurm_cgroup_create(stepd_step_rec_t *job, uint64_t id, uid_t uid, gid_t gi
 	if (*user_cgroup_path == '\0') {
 		if (snprintf(user_cgroup_path, PATH_MAX,
 			     "%s/uid_%u", pre, uid) >= PATH_MAX) {
-			error("unable to build uid %u cgroup relative "
-			      "path : %m", uid);
+			error("unable to build uid %u cgroup relative path : %m",
+			      uid);
 			xfree(pre);
 			goto bail;
 		}
@@ -188,8 +184,8 @@ int _slurm_cgroup_create(stepd_step_rec_t *job, uint64_t id, uid_t uid, gid_t gi
 	if (*job_cgroup_path == '\0') {
 		if (snprintf(job_cgroup_path, PATH_MAX, "%s/job_%u",
 			     user_cgroup_path, job->jobid) >= PATH_MAX) {
-			error("unable to build job %u cgroup relative "
-			      "path : %m", job->jobid);
+			error("unable to build job %u cgroup relative path : %m",
+			      job->jobid);
 			goto bail;
 		}
 	}
@@ -200,16 +196,22 @@ int _slurm_cgroup_create(stepd_step_rec_t *job, uint64_t id, uid_t uid, gid_t gi
 			if (snprintf(jobstep_cgroup_path, PATH_MAX,
 				     "%s/step_batch", job_cgroup_path)
 			    >= PATH_MAX) {
-				error("proctrack/cgroup unable to build job step"
-				      " %u.batch freezer cg relative path: %m",
+				error("proctrack/cgroup unable to build job step %u.batch freezer cg relative path: %m",
+				      job->jobid);
+				goto bail;
+			}
+		} else if (job->stepid == SLURM_EXTERN_CONT) {
+			if (snprintf(jobstep_cgroup_path, PATH_MAX,
+				     "%s/step_extern", job_cgroup_path)
+			    >= PATH_MAX) {
+				error("proctrack/cgroup unable to build job step %u.extern freezer cg relative path: %m",
 				      job->jobid);
 				goto bail;
 			}
 		} else {
 			if (snprintf(jobstep_cgroup_path, PATH_MAX, "%s/step_%u",
 				     job_cgroup_path, job->stepid) >= PATH_MAX) {
-				error("proctrack/cgroup unable to build job step"
-				      " %u.%u freezer cg relative path: %m",
+				error("proctrack/cgroup unable to build job step %u.%u freezer cg relative path: %m",
 				      job->jobid, job->stepid);
 				goto bail;
 			}
@@ -255,7 +257,7 @@ int _slurm_cgroup_create(stepd_step_rec_t *job, uint64_t id, uid_t uid, gid_t gi
 	/* inhibit release agent for the step cgroup thus letting
 	 * slurmstepd being able to add new pids to the container
 	 * when the job ends (TaskEpilog,...) */
-	xcgroup_set_param(&step_freezer_cg,"notify_on_release","0");
+	xcgroup_set_param(&step_freezer_cg, "notify_on_release", "0");
 	slurm_freezer_init = true;
 
 	xcgroup_unlock(&freezer_cg);
@@ -268,14 +270,35 @@ bail:
 	return SLURM_ERROR;
 }
 
+static int _move_current_to_root_cgroup(xcgroup_ns_t *ns)
+{
+	xcgroup_t cg;
+	int rc;
+
+	if (xcgroup_create(ns, &cg, "", 0, 0) != XCGROUP_SUCCESS)
+		return SLURM_ERROR;
+
+	rc = xcgroup_move_process(&cg, getpid());
+	xcgroup_destroy(&cg);
+
+	return rc;
+}
+
 int _slurm_cgroup_destroy(void)
 {
 	xcgroup_lock(&freezer_cg);
 
+	/*
+	 *  First move slurmstepd process to the root cgroup, otherwise
+	 *   the rmdir(2) triggered by the calls below will always fail,
+	 *   because slurmstepd is still in the cgroup!
+	 */
+	_move_current_to_root_cgroup(&freezer_ns);
+
 	if (jobstep_cgroup_path[0] != '\0') {
 		if (xcgroup_delete(&step_freezer_cg) != XCGROUP_SUCCESS) {
-			debug("_slurm_cgroup_destroy: problem deleting step "
-			      "cgroup path %s: %m", step_freezer_cg.path);
+			debug("_slurm_cgroup_destroy: problem deleting step cgroup path %s: %m",
+			      step_freezer_cg.path);
 			xcgroup_unlock(&freezer_cg);
 			return SLURM_ERROR;
 		}
@@ -575,8 +598,7 @@ extern int proctrack_p_wait(uint64_t cont_id)
 		if (delay < 120) {
 			delay *= 2;
 		} else {
-			error("%s: Unable to destroy container %"PRIu64" "
-			      "in cgroup plugin, giving up after %d sec",
+			error("%s: Unable to destroy container %"PRIu64" in cgroup plugin, giving up after %d sec",
 			      __func__, cont_id, delay);
 			break;
 		}
diff --git a/src/plugins/proctrack/cray/Makefile.in b/src/plugins/proctrack/cray/Makefile.in
index 9e9769120..6a2024dd5 100644
--- a/src/plugins/proctrack/cray/Makefile.in
+++ b/src/plugins/proctrack/cray/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/proctrack/cray/proctrack_cray.c b/src/plugins/proctrack/cray/proctrack_cray.c
index 0eeed3141..d2fb63178 100644
--- a/src/plugins/proctrack/cray/proctrack_cray.c
+++ b/src/plugins/proctrack/cray/proctrack_cray.c
@@ -68,7 +68,7 @@
 
 const char plugin_name[]      = "Process tracking via Cray job module";
 const char plugin_type[]      = "proctrack/cray";
-const uint32_t plugin_version = 91;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 /*
  *  Handle to libjob.so
diff --git a/src/plugins/proctrack/linuxproc/Makefile.in b/src/plugins/proctrack/linuxproc/Makefile.in
index be89f5eae..21f96baf0 100644
--- a/src/plugins/proctrack/linuxproc/Makefile.in
+++ b/src/plugins/proctrack/linuxproc/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/proctrack/linuxproc/kill_tree.c b/src/plugins/proctrack/linuxproc/kill_tree.c
index ea6f12dbf..967e0602b 100644
--- a/src/plugins/proctrack/linuxproc/kill_tree.c
+++ b/src/plugins/proctrack/linuxproc/kill_tree.c
@@ -122,7 +122,7 @@ static void _push_to_hashtbl(pid_t ppid, pid_t pid,
 	hashtbl[idx] = newppid;
 }
 
-static int get_myname(char *s)
+static int _get_myname(char *s)
 {
 	char path[PATH_MAX], rbuf[1024];
 	int fd;
@@ -160,7 +160,8 @@ static xppid_t **_build_hashtbl(void)
 		error("opendir(/proc): %m");
 		return NULL;
 	}
-	if (get_myname(myname) < 0) return NULL;
+	if (_get_myname(myname) < 0)
+		return NULL;
 	debug3("Myname in build_hashtbl: %s", myname);
 
 	hashtbl = (xppid_t **)xmalloc(HASH_LEN * sizeof(xppid_t *));
@@ -376,7 +377,7 @@ extern int proctrack_linuxproc_get_pids(pid_t top, pid_t **pids, int *npids)
 	p = (pid_t *)xmalloc(sizeof(pid_t) * len);
 	ptr = list;
 	i = 0;
-	while(ptr != NULL) {
+	while (ptr != NULL) {
 		if (ptr->is_usercmd) { /* don't include the slurmstepd */
 			if (i >= len-1) {
 				len *= 2;
diff --git a/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c b/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c
index 9f44c9d37..5e97f32c8 100644
--- a/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c
+++ b/src/plugins/proctrack/linuxproc/proctrack_linuxproc.c
@@ -78,16 +78,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobcomp/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job completion logging API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]      = "Process tracking via linux /proc";
 const char plugin_type[]      = "proctrack/linuxproc";
-const uint32_t plugin_version = 91;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 
 /*
diff --git a/src/plugins/proctrack/lua/Makefile.in b/src/plugins/proctrack/lua/Makefile.in
index 8a1626f87..ec087ea58 100644
--- a/src/plugins/proctrack/lua/Makefile.in
+++ b/src/plugins/proctrack/lua/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/proctrack/lua/proctrack_lua.c b/src/plugins/proctrack/lua/proctrack_lua.c
index a0c91118a..3c6423009 100644
--- a/src/plugins/proctrack/lua/proctrack_lua.c
+++ b/src/plugins/proctrack/lua/proctrack_lua.c
@@ -68,7 +68,7 @@
 
 const char plugin_name[]            = "LUA proctrack module";
 const char plugin_type[]            = "proctrack/lua";
-const uint32_t plugin_version       = 91;
+const uint32_t plugin_version       = SLURM_VERSION_NUMBER;
 
 static const char lua_script_path[] = DEFAULT_SCRIPT_DIR "/proctrack.lua";
 static lua_State *L = NULL;
diff --git a/src/plugins/proctrack/pgid/Makefile.in b/src/plugins/proctrack/pgid/Makefile.in
index 18105c9ad..bb2510aba 100644
--- a/src/plugins/proctrack/pgid/Makefile.in
+++ b/src/plugins/proctrack/pgid/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/proctrack/pgid/proctrack_pgid.c b/src/plugins/proctrack/pgid/proctrack_pgid.c
index f5c334bcb..869136b16 100644
--- a/src/plugins/proctrack/pgid/proctrack_pgid.c
+++ b/src/plugins/proctrack/pgid/proctrack_pgid.c
@@ -88,16 +88,12 @@
  * only load job completion logging plugins if the plugin_type string has a
  * prefix of "jobcomp/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the job completion logging API
- * matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]      = "Process tracking via process group ID plugin";
 const char plugin_type[]      = "proctrack/pgid";
-const uint32_t plugin_version = 91;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
diff --git a/src/plugins/proctrack/sgi_job/Makefile.in b/src/plugins/proctrack/sgi_job/Makefile.in
index 09288e16e..377ffcfbd 100644
--- a/src/plugins/proctrack/sgi_job/Makefile.in
+++ b/src/plugins/proctrack/sgi_job/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c b/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c
index a6688a475..fcc5569df 100644
--- a/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c
+++ b/src/plugins/proctrack/sgi_job/proctrack_sgi_job.c
@@ -62,7 +62,7 @@
 
 const char plugin_name[]      = "Process tracking via SGI job module";
 const char plugin_type[]      = "proctrack/sgi_job";
-const uint32_t plugin_version = 91;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 /*
  * We can't include <job.h> since its prototypes conflict with some
diff --git a/src/plugins/route/Makefile.in b/src/plugins/route/Makefile.in
index 623bf7f00..0d37ec978 100644
--- a/src/plugins/route/Makefile.in
+++ b/src/plugins/route/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/route/default/Makefile.in b/src/plugins/route/default/Makefile.in
index 0a6e64bdd..c30da287d 100644
--- a/src/plugins/route/default/Makefile.in
+++ b/src/plugins/route/default/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/route/default/route_default.c b/src/plugins/route/default/route_default.c
index dbf4de44f..b0f196f1c 100644
--- a/src/plugins/route/default/route_default.c
+++ b/src/plugins/route/default/route_default.c
@@ -71,15 +71,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "route default plugin";
 const char plugin_type[]        = "route/default";
-const uint32_t plugin_version   = 101;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 
 /*****************************************************************************\
@@ -157,6 +154,11 @@ extern slurm_addr_t* route_p_next_collector ( bool *is_collector )
  */
 extern slurm_addr_t* route_p_next_collector_backup ( void )
 {
-	return route_next_collector_backup();
+	/* return NULL until we have a clearly defined backup.
+	 * Otherwise we could get into a sending loop if the primary
+	 * fails with us sending to a sibling that may have me as a
+	 * parent.
+	 */
+	return NULL;
 }
 
diff --git a/src/plugins/route/topology/Makefile.in b/src/plugins/route/topology/Makefile.in
index 9cc81aa7a..b6b4905d7 100644
--- a/src/plugins/route/topology/Makefile.in
+++ b/src/plugins/route/topology/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/route/topology/route_topology.c b/src/plugins/route/topology/route_topology.c
index 09a16acc8..afad694bb 100644
--- a/src/plugins/route/topology/route_topology.c
+++ b/src/plugins/route/topology/route_topology.c
@@ -87,15 +87,12 @@ int switch_levels = 0;
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "route topology plugin";
 const char plugin_type[]        = "route/topology";
-const uint32_t plugin_version   = 101;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 /* Global data */
 static uint64_t debug_flags = 0;
@@ -284,5 +281,10 @@ extern slurm_addr_t* route_p_next_collector ( bool *is_collector )
  */
 extern slurm_addr_t* route_p_next_collector_backup ( void )
 {
-	return route_next_collector_backup();
+	/* return NULL until we have a clearly defined backup.
+	 * Otherwise we could get into a sending loop if the primary
+	 * fails with us sending to a sibling that may have me as a
+	 * parent.
+	 */
+	return NULL;
 }
diff --git a/src/plugins/sched/Makefile.in b/src/plugins/sched/Makefile.in
index 3339dd2d5..673d1befc 100644
--- a/src/plugins/sched/Makefile.in
+++ b/src/plugins/sched/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/sched/backfill/Makefile.in b/src/plugins/sched/backfill/Makefile.in
index eb01fda7d..70e37383d 100644
--- a/src/plugins/sched/backfill/Makefile.in
+++ b/src/plugins/sched/backfill/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/sched/backfill/backfill.c b/src/plugins/sched/backfill/backfill.c
index d6b94502f..e05605072 100644
--- a/src/plugins/sched/backfill/backfill.c
+++ b/src/plugins/sched/backfill/backfill.c
@@ -72,6 +72,7 @@
 #include "src/common/macros.h"
 #include "src/common/node_select.h"
 #include "src/common/parse_time.h"
+#include "src/common/power.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/slurm_protocol_api.h"
@@ -79,6 +80,7 @@
 #include "src/common/xstring.h"
 
 #include "src/slurmctld/acct_policy.h"
+#include "src/slurmctld/burst_buffer.h"
 #include "src/slurmctld/front_end.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/licenses.h"
@@ -241,6 +243,15 @@ static bool _job_is_completing(void)
 	return completing;
 }
 
+static void _set_job_time_limit(struct job_record *job_ptr, uint32_t new_limit)
+{
+	job_ptr->time_limit = new_limit;
+	/* reset flag if we have a NO_VAL time_limit */
+	if (job_ptr->time_limit == NO_VAL)
+		job_ptr->limit_set.time = 0;
+
+}
+
 /*
  * _many_pending_rpcs - Determine if slurmctld is busy with many active RPCs
  * RET - True if slurmctld currently has more than SLURMCTLD_THREAD_LIMIT
@@ -324,17 +335,14 @@ static int  _try_sched(struct job_record *job_ptr, bitstr_t **avail_bitmap,
 			rc = ESLURM_NODES_BUSY;
 		} else {
 			preemptee_candidates =
-					slurm_find_preemptable_jobs(job_ptr);
+				slurm_find_preemptable_jobs(job_ptr);
 			rc = select_g_job_test(job_ptr, *avail_bitmap,
 					       high_cnt, max_nodes, req_nodes,
 					       SELECT_MODE_WILL_RUN,
 					       preemptee_candidates,
 					       &preemptee_job_list,
 					       exc_core_bitmap);
-			if (preemptee_job_list) {
-				list_destroy(preemptee_job_list);
-				preemptee_job_list = NULL;
-			}
+			FREE_NULL_LIST(preemptee_job_list);
 		}
 
 		/* Restore the feature counts */
@@ -369,10 +377,7 @@ static int  _try_sched(struct job_record *job_ptr, bitstr_t **avail_bitmap,
 				       preemptee_candidates,
 				       &preemptee_job_list,
 				       exc_core_bitmap);
-		if (preemptee_job_list) {
-			list_destroy(preemptee_job_list);
-			preemptee_job_list = NULL;
-		}
+		FREE_NULL_LIST(preemptee_job_list);
 
 		job_ptr->details->share_res = orig_shared;
 
@@ -386,16 +391,12 @@ static int  _try_sched(struct job_record *job_ptr, bitstr_t **avail_bitmap,
 					       preemptee_candidates,
 					       &preemptee_job_list,
 					       exc_core_bitmap);
-			if (preemptee_job_list) {
-				list_destroy(preemptee_job_list);
-				preemptee_job_list = NULL;
-			}
+			FREE_NULL_LIST(preemptee_job_list);
 		} else
 			FREE_NULL_BITMAP(tmp_bitmap);
 	}
 
-	if (preemptee_candidates)
-		list_destroy(preemptee_candidates);
+	FREE_NULL_LIST(preemptee_candidates);
 	return rc;
 
 }
@@ -429,8 +430,10 @@ static void _my_sleep(int usec)
 	struct timespec ts = {0, 0};
 	struct timeval  tv = {0, 0};
 
-	if (gettimeofday(&tv, NULL))
-		return;		/* Some error */
+	if (gettimeofday(&tv, NULL)) {		/* Some error */
+		sleep(1);
+		return;
+	}
 
 	nsec  = tv.tv_usec + usec;
 	nsec *= 1000;
@@ -571,9 +574,8 @@ extern void backfill_reconfig(void)
 }
 
 static void _do_diag_stats(struct timeval *tv1, struct timeval *tv2,
-			   int yield_sleep)
+			   int yield_sleep_usecs)
 {
-	uint32_t yield_sleep_usecs = yield_sleep * 1000000;
 	uint32_t delta_t, real_time;
 
 	delta_t  = (tv2->tv_sec  - tv1->tv_sec) * 1000000;
@@ -641,6 +643,7 @@ extern void *backfill_agent(void *args)
 		lock_slurmctld(all_locks);
 		(void) _attempt_backfill();
 		last_backfill_time = time(NULL);
+		(void) bb_g_job_try_stage_in();
 		unlock_slurmctld(all_locks);
 	}
 	return NULL;
@@ -726,7 +729,7 @@ static int _attempt_backfill(void)
 	List job_queue;
 	job_queue_rec_t *job_queue_rec;
 	slurmdb_qos_rec_t *qos_ptr = NULL;
-	int i, j, node_space_recs;
+	int bb, i, j, node_space_recs;
 	struct job_record *job_ptr;
 	struct part_record *part_ptr, **bf_part_ptr = NULL;
 	uint32_t end_time, end_reserve;
@@ -773,6 +776,7 @@ static int _attempt_backfill(void)
 	 * pending RPCs before starting the backfill scheduling logic */
 	_yield_locks(1000000);
 #endif
+	(void) bb_g_load_state(false);
 
 	START_TIMER;
 	if (debug_flags & DEBUG_FLAG_BACKFILL)
@@ -786,13 +790,17 @@ static int _attempt_backfill(void)
 		filter_root = true;
 
 	job_queue = build_job_queue(true, true);
-	if (list_count(job_queue) == 0) {
+	job_test_count = list_count(job_queue);
+	if (job_test_count == 0) {		
 		if (debug_flags & DEBUG_FLAG_BACKFILL)
 			info("backfill: no jobs to backfill");
 		else
 			debug("backfill: no jobs to backfill");
-		list_destroy(job_queue);
+		FREE_NULL_LIST(job_queue);
 		return 0;
+	} else {
+		debug("backfill: %u jobs to backfill", job_test_count);
+		job_test_count = 0;
 	}
 
 	if (backfill_continue)
@@ -895,6 +903,15 @@ static int _attempt_backfill(void)
 			xfree(job_queue_rec);
 			continue;
 		}
+		if ((job_ptr->array_task_id != job_queue_rec->array_task_id) &&
+		    (job_queue_rec->array_task_id == NO_VAL)) {
+			/* Job array element started in other partition,
+			 * reset pointer to "master" job array record */
+			job_ptr = find_job_record(job_ptr->array_job_id);
+			if (!job_ptr)	/* All task array elements started */
+				continue;
+		}
+
 		orig_start_time = job_ptr->start_time;
 		orig_time_limit = job_ptr->time_limit;
 		part_ptr = job_queue_rec->part_ptr;
@@ -905,8 +922,9 @@ next_task:
 		slurmctld_diag_stats.bf_last_depth++;
 		already_counted = false;
 
-		if (!IS_JOB_PENDING(job_ptr))
-			continue;	/* started in another partition */
+		if (!IS_JOB_PENDING(job_ptr) ||	/* Started in other partition */
+		    (job_ptr->priority == 0))	/* Job has been held */
+			continue;
 		if (job_ptr->preempt_in_progress)
 			continue; 	/* scheduled in another partition */
 		if (!avail_front_end(job_ptr))
@@ -1042,6 +1060,7 @@ next_task:
 			part_time_limit = part_ptr->max_time;
 		if (job_ptr->time_limit == NO_VAL) {
 			time_limit = part_time_limit;
+			job_ptr->limit_set.time = 1;
 		} else {
 			if (part_ptr->max_time == INFINITE)
 				time_limit = job_ptr->time_limit;
@@ -1061,14 +1080,17 @@ next_task:
 		later_start = now;
  TRY_LATER:
 		if (slurmctld_config.shutdown_time ||
-		    (difftime(time(NULL), orig_sched_start)>=backfill_interval))
+		    (difftime(time(NULL), orig_sched_start) >=
+		     backfill_interval)) {
+			_set_job_time_limit(job_ptr, orig_time_limit);
 			break;
+		}
 		if (((defer_rpc_cnt > 0) &&
 		     (slurmctld_config.server_thread_count >= defer_rpc_cnt)) ||
 		    (_delta_tv(&start_tv) >= sched_timeout)) {
 			uint32_t save_job_id = job_ptr->job_id;
 			uint32_t save_time_limit = job_ptr->time_limit;
-			job_ptr->time_limit = orig_time_limit;
+			_set_job_time_limit(job_ptr, orig_time_limit);
 			if (debug_flags & DEBUG_FLAG_BACKFILL) {
 				END_TIMER;
 				info("backfill: completed yielding locks "
@@ -1099,7 +1121,8 @@ next_task:
 			if ((job_ptr->magic  != JOB_MAGIC) ||
 			    (job_ptr->job_id != save_job_id))
 				continue;
-			if (!IS_JOB_PENDING(job_ptr))
+			if (!IS_JOB_PENDING(job_ptr) ||	/* Already started */
+			    (job_ptr->priority == 0))	/* Job has been held */
 				continue;
 			if (!avail_front_end(job_ptr))
 				continue;	/* No available frontend */
@@ -1122,7 +1145,7 @@ next_task:
 			if (debug_flags & DEBUG_FLAG_BACKFILL)
 				info("backfill: job %u reservation defer",
 				     job_ptr->job_id);
-			job_ptr->time_limit = orig_time_limit;
+			_set_job_time_limit(job_ptr, orig_time_limit);
 			continue;
 		}
 		if (start_res > now)
@@ -1176,7 +1199,7 @@ next_task:
 			}
 
 			/* Job can not start until too far in the future */
-			job_ptr->time_limit = orig_time_limit;
+			_set_job_time_limit(job_ptr, orig_time_limit);
 			job_ptr->start_time = sched_start + backfill_window;
 			if ((orig_start_time != 0) &&
 			    (orig_start_time < job_ptr->start_time)) {
@@ -1207,7 +1230,7 @@ next_task:
 
 		now = time(NULL);
 		if (j != SLURM_SUCCESS) {
-			job_ptr->time_limit = orig_time_limit;
+			_set_job_time_limit(job_ptr, orig_time_limit);
 			if (orig_start_time != 0)  /* Can start in other part */
 				job_ptr->start_time = orig_start_time;
 			else
@@ -1219,7 +1242,30 @@ next_task:
 			job_ptr->start_time = start_res;
 			last_job_update = now;
 		}
-		if (job_ptr->start_time <= now) {	/* Can start now */
+		if ((job_ptr->start_time <= now) &&
+		    ((bb = bb_g_job_test_stage_in(job_ptr, true)) != 1)) {
+			xfree(job_ptr->state_desc);
+			if (bb == -1) {
+				job_ptr->state_reason=
+					WAIT_BURST_BUFFER_RESOURCE;
+				job_ptr->start_time =
+					bb_g_job_get_est_start(job_ptr);
+			} else {	/* bb == 0 */
+				job_ptr->state_reason=WAIT_BURST_BUFFER_STAGING;
+				job_ptr->start_time = now + 1;
+			}
+			debug3("sched: JobId=%u. State=%s. Reason=%s. "
+			       "Priority=%u.",
+			       job_ptr->job_id,
+			       job_state_string(job_ptr->job_state),
+			       job_reason_string(job_ptr->state_reason),
+			       job_ptr->priority);
+			last_job_update = now;
+			_set_job_time_limit(job_ptr, orig_time_limit);
+			later_start = 0;
+			if (bb == -1)
+				continue;
+		} else if (job_ptr->start_time <= now) { /* Can start now */
 			uint32_t save_time_limit = job_ptr->time_limit;
 			uint32_t hard_limit;
 			bool reset_time = false;
@@ -1229,10 +1275,12 @@ next_task:
 					acct_policy_alter_job(
 						job_ptr, comp_time_limit);
 					job_ptr->time_limit = comp_time_limit;
+					job_ptr->limit_set.time = 1;
 				} else {
 					acct_policy_alter_job(
 						job_ptr, orig_time_limit);
-					job_ptr->time_limit = orig_time_limit;
+					_set_job_time_limit(job_ptr,
+							    orig_time_limit);
 				}
 			} else if ((rc == SLURM_SUCCESS) && job_ptr->time_min) {
 				/* Set time limit as high as possible */
@@ -1242,10 +1290,10 @@ next_task:
 			} else if (orig_time_limit == NO_VAL) {
 				acct_policy_alter_job(job_ptr, comp_time_limit);
 				job_ptr->time_limit = comp_time_limit;
+				job_ptr->limit_set.time = 1;
 			} else {
 				acct_policy_alter_job(job_ptr, orig_time_limit);
-				job_ptr->time_limit = orig_time_limit;
-
+				_set_job_time_limit(job_ptr, orig_time_limit);
 			}
 			/* Only set end_time if start_time is set,
 			 * or else end_time will be small (ie. 1969). */
@@ -1270,13 +1318,16 @@ next_task:
 			}
 
 			if ((rc == ESLURM_ACCOUNTING_POLICY) ||
-			    (rc == ESLURM_RESERVATION_BUSY)) {
+			    (rc == ESLURM_RESERVATION_BUSY) ||
+			    (rc == ESLURM_POWER_NOT_AVAIL) ||
+			    (rc == ESLURM_POWER_RESERVED)) {
 				/* Unknown future start time, just skip job */
 				if (orig_start_time != 0) {
 					/* Can start in different partition */
 					job_ptr->start_time = orig_start_time;
 				} else
 					job_ptr->start_time = 0;
+				_set_job_time_limit(job_ptr, orig_time_limit);
 				continue;
 			} else if (rc != SLURM_SUCCESS) {
 				if (debug_flags & DEBUG_FLAG_BACKFILL) {
@@ -1287,7 +1338,7 @@ next_task:
 				/* Drop through and reserve these resources.
 				 * Likely due to state changes during sleep.
 				 * Make best-effort based upon original state */
-				job_ptr->time_limit = orig_time_limit;
+				_set_job_time_limit(job_ptr, orig_time_limit);
 				later_start = 0;
 			} else {
 				/* Started this job, move to next one */
@@ -1296,10 +1347,9 @@ next_task:
 
 				/* Update the database if job time limit
 				 * changed and move to next job */
-				if (save_time_limit != job_ptr->time_limit &&
-				    (!with_slurmdbd || job_ptr->db_index))
-					jobacct_storage_g_job_start(acct_db_conn,
-								    job_ptr);
+				if (save_time_limit != job_ptr->time_limit)
+					jobacct_storage_job_start_direct(
+							acct_db_conn, job_ptr);
 				job_start_cnt++;
 				if (max_backfill_jobs_start &&
 				    (job_start_cnt >= max_backfill_jobs_start)){
@@ -1320,7 +1370,7 @@ next_task:
 				continue;
 			}
 		} else {
-			job_ptr->time_limit = orig_time_limit;
+			_set_job_time_limit(job_ptr, orig_time_limit);
 		}
 
 		start_time  = job_ptr->start_time;
@@ -1359,6 +1409,8 @@ next_task:
 		}
 
 		if ((job_ptr->start_time > now) &&
+		    (job_ptr->state_reason != WAIT_BURST_BUFFER_RESOURCE) &&
+		    (job_ptr->state_reason != WAIT_BURST_BUFFER_STAGING) &&
 		    _test_resv_overlap(node_space, avail_bitmap,
 				       start_time, end_reserve)) {
 			/* This job overlaps with an existing reservation for
@@ -1424,7 +1476,7 @@ next_task:
 			break;
 	}
 	xfree(node_space);
-	list_destroy(job_queue);
+	FREE_NULL_LIST(job_queue);
 	gettimeofday(&bf_time2, NULL);
 	_do_diag_stats(&bf_time1, &bf_time2, yield_sleep);
 	if (debug_flags & DEBUG_FLAG_BACKFILL) {
@@ -1473,13 +1525,16 @@ static int _start_job(struct job_record *job_ptr, bitstr_t *resv_bitmap)
 		/* job initiated */
 		last_job_update = time(NULL);
 		if (job_ptr->array_task_id == NO_VAL) {
-			info("backfill: Started JobId=%u on %s",
-			     job_ptr->job_id, job_ptr->nodes);
+			info("backfill: Started JobId=%u in %s on %s",
+			     job_ptr->job_id, job_ptr->part_ptr->name,
+			     job_ptr->nodes);
 		} else {
-			info("backfill: Started JobId=%u_%u (%u) on %s",
+			info("backfill: Started JobId=%u_%u (%u) in %s on %s",
 			     job_ptr->array_job_id, job_ptr->array_task_id,
-			     job_ptr->job_id, job_ptr->nodes);
+			     job_ptr->job_id, job_ptr->part_ptr->name,
+			     job_ptr->nodes);
 		}
+		power_g_job_start(job_ptr);
 		if (job_ptr->batch_flag == 0)
 			srun_allocate(job_ptr->job_id);
 		else if ((job_ptr->details == NULL) ||
@@ -1499,7 +1554,7 @@ static int _start_job(struct job_record *job_ptr, bitstr_t *resv_bitmap)
 		/* This happens when a job has sharing disabled and
 		 * a selected node is still completing some job,
 		 * which should be a temporary situation. */
-		verbose("backfill: Failed to start JobId=%u on %s: %s",
+		verbose("backfill: Failed to start JobId=%u with %s avail: %s",
 			job_ptr->job_id, node_list, slurm_strerror(rc));
 		xfree(node_list);
 		fail_jobid = job_ptr->job_id;
diff --git a/src/plugins/sched/backfill/backfill_wrapper.c b/src/plugins/sched/backfill/backfill_wrapper.c
index 27cb65cc7..40c805944 100644
--- a/src/plugins/sched/backfill/backfill_wrapper.c
+++ b/src/plugins/sched/backfill/backfill_wrapper.c
@@ -53,7 +53,7 @@
 
 const char		plugin_name[]	= "SLURM Backfill Scheduler plugin";
 const char		plugin_type[]	= "sched/backfill";
-const uint32_t		plugin_version	= 110;
+const uint32_t		plugin_version	= SLURM_VERSION_NUMBER;
 
 /* A plugin-global errno. */
 static int plugin_errno = SLURM_SUCCESS;
@@ -68,10 +68,9 @@ int init( void )
 {
 	pthread_attr_t attr;
 
-#ifdef HAVE_ALPS_CRAY
-	if (!slurmctld_primary)
+	if (slurmctld_config.scheduling_disabled)
 		return SLURM_SUCCESS;
-#endif
+
 
 	verbose( "sched: Backfill scheduler plugin loaded" );
 
diff --git a/src/plugins/sched/builtin/Makefile.in b/src/plugins/sched/builtin/Makefile.in
index 10d80c0b6..b7c6a0b9d 100644
--- a/src/plugins/sched/builtin/Makefile.in
+++ b/src/plugins/sched/builtin/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/sched/builtin/builtin.c b/src/plugins/sched/builtin/builtin.c
index ccdadb1fb..cc801f03f 100644
--- a/src/plugins/sched/builtin/builtin.c
+++ b/src/plugins/sched/builtin/builtin.c
@@ -59,6 +59,7 @@
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
 
+#include "src/slurmctld/burst_buffer.h"
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/preempt.h"
 #include "src/slurmctld/reservation.h"
@@ -241,7 +242,7 @@ static void _compute_start_times(void)
 			break;
 		}
 	}
-	list_destroy(job_queue);
+	FREE_NULL_LIST(job_queue);
 	FREE_NULL_BITMAP(alloc_bitmap);
 }
 
@@ -279,6 +280,7 @@ extern void *builtin_agent(void *args)
 		lock_slurmctld(all_locks);
 		_compute_start_times();
 		last_sched_time = time(NULL);
+		(void) bb_g_job_try_stage_in();
 		unlock_slurmctld(all_locks);
 	}
 	return NULL;
diff --git a/src/plugins/sched/builtin/builtin_wrapper.c b/src/plugins/sched/builtin/builtin_wrapper.c
index e7369f906..aced58dce 100644
--- a/src/plugins/sched/builtin/builtin_wrapper.c
+++ b/src/plugins/sched/builtin/builtin_wrapper.c
@@ -52,7 +52,7 @@
 
 const char		plugin_name[]	= "SLURM Built-in Scheduler plugin";
 const char		plugin_type[]	= "sched/builtin";
-const uint32_t		plugin_version	= 110;
+const uint32_t		plugin_version	= SLURM_VERSION_NUMBER;
 
 /* A plugin-global errno. */
 static int plugin_errno = SLURM_SUCCESS;
diff --git a/src/plugins/sched/hold/Makefile.in b/src/plugins/sched/hold/Makefile.in
index 21e6c3171..c49764edf 100644
--- a/src/plugins/sched/hold/Makefile.in
+++ b/src/plugins/sched/hold/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -272,6 +275,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -321,8 +326,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -341,6 +350,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -384,6 +396,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -407,6 +420,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/sched/hold/hold_wrapper.c b/src/plugins/sched/hold/hold_wrapper.c
index c019c055c..6c3e217f3 100644
--- a/src/plugins/sched/hold/hold_wrapper.c
+++ b/src/plugins/sched/hold/hold_wrapper.c
@@ -51,7 +51,7 @@
 
 const char		plugin_name[]	= "SLURM Hold Scheduler plugin";
 const char		plugin_type[]	= "sched/hold";
-const uint32_t		plugin_version	= 110;
+const uint32_t		plugin_version	= SLURM_VERSION_NUMBER;
 
 /* A plugin-global errno. */
 static int plugin_errno = SLURM_SUCCESS;
diff --git a/src/plugins/sched/wiki/Makefile.in b/src/plugins/sched/wiki/Makefile.in
index 9f9a2af80..65c152c6b 100644
--- a/src/plugins/sched/wiki/Makefile.in
+++ b/src/plugins/sched/wiki/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/sched/wiki/job_modify.c b/src/plugins/sched/wiki/job_modify.c
index 5dbd2552c..762cc2052 100644
--- a/src/plugins/sched/wiki/job_modify.c
+++ b/src/plugins/sched/wiki/job_modify.c
@@ -203,10 +203,8 @@ host_fini:	if (rc) {
 	}
 
 	if (update_accounting) {
-		if (job_ptr->details && job_ptr->details->begin_time) {
-			/* Update job record in accounting to reflect changes */
-			jobacct_storage_g_job_start(acct_db_conn, job_ptr);
-		}
+		/* Update job record in accounting to reflect changes */
+		jobacct_storage_job_start_direct(acct_db_conn, job_ptr);
 	}
 
 	return SLURM_SUCCESS;
diff --git a/src/plugins/sched/wiki/msg.c b/src/plugins/sched/wiki/msg.c
index 5879c93b5..eac27c3fb 100644
--- a/src/plugins/sched/wiki/msg.c
+++ b/src/plugins/sched/wiki/msg.c
@@ -1,5 +1,5 @@
 /*****************************************************************************\
- *  msg.c - Message/communcation manager for Wiki plugin
+ *  msg.c - Message/communication manager for Wiki plugin
  *****************************************************************************
  *  Copyright (C) 2006-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
@@ -123,7 +123,7 @@ extern void term_msg_thread(void)
                 fd = slurm_open_stream(&addr, true);
                 if (fd != -1) {
                         /* we don't care if the open failed */
-                        slurm_close_stream(fd);
+                        slurm_close(fd);
                 }
 
                 debug2("waiting for sched/wiki thread to exit");
@@ -195,7 +195,7 @@ static void *_msg_thread(void *no_data)
 			_proc_msg(new_fd, msg);
 			xfree(msg);
 		}
-		slurm_close_accepted_conn(new_fd);
+		slurm_close(new_fd);
 	}
 	if (sock_fd > 0)
 		(void) slurm_shutdown_msg_engine(sock_fd);
diff --git a/src/plugins/sched/wiki/msg.h b/src/plugins/sched/wiki/msg.h
index 40707279b..9d81afbfd 100644
--- a/src/plugins/sched/wiki/msg.h
+++ b/src/plugins/sched/wiki/msg.h
@@ -1,5 +1,5 @@
 /*****************************************************************************\
- *  msg.h - Message/communcation manager for Wiki plugin
+ *  msg.h - Message/communication manager for Wiki plugin
  *****************************************************************************
  *  Copyright (C) 2006 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
diff --git a/src/plugins/sched/wiki/sched_wiki.c b/src/plugins/sched/wiki/sched_wiki.c
index d3fcd316a..ba16890f5 100644
--- a/src/plugins/sched/wiki/sched_wiki.c
+++ b/src/plugins/sched/wiki/sched_wiki.c
@@ -48,7 +48,7 @@
 
 const char		plugin_name[]	= "Wiki (Maui) Scheduler plugin";
 const char		plugin_type[]	= "sched/wiki";
-const uint32_t		plugin_version	= 110;
+const uint32_t		plugin_version	= SLURM_VERSION_NUMBER;
 
 /* A plugin-global errno. */
 static int plugin_errno = SLURM_SUCCESS;
diff --git a/src/plugins/sched/wiki2/Makefile.in b/src/plugins/sched/wiki2/Makefile.in
index 546e150ce..7f47dc2a1 100644
--- a/src/plugins/sched/wiki2/Makefile.in
+++ b/src/plugins/sched/wiki2/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -278,6 +281,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -327,8 +332,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -347,6 +356,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -390,6 +402,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -413,6 +426,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/sched/wiki2/job_modify.c b/src/plugins/sched/wiki2/job_modify.c
index 882b218f4..6cbb2d3aa 100644
--- a/src/plugins/sched/wiki2/job_modify.c
+++ b/src/plugins/sched/wiki2/job_modify.c
@@ -41,6 +41,7 @@
 #include <strings.h>
 #include "src/common/gres.h"
 #include "src/common/node_select.h"
+#include "src/common/assoc_mgr.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/locks.h"
@@ -378,6 +379,9 @@ host_fini:	if (rc) {
 
 	if (gres_ptr) {
 		char *orig_gres;
+		assoc_mgr_lock_t locks = {
+			NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+			READ_LOCK, NO_LOCK, NO_LOCK };
 
 		if (!IS_JOB_PENDING(job_ptr)) {
 			error("wiki: MODIFYJOB GRES of non-pending job %u",
@@ -397,6 +401,18 @@ host_fini:	if (rc) {
 			return ESLURM_INVALID_GRES;
 		}
 		xfree(orig_gres);
+		assoc_mgr_lock(&locks);
+		gres_set_job_tres_cnt(job_ptr->gres_list,
+				      job_ptr->details ?
+				      job_ptr->details->min_nodes : 0,
+				      job_ptr->tres_req_cnt,
+				      true);
+		xfree(job_ptr->tres_req_str);
+		job_ptr->tres_req_str =
+			assoc_mgr_make_tres_str_from_array(
+				job_ptr->tres_req_cnt,
+				TRES_STR_FLAG_SIMPLE, true);
+		assoc_mgr_unlock(&locks);
 	}
 
 	if (wckey_ptr) {
@@ -408,11 +424,8 @@ host_fini:	if (rc) {
 	}
 
 	if (update_accounting) {
-		if (job_ptr->details && job_ptr->details->begin_time) {
-			/* Update job record in accounting to reflect
-			 * the changes */
-			jobacct_storage_g_job_start(acct_db_conn, job_ptr);
-		}
+		/* Update job record in accounting to reflect the changes */
+		jobacct_storage_job_start_direct(acct_db_conn, job_ptr);
 	}
 
 	return SLURM_SUCCESS;
diff --git a/src/plugins/sched/wiki2/job_will_run.c b/src/plugins/sched/wiki2/job_will_run.c
index 2bfece784..c44b119ca 100644
--- a/src/plugins/sched/wiki2/job_will_run.c
+++ b/src/plugins/sched/wiki2/job_will_run.c
@@ -261,8 +261,7 @@ static char *	_will_run_test(uint32_t jobid, time_t start_time,
 			       min_nodes, max_nodes, req_nodes,
 			       SELECT_MODE_WILL_RUN,
 			       preemptee_candidates, NULL, exc_core_bitmap);
-	if (preemptee_candidates)
-		list_destroy(preemptee_candidates);
+	FREE_NULL_LIST(preemptee_candidates);
 
 	if (rc == SLURM_SUCCESS) {
 		char tmp_str[128];
@@ -557,8 +556,7 @@ static char *	_will_run_test2(uint32_t jobid, time_t start_time,
 			       req_nodes, SELECT_MODE_WILL_RUN,
 			       preemptee_candidates, &preempted_jobs,
 			       exc_core_bitmap);
-	if (preemptee_candidates)
-		list_destroy(preemptee_candidates);
+	FREE_NULL_LIST(preemptee_candidates);
 
 	if (rc == SLURM_SUCCESS) {
 		char *hostlist, *sep, tmp_str[128];
@@ -589,7 +587,7 @@ static char *	_will_run_test2(uint32_t jobid, time_t start_time,
 					 sep, pre_ptr->job_id);
 				xstrcat(reply_msg, tmp_str);
 			}
-			list_destroy(preempted_jobs);
+			FREE_NULL_LIST(preempted_jobs);
 		}
 	} else {
 		xstrcat(reply_msg, "Jobs not runable on selected nodes");
diff --git a/src/plugins/sched/wiki2/msg.c b/src/plugins/sched/wiki2/msg.c
index 26429bbd0..143e5b97e 100644
--- a/src/plugins/sched/wiki2/msg.c
+++ b/src/plugins/sched/wiki2/msg.c
@@ -1,5 +1,5 @@
 /*****************************************************************************\
- *  msg.c - Message/communcation manager for Wiki plugin
+ *  msg.c - Message/communication manager for Wiki plugin
  *****************************************************************************
  *  Copyright (C) 2006-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
@@ -130,7 +130,7 @@ extern void term_msg_thread(void)
 		fd = slurm_open_stream(&addr, true);
 		if (fd != -1) {
 			/* we don't care if the open failed */
-			slurm_close_stream(fd);
+			slurm_close(fd);
 		}
 
 		debug2("waiting for sched/wiki2 thread to exit");
@@ -202,7 +202,7 @@ static void *_msg_thread(void *no_data)
 			_proc_msg(new_fd, msg);
 			xfree(msg);
 		}
-		slurm_close_accepted_conn(new_fd);
+		slurm_close(new_fd);
 	}
 	verbose("wiki: message engine shutdown");
 	if (sock_fd > 0)
diff --git a/src/plugins/sched/wiki2/msg.h b/src/plugins/sched/wiki2/msg.h
index 0479064f2..0d77179d2 100644
--- a/src/plugins/sched/wiki2/msg.h
+++ b/src/plugins/sched/wiki2/msg.h
@@ -1,5 +1,5 @@
 /*****************************************************************************\
- *  msg.h - Message/communcation manager for Wiki plugin
+ *  msg.h - Message/communication manager for Wiki plugin
  *****************************************************************************
  *  Copyright (C) 2006-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
diff --git a/src/plugins/sched/wiki2/sched_wiki2.c b/src/plugins/sched/wiki2/sched_wiki2.c
index 1050b77b7..5e22b7a97 100644
--- a/src/plugins/sched/wiki2/sched_wiki2.c
+++ b/src/plugins/sched/wiki2/sched_wiki2.c
@@ -46,7 +46,7 @@
 
 const char		plugin_name[]	= "Wiki (Maui and Moab) Scheduler plugin";
 const char		plugin_type[]	= "sched/wiki2";
-const uint32_t		plugin_version	= 110;
+const uint32_t		plugin_version	= SLURM_VERSION_NUMBER;
 
 /* A plugin-global errno. */
 static int plugin_errno = SLURM_SUCCESS;
diff --git a/src/plugins/select/Makefile.in b/src/plugins/select/Makefile.in
index 4a2aadb00..4e6b406d8 100644
--- a/src/plugins/select/Makefile.in
+++ b/src/plugins/select/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/select/alps/Makefile.in b/src/plugins/select/alps/Makefile.in
index db3e679f4..d62c2480a 100644
--- a/src/plugins/select/alps/Makefile.in
+++ b/src/plugins/select/alps/Makefile.in
@@ -103,6 +103,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -111,10 +112,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -127,7 +130,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -326,6 +329,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -375,8 +380,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -395,6 +404,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -438,6 +450,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -461,6 +474,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/select/alps/basil_interface.c b/src/plugins/select/alps/basil_interface.c
index 169ea037b..61b9030d3 100644
--- a/src/plugins/select/alps/basil_interface.c
+++ b/src/plugins/select/alps/basil_interface.c
@@ -685,13 +685,13 @@ extern int basil_geometry(struct node_record *node_ptr_array, int node_cnt)
 
 struct basil_accel_param* build_accel_param(struct job_record* job_ptr)
 {
-	int gpu_mem_req;
+	uint64_t gpu_mem_req;
 	struct basil_accel_param* head,* bap_ptr;
 
 	gpu_mem_req = gres_plugin_get_job_value_by_type(job_ptr->gres_list,
 							"gpu_mem");
 
-	if (gpu_mem_req == NO_VAL)
+	if (gpu_mem_req == NO_VAL64)
 		gpu_mem_req = 0;
 
 	if (!job_ptr) {
@@ -706,7 +706,7 @@ struct basil_accel_param* build_accel_param(struct job_record* job_ptr)
 	bap_ptr = head;
 	bap_ptr->type = BA_GPU;	/* Currently BASIL only permits
 				 * generic resources of type GPU. */
-	bap_ptr->memory_mb = gpu_mem_req;
+	bap_ptr->memory_mb = (uint32_t)gpu_mem_req;
 	bap_ptr->next = NULL;
 
 	return head;
@@ -754,13 +754,15 @@ extern int do_basil_reserve(struct job_record *job_ptr)
 
 	if (cray_conf->sub_alloc) {
 		mppdepth = MAX(1, job_ptr->details->cpus_per_task);
-		if (!job_ptr->details->ntasks_per_node
-		    && job_ptr->details->num_tasks) {
+		if (job_ptr->details->ntasks_per_node) {
+			mppnppn  = job_ptr->details->ntasks_per_node;
+		} else if (job_ptr->details->num_tasks) {
 			mppnppn = (job_ptr->details->num_tasks +
 				   job_ptr->job_resrcs->nhosts - 1) /
 				job_ptr->job_resrcs->nhosts;
-		} else
-			mppnppn  = job_ptr->details->ntasks_per_node;
+		} else {
+			mppnppn = 1;
+		}
 	} else {
 		/* always be 1 */
 		mppdepth = 1;
@@ -1165,7 +1167,7 @@ extern int do_basil_release(struct job_record *job_ptr)
 	 *             stepdmgr, where job_state == NO_VAL is used to
 	 *             distinguish the context from that of slurmctld.
 	 */
-	if (job_ptr->job_state == (uint16_t)NO_VAL &&
+	if (job_ptr->job_state == NO_VAL &&
 	    (get_basil_version() >= BV_4_0)) {
 		int sleeptime = 1;
 
diff --git a/src/plugins/select/alps/libalps/Makefile.in b/src/plugins/select/alps/libalps/Makefile.in
index 0b156d4d6..dfaba76f8 100644
--- a/src/plugins/select/alps/libalps/Makefile.in
+++ b/src/plugins/select/alps/libalps/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -253,6 +256,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -302,8 +307,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -322,6 +331,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -365,6 +377,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -388,6 +401,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/select/alps/libemulate/Makefile.in b/src/plugins/select/alps/libemulate/Makefile.in
index 70f5dce7b..0814be211 100644
--- a/src/plugins/select/alps/libemulate/Makefile.in
+++ b/src/plugins/select/alps/libemulate/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -246,6 +249,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -295,8 +300,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -315,6 +324,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -358,6 +370,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -381,6 +394,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/select/alps/select_alps.c b/src/plugins/select/alps/select_alps.c
index 8345b4619..6f3c71b31 100644
--- a/src/plugins/select/alps/select_alps.c
+++ b/src/plugins/select/alps/select_alps.c
@@ -150,16 +150,13 @@ static int select_cray_dim_size[3] = {-1};
  * only load select plugins if the plugin_type string has a
  * prefix of "select/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the node selection API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]	= "Cray node selection plugin";
 const char plugin_type[]	= "select/alps";
 uint32_t plugin_id		= 104;
-const uint32_t plugin_version	= 120;
+const uint32_t plugin_version	= SLURM_VERSION_NUMBER;
 
 static bool _zero_size_job ( struct job_record *job_ptr )
 {
@@ -360,10 +357,10 @@ extern int select_p_job_ready(struct job_record *job_ptr)
 	 *		means that we need to confirm only if batch_flag is 0,
 	 *		and execute the other_job_ready() only in slurmctld.
 	 */
-	if ((slurmctld_primary || (job_ptr->job_state == (uint16_t)NO_VAL))
+	if ((slurmctld_primary || (job_ptr->job_state == NO_VAL))
 	    && !job_ptr->batch_flag && !_zero_size_job(job_ptr))
 		rc = do_basil_confirm(job_ptr);
-	if ((rc != SLURM_SUCCESS) || (job_ptr->job_state == (uint16_t) NO_VAL))
+	if ((rc != SLURM_SUCCESS) || (job_ptr->job_state == NO_VAL))
 		return rc;
 	return other_job_ready(job_ptr);
 }
@@ -447,7 +444,7 @@ extern int select_p_job_fini(struct job_record *job_ptr)
 	 * handled on the stepd end.
 	 */
 	if (((slurmctld_primary && !job_ptr->batch_flag) ||
-	     (job_ptr->job_state == (uint16_t)NO_VAL))
+	     (job_ptr->job_state == NO_VAL))
 	    && !_zero_size_job(job_ptr) &&
 	    (do_basil_release(job_ptr) != SLURM_SUCCESS))
 		return SLURM_ERROR;
@@ -456,7 +453,7 @@ extern int select_p_job_fini(struct job_record *job_ptr)
 	 *             stepdmgr, where job_state == NO_VAL is used to
 	 *             distinguish the context from that of slurmctld.
 	 */
-	if (job_ptr->job_state == (uint16_t)NO_VAL)
+	if (job_ptr->job_state == NO_VAL)
 		return SLURM_SUCCESS;
 	return other_job_fini(job_ptr);
 }
diff --git a/src/plugins/select/bluegene/Makefile.in b/src/plugins/select/bluegene/Makefile.in
index d6b796183..c0b5ec136 100644
--- a/src/plugins/select/bluegene/Makefile.in
+++ b/src/plugins/select/bluegene/Makefile.in
@@ -132,6 +132,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -140,10 +141,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -156,7 +159,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -453,6 +456,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -502,8 +507,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -522,6 +531,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -565,6 +577,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -588,6 +601,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/select/bluegene/ba/Makefile.in b/src/plugins/select/bluegene/ba/Makefile.in
index bd3a97cc6..e45a750cc 100644
--- a/src/plugins/select/bluegene/ba/Makefile.in
+++ b/src/plugins/select/bluegene/ba/Makefile.in
@@ -102,6 +102,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -110,10 +111,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -126,7 +129,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -257,6 +260,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -306,8 +311,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -326,6 +335,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -369,6 +381,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -392,6 +405,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/select/bluegene/ba/block_allocator.c b/src/plugins/select/bluegene/ba/block_allocator.c
index a41cb2137..117aee780 100644
--- a/src/plugins/select/bluegene/ba/block_allocator.c
+++ b/src/plugins/select/bluegene/ba/block_allocator.c
@@ -987,10 +987,10 @@ extern char *set_bg_block(List results, select_ba_request_t* ba_request)
 					     start_list,
 					     ba_request->geometry,
 					     ba_request->conn_type[0])) {
-				list_destroy(start_list);
+				FREE_NULL_LIST(start_list);
 				goto end_it;
 			}
-			list_destroy(start_list);
+			FREE_NULL_LIST(start_list);
 		}
 	} else {
 		goto end_it;
@@ -1001,8 +1001,7 @@ extern char *set_bg_block(List results, select_ba_request_t* ba_request)
 				   ba_request->conn_type[0]);
 end_it:
 	if (!send_results && results) {
-		list_destroy(results);
-		results = NULL;
+		FREE_NULL_LIST(results);
 	}
 	if (name!=NULL) {
 		debug2("name = %s", name);
@@ -1800,14 +1799,8 @@ extern void ba_destroy_system(void)
 {
 	int x, y;
 
-	if (path) {
-		list_destroy(path);
-		path = NULL;
-	}
-	if (best_path) {
-		list_destroy(best_path);
-		best_path = NULL;
-	}
+	FREE_NULL_LIST(path);
+	FREE_NULL_LIST(best_path);
 
 #ifdef HAVE_BG_FILES
 	if (bg)
diff --git a/src/plugins/select/bluegene/ba/wire_test.c b/src/plugins/select/bluegene/ba/wire_test.c
index d9f7ba7c5..bcc1d797c 100644
--- a/src/plugins/select/bluegene/ba/wire_test.c
+++ b/src/plugins/select/bluegene/ba/wire_test.c
@@ -149,7 +149,7 @@ int main(int argc, char** argv)
 /* 		       alpha_num[request->geometry[1]], */
 /* 		       alpha_num[request->geometry[2]]); */
 /* 	} */
-/* 	list_destroy(results); */
+/* 	FREE_NULL_LIST(results); */
 
 /* 	/\* [001x801] *\/ */
 /* 	results = list_create(NULL); */
@@ -172,7 +172,7 @@ int main(int argc, char** argv)
 /* 		       request->geometry[1], */
 /* 		       request->geometry[2]); */
 /* 	} */
-/* 	list_destroy(results); */
+/* 	FREE_NULL_LIST(results); */
 
 	/* [001x801] */
 	results = list_create(NULL);
@@ -195,7 +195,7 @@ int main(int argc, char** argv)
 		       request->geometry[1],
 		       request->geometry[2]);
 	}
-	list_destroy(results);
+	FREE_NULL_LIST(results);
 
 
 	int dim,j;
@@ -244,7 +244,7 @@ int main(int argc, char** argv)
 			}
 		}
 	}
-	/* list_destroy(results); */
+	/* FREE_NULL_LIST(results); */
 
 /* 	ba_fini(); */
 
diff --git a/src/plugins/select/bluegene/ba_bgq/Makefile.in b/src/plugins/select/bluegene/ba_bgq/Makefile.in
index f1413bde2..2e2af87d8 100644
--- a/src/plugins/select/bluegene/ba_bgq/Makefile.in
+++ b/src/plugins/select/bluegene/ba_bgq/Makefile.in
@@ -102,6 +102,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -110,10 +111,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -126,7 +129,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -276,6 +279,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -325,8 +330,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -345,6 +354,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -388,6 +400,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -411,6 +424,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/select/bluegene/ba_bgq/block_allocator.c b/src/plugins/select/bluegene/ba_bgq/block_allocator.c
index 50d3d8908..2c7acaa66 100644
--- a/src/plugins/select/bluegene/ba_bgq/block_allocator.c
+++ b/src/plugins/select/bluegene/ba_bgq/block_allocator.c
@@ -917,8 +917,7 @@ extern char *set_bg_block(List results, select_ba_request_t* ba_request)
 		/* handle failure */
 		if (!name)
 			_reset_altered_mps(main_mps, 0);
-		list_destroy(main_mps);
-		main_mps = NULL;
+		FREE_NULL_LIST(main_mps);
 	}
 	slurm_mutex_unlock(&ba_system_mutex);
 
diff --git a/src/plugins/select/bluegene/ba_bgq/wire_test.c b/src/plugins/select/bluegene/ba_bgq/wire_test.c
index 3f8167a7c..f63910849 100644
--- a/src/plugins/select/bluegene/ba_bgq/wire_test.c
+++ b/src/plugins/select/bluegene/ba_bgq/wire_test.c
@@ -147,7 +147,7 @@ int main(int argc, char** argv)
 	} else
 		info("got back mps %s\n", request->save_name);
 
-	list_destroy(results);
+	FREE_NULL_LIST(results);
 
 /* 	/\* [001x801] *\/ */
 /* 	results = list_create(NULL); */
@@ -170,7 +170,7 @@ int main(int argc, char** argv)
 /* 		       request->geometry[1], */
 /* 		       request->geometry[2]); */
 /* 	} */
-/* 	list_destroy(results); */
+/* 	FREE_NULL_LIST(results); */
 
 	/* [001x801] */
 	results = list_create(NULL);
@@ -200,7 +200,7 @@ int main(int argc, char** argv)
 		       request->geometry[3]);
 	} else
 		info("got back mps %s\n", request->save_name);
-	list_destroy(results);
+	FREE_NULL_LIST(results);
 
 	int dim;
 	int a,b,c,d;
@@ -235,7 +235,7 @@ int main(int argc, char** argv)
 			}
 		}
 	}
-	/* list_destroy(results); */
+	/* FREE_NULL_LIST(results); */
 
 /* 	ba_fini(); */
 
diff --git a/src/plugins/select/bluegene/bg_core.c b/src/plugins/select/bluegene/bg_core.c
index 73f0331a7..26715c9dc 100644
--- a/src/plugins/select/bluegene/bg_core.c
+++ b/src/plugins/select/bluegene/bg_core.c
@@ -259,7 +259,7 @@ static void *_track_freeing_blocks(void *args)
 	slurm_mutex_unlock(&block_state_mutex);
 	last_bg_update = time(NULL);
 	list_iterator_destroy(itr);
-	list_destroy(track_list);
+	FREE_NULL_LIST(track_list);
 	xfree(bg_free_list);
 	return NULL;
 }
@@ -322,7 +322,7 @@ extern bool block_mp_passthrough(bg_record_t *bg_record, int mp_bit)
 
 /* block_state_mutex must be unlocked before calling this. */
 extern void bg_requeue_job(uint32_t job_id, bool wait_for_start,
-			   bool slurmctld_locked, uint16_t job_state,
+			   bool slurmctld_locked, uint32_t job_state,
 			   bool preempted)
 {
 	int rc;
@@ -642,8 +642,7 @@ extern void free_block_list(uint32_t job_id, List track_list,
 
 	if (kill_job_list) {
 		bg_status_process_kill_job_list(kill_job_list, JOB_FAILED, 0);
-		list_destroy(kill_job_list);
-		kill_job_list = NULL;
+		FREE_NULL_LIST(kill_job_list);
 	}
 
 	if (wait) {
diff --git a/src/plugins/select/bluegene/bg_core.h b/src/plugins/select/bluegene/bg_core.h
index 3b72b253e..42d246f36 100644
--- a/src/plugins/select/bluegene/bg_core.h
+++ b/src/plugins/select/bluegene/bg_core.h
@@ -69,7 +69,7 @@
 extern bool blocks_overlap(bg_record_t *rec_a, bg_record_t *rec_b);
 extern bool block_mp_passthrough(bg_record_t *bg_record, int mp_bit);
 extern void bg_requeue_job(uint32_t job_id, bool wait_for_start,
-			   bool slurmctld_locked, uint16_t job_state,
+			   bool slurmctld_locked, uint32_t job_state,
 			   bool preempted);
 
 /* sort a list of bg_records by size (node count) */
diff --git a/src/plugins/select/bluegene/bg_defined_block.c b/src/plugins/select/bluegene/bg_defined_block.c
index 88425105a..7fe389d43 100644
--- a/src/plugins/select/bluegene/bg_defined_block.c
+++ b/src/plugins/select/bluegene/bg_defined_block.c
@@ -164,7 +164,7 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 						error("I was unable to "
 						      "make the "
 						      "requested block.");
-						list_destroy(results);
+						FREE_NULL_LIST(results);
 						rc = SLURM_ERROR;
 						break;
 					}
@@ -182,9 +182,7 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 						      bg_record->mp_str,
 						      temp);
 					}
-					if (bg_record->ba_mp_list)
-						list_destroy(
-							bg_record->ba_mp_list);
+					FREE_NULL_LIST(bg_record->ba_mp_list);
 #ifdef HAVE_BGQ
 					bg_record->ba_mp_list = results;
 					results = NULL;
@@ -196,7 +194,7 @@ extern int create_defined_blocks(bg_layout_t overlapped,
 						list_create(destroy_ba_mp);
 					copy_node_path(results,
 						       &bg_record->ba_mp_list);
-					list_destroy(results);
+					FREE_NULL_LIST(results);
 #endif
 				}
 			}
@@ -435,21 +433,20 @@ extern int create_full_system_block(List bg_found_block_list)
 
 	if (!name) {
 		error("I was unable to make the full system block.");
-		list_destroy(results);
+		FREE_NULL_LIST(results);
 		list_iterator_destroy(itr);
 		slurm_mutex_unlock(&block_state_mutex);
 		return SLURM_ERROR;
 	}
 	xfree(name);
-	if (bg_record->ba_mp_list)
-		list_destroy(bg_record->ba_mp_list);
+	FREE_NULL_LIST(bg_record->ba_mp_list);
 #ifdef HAVE_BGQ
 	bg_record->ba_mp_list = results;
 	results = NULL;
 #else
 	bg_record->ba_mp_list = list_create(destroy_ba_mp);
 	copy_node_path(results, &bg_record->ba_mp_list);
-	list_destroy(results);
+	FREE_NULL_LIST(results);
 #endif
 	if ((rc = bridge_block_create(bg_record)) == SLURM_ERROR) {
 		error("create_full_system_block: "
@@ -463,8 +460,7 @@ extern int create_full_system_block(List bg_found_block_list)
 	list_append(bg_lists->main, bg_record);
 
 no_total:
-	if (records)
-		list_destroy(records);
+	FREE_NULL_LIST(records);
 	slurm_mutex_unlock(&block_state_mutex);
 	return rc;
 }
diff --git a/src/plugins/select/bluegene/bg_dynamic_block.c b/src/plugins/select/bluegene/bg_dynamic_block.c
index 15f2944bc..e0de0fa57 100644
--- a/src/plugins/select/bluegene/bg_dynamic_block.c
+++ b/src/plugins/select/bluegene/bg_dynamic_block.c
@@ -295,8 +295,7 @@ try_small_again:
 
 		/* Re-sort the list back to the original order. */
 		list_sort(block_list, (ListCmpF)bg_record_sort_aval_inc);
-		list_destroy(new_blocks);
-		new_blocks = NULL;
+		FREE_NULL_LIST(new_blocks);
 		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_PICK)
 			info("small block not able to be placed inside others");
 	}
@@ -465,10 +464,7 @@ finished:
 
 	xfree(request->save_name);
 
-	if (results) {
-		list_destroy(results);
-		results = NULL;
-	}
+	FREE_NULL_LIST(results);
 
 	errno = rc;
 
diff --git a/src/plugins/select/bluegene/bg_job_info.c b/src/plugins/select/bluegene/bg_job_info.c
index 94f9924eb..716872d5b 100644
--- a/src/plugins/select/bluegene/bg_job_info.c
+++ b/src/plugins/select/bluegene/bg_job_info.c
@@ -512,67 +512,6 @@ extern int  pack_select_jobinfo(select_jobinfo_t *jobinfo, Buf buffer,
 			packnull(buffer); //nodes
 			packnull(buffer); //ionodes
 
-			packnull(buffer); //blrts
-			packnull(buffer); //linux
-			packnull(buffer); //mloader
-			packnull(buffer); //ramdisk
-			pack16((uint16_t) 0, buffer); //mp_cnode_cnt
-			packnull(buffer); //units_avail
-			packnull(buffer); //units_used
-		}
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		if (jobinfo) {
-			if (jobinfo->dim_cnt)
-				dims = jobinfo->dim_cnt;
-			else if (bg_recover != NOT_FROM_CONTROLLER)
-				xassert(0);
-
-			pack16(dims, buffer);
-			/* NOTE: If new elements are added here, make sure to
-			 * add equivalant pack of zeros below for NULL
-			 * pointer */
-			for (i=0; i<dims; i++) {
-				pack16(jobinfo->geometry[i], buffer);
-				pack16(jobinfo->conn_type[i], buffer);
-				pack16(jobinfo->start_loc[i], buffer);
-			}
-			pack16(jobinfo->reboot, buffer);
-			pack16(jobinfo->rotate, buffer);
-
-			pack32(jobinfo->block_cnode_cnt, buffer);
-			pack32(jobinfo->cnode_cnt, buffer);
-
-			packstr(jobinfo->bg_block_id, buffer);
-			packstr(jobinfo->mp_str, buffer);
-			packstr(jobinfo->ionode_str, buffer);
-
-			packstr(jobinfo->blrtsimage, buffer);
-			packstr(jobinfo->linuximage, buffer);
-			packstr(jobinfo->mloaderimage, buffer);
-			packstr(jobinfo->ramdiskimage, buffer);
-			if (bg_conf) {
-				pack16(bg_conf->mp_cnode_cnt, buffer);
-				pack_bit_fmt(jobinfo->units_avail, buffer);
-				pack_bit_fmt(jobinfo->units_used, buffer);
-			} else {
-				pack16(0, buffer);
-				packnull(buffer);
-				packnull(buffer);
-			}
-		} else {
-			pack16(dims, buffer);
-			/* pack space for 3 positions for geo
-			 * conn_type and start_loc and then, reboot, and rotate
-			 */
-			for (i=0; i<((dims*3)+2); i++) {
-				pack16((uint16_t) 0, buffer);
-			}
-			pack32((uint32_t) 0, buffer); //block_cnode_cnt
-			pack32((uint32_t) 0, buffer); //cnode_cnt
-			packnull(buffer); //bg_block_id
-			packnull(buffer); //nodes
-			packnull(buffer); //ionodes
-
 			packnull(buffer); //blrts
 			packnull(buffer); //linux
 			packnull(buffer); //mloader
@@ -627,51 +566,6 @@ extern int unpack_select_jobinfo(select_jobinfo_t **jobinfo_pptr, Buf buffer,
 		safe_unpack16(&(jobinfo->cleaning), buffer);
 		safe_unpack32(&(jobinfo->cnode_cnt), buffer);
 
-		safe_unpackstr_xmalloc(&(jobinfo->bg_block_id), &uint32_tmp,
-				       buffer);
-		safe_unpackstr_xmalloc(&(jobinfo->mp_str), &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&(jobinfo->ionode_str), &uint32_tmp,
-				       buffer);
-
-		safe_unpackstr_xmalloc(&(jobinfo->blrtsimage),
-				       &uint32_tmp, buffer);
-		safe_unpackstr_xmalloc(&(jobinfo->linuximage), &uint32_tmp,
-				       buffer);
-		safe_unpackstr_xmalloc(&(jobinfo->mloaderimage), &uint32_tmp,
-				       buffer);
-		safe_unpackstr_xmalloc(&(jobinfo->ramdiskimage), &uint32_tmp,
-				       buffer);
-		safe_unpack16(&mp_cnode_cnt, buffer);
-		safe_unpackstr_xmalloc(&bit_char, &uint32_tmp, buffer);
-		if (bit_char) {
-			jobinfo->units_avail = bit_alloc(mp_cnode_cnt);
-			bit_unfmt(jobinfo->units_avail, bit_char);
-			xfree(bit_char);
-		}
-		safe_unpackstr_xmalloc(&bit_char, &uint32_tmp, buffer);
-		if (bit_char) {
-			jobinfo->units_used = bit_alloc(mp_cnode_cnt);
-			bit_unfmt(jobinfo->units_used, bit_char);
-			xfree(bit_char);
-		}
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		safe_unpack16(&jobinfo->dim_cnt, buffer);
-
-		xassert(jobinfo->dim_cnt);
-		dims = jobinfo->dim_cnt;
-
-		for (i=0; i<dims; i++) {
-			safe_unpack16(&(jobinfo->geometry[i]), buffer);
-			safe_unpack16(&(jobinfo->conn_type[i]), buffer);
-			safe_unpack16(&(jobinfo->start_loc[i]), buffer);
-		}
-
-		safe_unpack16(&(jobinfo->reboot), buffer);
-		safe_unpack16(&(jobinfo->rotate), buffer);
-
-		safe_unpack32(&(jobinfo->block_cnode_cnt), buffer);
-		safe_unpack32(&(jobinfo->cnode_cnt), buffer);
-
 		safe_unpackstr_xmalloc(&(jobinfo->bg_block_id), &uint32_tmp,
 				       buffer);
 		safe_unpackstr_xmalloc(&(jobinfo->mp_str), &uint32_tmp, buffer);
diff --git a/src/plugins/select/bluegene/bg_job_place.c b/src/plugins/select/bluegene/bg_job_place.c
index 4c6bc215c..7745cb6f5 100644
--- a/src/plugins/select/bluegene/bg_job_place.c
+++ b/src/plugins/select/bluegene/bg_job_place.c
@@ -433,7 +433,7 @@ static bg_record_t *_find_matching_block(List block_list,
 							&block_state_mutex);
 						free_block_list(NO_VAL,
 								tmp_list, 0, 0);
-						list_destroy(tmp_list);
+						FREE_NULL_LIST(tmp_list);
 					}
 				} else if (found_record->err_ratio &&
 					   (found_record->err_ratio
@@ -479,7 +479,8 @@ static bg_record_t *_find_matching_block(List block_list,
 					convert_num_unit(
 						(float)bg_record->cpu_cnt,
 						tmp_char,
-						sizeof(tmp_char), UNIT_NONE);
+						sizeof(tmp_char), UNIT_NONE,
+						CONVERT_NUM_UNIT_EXACT);
 					info("block %s CPU count (%u) "
 					     "not suitable, asking for %u-%u",
 					     bg_record->bg_block_id,
@@ -1021,10 +1022,10 @@ static int _check_for_booted_overlapping_blocks(
 						bg_status_process_kill_job_list(
 							kill_job_list,
 							JOB_FAILED, 1);
-						list_destroy(kill_job_list);
+						FREE_NULL_LIST(kill_job_list);
 					}
 					free_block_list(NO_VAL, tmp_list, 1, 0);
-					list_destroy(tmp_list);
+					FREE_NULL_LIST(tmp_list);
 				}
 				rc = 1;
 
@@ -1144,7 +1145,7 @@ static int _dynamically_request(List block_list, int *blocks_added,
 					(*blocks_added) = 1;
 				}
 			}
-			list_destroy(new_blocks);
+			FREE_NULL_LIST(new_blocks);
 			if (!*blocks_added) {
 				rc = SLURM_ERROR;
 				continue;
@@ -1161,12 +1162,9 @@ static int _dynamically_request(List block_list, int *blocks_added,
 	}
 	list_iterator_destroy(itr);
 
-	if (list_of_lists)
-		list_destroy(list_of_lists);
-	if (job_list)
-		list_destroy(job_list);
-	if (booted_list)
-		list_destroy(booted_list);
+	FREE_NULL_LIST(list_of_lists);
+	FREE_NULL_LIST(job_list);
+	FREE_NULL_LIST(booted_list);
 
 	return rc;
 }
@@ -1360,8 +1358,7 @@ static int _find_best_block_match(List block_list,
 			list_iterator_destroy(itr);
 		}
 
-		if (overlapped_list)
-			list_destroy(overlapped_list);
+		FREE_NULL_LIST(overlapped_list);
 
 		/* set the bitmap and do other allocation activities */
 		if (bg_record) {
@@ -1573,7 +1570,7 @@ static int _find_best_block_match(List block_list,
 				 */
 				(*found_bg_record) = list_pop(new_blocks);
 				if (!(*found_bg_record)) {
-					list_destroy(new_blocks);
+					FREE_NULL_LIST(new_blocks);
 					if (!bg_record) {
 						/* This should never happen */
 						error("got an empty list back");
@@ -1616,11 +1613,11 @@ static int _find_best_block_match(List block_list,
 							= bg_record->job_ptr;
 					}
 				}
-				list_destroy(new_blocks);
+				FREE_NULL_LIST(new_blocks);
 				break;
 			}
 
-			list_destroy(job_list);
+			FREE_NULL_LIST(job_list);
 
 			goto end_it;
 		} else {
@@ -1777,10 +1774,7 @@ static List _get_preemptables(uint16_t query_mode, bg_record_t *bg_record,
 			      found_record->job_ptr->job_id,
 			      found_record->bg_block_id,
 			      bg_record->bg_block_id);
-			if (preempt) {
-				list_destroy(preempt);
-				preempt = NULL;
-			}
+			FREE_NULL_LIST(preempt);
 			break;
 		}
 	}
@@ -2206,8 +2200,7 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 		}
 		/* set up the preempted job list */
 		if (SELECT_IS_PREEMPT_SET(local_mode)) {
-			if (*preemptee_job_list)
-				list_destroy(*preemptee_job_list);
+			FREE_NULL_LIST(*preemptee_job_list);
 			*preemptee_job_list = _get_preemptables(
 				local_mode, bg_record, job_ptr,
 				preemptee_candidates);
@@ -2239,6 +2232,6 @@ extern int submit_job(struct job_record *job_ptr, bitstr_t *slurm_block_bitmap,
 		slurm_mutex_unlock(&create_dynamic_mutex);
 	}
 
-	list_destroy(block_list);
+	FREE_NULL_LIST(block_list);
 	return rc;
 }
diff --git a/src/plugins/select/bluegene/bg_job_run.c b/src/plugins/select/bluegene/bg_job_run.c
index 54a28479c..ef0efd739 100644
--- a/src/plugins/select/bluegene/bg_job_run.c
+++ b/src/plugins/select/bluegene/bg_job_run.c
@@ -274,7 +274,7 @@ static void _start_agent(bg_action_t *bg_action_ptr)
 	list_iterator_destroy(itr);
 
 	if (requeue_job) {
-		list_destroy(delete_list);
+		FREE_NULL_LIST(delete_list);
 
 		bg_reset_block(bg_record, bg_action_ptr->job_ptr);
 
@@ -289,7 +289,7 @@ static void _start_agent(bg_action_t *bg_action_ptr)
 	if (bg_conf->layout_mode == LAYOUT_DYNAMIC)
 		delete_it = 1;
 	free_block_list(req_job_id, delete_list, delete_it, 1);
-	list_destroy(delete_list);
+	FREE_NULL_LIST(delete_list);
 
 	while (1) {
 		slurm_mutex_lock(&block_state_mutex);
@@ -928,7 +928,7 @@ extern int sync_jobs(List job_list)
 		 * the unlock of block_state_mutex.
 		 */
 		bg_status_process_kill_job_list(kill_list, JOB_BOOT_FAIL, 1);
-		list_destroy(kill_list);
+		FREE_NULL_LIST(kill_list);
 	}
 
 	/* Insure that all other blocks are free of users */
@@ -940,7 +940,7 @@ extern int sync_jobs(List job_list)
 			term_jobs_on_block(bg_record->bg_block_id);
 		}
 		list_iterator_destroy(itr);
-		list_destroy(block_list);
+		FREE_NULL_LIST(block_list);
 	} else {
 		/* this should never happen,
 		 * vestigial logic */
diff --git a/src/plugins/select/bluegene/bg_node_info.c b/src/plugins/select/bluegene/bg_node_info.c
index dd5992bd0..f1a462de8 100644
--- a/src/plugins/select/bluegene/bg_node_info.c
+++ b/src/plugins/select/bluegene/bg_node_info.c
@@ -255,8 +255,7 @@ extern int select_nodeinfo_free(select_nodeinfo_t *nodeinfo)
 		xfree(nodeinfo->extra_info);
 		xfree(nodeinfo->failed_cnodes);
 		xfree(nodeinfo->rack_mp);
-		if (nodeinfo->subgrp_list)
-			list_destroy(nodeinfo->subgrp_list);
+		FREE_NULL_LIST(nodeinfo->subgrp_list);
 		xfree(nodeinfo);
 	}
 	return SLURM_SUCCESS;
diff --git a/src/plugins/select/bluegene/bg_read_config.c b/src/plugins/select/bluegene/bg_read_config.c
index b8f5aedb8..64df972c4 100644
--- a/src/plugins/select/bluegene/bg_read_config.c
+++ b/src/plugins/select/bluegene/bg_read_config.c
@@ -126,10 +126,7 @@ extern void destroy_image(void *ptr)
 	image_t *n = (image_t *)ptr;
 	if (n) {
 		xfree(n->name);
-		if (n->groups) {
-			list_destroy(n->groups);
-			n->groups = NULL;
-		}
+		FREE_NULL_LIST(n->groups);
 		xfree(n);
 	}
 }
@@ -874,12 +871,12 @@ no_calc:
 		      "STATIC LayoutMode.  Please update your bluegene.conf.");
 
 #ifdef HAVE_BGQ
-	if ((bg_recover != NOT_FROM_CONTROLLER)
+	if ((bg_recover != NOT_FROM_CONTROLLER) && assoc_mgr_qos_list
 	    && s_p_get_string(&tmp_char, "RebootQOSList", tbl)) {
 		bool valid;
 		char *token, *last = NULL;
 		slurmdb_qos_rec_t *qos = NULL;
-		assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
+		assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK,
 					   READ_LOCK, NO_LOCK,
 					   NO_LOCK, NO_LOCK };
 
diff --git a/src/plugins/select/bluegene/bg_record_functions.c b/src/plugins/select/bluegene/bg_record_functions.c
index 9c68638ae..f3325a65a 100644
--- a/src/plugins/select/bluegene/bg_record_functions.c
+++ b/src/plugins/select/bluegene/bg_record_functions.c
@@ -98,20 +98,12 @@ extern void destroy_bg_record(void *object)
 
 	if (bg_record) {
 		bg_record->magic = 0;
-		if (bg_record->ba_mp_list) {
-			list_destroy(bg_record->ba_mp_list);
-			bg_record->ba_mp_list = NULL;
-		}
+		FREE_NULL_LIST(bg_record->ba_mp_list);
 		xfree(bg_record->bg_block_id);
 		xfree(bg_record->blrtsimage);
 		xfree(bg_record->ionode_str);
 		FREE_NULL_BITMAP(bg_record->ionode_bitmap);
-
-		if (bg_record->job_list) {
-			list_destroy(bg_record->job_list);
-			bg_record->job_list = NULL;
-		}
-
+		FREE_NULL_LIST(bg_record->job_list);
 		xfree(bg_record->linuximage);
 		xfree(bg_record->mloaderimage);
 		xfree(bg_record->mp_str);
@@ -336,8 +328,7 @@ extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record)
 	sec_record->action = fir_record->action;
 	sec_record->bg_block_id = xstrdup(fir_record->bg_block_id);
 
-	if (sec_record->ba_mp_list)
-		list_destroy(sec_record->ba_mp_list);
+	FREE_NULL_LIST(sec_record->ba_mp_list);
 	sec_record->ba_mp_list = list_create(destroy_ba_mp);
 	if (fir_record->ba_mp_list) {
 		itr = list_iterator_create(fir_record->ba_mp_list);
@@ -400,10 +391,7 @@ extern void copy_bg_record(bg_record_t *fir_record, bg_record_t *sec_record)
 		sec_record->ionode_bitmap = NULL;
 	}
 
-	if (sec_record->job_list) {
-		list_destroy(sec_record->job_list);
-		sec_record->job_list = NULL;
-	}
+	FREE_NULL_LIST(sec_record->job_list);
 
 	if (fir_record->job_list) {
 		struct job_record *job_ptr;
@@ -707,7 +695,7 @@ extern void requeue_and_error(bg_record_t *bg_record, char *reason)
 
 	if (kill_job_list) {
 		bg_status_process_kill_job_list(kill_job_list, JOB_FAILED, 0);
-		list_destroy(kill_job_list);
+		FREE_NULL_LIST(kill_job_list);
 	}
 
 	if (rc)
@@ -851,8 +839,7 @@ extern int add_bg_record(List records, List *used_nodes,
 			debug4("add_bg_record: "
 			       "we didn't get a request list so we are "
 			       "destroying this mp list");
-			list_destroy(bg_record->ba_mp_list);
-			bg_record->ba_mp_list = NULL;
+			FREE_NULL_LIST(bg_record->ba_mp_list);
 		} else
 			setup_subblock_structs(bg_record);
 	} else {
@@ -964,7 +951,7 @@ extern int add_bg_record(List records, List *used_nodes,
 		}
 		list_iterator_destroy(itr);
 		destroy_bg_record(bg_record);
-		list_destroy(ba_mp_list);
+		FREE_NULL_LIST(ba_mp_list);
 	}
 
 	return SLURM_SUCCESS;
@@ -1227,10 +1214,7 @@ extern int down_nodecard(char *mp_name, bitoff_t io_start,
 	if (bg_conf->layout_mode != LAYOUT_DYNAMIC) {
 		debug3("running non-dynamic mode");
 		/* This should never happen, but just in case... */
-		if (delete_list) {
-			list_destroy(delete_list);
-			delete_list = NULL;
-		}
+		FREE_NULL_LIST(delete_list);
 		/* If we found a block that is smaller or equal to a
 		   midplane we will just mark it in an error state as
 		   opposed to draining the node.
@@ -1274,8 +1258,7 @@ extern int down_nodecard(char *mp_name, bitoff_t io_start,
 			cnt_set++;
 		}
 		list_iterator_destroy(itr);
-		list_destroy(delete_list);
-		delete_list = NULL;
+		FREE_NULL_LIST(delete_list);
 
 		if (!cnt_set) {
 			FREE_NULL_BITMAP(iobitmap);
@@ -1474,7 +1457,7 @@ extern int down_nodecard(char *mp_name, bitoff_t io_start,
 			error_bg_record = bg_record;
 		}
 	}
-	list_destroy(requests);
+	FREE_NULL_LIST(requests);
 
 	sort_bg_record_inc_size(bg_lists->main);
 	last_bg_update = time(NULL);
@@ -1483,7 +1466,7 @@ extern int down_nodecard(char *mp_name, bitoff_t io_start,
 cleanup:
 	if (kill_list) {
 		bg_status_process_kill_job_list(kill_list, JOB_NODE_FAIL, 1);
-		list_destroy(kill_list);
+		FREE_NULL_LIST(kill_list);
 	}
 
 	if (!slurmctld_locked)
@@ -1511,8 +1494,7 @@ cleanup:
 		if (bg_conf->layout_mode == LAYOUT_DYNAMIC)
 			delete_it = 1;
 		free_block_list(NO_VAL, delete_list, delete_it, 0);
-		list_destroy(delete_list);
-		delete_list = NULL;
+		FREE_NULL_LIST(delete_list);
 	}
 
 	return rc;
@@ -1844,10 +1826,7 @@ extern void bg_record_hw_failure(bg_record_t *bg_record, List *ret_kill_list)
 			*/
 			if (!bit_test(bg_conf->reboot_qos_bitmap,
 				      qos_ptr->id)) {
-				if (kill_list) {
-					list_destroy(kill_list);
-					kill_list = NULL;
-				}
+				FREE_NULL_LIST(kill_list);
 				break;
 			}
 			if (!kill_list)
@@ -1862,7 +1841,7 @@ extern void bg_record_hw_failure(bg_record_t *bg_record, List *ret_kill_list)
 			*ret_kill_list = kill_list;
 		} else {
 			list_transfer(*ret_kill_list, kill_list);
-			list_destroy(kill_list);
+			FREE_NULL_LIST(kill_list);
 		}
 		kill_list = NULL;
 	}
@@ -1910,7 +1889,7 @@ extern void bg_record_post_hw_failure(
 			       JOB_NODE_FAIL, 1);
 	}
 	list_iterator_destroy(itr);
-	list_destroy(*kill_list);
+	FREE_NULL_LIST(*kill_list);
 	*kill_list = NULL;
 	if (!slurmctld_locked)
 		unlock_slurmctld(job_write_lock);
diff --git a/src/plugins/select/bluegene/bg_status.c b/src/plugins/select/bluegene/bg_status.c
index d3138741e..0cb1c8933 100644
--- a/src/plugins/select/bluegene/bg_status.c
+++ b/src/plugins/select/bluegene/bg_status.c
@@ -365,7 +365,7 @@ extern List bg_status_create_kill_job_list(void)
 }
 
 extern void bg_status_process_kill_job_list(List kill_job_list,
-					    uint16_t job_state,
+					    uint32_t job_state,
 					    bool slurmctld_locked)
 {
 	kill_job_struct_t *freeit = NULL;
diff --git a/src/plugins/select/bluegene/bg_status.h b/src/plugins/select/bluegene/bg_status.h
index c1e8407a8..3932ebd4e 100644
--- a/src/plugins/select/bluegene/bg_status.h
+++ b/src/plugins/select/bluegene/bg_status.h
@@ -49,7 +49,7 @@ extern int bg_status_update_block_state(bg_record_t *bg_record,
 					List kill_job_list);
 extern List bg_status_create_kill_job_list(void);
 extern void bg_status_process_kill_job_list(List kill_job_list,
-					    uint16_t job_state,
+					    uint32_t job_state,
 					    bool slurmctld_locked);
 
 /* defined in the various bridge_status' */
diff --git a/src/plugins/select/bluegene/bl/Makefile.in b/src/plugins/select/bluegene/bl/Makefile.in
index 6c792a6c3..d4e75b171 100644
--- a/src/plugins/select/bluegene/bl/Makefile.in
+++ b/src/plugins/select/bluegene/bl/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -246,6 +249,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -295,8 +300,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -315,6 +324,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -358,6 +370,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -381,6 +394,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/select/bluegene/bl/bridge_linker.c b/src/plugins/select/bluegene/bl/bridge_linker.c
index e274838ba..142292c58 100644
--- a/src/plugins/select/bluegene/bl/bridge_linker.c
+++ b/src/plugins/select/bluegene/bl/bridge_linker.c
@@ -39,6 +39,7 @@
 
 #include "../ba/block_allocator.h"
 #include "../bridge_linker.h"
+#include "src/common/slurm_time.h"
 #include "src/common/uid.h"
 #include "bridge_status.h"
 #include "bridge_switch_connections.h"
@@ -561,7 +562,7 @@ static void _pre_allocate(bg_record_t *bg_record)
 		      bg_err_str(rc));
 
 	gettimeofday(&my_tv, NULL);
-	localtime_r(&my_tv.tv_sec, &my_tm);
+	slurm_localtime_r(&my_tv.tv_sec, &my_tm);
 	bg_record->bg_block_id = xstrdup_printf(
 		"RMP%2.2d%2.2s%2.2d%2.2d%2.2d%3.3ld",
 		my_tm.tm_mday, mon_abbr(my_tm.tm_mon),
@@ -661,7 +662,7 @@ static int _post_allocate(bg_record_t *bg_record)
 		struct tm my_tm;
 		struct timeval my_tv;
 		gettimeofday(&my_tv, NULL);
-		localtime_r(&my_tv.tv_sec, &my_tm);
+		slurm_localtime_r(&my_tv.tv_sec, &my_tm);
 		bg_record->bg_block_id = xstrdup_printf(
 			"RMP%2.2d%2.2s%2.2d%2.2d%2.2d%3.3ld",
 			my_tm.tm_mday, mon_abbr(my_tm.tm_mon),
@@ -925,10 +926,7 @@ static int _block_get_and_set_mps(bg_record_t *bg_record)
 	}
 	return SLURM_SUCCESS;
 end_it:
-	if (bg_record->ba_mp_list) {
-		list_destroy(bg_record->ba_mp_list);
-		bg_record->ba_mp_list = NULL;
-	}
+	FREE_NULL_LIST(bg_record->ba_mp_list);
 	return SLURM_ERROR;
 }
 
diff --git a/src/plugins/select/bluegene/bl/bridge_switch_connections.c b/src/plugins/select/bluegene/bl/bridge_switch_connections.c
index ad5e06e0c..2a4b79b72 100644
--- a/src/plugins/select/bluegene/bl/bridge_switch_connections.c
+++ b/src/plugins/select/bluegene/bl/bridge_switch_connections.c
@@ -243,7 +243,7 @@ static int _add_switch_conns(rm_switch_t* curr_switch,
 	} else {
 		if (bg_conf->slurm_debug_flags & DEBUG_FLAG_BG_WIRES)
 			info("we got a switch with no connections");
-		list_destroy(conn_list);
+		FREE_NULL_LIST(conn_list);
                 return SLURM_ERROR;
 	}
 
@@ -259,7 +259,7 @@ static int _add_switch_conns(rm_switch_t* curr_switch,
 				      "(RM_SwitchFirstConnection): "
 				      "%s",
 				      bg_err_str(rc));
-				list_destroy(conn_list);
+				FREE_NULL_LIST(conn_list);
 				return SLURM_ERROR;
 			}
 			firstconnect=0;
@@ -272,13 +272,13 @@ static int _add_switch_conns(rm_switch_t* curr_switch,
 				fatal("bridge_set_data"
 				      "(RM_SwitchNextConnection): %s",
 				      bg_err_str(rc));
-				list_destroy(conn_list);
+				FREE_NULL_LIST(conn_list);
 				return SLURM_ERROR;
 			}
 		}
 	}
 
-	list_destroy(conn_list);
+	FREE_NULL_LIST(conn_list);
 
 	return SLURM_SUCCESS;
 }
diff --git a/src/plugins/select/bluegene/bl_bgq/Makefile.in b/src/plugins/select/bluegene/bl_bgq/Makefile.in
index 58d6b8049..ec400ac9c 100644
--- a/src/plugins/select/bluegene/bl_bgq/Makefile.in
+++ b/src/plugins/select/bluegene/bl_bgq/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -264,6 +267,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -313,8 +318,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -333,6 +342,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -376,6 +388,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -399,6 +412,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/select/bluegene/select_bluegene.c b/src/plugins/select/bluegene/select_bluegene.c
index caddb4e42..e4d6552a1 100644
--- a/src/plugins/select/bluegene/select_bluegene.c
+++ b/src/plugins/select/bluegene/select_bluegene.c
@@ -112,16 +112,13 @@ List assoc_mgr_qos_list = NULL;
  * only load select plugins if the plugin_type string has a
  * prefix of "select/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the node selection API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "BlueGene node selection plugin";
 const char plugin_type[]       	= "select/bluegene";
 const uint32_t plugin_id	= 100;
-const uint32_t plugin_version	= 120;
+const uint32_t plugin_version	= SLURM_VERSION_NUMBER;
 
 /* Global variables */
 bg_config_t *bg_conf = NULL;
@@ -139,10 +136,7 @@ extern int select_p_alter_node_cnt(enum select_node_cnt type, void *data);
 static void _destroy_bg_config(bg_config_t *bg_conf)
 {
 	if (bg_conf) {
-		if (bg_conf->blrts_list) {
-			list_destroy(bg_conf->blrts_list);
-			bg_conf->blrts_list = NULL;
-		}
+		FREE_NULL_LIST(bg_conf->blrts_list);
 
 		xfree(bg_conf->bridge_api_file);
 
@@ -150,22 +144,9 @@ static void _destroy_bg_config(bg_config_t *bg_conf)
 		xfree(bg_conf->default_linuximage);
 		xfree(bg_conf->default_mloaderimage);
 		xfree(bg_conf->default_ramdiskimage);
-
-		if (bg_conf->linux_list) {
-			list_destroy(bg_conf->linux_list);
-			bg_conf->linux_list = NULL;
-		}
-
-		if (bg_conf->mloader_list) {
-			list_destroy(bg_conf->mloader_list);
-			bg_conf->mloader_list = NULL;
-		}
-
-		if (bg_conf->ramdisk_list) {
-			list_destroy(bg_conf->ramdisk_list);
-			bg_conf->ramdisk_list = NULL;
-		}
-
+		FREE_NULL_LIST(bg_conf->linux_list);
+		FREE_NULL_LIST(bg_conf->mloader_list);
+		FREE_NULL_LIST(bg_conf->ramdisk_list);
 		FREE_NULL_BITMAP(bg_conf->reboot_qos_bitmap);
 		xfree(bg_conf->slurm_user_name);
 		xfree(bg_conf->slurm_node_prefix);
@@ -176,38 +157,17 @@ static void _destroy_bg_config(bg_config_t *bg_conf)
 static void _destroy_bg_lists(bg_lists_t *bg_lists)
 {
 	if (bg_lists) {
-		if (bg_lists->booted) {
-			list_destroy(bg_lists->booted);
-			bg_lists->booted = NULL;
-		}
+		FREE_NULL_LIST(bg_lists->booted);
 
 		if (bg_lists->job_running) {
-			list_destroy(bg_lists->job_running);
-			bg_lists->job_running = NULL;
+			FREE_NULL_LIST(bg_lists->job_running);
 			num_unused_cpus = 0;
 		}
-
-		if (bg_lists->main) {
-			list_destroy(bg_lists->main);
-			bg_lists->main = NULL;
-		}
-
-		if (bg_lists->valid_small32) {
-			list_destroy(bg_lists->valid_small32);
-			bg_lists->valid_small32 = NULL;
-		}
-		if (bg_lists->valid_small64) {
-			list_destroy(bg_lists->valid_small64);
-			bg_lists->valid_small64 = NULL;
-		}
-		if (bg_lists->valid_small128) {
-			list_destroy(bg_lists->valid_small128);
-			bg_lists->valid_small128 = NULL;
-		}
-		if (bg_lists->valid_small256) {
-			list_destroy(bg_lists->valid_small256);
-			bg_lists->valid_small256 = NULL;
-		}
+		FREE_NULL_LIST(bg_lists->main);
+		FREE_NULL_LIST(bg_lists->valid_small32);
+		FREE_NULL_LIST(bg_lists->valid_small64);
+		FREE_NULL_LIST(bg_lists->valid_small128);
+		FREE_NULL_LIST(bg_lists->valid_small256);
 
 		xfree(bg_lists);
 	}
@@ -320,7 +280,7 @@ static int _delete_old_blocks(List curr_block_list, List found_block_list)
 	slurm_mutex_unlock(&block_state_mutex);
 
 	free_block_list(NO_VAL, destroy_list, 1, 0);
-	list_destroy(destroy_list);
+	FREE_NULL_LIST(destroy_list);
 
 	return SLURM_SUCCESS;
 }
@@ -332,16 +292,13 @@ static void _set_bg_lists()
 
 	slurm_mutex_lock(&block_state_mutex);
 
-	if (bg_lists->booted)
-		list_destroy(bg_lists->booted);
+	FREE_NULL_LIST(bg_lists->booted);
 	bg_lists->booted = list_create(NULL);
 
-	if (bg_lists->job_running)
-		list_destroy(bg_lists->job_running);
+	FREE_NULL_LIST(bg_lists->job_running);
 	bg_lists->job_running = list_create(NULL);
 
-	if (bg_lists->main)
-		list_destroy(bg_lists->main);
+	FREE_NULL_LIST(bg_lists->main);
 	bg_lists->main = list_create(destroy_bg_record);
 
 	slurm_mutex_unlock(&block_state_mutex);
@@ -691,12 +648,8 @@ static int _load_state_file(List curr_block_list, char *dir_name)
 	buffer = create_buf(data, data_size);
 	safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer);
 	debug3("Version string in block_state header is %s", ver_str);
-	if (ver_str) {
-		if (!strcmp(ver_str, BLOCK_STATE_VERSION)) {
-			safe_unpack16(&protocol_version, buffer);
-		} else
-			protocol_version = SLURM_2_6_PROTOCOL_VERSION;
-	}
+	if (ver_str && !strcmp(ver_str, BLOCK_STATE_VERSION))
+		safe_unpack16(&protocol_version, buffer);
 
 	if (protocol_version == (uint16_t)NO_VAL) {
 		error("***********************************************");
@@ -757,7 +710,7 @@ static int _load_state_file(List curr_block_list, char *dir_name)
 			error("block %s(%s) can't be made in the current "
 			      "system, but was around in the previous one.",
 			      bg_record->bg_block_id, bg_record->mp_str);
-			list_destroy(results);
+			FREE_NULL_LIST(results);
 			destroy_bg_record(bg_record);
 			continue;
 		}
@@ -809,7 +762,7 @@ static int _load_state_file(List curr_block_list, char *dir_name)
 			if (!name) {
 				error("I was unable to make the "
 				      "requested block.");
-				list_destroy(results);
+				FREE_NULL_LIST(results);
 				destroy_bg_record(bg_record);
 				bg_record = NULL;
 				continue;
@@ -827,15 +780,14 @@ static int _load_state_file(List curr_block_list, char *dir_name)
 				      "YOU MUST COLDSTART",
 				      bg_record->mp_str, temp);
 			}
-			if (bg_record->ba_mp_list)
-				list_destroy(bg_record->ba_mp_list);
+			FREE_NULL_LIST(bg_record->ba_mp_list);
 #ifdef HAVE_BGQ
 			bg_record->ba_mp_list =	results;
 			results = NULL;
 #else
 			bg_record->ba_mp_list =	list_create(destroy_ba_mp);
 			copy_node_path(results, &bg_record->ba_mp_list);
-			list_destroy(results);
+			FREE_NULL_LIST(results);
 #endif
 		}
 
@@ -1273,17 +1225,13 @@ extern int init(void)
 		bg_conf->slurm_debug_level = slurmctld_conf.slurmctld_debug;
 		slurm_conf_unlock();
 
-		if (bg_conf->blrts_list)
-			list_destroy(bg_conf->blrts_list);
+		FREE_NULL_LIST(bg_conf->blrts_list);
 		bg_conf->blrts_list = list_create(destroy_image);
-		if (bg_conf->linux_list)
-			list_destroy(bg_conf->linux_list);
+		FREE_NULL_LIST(bg_conf->linux_list);
 		bg_conf->linux_list = list_create(destroy_image);
-		if (bg_conf->mloader_list)
-			list_destroy(bg_conf->mloader_list);
+		FREE_NULL_LIST(bg_conf->mloader_list);
 		bg_conf->mloader_list = list_create(destroy_image);
-		if (bg_conf->ramdisk_list)
-			list_destroy(bg_conf->ramdisk_list);
+		FREE_NULL_LIST(bg_conf->ramdisk_list);
 		bg_conf->ramdisk_list = list_create(destroy_image);
 		bg_conf->reboot_qos_bitmap = NULL;
 
@@ -1461,10 +1409,8 @@ extern int select_p_state_restore(char *dir_name)
 		}
 	}
 
-	list_destroy(curr_block_list);
-	curr_block_list = NULL;
-	list_destroy(found_block_list);
-	found_block_list = NULL;
+	FREE_NULL_LIST(curr_block_list);
+	FREE_NULL_LIST(found_block_list);
 
 	slurm_mutex_lock(&block_state_mutex);
 	last_bg_update = time(NULL);
@@ -2407,8 +2353,7 @@ extern int select_p_update_block(update_block_msg_t *block_desc_ptr)
 	if (kill_job_list) {
 		slurm_mutex_unlock(&block_state_mutex);
 		bg_status_process_kill_job_list(kill_job_list, JOB_FAILED, 0);
-		list_destroy(kill_job_list);
-		kill_job_list = NULL;
+		FREE_NULL_LIST(kill_job_list);
 		slurm_mutex_lock(&block_state_mutex);
 		if (!block_ptr_exist_in_list(bg_lists->main, bg_record)) {
 			slurm_mutex_unlock(&block_state_mutex);
@@ -2484,7 +2429,7 @@ extern int select_p_update_block(update_block_msg_t *block_desc_ptr)
 		if (bg_conf->layout_mode == LAYOUT_DYNAMIC)
 			delete_it = 1;
 		free_block_list(NO_VAL, delete_list, delete_it, 0);
-		list_destroy(delete_list);
+		FREE_NULL_LIST(delete_list);
 		put_block_in_error_state(bg_record, reason);
 	} else if (block_desc_ptr->state == BG_BLOCK_FREE) {
 		/* Resume the block first and then free the block */
@@ -2616,7 +2561,7 @@ extern int select_p_update_block(update_block_msg_t *block_desc_ptr)
 
 		slurm_mutex_unlock(&block_state_mutex);
 		free_block_list(NO_VAL, delete_list, 1, 0);
-		list_destroy(delete_list);
+		FREE_NULL_LIST(delete_list);
 	} else if (block_desc_ptr->state == BG_BLOCK_BOOTING) {
 		/* This means recreate the block, remove it and then
 		   recreate it.
diff --git a/src/plugins/select/bluegene/sfree/Makefile.in b/src/plugins/select/bluegene/sfree/Makefile.in
index 077397c8a..1e6932011 100644
--- a/src/plugins/select/bluegene/sfree/Makefile.in
+++ b/src/plugins/select/bluegene/sfree/Makefile.in
@@ -101,6 +101,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -109,10 +110,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -125,7 +128,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -248,6 +251,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -297,8 +302,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -317,6 +326,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -360,6 +372,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -383,6 +396,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/select/bluegene/sfree/sfree.c b/src/plugins/select/bluegene/sfree/sfree.c
index 14aa8dc64..8a70afb5a 100644
--- a/src/plugins/select/bluegene/sfree/sfree.c
+++ b/src/plugins/select/bluegene/sfree/sfree.c
@@ -173,7 +173,7 @@ int main(int argc, char *argv[])
 	if (wait_full)
 		_check_status();
 
-	list_destroy(block_list);
+	FREE_NULL_LIST(block_list);
 	info("done");
 	return 0;
 }
diff --git a/src/plugins/select/cons_res/Makefile.in b/src/plugins/select/cons_res/Makefile.in
index 9fad5d9d6..18bc791d3 100644
--- a/src/plugins/select/cons_res/Makefile.in
+++ b/src/plugins/select/cons_res/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/select/cons_res/dist_tasks.c b/src/plugins/select/cons_res/dist_tasks.c
index dbd390b66..c42b3b7af 100644
--- a/src/plugins/select/cons_res/dist_tasks.c
+++ b/src/plugins/select/cons_res/dist_tasks.c
@@ -326,6 +326,7 @@ static void _block_sync_core_bitmap(struct job_record *job_ptr,
 {
 	uint32_t c, s, i, j, n, b, z, size, csize, core_cnt;
 	uint16_t cpus, num_bits, vpus = 1;
+	uint16_t cpus_per_task = job_ptr->details->cpus_per_task;
 	job_resources_t *job_res = job_ptr->job_resrcs;
 	bool alloc_cores = false, alloc_sockets = false;
 	uint16_t ntasks_per_core = 0xffff;
@@ -353,15 +354,10 @@ static void _block_sync_core_bitmap(struct job_record *job_ptr,
 	if (!job_res)
 		return;
 
-	if (cr_type & CR_CORE)
+	if (cr_type & CR_SOCKET)
+		alloc_sockets = true;
+	else if (cr_type & CR_CORE)
 		alloc_cores = true;
-	if (slurmctld_conf.select_type_param & CR_ALLOCATE_FULL_SOCKET) {
-		if (cr_type & CR_SOCKET)
-			alloc_sockets = true;
-	} else {
-		if (cr_type & CR_SOCKET)
-			alloc_cores = true;
-	}
 
 	if (job_ptr->details && job_ptr->details->mc_ptr) {
 		multi_core_data_t *mc_ptr = job_ptr->details->mc_ptr;
@@ -399,7 +395,12 @@ static void _block_sync_core_bitmap(struct job_record *job_ptr,
 			fatal("cons_res: _block_sync_core_bitmap index error");
 
 		cpus  = job_res->cpus[i];
-		vpus  = MIN(select_node_record[n].vpus, ntasks_per_core);
+		if (ntasks_per_core == 0xffff) {
+			vpus = select_node_record[n].vpus;
+		} else {
+			vpus = MIN(select_node_record[n].vpus,
+				   (ntasks_per_core * cpus_per_task));
+		}
 
 		/* compute still required cores on the node */
 		req_cpus = cpus / vpus;
@@ -672,10 +673,11 @@ static void _block_sync_core_bitmap(struct job_record *job_ptr,
  * virtual CPUs (hyperthreads)
  */
 static int _cyclic_sync_core_bitmap(struct job_record *job_ptr,
-				     const uint16_t cr_type)
+				     const uint16_t cr_type, bool preempt_mode)
 {
 	uint32_t c, i, j, s, n, *sock_start, *sock_end, size, csize, core_cnt;
 	uint16_t cps = 0, cpus, vpus, sockets, sock_size;
+	uint16_t cpus_per_task = job_ptr->details->cpus_per_task;
 	job_resources_t *job_res = job_ptr->job_resrcs;
 	bitstr_t *core_map;
 	bool *sock_used, *sock_avoid;
@@ -687,15 +689,11 @@ static int _cyclic_sync_core_bitmap(struct job_record *job_ptr,
 	    (job_ptr->details == NULL))
 		return error_code;
 
-	if (cr_type & CR_CORE)
+	if (cr_type & CR_SOCKET)
+		alloc_sockets = true;
+	else if (cr_type & CR_CORE)
 		alloc_cores = true;
-	if (slurmctld_conf.select_type_param & CR_ALLOCATE_FULL_SOCKET) {
-		if (cr_type & CR_SOCKET)
-			alloc_sockets = true;
-	} else {
-		if (cr_type & CR_SOCKET)
-			alloc_cores = true;
-	}
+
 	core_map = job_res->core_bitmap;
 	if (job_ptr->details->mc_ptr) {
 		multi_core_data_t *mc_ptr = job_ptr->details->mc_ptr;
@@ -725,7 +723,12 @@ static int _cyclic_sync_core_bitmap(struct job_record *job_ptr,
 			continue;
 		sockets = select_node_record[n].sockets;
 		cps     = select_node_record[n].cores;
-		vpus    = MIN(select_node_record[n].vpus, ntasks_per_core);
+		if (ntasks_per_core == 0xffff) {
+			vpus = select_node_record[n].vpus;
+		} else {
+			vpus = MIN(select_node_record[n].vpus,
+				   (ntasks_per_core * cpus_per_task));
+		}
 
 		if (select_debug_flags & DEBUG_FLAG_SELECT_TYPE) {
 			info("DEBUG: job %u node %s vpus %u cpus %u",
@@ -841,15 +844,19 @@ static int _cyclic_sync_core_bitmap(struct job_record *job_ptr,
 					cpus -= vpus;
 				sock_start[s]++;
 			}
-			if (prev_cpus == cpus) {
+			if (prev_cpus != cpus)
+				continue;
+			if (!preempt_mode) {
 				/* we're stuck! */
 				job_ptr->priority = 0;
 				job_ptr->state_reason = WAIT_HELD;
-				error("cons_res: sync loop not progressing, "
-				      "holding job %u", job_ptr->job_id);
-				error_code = SLURM_ERROR;
-				goto fini;
+				error("%s: sync loop not progressing on node %s, holding job %u",
+				      __func__,
+				      select_node_record[n].node_ptr->name,
+				      job_ptr->job_id);
 			}
+			error_code = SLURM_ERROR;
+			goto fini;
 		}
 
 		/* clear the rest of the cores in each socket
@@ -916,8 +923,13 @@ fini:	xfree(sock_avoid);
  * - "cyclic" removes cores "evenly", starting from the last socket,
  * - "block" removes cores from the "last" socket(s)
  * - "plane" removes cores "in chunks"
+ *
+ * IN job_ptr - job to be allocated resources
+ * IN cr_type - allocation type (sockets, cores, etc.)
+ * IN preempt_mode - true if testing with simulated preempted jobs
  */
-extern int cr_dist(struct job_record *job_ptr, const uint16_t cr_type)
+extern int cr_dist(struct job_record *job_ptr, const uint16_t cr_type,
+		   bool preempt_mode)
 {
 	int error_code, cr_cpu = 1;
 
@@ -928,7 +940,7 @@ extern int cr_dist(struct job_record *job_ptr, const uint16_t cr_type)
 	}
 
 	if ((job_ptr->job_resrcs->node_req == NODE_CR_RESERVED) ||
-	    (job_ptr->details->whole_node != 0)) {
+	    (job_ptr->details->whole_node == 1)) {
 		/* The job has been allocated an EXCLUSIVE set of nodes,
 		 * so it gets all of the bits in the core_bitmap and
 		 * all of the available CPUs in the cpus array. */
@@ -939,7 +951,8 @@ extern int cr_dist(struct job_record *job_ptr, const uint16_t cr_type)
 
 	_log_select_maps("cr_dist/start", job_ptr->job_resrcs->node_bitmap,
 			 job_ptr->job_resrcs->core_bitmap);
-	if (job_ptr->details->task_dist == SLURM_DIST_PLANE) {
+	if ((job_ptr->details->task_dist & SLURM_DIST_STATE_BASE) ==
+	    SLURM_DIST_PLANE) {
 		/* perform a plane distribution on the 'cpus' array */
 		error_code = _compute_plane_dist(job_ptr);
 		if (error_code != SLURM_SUCCESS) {
@@ -975,7 +988,7 @@ extern int cr_dist(struct job_record *job_ptr, const uint16_t cr_type)
 	 * by the next code block
 	 */
 	if ( slurmctld_conf.select_type_param & CR_CORE_DEFAULT_DIST_BLOCK ) {
-		switch(job_ptr->details->task_dist) {
+		switch(job_ptr->details->task_dist & SLURM_DIST_NODEMASK) {
 		case SLURM_DIST_ARBITRARY:
 		case SLURM_DIST_BLOCK:
 		case SLURM_DIST_CYCLIC:
@@ -988,7 +1001,7 @@ extern int cr_dist(struct job_record *job_ptr, const uint16_t cr_type)
 	/* Determine the number of logical processors per node needed
 	 * for this job. Make sure below matches the layouts in
 	 * lllp_distribution in plugins/task/affinity/dist_task.c (FIXME) */
-	switch(job_ptr->details->task_dist) {
+	switch(job_ptr->details->task_dist & SLURM_DIST_NODESOCKMASK) {
 	case SLURM_DIST_BLOCK_BLOCK:
 	case SLURM_DIST_CYCLIC_BLOCK:
 	case SLURM_DIST_PLANE:
@@ -1002,7 +1015,8 @@ extern int cr_dist(struct job_record *job_ptr, const uint16_t cr_type)
 	case SLURM_DIST_BLOCK_CFULL:
 	case SLURM_DIST_CYCLIC_CFULL:
 	case SLURM_DIST_UNKNOWN:
-		error_code = _cyclic_sync_core_bitmap(job_ptr, cr_type);
+		error_code = _cyclic_sync_core_bitmap(job_ptr, cr_type,
+						      preempt_mode);
 		break;
 	default:
 		error("select/cons_res: invalid task_dist entry");
diff --git a/src/plugins/select/cons_res/dist_tasks.h b/src/plugins/select/cons_res/dist_tasks.h
index 30b2bf2b4..0846875dd 100644
--- a/src/plugins/select/cons_res/dist_tasks.h
+++ b/src/plugins/select/cons_res/dist_tasks.h
@@ -50,6 +50,12 @@
 
 #include "select_cons_res.h"
 
-int cr_dist(struct job_record *job_ptr,const uint16_t cr_type);
+/* Distribute tasks over CPUs
+ * IN job_ptr - job to be allocated resources
+ * IN cr_type - allocation type (sockets, cores, etc.)
+ * IN preempt_mode - true if testing with simulated preempted jobs
+ */
+extern int cr_dist(struct job_record *job_ptr, const uint16_t cr_type,
+		   bool preempt_mode);
 
 #endif /* !_CONS_RES_DIST_TASKS_H */
diff --git a/src/plugins/select/cons_res/job_test.c b/src/plugins/select/cons_res/job_test.c
index 64b80af96..9f338fe5f 100644
--- a/src/plugins/select/cons_res/job_test.c
+++ b/src/plugins/select/cons_res/job_test.c
@@ -58,6 +58,7 @@
  *
  *****************************************************************************
  *  Copyright (C) 2005-2008 Hewlett-Packard Development Company, L.P.
+ *  Portions Copyright (C) 2010-2015 SchedMD <http://www.schedmd.com>.
  *  Written by Susanne M. Balle <susanne.balle@hp.com>, who borrowed heavily
  *  from select/linear
  *
@@ -112,7 +113,21 @@
 static uint16_t _allocate_sc(struct job_record *job_ptr, bitstr_t *core_map,
 			      bitstr_t *part_core_map, const uint32_t node_i,
 			      bool entire_sockets_only);
+static int _choose_nodes(struct job_record *job_ptr, bitstr_t *node_map,
+			 uint32_t min_nodes, uint32_t max_nodes,
+			 uint32_t req_nodes, uint32_t cr_node_cnt,
+			 uint16_t *cpu_cnt, uint16_t cr_type,
+			 bool prefer_alloc_nodes);
 static int _eval_nodes(struct job_record *job_ptr, bitstr_t *node_map,
+			uint32_t min_nodes, uint32_t max_nodes,
+			uint32_t req_nodes, uint32_t cr_node_cnt,
+			uint16_t *cpu_cnt, uint16_t cr_type,
+			bool prefer_alloc_nodes);
+static int _eval_nodes_busy(struct job_record *job_ptr, bitstr_t *node_map,
+			uint32_t min_nodes, uint32_t max_nodes,
+			uint32_t req_nodes, uint32_t cr_node_cnt,
+			uint16_t *cpu_cnt);
+static int _eval_nodes_dfly(struct job_record *job_ptr, bitstr_t *node_map,
 			uint32_t min_nodes, uint32_t max_nodes,
 			uint32_t req_nodes, uint32_t cr_node_cnt,
 			uint16_t *cpu_cnt, uint16_t cr_type);
@@ -127,7 +142,15 @@ static int _eval_nodes_serial(struct job_record *job_ptr, bitstr_t *node_map,
 static int _eval_nodes_topo(struct job_record *job_ptr, bitstr_t *node_map,
 			uint32_t min_nodes, uint32_t max_nodes,
 			uint32_t req_nodes, uint32_t cr_node_cnt,
-			uint16_t *cpu_cnt);
+			uint16_t *cpu_cnt, uint16_t cr_type);
+static uint16_t *_select_nodes(struct job_record *job_ptr, uint32_t min_nodes,
+				uint32_t max_nodes, uint32_t req_nodes,
+				bitstr_t *node_map, uint32_t cr_node_cnt,
+				bitstr_t *core_map,
+				struct node_use_record *node_usage,
+				uint16_t cr_type, bool test_only,
+				bitstr_t *part_core_map,
+				bool prefer_alloc_nodes);
 
 /* _allocate_sockets - Given the job requirements, determine which sockets
  *                     from the given node can be allocated (if any) to this
@@ -186,7 +209,7 @@ static uint16_t _allocate_sc(struct job_record *job_ptr, bitstr_t *core_map,
 	uint32_t core_end      = cr_get_coremap_offset(node_i+1);
 	uint32_t c;
 	uint16_t cpus_per_task = job_ptr->details->cpus_per_task;
-	uint16_t *used_cores, *free_cores, free_core_count = 0;
+	uint16_t *used_cores, *free_cores = NULL, free_core_count = 0;
 	uint16_t i, j, sockets    = select_node_record[node_i].sockets;
 	uint16_t cores_per_socket = select_node_record[node_i].cores;
 	uint16_t threads_per_core = select_node_record[node_i].vpus;
@@ -195,6 +218,7 @@ static uint16_t _allocate_sc(struct job_record *job_ptr, bitstr_t *core_map,
 	uint32_t free_cpu_count = 0, used_cpu_count = 0, *used_cpu_array = NULL;
 
 	if (job_ptr->details && job_ptr->details->mc_ptr) {
+		uint32_t threads_per_socket;
 		multi_core_data_t *mc_ptr = job_ptr->details->mc_ptr;
 		if (mc_ptr->cores_per_socket != (uint16_t) NO_VAL) {
 			min_cores = mc_ptr->cores_per_socket;
@@ -210,6 +234,18 @@ static uint16_t _allocate_sc(struct job_record *job_ptr, bitstr_t *core_map,
 			ntasks_per_core = mc_ptr->threads_per_core;
 		}
 		ntasks_per_socket = mc_ptr->ntasks_per_socket;
+
+		if ((ntasks_per_core != (uint16_t) NO_VAL) &&
+		    (ntasks_per_core != (uint16_t) INFINITE) &&
+		    (ntasks_per_core > threads_per_core)) {
+			goto fini;
+		}
+		threads_per_socket = threads_per_core * cores_per_socket;
+		if ((ntasks_per_socket != (uint16_t) NO_VAL) &&
+		    (ntasks_per_socket != (uint16_t) INFINITE) &&
+		    (ntasks_per_socket > threads_per_socket)) {
+			goto fini;
+		}
 	}
 
 	/* These are the job parameters that we must respect:
@@ -350,7 +386,10 @@ static uint16_t _allocate_sc(struct job_record *job_ptr, bitstr_t *core_map,
 	 */
 	avail_cpus = 0;
 	num_tasks = 0;
-	threads_per_core = MIN(threads_per_core, ntasks_per_core);
+	if (ntasks_per_core != 0xffff) {
+		threads_per_core = MIN(threads_per_core,
+				       (ntasks_per_core * cpus_per_task));
+	}
 
 	for (i = 0; i < sockets; i++) {
 		uint16_t tmp = free_cores[i] * threads_per_core;
@@ -445,6 +484,24 @@ fini:
 		cpu_count = 0;
 	}
 	xfree(free_cores);
+
+	if ((job_ptr->details->core_spec != (uint16_t) NO_VAL) &&
+	    (job_ptr->details->core_spec & CORE_SPEC_THREAD)   &&
+	    ((select_node_record[node_i].threads == 1) ||
+	     (select_node_record[node_i].threads ==
+	      select_node_record[node_i].vpus))) {
+		/* NOTE: Currently does not support the situation where Slurm
+		 * allocates by core and the thread specialization count
+		 * occupies a full core */
+		c = job_ptr->details->core_spec & (~CORE_SPEC_THREAD);
+		if (((cpu_count + c) <= select_node_record[node_i].cpus))
+			;
+		else if (cpu_count > c)
+			cpu_count -= c;
+		else
+			cpu_count = 0;
+	}
+
 	return cpu_count;
 }
 
@@ -527,7 +584,8 @@ uint16_t _can_job_run_on_node(struct job_record *job_ptr, bitstr_t *core_map,
 		 *          - there are enough free_cores (MEM_PER_CPU == 1)
 		 */
 		req_mem   = job_ptr->details->pn_min_memory & ~MEM_PER_CPU;
-		avail_mem = select_node_record[node_i].real_memory;
+		avail_mem = select_node_record[node_i].real_memory -
+			    select_node_record[node_i].mem_spec_limit;
 		if (!test_only)
 			avail_mem -= node_usage[node_i].alloc_memory;
 		if (job_ptr->details->pn_min_memory & MEM_PER_CPU) {
@@ -589,19 +647,23 @@ uint16_t _can_job_run_on_node(struct job_record *job_ptr, bitstr_t *core_map,
  * allocated CPUs with multi-row partitions.
  */
 static int _is_node_busy(struct part_res_record *p_ptr, uint32_t node_i,
-			 int sharing_only, struct part_record *my_part_ptr)
+			 int sharing_only, struct part_record *my_part_ptr,
+			 bool qos_preemptor)
 {
 	uint32_t r, cpu_begin = cr_get_coremap_offset(node_i);
 	uint32_t i, cpu_end   = cr_get_coremap_offset(node_i+1);
+	uint16_t num_rows;
 
 	for (; p_ptr; p_ptr = p_ptr->next) {
+		num_rows = p_ptr->num_rows;
+		if (preempt_by_qos && !qos_preemptor)
+			num_rows--;	/* Don't use extra row */
 		if (sharing_only &&
-		    ((p_ptr->num_rows < 2) ||
-		     (p_ptr->part_ptr == my_part_ptr)))
+		    ((num_rows < 2) || (p_ptr->part_ptr == my_part_ptr)))
 			continue;
 		if (!p_ptr->row)
 			continue;
-		for (r = 0; r < p_ptr->num_rows; r++) {
+		for (r = 0; r < num_rows; r++) {
 			if (!p_ptr->row[r].row_bitmap)
 				continue;
 			for (i = cpu_begin; i < cpu_end; i++) {
@@ -638,7 +700,7 @@ static int _verify_node_state(struct part_res_record *cr_part_ptr,
 			      uint16_t cr_type,
 			      struct node_use_record *node_usage,
 			      enum node_cr_state job_node_req,
-			      bitstr_t *exc_core_bitmap)
+			      bitstr_t *exc_core_bitmap, bool qos_preemptor)
 {
 	struct node_record *node_ptr;
 	uint32_t i, j, free_mem, gres_cpus, gres_cores, min_mem;
@@ -695,7 +757,7 @@ static int _verify_node_state(struct part_res_record *cr_part_ptr,
 		}
 
 		/* Exclude nodes with reserved cores */
-		if (job_ptr->details->whole_node && exc_core_bitmap) {
+		if ((job_ptr->details->whole_node == 1) && exc_core_bitmap) {
 			for (j = core_start_bit; j <= core_end_bit; j++) {
 				if (bit_test(exc_core_bitmap, j))
 					continue;
@@ -740,7 +802,7 @@ static int _verify_node_state(struct part_res_record *cr_part_ptr,
 			/* cannot use this node if it is running jobs
 			 * in sharing partitions */
 			if (_is_node_busy(cr_part_ptr, i, 1,
-					  job_ptr->part_ptr)) {
+					  job_ptr->part_ptr, qos_preemptor)) {
 				debug3("cons_res: _vns: node %s sharing?",
 				       node_ptr->name);
 				goto clear_bit;
@@ -750,7 +812,8 @@ static int _verify_node_state(struct part_res_record *cr_part_ptr,
 		} else {
 			if (job_node_req == NODE_CR_RESERVED) {
 				if (_is_node_busy(cr_part_ptr, i, 0,
-						  job_ptr->part_ptr)) {
+						  job_ptr->part_ptr,
+						  qos_preemptor)) {
 					debug3("cons_res: _vns: node %s busy",
 					       node_ptr->name);
 					goto clear_bit;
@@ -759,7 +822,8 @@ static int _verify_node_state(struct part_res_record *cr_part_ptr,
 				/* cannot use this node if it is running jobs
 				 * in sharing partitions */
 				if (_is_node_busy(cr_part_ptr, i, 1,
-						  job_ptr->part_ptr)) {
+						  job_ptr->part_ptr,
+						  qos_preemptor)) {
 					debug3("cons_res: _vns: node %s vbusy",
 					       node_ptr->name);
 					goto clear_bit;
@@ -793,8 +857,11 @@ bitstr_t *_make_core_bitmap(bitstr_t *node_map, uint16_t core_spec)
 	size = cr_get_coremap_offset(nodes);
 	bitstr_t *core_map = bit_alloc(size);
 
-	nodes = bit_size(node_map);
+	if ((core_spec != (uint16_t) NO_VAL) &&
+	    (core_spec & CORE_SPEC_THREAD))	/* Reserving threads */
+		core_spec = (uint16_t) NO_VAL;	/* Don't remove cores */
 
+	nodes = bit_size(node_map);
 	for (n = 0; n < nodes; n++) {
 		if (!bit_test(node_map, n))
 			continue;
@@ -912,11 +979,13 @@ static bool _enough_nodes(int avail_nodes, int rem_nodes,
 }
 
 static void _cpus_to_use(int *avail_cpus, int rem_cpus, int rem_nodes,
-			 struct job_details *details_ptr, uint16_t *cpu_cnt)
+			 struct job_details *details_ptr, uint16_t *cpu_cnt,
+			 int node_inx, uint16_t cr_type)
 {
 	int resv_cpus;	/* CPUs to be allocated on other nodes */
+	int vpus;
 
-	if (details_ptr->whole_node)	/* Use all CPUs on this node */
+	if (details_ptr->whole_node == 1)	/* Use all CPUs on this node */
 		return;
 
 	resv_cpus = MAX((rem_nodes - 1), 0);
@@ -924,8 +993,12 @@ static void _cpus_to_use(int *avail_cpus, int rem_cpus, int rem_nodes,
 	rem_cpus -= resv_cpus;
 
 	if (*avail_cpus > rem_cpus) {
+		vpus = select_node_record[node_inx].vpus;
+		if (cr_type & CR_SOCKET)
+			vpus *= select_node_record[node_inx].cores;
 		*avail_cpus = MAX(rem_cpus, (int)details_ptr->pn_min_cpus);
-		*cpu_cnt = *avail_cpus;
+		/* Round up CPU count to CPU in allocation unit (e.g. core) */
+		*cpu_cnt = ((int)(*avail_cpus + vpus - 1) / vpus) * vpus;
 	}
 }
 
@@ -933,7 +1006,8 @@ static void _cpus_to_use(int *avail_cpus, int rem_cpus, int rem_nodes,
 static int _eval_nodes(struct job_record *job_ptr, bitstr_t *node_map,
 			uint32_t min_nodes, uint32_t max_nodes,
 			uint32_t req_nodes, uint32_t cr_node_cnt,
-			uint16_t *cpu_cnt, uint16_t cr_type)
+			uint16_t *cpu_cnt, uint16_t cr_type,
+			bool prefer_alloc_nodes)
 {
 	int i, j, error_code = SLURM_ERROR;
 	int *consec_nodes;	/* how many nodes we can add from this
@@ -968,6 +1042,16 @@ static int _eval_nodes(struct job_record *job_ptr, bitstr_t *node_map,
 	    (!bit_super_set(details_ptr->req_node_bitmap, node_map)))
 		return error_code;
 
+	if (prefer_alloc_nodes && !details_ptr->contiguous) {
+		/* Select resource on busy nodes first in order to leave
+		 * idle resources free for as long as possible so that longer
+		 * running jobs can get more easily started by the backfill
+		 * scheduler plugin */
+		return _eval_nodes_busy(job_ptr, node_map,
+				       min_nodes, max_nodes, req_nodes,
+				       cr_node_cnt, cpu_cnt);
+	}
+
 	if ((cr_type & CR_LLN) ||
 	    (!details_ptr->req_node_layout && job_ptr->part_ptr &&
 	     (job_ptr->part_ptr->flags & PART_FLAG_LLN))) {
@@ -989,9 +1073,15 @@ static int _eval_nodes(struct job_record *job_ptr, bitstr_t *node_map,
 
 	if (switch_record_cnt && switch_record_table) {
 		/* Perform optimized resource selection based upon topology */
-		return _eval_nodes_topo(job_ptr, node_map,
-					min_nodes, max_nodes, req_nodes,
-					cr_node_cnt, cpu_cnt);
+		if (have_dragonfly) {
+			return _eval_nodes_dfly(job_ptr, node_map,
+						min_nodes, max_nodes, req_nodes,
+						cr_node_cnt, cpu_cnt, cr_type);
+		} else {
+			return _eval_nodes_topo(job_ptr, node_map,
+						min_nodes, max_nodes, req_nodes,
+						cr_node_cnt, cpu_cnt, cr_type);
+		}
 	}
 
 	consec_size = 50;	/* start allocation for 50 sets of
@@ -1164,8 +1254,8 @@ static int _eval_nodes(struct job_record *job_ptr, bitstr_t *node_map,
 				 * them and then the step layout will sort
 				 * things out. */
 				_cpus_to_use(&avail_cpus, rem_cpus,
-					     min_rem_nodes,
-					     details_ptr, &cpu_cnt[i]);
+					     min_rem_nodes, details_ptr,
+					     &cpu_cnt[i], i, cr_type);
 				total_cpus += avail_cpus;
 				/* enforce the max_cpus limit */
 				if ((details_ptr->max_cpus != NO_VAL) &&
@@ -1198,8 +1288,8 @@ static int _eval_nodes(struct job_record *job_ptr, bitstr_t *node_map,
 				 * them and then the step layout will sort
 				 * things out. */
 				_cpus_to_use(&avail_cpus, rem_cpus,
-					     min_rem_nodes,
-					     details_ptr, &cpu_cnt[i]);
+					     min_rem_nodes, details_ptr,
+					     &cpu_cnt[i], i, cr_type);
 				total_cpus += avail_cpus;
 				/* enforce the max_cpus limit */
 				if ((details_ptr->max_cpus != NO_VAL) &&
@@ -1278,8 +1368,8 @@ static int _eval_nodes(struct job_record *job_ptr, bitstr_t *node_map,
 				 * them and then the step layout will sort
 				 * things out. */
 				_cpus_to_use(&avail_cpus, rem_cpus,
-					     min_rem_nodes,
-					     details_ptr, &cpu_cnt[i]);
+					     min_rem_nodes, details_ptr,
+					     &cpu_cnt[i], i, cr_type);
 				total_cpus += avail_cpus;
 				/* enforce the max_cpus limit */
 				if ((details_ptr->max_cpus != NO_VAL) &&
@@ -1320,6 +1410,108 @@ fini:	xfree(consec_cpus);
 	return error_code;
 }
 
+/*
+ * A variation of _eval_nodes() to select resources starting with allocated
+ * nodes. Based upon _eval_nodes_lln().
+ */
+static int _eval_nodes_busy(struct job_record *job_ptr, bitstr_t *node_map,
+			uint32_t min_nodes, uint32_t max_nodes,
+			uint32_t req_nodes, uint32_t cr_node_cnt,
+			uint16_t *cpu_cnt)
+{
+	int i, i_start, i_end, error_code = SLURM_ERROR;
+	int rem_cpus, rem_nodes; /* remaining resources desired */
+	int min_rem_nodes;	/* remaining resources desired */
+	int total_cpus = 0;	/* #CPUs allocated to job */
+	int avail_cpus = 0;
+	struct job_details *details_ptr = job_ptr->details;
+	bitstr_t *req_map = details_ptr->req_node_bitmap;
+
+	rem_cpus = details_ptr->min_cpus;
+	rem_nodes = MAX(min_nodes, req_nodes);
+	min_rem_nodes = min_nodes;
+	i_start = bit_ffs(node_map);
+	if (i_start >= 0)
+		i_end = bit_fls(node_map);
+	else
+		i_end = i_start - 1;
+	if (req_map) {
+		for (i = i_start; i <= i_end; i++) {
+			if (!bit_test(req_map, i)) {
+				bit_clear(node_map, i);
+				continue;
+			}
+			if (bit_test(node_map, i)) {
+				avail_cpus = cpu_cnt[i];
+				if ((avail_cpus > 0) && (max_nodes > 0)) {
+					total_cpus += avail_cpus;
+					rem_cpus   -= avail_cpus;
+					rem_nodes--;
+					min_rem_nodes--;
+					/* leaving bitmap set, decr max limit */
+					max_nodes--;
+				} else {	/* node not selected (yet) */
+					bit_clear(node_map, i);
+				}
+			}
+		}
+	} else {
+		bit_nclear(node_map, 0, (cr_node_cnt - 1));
+	}
+
+	/* Compute CPUs already allocated to required nodes */
+	if ((details_ptr->max_cpus != NO_VAL) &&
+	    (total_cpus > details_ptr->max_cpus)) {
+		info("Job %u can't use required nodes due to max CPU limit",
+		     job_ptr->job_id);
+		return error_code;
+	}
+
+	/* Start by using nodes that already have a job running */
+	for (i = i_start; i <= i_end; i++) {
+		if (bit_test(node_map, i) ||
+		    bit_test(idle_node_bitmap, i))
+			continue;
+		avail_cpus = cpu_cnt[i];
+		if ((avail_cpus > 0) && (max_nodes > 0)) {
+			bit_set(node_map, i);
+			total_cpus += avail_cpus;
+			rem_cpus   -= avail_cpus;
+			rem_nodes--;
+			max_nodes--;
+			if ((max_nodes <= 0) ||
+			    ((rem_cpus <= 0) && (rem_nodes <= 0)))
+				break;
+		}
+	}
+
+	/* Now try to use idle nodes */
+	for (i = i_start; i <= i_end; i++) {
+		if (bit_test(node_map, i) ||
+		    !bit_test(idle_node_bitmap, i))
+			continue;
+		avail_cpus = cpu_cnt[i];
+		if ((avail_cpus > 0) && (max_nodes > 0)) {
+			bit_set(node_map, i);
+			total_cpus += avail_cpus;
+			rem_cpus   -= avail_cpus;
+			rem_nodes--;
+			max_nodes--;
+			if ((max_nodes <= 0) ||
+			    ((rem_cpus <= 0) && (rem_nodes <= 0)))
+				break;
+		}
+	}
+
+	if ((rem_cpus > 0) || (min_rem_nodes > 0))  {
+		bit_nclear(node_map, 0, cr_node_cnt-1); /* Clear Map. */
+		error_code = SLURM_ERROR;
+	} else
+		error_code = SLURM_SUCCESS;
+
+	return error_code;
+}
+
 /*
  * A variation of _eval_nodes() to select resources on the least loaded nodes */
 static int _eval_nodes_lln(struct job_record *job_ptr, bitstr_t *node_map,
@@ -1512,7 +1704,7 @@ fini:	return error_code;
 static int _eval_nodes_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 			uint32_t min_nodes, uint32_t max_nodes,
 			uint32_t req_nodes, uint32_t cr_node_cnt,
-			uint16_t *cpu_cnt)
+			uint16_t *cpu_cnt, uint16_t cr_type)
 {
 	bitstr_t **switches_bitmap = NULL;	/* nodes on this switch */
 	int       *switches_cpu_cnt = NULL;	/* total CPUs on switch */
@@ -1584,11 +1776,12 @@ static int _eval_nodes_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 				node_names = bitmap2node_name(
 						switches_bitmap[i]);
 			}
-			debug("switch=%s nodes=%u:%s required:%u speed:%u",
-			      switch_record_table[i].name,
-			      switches_node_cnt[i], node_names,
-			      switches_required[i],
-			      switch_record_table[i].link_speed);
+			info("switch=%s level=%d nodes=%u:%s required:%u speed:%u",
+			     switch_record_table[i].name,
+			     switch_record_table[i].level,
+			     switches_node_cnt[i], node_names,
+			     switches_required[i],
+			     switch_record_table[i].link_speed);
 			xfree(node_names);
 		}
 	}
@@ -1639,7 +1832,7 @@ static int _eval_nodes_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 			 * them and then the step layout will sort
 			 * things out. */
 			_cpus_to_use(&avail_cpus, rem_cpus, min_rem_nodes,
-				     job_ptr->details, &cpu_cnt[i]);
+				     job_ptr->details, &cpu_cnt[i], i, cr_type);
 			rem_nodes--;
 			min_rem_nodes--;
 			max_nodes--;
@@ -1914,7 +2107,8 @@ static int _eval_nodes_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 			 * them and then the step layout will sort
 			 * things out. */
 			_cpus_to_use(&bfsize, rem_cpus, min_rem_nodes,
-				     job_ptr->details, &cpu_cnt[bfloc]);
+				     job_ptr->details, &cpu_cnt[bfloc], bfloc,
+				     cr_type);
 
 			/* enforce the max_cpus limit */
 			if ((job_ptr->details->max_cpus != NO_VAL) &&
@@ -1962,13 +2156,430 @@ static int _eval_nodes_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 	return rc;
 }
 
+/*
+ * A dragonfly network topology aware version of _eval_nodes().
+ * NOTE: The logic here is almost identical to that of _job_test_topo()
+ *       in select_linear.c. Any bug found here is probably also there.
+ */
+static int _eval_nodes_dfly(struct job_record *job_ptr, bitstr_t *bitmap,
+			uint32_t min_nodes, uint32_t max_nodes,
+			uint32_t req_nodes, uint32_t cr_node_cnt,
+			uint16_t *cpu_cnt, uint16_t cr_type)
+{
+	bitstr_t **switches_bitmap = NULL;	/* nodes on this switch */
+	int       *switches_cpu_cnt = NULL;	/* total CPUs on switch */
+	int       *switches_node_cnt = NULL;	/* total nodes on switch */
+	int       *switches_node_use = NULL;	/* nodes from switch used */
+	int        leaf_switch_count = 0;	/* Count of leaf node switches used */
+
+	bitstr_t  *avail_nodes_bitmap = NULL;	/* nodes on any switch */
+	bitstr_t  *req_nodes_bitmap   = NULL;
+	int rem_cpus, rem_nodes;	/* remaining resources desired */
+	int min_rem_nodes;	/* remaining resources desired */
+	int avail_cpus;
+	int total_cpus = 0;	/* #CPUs allocated to job */
+	int i, j, rc = SLURM_SUCCESS;
+	int best_fit_inx, first, last;
+	int best_fit_nodes, best_fit_cpus;
+	int best_fit_location = 0;
+	long time_waiting = 0;
+	int req_switch_cnt = 0;
+	int req_switch_id = -1;
+
+	if (job_ptr->req_switch > 1) {
+		/* Maximum leaf switch count >1 probably makes no sense */
+		info("%s: Resetting job %u leaf switch count from %u to 0",
+		     __func__, job_ptr->job_id, job_ptr->req_switch);
+		job_ptr->req_switch = 0;
+	}
+	if (job_ptr->req_switch) {
+		time_t     time_now;
+		time_now = time(NULL);
+		if (job_ptr->wait4switch_start == 0)
+			job_ptr->wait4switch_start = time_now;
+		time_waiting = time_now - job_ptr->wait4switch_start;
+	}
+
+	rem_cpus = job_ptr->details->min_cpus;
+	rem_nodes = MAX(min_nodes, req_nodes);
+	min_rem_nodes = min_nodes;
+
+	if (job_ptr->details->req_node_bitmap) {
+		req_nodes_bitmap = bit_copy(job_ptr->details->req_node_bitmap);
+		i = bit_set_count(req_nodes_bitmap);
+		if (i > max_nodes) {
+			info("job %u requires more nodes than currently "
+			     "available (%u>%u)",
+			     job_ptr->job_id, i, max_nodes);
+			rc = SLURM_ERROR;
+			goto fini;
+		}
+	}
+
+	/* Construct a set of switch array entries,
+	 * use the same indexes as switch_record_table in slurmctld */
+	switches_bitmap   = xmalloc(sizeof(bitstr_t *) * switch_record_cnt);
+	switches_cpu_cnt  = xmalloc(sizeof(int)        * switch_record_cnt);
+	switches_node_cnt = xmalloc(sizeof(int)        * switch_record_cnt);
+	switches_node_use = xmalloc(sizeof(int)        * switch_record_cnt);
+	avail_nodes_bitmap = bit_alloc(cr_node_cnt);
+	for (i = 0; i < switch_record_cnt; i++) {
+		switches_bitmap[i] = bit_copy(switch_record_table[i].
+					      node_bitmap);
+		bit_and(switches_bitmap[i], bitmap);
+		bit_or(avail_nodes_bitmap, switches_bitmap[i]);
+		switches_node_cnt[i] = bit_set_count(switches_bitmap[i]);
+	}
+	bit_nclear(bitmap, 0, cr_node_cnt - 1);
+
+	if (select_debug_flags & DEBUG_FLAG_SELECT_TYPE) {
+		for (i = 0; i < switch_record_cnt; i++) {
+			char *node_names = NULL;
+			if (switches_node_cnt[i]) {
+				node_names = bitmap2node_name(
+						switches_bitmap[i]);
+			}
+			debug("switch=%s nodes=%u:%s speed:%u",
+			      switch_record_table[i].name,
+			      switches_node_cnt[i], node_names,
+			      switch_record_table[i].link_speed);
+			xfree(node_names);
+		}
+	}
+
+	if (req_nodes_bitmap &&
+	    (!bit_super_set(req_nodes_bitmap, avail_nodes_bitmap))) {
+		info("job %u requires nodes not available on any switch",
+		     job_ptr->job_id);
+		rc = SLURM_ERROR;
+		goto fini;
+	}
+
+	/* Check that specific required nodes are linked together */
+	if (req_nodes_bitmap) {
+		rc = SLURM_ERROR;
+		for (i = 0; i < switch_record_cnt; i++) {
+			if (bit_super_set(req_nodes_bitmap,
+					  switches_bitmap[i])) {
+				rc = SLURM_SUCCESS;
+				break;
+			}
+		}
+		if ( rc == SLURM_ERROR ) {
+			info("job %u requires nodes that are not linked "
+			     "together", job_ptr->job_id);
+			goto fini;
+		}
+	}
+
+	if (req_nodes_bitmap) {
+		/* Accumulate specific required resources, if any */
+		first = bit_ffs(req_nodes_bitmap);
+		last  = bit_fls(req_nodes_bitmap);
+		for (i = first; ((i <= last) && (first >= 0)); i++) {
+			if (!bit_test(req_nodes_bitmap, i))
+				continue;
+			if (max_nodes <= 0) {
+				info("job %u requires nodes than allowed",
+				     job_ptr->job_id);
+				rc = SLURM_ERROR;
+				goto fini;
+			}
+			bit_set(bitmap, i);
+			bit_clear(avail_nodes_bitmap, i);
+			avail_cpus = _get_cpu_cnt(job_ptr, i, cpu_cnt);
+			/* This could result in 0, but if the user
+			 * requested nodes here we will still give
+			 * them and then the step layout will sort
+			 * things out. */
+			_cpus_to_use(&avail_cpus, rem_cpus, min_rem_nodes,
+				     job_ptr->details, &cpu_cnt[i], i, cr_type);
+			rem_nodes--;
+			min_rem_nodes--;
+			max_nodes--;
+			total_cpus += avail_cpus;
+			rem_cpus   -= avail_cpus;
+			for (j = 0; j < switch_record_cnt; j++) {
+				if (!bit_test(switches_bitmap[j], i))
+					continue;
+				bit_clear(switches_bitmap[j], i);
+				switches_node_cnt[j]--;
+				switches_node_use[j]++;
+				if (switch_record_table[j].level == 0) {
+					req_switch_cnt++;
+					req_switch_id = j;
+				}
+			}
+		}
+		/* Compute CPUs already allocated to required nodes */
+		if ((job_ptr->details->max_cpus != NO_VAL) &&
+		    (total_cpus > job_ptr->details->max_cpus)) {
+			info("Job %u can't use required node due to max CPU "
+			     "limit", job_ptr->job_id);
+			rc = SLURM_ERROR;
+			goto fini;
+		}
+		if ((rem_nodes <= 0) && (rem_cpus <= 0))
+			goto fini;
+
+		/* Update bitmaps and node counts for higher-level switches */
+		for (j = 0; j < switch_record_cnt; j++) {
+			if (switches_node_cnt[j] == 0)
+				continue;
+			first = bit_ffs(switches_bitmap[j]);
+			if (first < 0)
+				continue;
+			last  = bit_fls(switches_bitmap[j]);
+			for (i = first; i <= last; i++) {
+				if (!bit_test(switches_bitmap[j], i))
+					continue;
+				if (!bit_test(avail_nodes_bitmap, i)) {
+					/* cleared from lower level */
+					bit_clear(switches_bitmap[j], i);
+					switches_node_cnt[j]--;
+				} else {
+					switches_cpu_cnt[j] +=
+						_get_cpu_cnt(job_ptr, i,
+							     cpu_cnt);
+				}
+			}
+		}
+	} else {
+		/* No specific required nodes, calculate CPU counts */
+		for (j = 0; j < switch_record_cnt; j++) {
+			first = bit_ffs(switches_bitmap[j]);
+			if (first < 0)
+				continue;
+			last  = bit_fls(switches_bitmap[j]);
+			for (i = first; i <= last; i++) {
+				if (!bit_test(switches_bitmap[j], i))
+					continue;
+				switches_cpu_cnt[j] +=
+					_get_cpu_cnt(job_ptr, i, cpu_cnt);
+			}
+		}
+	}
+
+	/* Determine lowest level switch satisfying request with best fit 
+	 * in respect of the specific required nodes if specified
+	 */
+	best_fit_inx = -1;
+	for (j = 0; j < switch_record_cnt; j++) {
+		if ((switches_cpu_cnt[j] < rem_cpus) ||
+		    (!_enough_nodes(switches_node_cnt[j], rem_nodes,
+				    min_nodes, req_nodes)))
+			continue;
+		if ((best_fit_inx != -1) && (req_nodes > min_nodes) &&
+		    (switches_node_cnt[best_fit_inx] < req_nodes) &&
+		    (switches_node_cnt[best_fit_inx] < switches_node_cnt[j])) {
+			/* Try to get up to the requested node count */
+			best_fit_inx = -1;
+		}
+
+		if ((req_switch_cnt == 1) && (req_switch_id == j)) {
+			best_fit_inx = j;
+			break;
+		}
+
+		/*
+		 * If first possibility OR
+		 * lower level switch OR
+		 * same level but tighter switch (less resource waste) OR
+		 * 2 required switches of same level and nodes count
+		 * but the latter accumulated CPUs count is bigger than 
+		 * the former one
+		 */
+		if ((best_fit_inx == -1) ||
+		    (switch_record_table[j].level <
+		     switch_record_table[best_fit_inx].level) ||
+		    ((switch_record_table[j].level ==
+		      switch_record_table[best_fit_inx].level) &&
+		     (switches_node_cnt[j] < switches_node_cnt[best_fit_inx]))){
+			best_fit_inx = j;
+		}
+	}
+	if (best_fit_inx == -1) {
+		debug("job %u: best_fit topology failure: no switch currently "
+		      "has sufficient resource to satisfy the request",
+		      job_ptr->job_id);
+		rc = SLURM_ERROR;
+		goto fini;
+	}
+	bit_and(avail_nodes_bitmap, switches_bitmap[best_fit_inx]);
+
+	/* Identify usable leafs (within higher switch having best fit) */
+	for (j = 0; j < switch_record_cnt; j++) {
+		if ((switch_record_table[j].level != 0) ||
+		    (!bit_super_set(switches_bitmap[j],
+				    switches_bitmap[best_fit_inx]))) {
+			switches_node_cnt[j] = 0;
+		}
+	}
+
+	/* Select resources from leafs on a best-fit or round-robin basis */
+	while ((max_nodes > 0) && ((rem_nodes > 0) || (rem_cpus > 0))) {
+		int *cpus_array = NULL, array_len;
+		best_fit_cpus = best_fit_nodes = 0;
+		for (j = 0; j < switch_record_cnt; j++) {
+			if (switches_node_cnt[j] == 0)
+				continue;
+
+			/* If multiple leaf switches must be used, prefer use
+			 * of leaf switches with fewest number of idle CPUs.
+			 * This results in more leaf switches being used and
+			 * achieves better network bandwidth. */
+			if ((best_fit_nodes == 0) ||
+			    (switches_node_use[best_fit_location] >
+			     switches_node_use[j]) ||
+			    ((switches_node_use[best_fit_location] ==
+			      switches_node_use[j]) &&
+			     (switches_cpu_cnt[j] < best_fit_cpus))) {
+				best_fit_cpus =  switches_cpu_cnt[j];
+				best_fit_nodes = switches_node_cnt[j];
+				best_fit_location = j;
+			}
+		}
+
+		if (best_fit_nodes == 0)
+			break;
+
+		/* Use select nodes from this leaf */
+		first = bit_ffs(switches_bitmap[best_fit_location]);
+		last  = bit_fls(switches_bitmap[best_fit_location]);
+
+		/* compute best-switch nodes available CPUs array */
+		array_len = last - first + 1;
+		cpus_array = xmalloc(sizeof(int) * array_len);
+		for (i = first, j = 0; ((i <= last) && (first >= 0)); i++, j++){
+			if (!bit_test(switches_bitmap
+				      [best_fit_location], i))
+				cpus_array[j] = 0;
+			else
+				cpus_array[j] = _get_cpu_cnt(job_ptr, i, 
+							     cpu_cnt);
+		}
+
+		if (job_ptr->req_switch > 0) {
+			if (time_waiting >= job_ptr->wait4switch) {
+				job_ptr->best_switch = true;
+				debug3("Job=%u Waited %ld sec for switches use=%d",
+					job_ptr->job_id, time_waiting,
+					leaf_switch_count);
+			} else if (leaf_switch_count > job_ptr->req_switch) {
+				/* Allocation is for more than requested number
+				 * of switches */
+				job_ptr->best_switch = false;
+				debug3("Job=%u waited %ld sec for switches=%u "
+					"found=%d wait %u",
+					job_ptr->job_id, time_waiting,
+					job_ptr->req_switch,
+					leaf_switch_count,
+					job_ptr->wait4switch);
+			} else {
+				job_ptr->best_switch = true;
+			}
+		}
+
+		/* accumulate resources from this leaf on a best-fit basis */
+		while ((max_nodes > 0) && ((rem_nodes > 0) || (rem_cpus > 0))) {
+			/* pick a node using a best-fit approach */
+			/* if rem_cpus < 0, then we will search for nodes 
+			 * with lower free cpus nb first
+			 */
+			int suff = 0, bfsuff = 0, bfloc = 0 , bfsize = 0;
+			int ca_bfloc = 0;
+			for (i = first, j = 0; ((i <= last) && (first >= 0)); 
+			     i++, j++) {
+				if (cpus_array[j] == 0)
+					continue;
+				suff = cpus_array[j] >= rem_cpus;
+				if ( (bfsize == 0) ||
+				     (suff && !bfsuff) ||
+				     (suff && (cpus_array[j] < bfsize)) ||
+				     (!suff && (cpus_array[j] > bfsize)) ) {
+					bfsuff = suff;
+					bfloc = i;
+					bfsize = cpus_array[j];
+					ca_bfloc = j;
+				}
+			}
+
+			/* no node found, break */
+			if (bfsize == 0)
+				break;
+			
+			/* clear resources of this node from the switch */
+			bit_clear(switches_bitmap[best_fit_location], bfloc);
+			switches_node_cnt[best_fit_location]--;
+			switches_node_use[best_fit_location]++;
+			switches_cpu_cnt[best_fit_location] -= bfsize;
+			cpus_array[ca_bfloc] = 0;
+
+			/* if this node was already selected in an other */
+			/* switch, skip it */
+			if (bit_test(bitmap, bfloc)) {
+				continue;
+			}
+
+			/* This could result in 0, but if the user
+			 * requested nodes here we will still give
+			 * them and then the step layout will sort
+			 * things out. */
+			_cpus_to_use(&bfsize, rem_cpus, min_rem_nodes,
+				     job_ptr->details, &cpu_cnt[bfloc], bfloc,
+				     cr_type);
+
+			/* enforce the max_cpus limit */
+			if ((job_ptr->details->max_cpus != NO_VAL) &&
+			    (total_cpus+bfsize > job_ptr->details->max_cpus)) {
+				debug2("5 can't use this node since it "
+				       "would put us over the limit");
+				continue;
+			}
+
+			/* take the node into account */
+			bit_set(bitmap, bfloc);
+			total_cpus += bfsize;
+			rem_nodes--;
+			min_rem_nodes--;
+			max_nodes--;
+			rem_cpus -= bfsize;
+			break;
+		}		
+
+		/* free best-switch nodes available cpus array */
+		xfree(cpus_array);
+	}
+
+	if ((rem_cpus <= 0) &&
+	    _enough_nodes(0, rem_nodes, min_nodes, req_nodes)) {
+		rc = SLURM_SUCCESS;
+	} else
+		rc = SLURM_ERROR;
+
+ fini:	FREE_NULL_BITMAP(avail_nodes_bitmap);
+	FREE_NULL_BITMAP(req_nodes_bitmap);
+	if (switches_bitmap) {
+		for (i = 0; i < switch_record_cnt; i++) {
+			FREE_NULL_BITMAP(switches_bitmap[i]);
+		}
+	}
+	xfree(switches_bitmap);
+	xfree(switches_cpu_cnt);
+	xfree(switches_node_cnt);
+	xfree(switches_node_use);
+
+	return rc;
+}
+
 /* this is an intermediary step between _select_nodes and _eval_nodes
  * to tackle the knapsack problem. This code incrementally removes nodes
  * with low cpu counts for the job and re-evaluates each result */
 static int _choose_nodes(struct job_record *job_ptr, bitstr_t *node_map,
 			 uint32_t min_nodes, uint32_t max_nodes,
 			 uint32_t req_nodes, uint32_t cr_node_cnt,
-			 uint16_t *cpu_cnt, uint16_t cr_type)
+			 uint16_t *cpu_cnt, uint16_t cr_type,
+			 bool prefer_alloc_nodes)
 {
 	int i, count, ec, most_cpus = 0;
 	bitstr_t *origmap, *reqmap = NULL;
@@ -1982,7 +2593,7 @@ static int _choose_nodes(struct job_record *job_ptr, bitstr_t *node_map,
 			continue;
 		/* Make sure we don't say we can use a node exclusively
 		 * that is bigger than our max cpu count. */
-		if (((job_ptr->details->whole_node) &&
+		if (((job_ptr->details->whole_node == 1) &&
 		     (job_ptr->details->max_cpus != NO_VAL) &&
 		     (job_ptr->details->max_cpus < cpu_cnt[i])) ||
 		/* OR node has no CPUs */
@@ -2001,8 +2612,8 @@ static int _choose_nodes(struct job_record *job_ptr, bitstr_t *node_map,
 
 	origmap = bit_copy(node_map);
 
-	ec = _eval_nodes(job_ptr, node_map, min_nodes, max_nodes,
-			 req_nodes, cr_node_cnt, cpu_cnt, cr_type);
+	ec = _eval_nodes(job_ptr, node_map, min_nodes, max_nodes, req_nodes,
+			 cr_node_cnt, cpu_cnt, cr_type, prefer_alloc_nodes);
 
 	if (ec == SLURM_SUCCESS) {
 		FREE_NULL_BITMAP(origmap);
@@ -2032,7 +2643,8 @@ static int _choose_nodes(struct job_record *job_ptr, bitstr_t *node_map,
 		if (nochange)
 			continue;
 		ec = _eval_nodes(job_ptr, node_map, min_nodes, max_nodes,
-				 req_nodes, cr_node_cnt, cpu_cnt, cr_type);
+				 req_nodes, cr_node_cnt, cpu_cnt, cr_type,
+				 prefer_alloc_nodes);
 		if (ec == SLURM_SUCCESS) {
 			FREE_NULL_BITMAP(origmap);
 			return ec;
@@ -2072,6 +2684,7 @@ static inline void _log_select_maps(char *loc, bitstr_t *node_map,
  * IN: test_only    - ignore allocated memory check
  * IN: part_core_map - bitmap of cores allocated to jobs of this partition
  *                     or NULL if don't care
+ * IN: prefer_alloc_nodes - select currently allocated nodes first
  * RET - array with number of CPUs available per node or NULL if not runnable
  */
 static uint16_t *_select_nodes(struct job_record *job_ptr, uint32_t min_nodes,
@@ -2080,7 +2693,8 @@ static uint16_t *_select_nodes(struct job_record *job_ptr, uint32_t min_nodes,
 				bitstr_t *core_map,
 				struct node_use_record *node_usage,
 				uint16_t cr_type, bool test_only,
-				bitstr_t *part_core_map)
+				bitstr_t *part_core_map,
+				bool prefer_alloc_nodes)
 {
 	int i, rc;
 	uint16_t *cpu_cnt, *cpus = NULL;
@@ -2123,7 +2737,7 @@ static uint16_t *_select_nodes(struct job_record *job_ptr, uint32_t min_nodes,
 
 	/* choose the best nodes for the job */
 	rc = _choose_nodes(job_ptr, node_map, min_nodes, max_nodes, req_nodes,
-			   cr_node_cnt, cpu_cnt, cr_type);
+			   cr_node_cnt, cpu_cnt, cr_type, prefer_alloc_nodes);
 	_log_select_maps("_select_nodes/choose_nodes", node_map, core_map);
 
 	/* if successful, sync up the core_map with the node_map, and
@@ -2203,7 +2817,8 @@ extern int cr_job_test(struct job_record *job_ptr, bitstr_t *node_bitmap,
 			uint32_t cr_node_cnt,
 			struct part_res_record *cr_part_ptr,
 			struct node_use_record *node_usage,
-			bitstr_t *exc_core_bitmap)
+			bitstr_t *exc_core_bitmap, bool prefer_alloc_nodes,
+			bool qos_preemptor, bool preempt_mode)
 {
 	static int gang_mode = -1;
 	int error_code = SLURM_SUCCESS, ll; /* ll = layout array index */
@@ -2242,7 +2857,7 @@ extern int cr_job_test(struct job_record *job_ptr, bitstr_t *node_bitmap,
 		error_code = _verify_node_state(cr_part_ptr, job_ptr,
 						node_bitmap, cr_type,
 						node_usage, job_node_req,
-						exc_core_bitmap);
+						exc_core_bitmap, qos_preemptor);
 		if (error_code != SLURM_SUCCESS) {
 			return error_code;
 		}
@@ -2281,7 +2896,7 @@ extern int cr_job_test(struct job_record *job_ptr, bitstr_t *node_bitmap,
 	cpu_count = _select_nodes(job_ptr, min_nodes, max_nodes, req_nodes,
 				  node_bitmap, cr_node_cnt, free_cores,
 				  node_usage, cr_type, test_only,
-				  part_core_map);
+				  part_core_map, prefer_alloc_nodes);
 	if (cpu_count == NULL) {
 		/* job cannot fit */
 		FREE_NULL_BITMAP(orig_map);
@@ -2407,13 +3022,13 @@ extern int cr_job_test(struct job_record *job_ptr, bitstr_t *node_bitmap,
 			}
 		}
 	}
-	if (job_ptr->details->whole_node)
+	if (job_ptr->details->whole_node == 1)
 		_block_whole_nodes(node_bitmap, avail_cores, free_cores);
 
 	cpu_count = _select_nodes(job_ptr, min_nodes, max_nodes, req_nodes,
 				  node_bitmap, cr_node_cnt, free_cores,
 				  node_usage, cr_type, test_only,
-				  part_core_map);
+				  part_core_map, prefer_alloc_nodes);
 
 	if ((cpu_count) && (job_ptr->best_switch)) {
 		/* job fits! We're done. */
@@ -2431,8 +3046,7 @@ extern int cr_job_test(struct job_record *job_ptr, bitstr_t *node_bitmap,
 		 * removes jobs from simulated resource allocation map
 		 * before this point. */
 		if (select_debug_flags & DEBUG_FLAG_SELECT_TYPE) {
-			info("cons_res: cr_job_test: test 1 fail - "
-			     "no idle resources available");
+			info("cons_res: cr_job_test: test 1 fail - no idle resources available");
 		}
 		goto alloc_job;
 	}
@@ -2487,14 +3101,14 @@ extern int cr_job_test(struct job_record *job_ptr, bitstr_t *node_bitmap,
 			bit_and(free_cores, tmpcore);
 		}
 	}
-	if (job_ptr->details->whole_node)
+	if (job_ptr->details->whole_node == 1)
 		_block_whole_nodes(node_bitmap, avail_cores, free_cores);
 	/* make these changes permanent */
 	bit_copybits(avail_cores, free_cores);
 	cpu_count = _select_nodes(job_ptr, min_nodes, max_nodes, req_nodes,
 				  node_bitmap, cr_node_cnt, free_cores,
 				  node_usage, cr_type, test_only,
-				  part_core_map);
+				  part_core_map, prefer_alloc_nodes);
 	if (!cpu_count) {
 		/* job needs resources that are currently in use by
 		 * higher-priority jobs, so fail for now */
@@ -2532,7 +3146,7 @@ extern int cr_job_test(struct job_record *job_ptr, bitstr_t *node_bitmap,
 	cpu_count = _select_nodes(job_ptr, min_nodes, max_nodes, req_nodes,
 				  node_bitmap, cr_node_cnt, free_cores,
 				  node_usage, cr_type, test_only,
-				  part_core_map);
+				  part_core_map, prefer_alloc_nodes);
 	if (cpu_count) {
 		/* jobs from low-priority partitions are the only thing left
 		 * in our way. for now we'll ignore them, but FIXME: we need
@@ -2571,7 +3185,8 @@ extern int cr_job_test(struct job_record *job_ptr, bitstr_t *node_bitmap,
 		cpu_count = _select_nodes(job_ptr, min_nodes, max_nodes,
 					  req_nodes, node_bitmap, cr_node_cnt,
 					  free_cores, node_usage, cr_type,
-					  test_only, part_core_map);
+					  test_only, part_core_map,
+					  prefer_alloc_nodes);
 		if (select_debug_flags & DEBUG_FLAG_SELECT_TYPE) {
 			info("cons_res: cr_job_test: test 4 pass - "
 			     "first row found");
@@ -2579,9 +3194,12 @@ extern int cr_job_test(struct job_record *job_ptr, bitstr_t *node_bitmap,
 		goto alloc_job;
 	}
 
-	cr_sort_part_rows(jp_ptr);
+	if ((jp_ptr->num_rows > 1) && !preempt_by_qos)
+		cr_sort_part_rows(jp_ptr);	/* Preserve row order for QOS */
 	c = jp_ptr->num_rows;
-	if (job_node_req != NODE_CR_AVAILABLE)
+	if (preempt_by_qos && !qos_preemptor)
+		c--;				/* Do not use extra row */
+	if (preempt_by_qos && (job_node_req != NODE_CR_AVAILABLE))
 		c = 1;
 	for (i = 0; i < c; i++) {
 		if (!jp_ptr->row[i].row_bitmap)
@@ -2594,7 +3212,8 @@ extern int cr_job_test(struct job_record *job_ptr, bitstr_t *node_bitmap,
 		cpu_count = _select_nodes(job_ptr, min_nodes, max_nodes,
 					  req_nodes, node_bitmap, cr_node_cnt,
 					  free_cores, node_usage, cr_type,
-					  test_only, part_core_map);
+					  test_only, part_core_map,
+					  prefer_alloc_nodes);
 		if (cpu_count) {
 			if (select_debug_flags & DEBUG_FLAG_SELECT_TYPE) {
 				info("cons_res: cr_job_test: test 4 pass - "
@@ -2617,7 +3236,8 @@ extern int cr_job_test(struct job_record *job_ptr, bitstr_t *node_bitmap,
 		cpu_count = _select_nodes(job_ptr, min_nodes, max_nodes,
 					  req_nodes, node_bitmap, cr_node_cnt,
 					  free_cores, node_usage, cr_type,
-					  test_only, part_core_map);
+					  test_only, part_core_map,
+					  prefer_alloc_nodes);
 	}
 
 	if (!cpu_count) {
@@ -2782,7 +3402,7 @@ alloc_job:
 
 	/* distribute the tasks and clear any unused cores */
 	job_ptr->job_resrcs = job_res;
-	error_code = cr_dist(job_ptr, cr_type);
+	error_code = cr_dist(job_ptr, cr_type, preempt_mode);
 	if (error_code != SLURM_SUCCESS) {
 		free_job_resources(&job_ptr->job_resrcs);
 		return error_code;
@@ -2790,7 +3410,7 @@ alloc_job:
 
 	/* translate job_res->cpus array into format with rep count */
 	build_cnt = build_job_resources_cpu_array(job_res);
-	if (job_ptr->details->whole_node) {
+	if (job_ptr->details->whole_node == 1) {
 		first = bit_ffs(job_res->node_bitmap);
 		if (first != -1)
 			last  = bit_fls(job_res->node_bitmap);
@@ -2860,7 +3480,7 @@ alloc_job:
 			job_res->memory_allocated[i] = save_mem;
 		}
 	} else {	/* --mem=0, allocate job all memory on node */
-		uint32_t lowest_mem = 0;
+		uint32_t avail_mem, lowest_mem = 0;
 		first = bit_ffs(job_res->node_bitmap);
 		if (first != -1)
 			last  = bit_fls(job_res->node_bitmap);
@@ -2869,11 +3489,11 @@ alloc_job:
 		for (i = first, j = 0; i <= last; i++) {
 			if (!bit_test(job_res->node_bitmap, i))
 				continue;
-			if ((j == 0) ||
-			    (lowest_mem > select_node_record[i].real_memory))
-				lowest_mem = select_node_record[i].real_memory;
-			job_res->memory_allocated[j++] =
-				select_node_record[i].real_memory;
+			avail_mem = select_node_record[i].real_memory -
+				    select_node_record[i].mem_spec_limit;
+			if ((j == 0) || (lowest_mem > avail_mem))
+				lowest_mem = avail_mem;
+			job_res->memory_allocated[j++] = avail_mem;
 		}
 		details_ptr->pn_min_memory = lowest_mem;
 	}
diff --git a/src/plugins/select/cons_res/job_test.h b/src/plugins/select/cons_res/job_test.h
index 631b3cd13..89a0be1da 100644
--- a/src/plugins/select/cons_res/job_test.h
+++ b/src/plugins/select/cons_res/job_test.h
@@ -66,6 +66,7 @@ int cr_job_test(struct job_record *job_ptr, bitstr_t *node_bitmap,
 		int mode, uint16_t cr_type,
 		enum node_cr_state job_node_req, uint32_t cr_node_cnt,
 		struct part_res_record *cr_part_ptr,
-		struct node_use_record *node_usage, bitstr_t *exc_core_bitmap);
+		struct node_use_record *node_usage, bitstr_t *exc_core_bitmap,
+		bool prefer_alloc_nodes, bool qos_preemptor, bool preempt_mode);
 
 #endif /* !_CR_JOB_TEST_H */
diff --git a/src/plugins/select/cons_res/select_cons_res.c b/src/plugins/select/cons_res/select_cons_res.c
index fe81206fe..d1a162f22 100644
--- a/src/plugins/select/cons_res/select_cons_res.c
+++ b/src/plugins/select/cons_res/select_cons_res.c
@@ -164,21 +164,21 @@ uint32_t *cr_node_cores_offset;
  * only load select plugins if the plugin_type string has a
  * prefix of "select/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the node selection API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "Consumable Resources (CR) Node Selection plugin";
 const char plugin_type[] = "select/cons_res";
 const uint32_t plugin_id      = 101;
-const uint32_t plugin_version = 120;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 const uint32_t pstate_version = 7;	/* version control on saved state */
 
 uint16_t cr_type = CR_CPU; /* cr_type is overwritten in init() */
 
+bool     backfill_busy_nodes  = false;
+bool     have_dragonfly       = false;
 bool     pack_serial_at_end   = false;
+bool     preempt_by_qos       = false;
 uint64_t select_debug_flags   = 0;
 uint16_t select_fast_schedule = 0;
 
@@ -430,6 +430,8 @@ static void _create_part_data(void)
 		this_ptr->num_rows = p_ptr->max_share;
 		if (this_ptr->num_rows & SHARED_FORCE)
 			this_ptr->num_rows &= (~SHARED_FORCE);
+		if (preempt_by_qos)	/* Add row for QOS preemption */
+			this_ptr->num_rows++;
 		/* SHARED=EXCLUSIVE sets max_share = 0 */
 		if (this_ptr->num_rows < 1)
 			this_ptr->num_rows = 1;
@@ -466,9 +468,7 @@ static void _destroy_node_data(struct node_use_record *node_usage,
 	xfree(node_data);
 	if (node_usage) {
 		for (i = 0; i < select_node_cnt; i++) {
-			if (node_usage[i].gres_list) {
-				list_destroy(node_usage[i].gres_list);
-			}
+			FREE_NULL_LIST(node_usage[i].gres_list);
 		}
 		xfree(node_usage);
 	}
@@ -838,7 +838,13 @@ static int _add_job_to_res(struct job_record *job_ptr, int action)
 				      job_ptr->job_id);
 			}
 		}
+		if ((powercap_get_cluster_current_cap() != 0) &&
+		    (which_power_layout() == 2)) {
+			adapt_layouts(job, job_ptr->details->cpu_freq_max, n,
+				      node_ptr->name, true);
+		}
 	}
+	
 
 	/* add cores */
 	if (action != 1) {
@@ -1061,7 +1067,7 @@ static int _job_expand(struct job_record *from_job_ptr,
 				}
 			}
 		}
-		if (to_job_ptr->details->whole_node) {
+		if (to_job_ptr->details->whole_node == 1) {
 			to_job_ptr->total_cpus += select_node_record[i].cpus;
 		} else {
 			to_job_ptr->total_cpus += new_job_resrcs_ptr->
@@ -1175,8 +1181,6 @@ static int _rm_job_from_res(struct part_res_record *part_record_ptr,
 		}
 
 		if (action != 2) {
-			if (job->memory_allocated[n] == 0)
-				continue;	/* no memory allocated */
 			if (node_usage[i].alloc_memory <
 			    job->memory_allocated[n]) {
 				error("cons_res: node %s memory is "
@@ -1186,10 +1190,14 @@ static int _rm_job_from_res(struct part_res_record *part_record_ptr,
 				      job->memory_allocated[n],
 				      job_ptr->job_id);
 				node_usage[i].alloc_memory = 0;
-			} else {
+			} else
 				node_usage[i].alloc_memory -=
 					job->memory_allocated[n];
-			}
+		}
+		if ((powercap_get_cluster_current_cap() != 0) &&
+		    (which_power_layout() == 2)) {
+			adapt_layouts(job, job_ptr->details->cpu_freq_max, n,
+				      node_ptr->name, false);
 		}
 	}
 
@@ -1323,6 +1331,7 @@ static int _rm_job_from_one_node(struct job_record *job_ptr,
 		job->cpus[n] = 0;
 		job->ncpus = build_job_resources_cpu_array(job);
 		clear_job_resources_node(job, n);
+
 		if (node_usage[i].alloc_memory < job->memory_allocated[n]) {
 			error("cons_res: node %s memory is underallocated "
 			      "(%u-%u) for job %u",
@@ -1331,6 +1340,7 @@ static int _rm_job_from_one_node(struct job_record *job_ptr,
 			node_usage[i].alloc_memory = 0;
 		} else
 			node_usage[i].alloc_memory -= job->memory_allocated[n];
+
 		job->memory_allocated[n] = 0;
 		break;
 	}
@@ -1459,21 +1469,19 @@ static int _test_only(struct job_record *job_ptr, bitstr_t *bitmap,
 	uint16_t tmp_cr_type = cr_type;
 
 	if (job_ptr->part_ptr->cr_type) {
-		if (((cr_type & CR_SOCKET) || (cr_type & CR_CORE)) &&
-		    (cr_type & CR_ALLOCATE_FULL_SOCKET)) {
+		if ((cr_type & CR_SOCKET) || (cr_type & CR_CORE)) {
 			tmp_cr_type &= ~(CR_SOCKET|CR_CORE);
 			tmp_cr_type |= job_ptr->part_ptr->cr_type;
 		} else {
 			info("cons_res: Can't use Partition SelectType unless "
-			     "using CR_Socket or CR_Core and "
-			     "CR_ALLOCATE_FULL_SOCKET");
+			     "using CR_Socket or CR_Core");
 		}
 	}
 
 	rc = cr_job_test(job_ptr, bitmap, min_nodes, max_nodes, req_nodes,
 			 SELECT_MODE_TEST_ONLY, tmp_cr_type, job_node_req,
 			 select_node_cnt, select_part_record,
-			 select_node_usage, NULL);
+			 select_node_usage, NULL, false, false, false);
 	return rc;
 }
 
@@ -1503,38 +1511,58 @@ static int _run_now(struct job_record *job_ptr, bitstr_t *bitmap,
 {
 	int rc;
 	bitstr_t *orig_map = NULL, *save_bitmap;
-	struct job_record *tmp_job_ptr;
+	struct job_record *tmp_job_ptr = NULL;
 	ListIterator job_iterator, preemptee_iterator;
 	struct part_res_record *future_part;
 	struct node_use_record *future_usage;
 	bool remove_some_jobs = false;
 	uint16_t pass_count = 0;
-	uint16_t mode;
+	uint16_t mode = (uint16_t) NO_VAL;
 	uint16_t tmp_cr_type = cr_type;
+	bool preempt_mode = false;
 
 	save_bitmap = bit_copy(bitmap);
 top:	orig_map = bit_copy(save_bitmap);
 
 	if (job_ptr->part_ptr->cr_type) {
-		if (((cr_type & CR_SOCKET) || (cr_type & CR_CORE)) &&
-		    (cr_type & CR_ALLOCATE_FULL_SOCKET)) {
+		if ((cr_type & CR_SOCKET) || (cr_type & CR_CORE)) {
 			tmp_cr_type &= ~(CR_SOCKET|CR_CORE);
 			tmp_cr_type |= job_ptr->part_ptr->cr_type;
 		} else {
 			info("cons_res: Can't use Partition SelectType unless "
-			     "using CR_Socket or CR_Core and "
-			     "CR_ALLOCATE_FULL_SOCKET");
+			     "using CR_Socket or CR_Core");
 		}
 	}
 
 	rc = cr_job_test(job_ptr, bitmap, min_nodes, max_nodes, req_nodes,
 			 SELECT_MODE_RUN_NOW, tmp_cr_type, job_node_req,
 			 select_node_cnt, select_part_record,
-			 select_node_usage, exc_core_bitmap);
+			 select_node_usage, exc_core_bitmap, false, false,
+			 preempt_mode);
 
-	if ((rc != SLURM_SUCCESS) && preemptee_candidates) {
+	if ((rc != SLURM_SUCCESS) && preemptee_candidates && preempt_by_qos) {
+		/* Determine QOS preempt mode of first job */
+		job_iterator = list_iterator_create(preemptee_candidates);
+		if ((tmp_job_ptr = (struct job_record *)
+		    list_next(job_iterator))) {
+			mode = slurm_job_preempt_mode(tmp_job_ptr);
+		}
+		list_iterator_destroy(job_iterator);
+	}
+	if ((rc != SLURM_SUCCESS) && preemptee_candidates && preempt_by_qos &&
+	    (mode == PREEMPT_MODE_SUSPEND) &&
+	    (job_ptr->priority != 0)) {	/* Job can be held by bad allocate */
+		/* Try to schedule job using extra row of core bitmap */
+		bit_or(bitmap, orig_map);
+		rc = cr_job_test(job_ptr, bitmap, min_nodes, max_nodes,
+				 req_nodes, SELECT_MODE_RUN_NOW, tmp_cr_type,
+				 job_node_req, select_node_cnt,
+				 select_part_record, select_node_usage,
+				 exc_core_bitmap, false, true, preempt_mode);
+	} else if ((rc != SLURM_SUCCESS) && preemptee_candidates) {
 		int preemptee_cand_cnt = list_count(preemptee_candidates);
 		/* Remove preemptable jobs from simulated environment */
+		preempt_mode = true;
 		future_part = _dup_part_data(select_part_record);
 		if (future_part == NULL) {
 			FREE_NULL_BITMAP(orig_map);
@@ -1570,7 +1598,8 @@ top:	orig_map = bit_copy(save_bitmap);
 					 tmp_cr_type, job_node_req,
 					 select_node_cnt,
 					 future_part, future_usage,
-					 exc_core_bitmap);
+					 exc_core_bitmap, false, false,
+					 preempt_mode);
 			tmp_job_ptr->details->usable_nodes = 0;
 			if (rc != SLURM_SUCCESS)
 				continue;
@@ -1646,8 +1675,7 @@ top:	orig_map = bit_copy(save_bitmap);
 			}
 			list_iterator_destroy(preemptee_iterator);
 			if (!remove_some_jobs) {
-				list_destroy(*preemptee_job_list);
-				*preemptee_job_list = NULL;
+				FREE_NULL_LIST(*preemptee_job_list);
 			}
 		}
 
@@ -1700,18 +1728,17 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	int action, rc = SLURM_ERROR;
 	time_t now = time(NULL);
 	uint16_t tmp_cr_type = cr_type;
+	bool qos_preemptor = false;
 
 	orig_map = bit_copy(bitmap);
 
 	if (job_ptr->part_ptr->cr_type) {
-		if (((cr_type & CR_SOCKET) || (cr_type & CR_CORE)) &&
-		    (cr_type & CR_ALLOCATE_FULL_SOCKET)) {
+		if ((cr_type & CR_SOCKET) || (cr_type & CR_CORE)) {
 			tmp_cr_type &= ~(CR_SOCKET|CR_CORE);
 			tmp_cr_type |= job_ptr->part_ptr->cr_type;
 		} else {
 			info("cons_res: Can't use Partition SelectType unless "
-			     "using CR_Socket or CR_Core and "
-			     "CR_ALLOCATE_FULL_SOCKET");
+			     "using CR_Socket or CR_Core");
 		}
 	}
 
@@ -1719,7 +1746,8 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	rc = cr_job_test(job_ptr, bitmap, min_nodes, max_nodes, req_nodes,
 			 SELECT_MODE_WILL_RUN, tmp_cr_type, job_node_req,
 			 select_node_cnt, select_part_record,
-			 select_node_usage, exc_core_bitmap);
+			 select_node_usage, exc_core_bitmap, false, false,
+			 false);
 	if (rc == SLURM_SUCCESS) {
 		FREE_NULL_BITMAP(orig_map);
 		job_ptr->start_time = now;
@@ -1757,9 +1785,11 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 			uint16_t mode = slurm_job_preempt_mode(tmp_job_ptr);
 			if (mode == PREEMPT_MODE_OFF)
 				continue;
-			if (mode == PREEMPT_MODE_SUSPEND)
+			if (mode == PREEMPT_MODE_SUSPEND) {
 				action = 2;	/* remove cores, keep memory */
-			else
+				if (preempt_by_qos)
+					qos_preemptor = true;
+			} else
 				action = 0;	/* remove cores and memory */
 			/* Remove preemptable job now */
 			_rm_job_from_res(future_part, future_usage,
@@ -1775,7 +1805,8 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		rc = cr_job_test(job_ptr, bitmap, min_nodes, max_nodes,
 				 req_nodes, SELECT_MODE_WILL_RUN, tmp_cr_type,
 				 job_node_req, select_node_cnt, future_part,
-				 future_usage, exc_core_bitmap);
+				 future_usage, exc_core_bitmap, false,
+				 qos_preemptor, true);
 		if (rc == SLURM_SUCCESS) {
 			/* Actual start time will actually be later than "now",
 			 * but return "now" for backfill scheduler to
@@ -1804,7 +1835,8 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 					 SELECT_MODE_WILL_RUN, tmp_cr_type,
 					 job_node_req, select_node_cnt,
 					 future_part, future_usage,
-					 exc_core_bitmap);
+					 exc_core_bitmap, backfill_busy_nodes,
+					 qos_preemptor, true);
 			if (rc == SLURM_SUCCESS) {
 				if (tmp_job_ptr->end_time <= now) {
 					job_ptr->start_time =
@@ -1838,7 +1870,7 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		list_iterator_destroy(preemptee_iterator);
 	}
 
-	list_destroy(cr_job_list);
+	FREE_NULL_LIST(cr_job_list);
 	_destroy_part_data(future_part);
 	_destroy_node_data(future_usage, NULL);
 	FREE_NULL_BITMAP(orig_map);
@@ -1867,11 +1899,18 @@ _compare_support(const void *v, const void *v1)
  */
 extern int init(void)
 {
+	char *topo_param;
+
 	cr_type = slurmctld_conf.select_type_param;
 	if (cr_type)
 		verbose("%s loaded with argument %u", plugin_name, cr_type);
 	select_debug_flags = slurm_get_debug_flags();
 
+	topo_param = slurm_get_topology_param();
+	if (topo_param && strstr(topo_param, "dragonfly"))
+		have_dragonfly = true;
+	xfree(topo_param);
+
 	return SLURM_SUCCESS;
 }
 
@@ -1941,7 +1980,7 @@ extern bool select_p_node_ranking(struct node_record *node_ptr, int node_cnt)
  */
 extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
 {
-	char *sched_params, *tmp_ptr;
+	char *preempt_type, *sched_params, *tmp_ptr;
 	int i, tot_core;
 
 	info("cons_res: select_p_node_init");
@@ -1962,17 +2001,34 @@ extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
 	sched_params = slurm_get_sched_params();
 	if (sched_params && strstr(sched_params, "preempt_strict_order"))
 		preempt_strict_order = true;
+	else
+		preempt_strict_order = false;
 	if (sched_params &&
-	    (tmp_ptr = strstr(sched_params, "preempt_reorder_count=")))
+	    (tmp_ptr = strstr(sched_params, "preempt_reorder_count="))) {
 		preempt_reorder_cnt = atoi(tmp_ptr + 22);
-	if (preempt_reorder_cnt < 0) {
-		fatal("Invalid SchedulerParameters preempt_reorder_count: %d",
-		      preempt_reorder_cnt);
+		if (preempt_reorder_cnt < 0) {
+			fatal("Invalid SchedulerParameters "
+			      "preempt_reorder_count: %d",
+			      preempt_reorder_cnt);
+		}
 	}
 	if (sched_params && strstr(sched_params, "pack_serial_at_end"))
 		pack_serial_at_end = true;
+	else
+		pack_serial_at_end = false;
+	if (sched_params && strstr(sched_params, "bf_busy_nodes"))
+		backfill_busy_nodes = true;
+	else
+		backfill_busy_nodes = false;
 	xfree(sched_params);
 
+	preempt_type = slurm_get_preempt_type();
+	if (preempt_type && strstr(preempt_type, "qos"))
+		preempt_by_qos = true;
+	else
+		preempt_by_qos = false;
+	xfree(preempt_type);
+
 	/* initial global core data structures */
 	select_state_initializing = true;
 	select_fast_schedule = slurm_get_fast_schedule();
@@ -1987,6 +2043,8 @@ extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
 
 	for (i = 0; i < select_node_cnt; i++) {
 		select_node_record[i].node_ptr = &node_ptr[i];
+		select_node_record[i].mem_spec_limit = node_ptr[i].
+						       mem_spec_limit;
 		if (select_fast_schedule) {
 			struct config_record *config_ptr;
 			config_ptr = node_ptr[i].config_ptr;
@@ -1994,6 +2052,7 @@ extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
 			select_node_record[i].boards  = config_ptr->boards;
 			select_node_record[i].sockets = config_ptr->sockets;
 			select_node_record[i].cores   = config_ptr->cores;
+			select_node_record[i].threads = config_ptr->threads;
 			select_node_record[i].vpus    = config_ptr->threads;
 			select_node_record[i].real_memory = config_ptr->
 				real_memory;
@@ -2002,11 +2061,13 @@ extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
 			select_node_record[i].boards  = node_ptr[i].boards;
 			select_node_record[i].sockets = node_ptr[i].sockets;
 			select_node_record[i].cores   = node_ptr[i].cores;
+			select_node_record[i].threads = node_ptr[i].threads;
 			select_node_record[i].vpus    = node_ptr[i].threads;
 			select_node_record[i].real_memory = node_ptr[i].
 				real_memory;
 		}
-		tot_core = select_node_record[i].sockets *
+		tot_core = select_node_record[i].boards  *
+			   select_node_record[i].sockets *
 			   select_node_record[i].cores;
 		if (tot_core >= select_node_record[i].cpus)
 			select_node_record[i].vpus = 1;
@@ -2081,7 +2142,7 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t * bitmap,
 	if (slurm_get_use_spec_resources() == 0)
 		job_ptr->details->core_spec = (uint16_t) NO_VAL;
 	if ((job_ptr->details->core_spec != (uint16_t) NO_VAL) &&
-	    (job_ptr->details->whole_node == 0)) {
+	    (job_ptr->details->whole_node != 1)) {
 		info("Setting Exclusive mode for job %u with CoreSpec=%u",
 		      job_ptr->job_id, job_ptr->details->core_spec);
 		job_ptr->details->whole_node = 1;
@@ -2569,6 +2630,8 @@ extern int select_p_update_node_config (int index)
 
 	select_node_record[index].real_memory = select_node_record[index].
 		node_ptr->real_memory;
+	select_node_record[index].mem_spec_limit = select_node_record[index].
+		node_ptr->mem_spec_limit;
 	return SLURM_SUCCESS;
 }
 
diff --git a/src/plugins/select/cons_res/select_cons_res.h b/src/plugins/select/cons_res/select_cons_res.h
index 6c0e96b99..d04e4707f 100644
--- a/src/plugins/select/cons_res/select_cons_res.h
+++ b/src/plugins/select/cons_res/select_cons_res.h
@@ -55,11 +55,12 @@
 #include "src/common/node_select.h"
 #include "src/common/pack.h"
 #include "src/common/slurm_protocol_api.h"
+#include "src/common/slurm_resource_info.h"
+#include "src/common/slurm_topology.h"
 #include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
-#include "src/common/slurm_resource_info.h"
-#include "src/common/slurm_topology.h"
+#include "src/slurmctld/powercapping.h"
 #include "src/slurmctld/preempt.h"
 #include "src/slurmctld/slurmctld.h"
 
@@ -77,7 +78,7 @@ struct part_row_data {
 /* partition CPU allocation data */
 struct part_res_record {
 	struct part_res_record *next;	/* Ptr to next part_res_record */
-	uint16_t num_rows;		/* Number of row_bitmaps */
+	uint16_t num_rows;		/* Number of elements in "row" array */
 	struct part_record *part_ptr;   /* controller part record pointer */
 	struct part_row_data *row;	/* array of rows containing jobs */
 };
@@ -89,9 +90,11 @@ struct node_res_record {
 	uint16_t boards; 		/* count of boards configured */
 	uint16_t sockets;		/* count of sockets configured */
 	uint16_t cores;			/* count of cores configured */
+	uint16_t threads;		/* count of hyperthreads per core */
 	uint16_t vpus;			/* count of virtual cpus (hyperthreads)
 					 * configured per core */
 	uint32_t real_memory;		/* MB of real memory configured */
+	uint32_t mem_spec_limit;	/* MB of specialized/system memory */
 };
 
 /* per-node resource usage record */
@@ -103,7 +106,10 @@ struct node_use_record {
 	uint16_t node_state;		/* see node_cr_state comments */
 };
 
+extern bool     backfill_busy_nodes;
+extern bool     have_dragonfly;
 extern bool     pack_serial_at_end;
+extern bool     preempt_by_qos;
 extern uint64_t select_debug_flags;
 extern uint16_t select_fast_schedule;
 
diff --git a/src/plugins/select/cray/Makefile.in b/src/plugins/select/cray/Makefile.in
index 353416e29..8d3f45187 100644
--- a/src/plugins/select/cray/Makefile.in
+++ b/src/plugins/select/cray/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/select/cray/select_cray.c b/src/plugins/select/cray/select_cray.c
index f0ce249c1..3c4295c36 100644
--- a/src/plugins/select/cray/select_cray.c
+++ b/src/plugins/select/cray/select_cray.c
@@ -159,6 +159,8 @@ static int active_post_nhc_cnt = 0;
 static pthread_mutex_t throttle_mutex = PTHREAD_MUTEX_INITIALIZER;
 static pthread_cond_t throttle_cond = PTHREAD_COND_INITIALIZER;
 
+static bool scheduling_disabled = false; /* true when the backup slurmctld runs on an external Cray node and scheduling must be bypassed */
+
 #if defined(HAVE_NATIVE_CRAY_GA) && !defined(HAVE_CRAY_NETWORK)
 static size_t topology_num_nodes = 0;
 static alpsc_topology_t *topology = NULL;
@@ -230,21 +232,21 @@ static uint64_t debug_flags = 0;
  * only load select plugins if the plugin_type string has a
  * prefix of "select/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the node selection API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]	= "Cray node selection plugin";
 const char plugin_type[]	= "select/cray";
 uint32_t plugin_id		= 107;
-const uint32_t plugin_version	= 120;
+const uint32_t plugin_version	= SLURM_VERSION_NUMBER;
 
 extern int select_p_select_jobinfo_free(select_jobinfo_t *jobinfo);
 
 static int _run_nhc(nhc_info_t *nhc_info)
 {
+	if (scheduling_disabled)
+		return 0;
+
 #ifdef HAVE_NATIVE_CRAY
 	int argc = 13, status = 1, wait_rc, i = 0;
 	char *argv[argc];
@@ -750,6 +752,9 @@ static void _update_app(struct job_record *job_ptr,
 
 static void _start_aeld_thread()
 {
+	if (scheduling_disabled)
+		return;
+
 	debug("cray: %s", __func__);
 
 	// Spawn the aeld thread, only in slurmctld.
@@ -765,6 +770,9 @@ static void _start_aeld_thread()
 
 static void _stop_aeld_thread()
 {
+	if (scheduling_disabled)
+		return;
+
 	debug("cray: %s", __func__);
 
 	_aeld_cleanup();
@@ -1164,6 +1172,10 @@ unpack_error:
  */
 extern int init ( void )
 {
+#if defined(HAVE_NATIVE_CRAY_GA) && !defined(HAVE_CRAY_NETWORK)
+	char *err_msg = NULL;
+#endif
+
 	/* We must call the api here since we call this from other
 	 * things other than the slurmctld.
 	 */
@@ -1172,6 +1184,21 @@ extern int init ( void )
 		plugin_id = 108;
 	debug_flags = slurm_get_debug_flags();
 
+	if (!slurmctld_primary && run_in_daemon("slurmctld")) {
+		if (slurmctld_config.scheduling_disabled) {
+			info("Scheduling disabled on backup");
+			scheduling_disabled = true;
+		}
+
+#if defined(HAVE_NATIVE_CRAY_GA) && !defined(HAVE_CRAY_NETWORK)
+		else if (alpsc_get_topology(&err_msg, &topology,
+					    &topology_num_nodes))
+			fatal("Running backup on an external node requires "
+			      "the \"no_backup_scheduling\" "
+			      "SchedulerParameter.");
+#endif
+	}
+
 	verbose("%s loaded", plugin_name);
 	return SLURM_SUCCESS;
 }
@@ -1299,6 +1326,9 @@ extern int select_p_state_restore(char *dir_name)
 	uint16_t protocol_version = (uint16_t)NO_VAL;
 	uint32_t record_count;
 
+	if (scheduling_disabled)
+		return SLURM_SUCCESS;
+
 	debug("cray: select_p_state_restore");
 
 	static time_t last_config_update = (time_t) 0;
@@ -1369,7 +1399,16 @@ extern int select_p_state_restore(char *dir_name)
 
 		if (_unpack_blade(&blade_info, buffer, protocol_version))
 			goto unpack_error;
-		if (blade_info.id == blade_array[i].id) {
+		if (!blade_info.node_bitmap) {
+			error("Blade %"PRIu64"(%d %d %d) doesn't have "
+			      "any nodes from the state file!  "
+			      "Unexpected results could "
+			      "happen if jobs are running!",
+			      blade_info.id,
+			      GET_BLADE_X(blade_info.id),
+			      GET_BLADE_Y(blade_info.id),
+			      GET_BLADE_Z(blade_info.id));
+		} else if (blade_info.id == blade_array[i].id) {
 			//blade_array[i].job_cnt = blade_info.job_cnt;
 			if (!bit_equal(blade_array[i].node_bitmap,
 				       blade_info.node_bitmap))
@@ -1522,6 +1561,9 @@ extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
 	int i, j;
 	uint64_t blade_id = 0;
 
+	if (scheduling_disabled)
+		return other_node_init(node_ptr, node_cnt);
+
 #if defined(HAVE_NATIVE_CRAY_GA) && !defined(HAVE_CRAY_NETWORK)
 	int nn, end_nn, last_nn = 0;
 	bool found = 0;
@@ -2039,6 +2081,9 @@ extern int select_p_select_nodeinfo_set_all(void)
 	int i;
 	static time_t last_set_all = 0;
 
+	if (scheduling_disabled)
+		return other_select_nodeinfo_set_all();
+
 	/* only set this once when the last_bg_update is newer than
 	   the last time we set things up. */
 	if (last_set_all && (last_npc_update-1 < last_set_all)) {
diff --git a/src/plugins/select/linear/Makefile.in b/src/plugins/select/linear/Makefile.in
index ff05e8235..749a5fb89 100644
--- a/src/plugins/select/linear/Makefile.in
+++ b/src/plugins/select/linear/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/select/linear/select_linear.c b/src/plugins/select/linear/select_linear.c
index a1c9f9313..3c961aac5 100644
--- a/src/plugins/select/linear/select_linear.c
+++ b/src/plugins/select/linear/select_linear.c
@@ -6,6 +6,7 @@
  *  Copyright (C) 2004-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
  *  Portions Copyright (C) 2010 SchedMD <http://www.schedmd.com>.
+ *  Copyright (C) 2014 Silicon Graphics International Corp. All rights reserved.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
@@ -67,6 +68,7 @@
 #include "src/common/node_select.h"
 #include "src/common/parse_time.h"
 #include "src/common/slurm_protocol_api.h"
+#include "src/common/slurm_topology.h"
 #include "src/common/slurm_resource_info.h"
 #include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
@@ -94,6 +96,12 @@ int node_record_count __attribute__((weak_import));
 time_t last_node_update __attribute__((weak_import));
 struct switch_record *switch_record_table __attribute__((weak_import));
 int switch_record_cnt __attribute__((weak_import));
+
+int hypercube_dimensions __attribute__((weak_import));
+struct hypercube_switch *hypercube_switch_table __attribute__((weak_import));
+int hypercube_switch_cnt __attribute__((weak_import));
+struct hypercube_switch ***hypercube_switches __attribute__((weak_import));
+
 #else
 slurm_ctl_conf_t slurmctld_conf;
 struct node_record *node_record_table_ptr;
@@ -103,6 +111,11 @@ int node_record_count;
 time_t last_node_update;
 struct switch_record *switch_record_table;
 int switch_record_cnt;
+
+int hypercube_dimensions;
+struct hypercube_switch *hypercube_switch_table;
+int hypercube_switch_cnt;
+struct hypercube_switch ***hypercube_switches;
 #endif
 
 struct select_nodeinfo {
@@ -139,6 +152,12 @@ static int _job_expand(struct job_record *from_job_ptr,
 static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		     uint32_t min_nodes, uint32_t max_nodes,
 		     uint32_t req_nodes);
+static int _job_test_dfly(struct job_record *job_ptr, bitstr_t *bitmap,
+			  uint32_t min_nodes, uint32_t max_nodes,
+			  uint32_t req_nodes);
+static int _job_test_hypercube(struct job_record *job_ptr, bitstr_t *bitmap,
+			 uint32_t min_nodes, uint32_t max_nodes,
+			 uint32_t req_nodes);
 static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 			  uint32_t min_nodes, uint32_t max_nodes,
 			  uint32_t req_nodes);
@@ -191,123 +210,24 @@ extern int select_p_select_nodeinfo_free(select_nodeinfo_t *nodeinfo);
  * only load select plugins if the plugin_type string has a
  * prefix of "select/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the node selection API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]       	= "Linear node selection plugin";
 const char plugin_type[]       	= "select/linear";
 const uint32_t plugin_id	= 102;
-const uint32_t plugin_version	= 120;
+const uint32_t plugin_version	= SLURM_VERSION_NUMBER;
 
 static struct node_record *select_node_ptr = NULL;
 static int select_node_cnt = 0;
 static uint16_t select_fast_schedule;
 static uint16_t cr_type;
+static bool have_dragonfly = false;
 
 /* Record of resources consumed on each node including job details */
 static struct cr_record *cr_ptr = NULL;
 static pthread_mutex_t cr_mutex = PTHREAD_MUTEX_INITIALIZER;
 
-#ifdef HAVE_XCPU
-#define XCPU_POLL_TIME 120
-static pthread_t xcpu_thread = 0;
-static pthread_mutex_t thread_flag_mutex = PTHREAD_MUTEX_INITIALIZER;
-static int agent_fini = 0;
-
-static void *xcpu_agent(void *args)
-{
-	int i;
-	static time_t last_xcpu_test;
-	char clone_path[128], down_node_list[512];
-	struct stat buf;
-	time_t now;
-
-	last_xcpu_test = time(NULL) + XCPU_POLL_TIME;
-	while (!agent_fini) {
-		now = time(NULL);
-
-		if (difftime(now, last_xcpu_test) >= XCPU_POLL_TIME) {
-			debug3("Running XCPU node state test");
-			down_node_list[0] = '\0';
-
-			for (i=0; i<select_node_cnt; i++) {
-				snprintf(clone_path, sizeof(clone_path),
-					 "%s/%s/xcpu/clone", XCPU_DIR,
-					 select_node_ptr[i].name);
-				if (stat(clone_path, &buf) == 0)
-					continue;
-				error("stat %s: %m", clone_path);
-				if ((strlen(select_node_ptr[i].name) +
-				     strlen(down_node_list) + 2) <
-				    sizeof(down_node_list)) {
-					if (down_node_list[0] != '\0')
-						strcat(down_node_list,",");
-					strcat(down_node_list,
-					       select_node_ptr[i].name);
-				} else
-					error("down_node_list overflow");
-			}
-			if (down_node_list[0]) {
-				slurm_drain_nodes(
-					down_node_list,
-					"select_linear: Can not stat XCPU ",
-					slurm_get_slurm_user_id());
-			}
-			last_xcpu_test = now;
-		}
-
-		sleep(1);
-	}
-	return NULL;
-}
-
-static int _init_status_pthread(void)
-{
-	pthread_attr_t attr;
-
-	slurm_mutex_lock( &thread_flag_mutex );
-	if ( xcpu_thread ) {
-		debug2("XCPU thread already running, not starting another");
-		slurm_mutex_unlock( &thread_flag_mutex );
-		return SLURM_ERROR;
-	}
-
-	slurm_attr_init( &attr );
-	pthread_attr_setdetachstate( &attr, PTHREAD_CREATE_DETACHED );
-	pthread_create( &xcpu_thread, &attr, xcpu_agent, NULL);
-	slurm_mutex_unlock( &thread_flag_mutex );
-	slurm_attr_destroy( &attr );
-
-	return SLURM_SUCCESS;
-}
-
-static int _fini_status_pthread(void)
-{
-	int i, rc = SLURM_SUCCESS;
-
-	slurm_mutex_lock( &thread_flag_mutex );
-	if ( xcpu_thread ) {
-		agent_fini = 1;
-		for (i=0; i<4; i++) {
-			sleep(1);
-			if (pthread_kill(xcpu_thread, 0)) {
-				xcpu_thread = 0;
-				break;
-			}
-		}
-		if ( xcpu_thread ) {
-			error("could not kill XCPU agent thread");
-			rc = SLURM_ERROR;
-		}
-	}
-	slurm_mutex_unlock( &thread_flag_mutex );
-	return rc;
-}
-#endif
-
 /* Add job id to record of jobs running on this node */
 static void _add_run_job(struct cr_record *cr_ptr, uint32_t job_id)
 {
@@ -562,7 +482,7 @@ static void _build_select_struct(struct job_record *job_ptr, bitstr_t *bitmap)
 	uint32_t job_memory_cpu = 0, job_memory_node = 0;
 	job_resources_t *job_resrcs_ptr;
 
-	if (job_ptr->details->pn_min_memory  && (cr_type == CR_MEMORY)) {
+	if (job_ptr->details->pn_min_memory  && (cr_type & CR_MEMORY)) {
 		if (job_ptr->details->pn_min_memory & MEM_PER_CPU)
 			job_memory_cpu = job_ptr->details->pn_min_memory &
 				(~MEM_PER_CPU);
@@ -644,7 +564,7 @@ static int _job_count_bitmap(struct cr_record *cr_ptr,
 	if (mode != SELECT_MODE_TEST_ONLY) {
 		use_total_gres = false;
 		if (job_ptr->details->pn_min_memory  &&
-		    (cr_type == CR_MEMORY)) {
+		    (cr_type & CR_MEMORY)) {
 			if (job_ptr->details->pn_min_memory & MEM_PER_CPU) {
 				job_memory_cpu = job_ptr->details->pn_min_memory
 					& (~MEM_PER_CPU);
@@ -715,6 +635,7 @@ static int _job_count_bitmap(struct cr_record *cr_ptr,
 				else
 					job_mem = job_memory_node;
 			}
+			avail_mem -= node_ptr->mem_spec_limit;
 			if ((alloc_mem + job_mem) > avail_mem) {
 				bit_clear(jobmap, i);
 				continue;
@@ -821,10 +742,21 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	    (!bit_super_set(job_ptr->details->req_node_bitmap, bitmap)))
 		return error_code;
 
+	if (hypercube_switch_table && hypercube_switch_cnt) {
+		/* Optimized resource selection based on hypercube topology */
+		return _job_test_hypercube(job_ptr, bitmap,
+					   min_nodes, max_nodes, req_nodes);
+	}
+
 	if (switch_record_cnt && switch_record_table) {
 		/* Perform optimized resource selection based upon topology */
-		return _job_test_topo(job_ptr, bitmap,
-				      min_nodes, max_nodes, req_nodes);
+		if (have_dragonfly) {
+			return _job_test_dfly(job_ptr, bitmap,
+					      min_nodes, max_nodes, req_nodes);
+		} else {
+			return _job_test_topo(job_ptr, bitmap,
+					      min_nodes, max_nodes, req_nodes);
+		}
 	}
 
 	consec_index = 0;
@@ -1046,6 +978,917 @@ static int _job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 	return error_code;
 }
 
+/*
+ * Compute the variance on the passed sufficient cluster and see if it's the
+ * best we've seen so far.
+ *
+ * The cluster is the run of switches [start_index, end_index] along the
+ * Hilbert curve for dimension "dim", traversed in direction "dir" (+1 or
+ * -1).  Variance = sum(x^2) - sum(x)^2 / num(x), computed from the running
+ * sums passed in.  On a tie in variance, fewer leftover (extra) nodes wins;
+ * on a further tie, fewer available nodes on the neighboring switches wins.
+ * The min_* pointers are in/out: the best cluster description found so far.
+ */
+static void
+_hypercube_update_variance(
+	int dim, int dir, int start_index, int end_index,
+	int node_count, int max_nodes,
+	int leftover_nodes, int64_t summed_squares,
+	int64_t squared_sums, int * min_curve, int * min_direction,
+	int * min_start_index, int * min_neighbors,
+	int * min_extra_nodes, int64_t * min_variance)
+{
+//XXX use actual node count?
+	int64_t variance = summed_squares -
+		squared_sums * squared_sums / node_count;
+
+	/* Don't calculate if we've used too many nodes */
+	if (0 > max_nodes)
+		return;
+
+	/* Cheap pre-filter; the full tie-break test is repeated below */
+	if ((variance < *min_variance) ||
+	    ((variance == *min_variance) &&
+	     (leftover_nodes <= *min_extra_nodes))) {
+		int begin = start_index - dir;
+		int end = end_index + dir;
+		int neighbors = 0;
+
+		/* Wrap the neighbor indexes around the ends of the
+		 * switch table (begin/end may be -1 or switch_cnt here) */
+		if (0 > begin) {
+			begin = hypercube_switch_cnt - 1;
+		} else if (hypercube_switch_cnt <= begin) {
+			begin = 0;
+		}
+
+		if (0 > end) {
+			end = hypercube_switch_cnt - 1;
+		} else if (hypercube_switch_cnt <= end) {
+			end = 0;
+		}
+
+		/* Count available nodes on the switches bordering the
+		 * cluster, guarding against tiny wrapped-around rings */
+		if (begin != end_index) {
+			neighbors += hypercube_switches[dim][begin]->avail_cnt;
+		}
+		if (end != start_index && begin != end) {
+			neighbors += hypercube_switches[dim][end]->avail_cnt;
+		}
+
+		/*
+		 * Update if variance is lowest found or if variance
+		 * == lowest variance found and there are less extra
+		 * nodes on the switch or if variances and extra nodes
+		 * are the same but there are less neighboring nodes
+		 */
+		if ((variance < *min_variance) ||
+		    ((variance == *min_variance) &&
+		     (leftover_nodes < *min_extra_nodes)) ||
+		    ((variance == *min_variance) &&
+		     (leftover_nodes == *min_extra_nodes) &&
+		     (neighbors < *min_neighbors))) {
+			*min_variance = variance;
+			*min_start_index = start_index;
+			*min_extra_nodes = leftover_nodes;
+			*min_neighbors = neighbors;
+			*min_direction = dir;
+			*min_curve = dim;
+		}
+	}
+}
+
+/*
+ * We're passed a cluster (start_index, end_index) and asked to add another
+ * switch with its nodes if we don't already have enough. As an experiment we
+ * try adding switches to the left, but we otherwise add to the right.
+ */
+static void
+_hypercube_add_nodes(
+	struct job_record * job_ptr, bitstr_t * avail_bitmap,
+	int dim, int32_t start_index, int32_t * end_index, int node_count,
+	int32_t max_nodes, int32_t rem_nodes, int32_t rem_cpus,
+	int leftover_nodes, bitstr_t * bitmap,
+	int64_t * distance_offset, int64_t summed_squares, int64_t squared_sums,
+	int * min_curve, int * min_direction,
+	int * min_start_index, int32_t * min_neighbors,
+	int32_t * min_extra_nodes,
+	int64_t * min_variance)
+{
+	bitstr_t * tmp_bitmap;
+	/* The "l_" locals describe a temporary right-saturated cluster that
+	 * is grown leftward (up the table) from the current cluster */
+	int32_t l_start_index = *end_index;
+	int32_t l_end_index = start_index;
+	int32_t l_temp_max_nodes = max_nodes;
+	int32_t l_temp_rem_nodes = rem_nodes;
+	int32_t l_temp_rem_cpus = rem_cpus;
+	int64_t l_summed_squares = summed_squares;
+	int64_t l_squared_sums = squared_sums;
+	int32_t l_leftover_nodes = 0;
+	int32_t l_distance_offset = 0;
+	int32_t l_distance;
+
+	/* Don't need to add any more nodes */
+	if (leftover_nodes ||
+	    (0 >= rem_nodes && 0 >= rem_cpus)) {
+		return;
+	}
+
+	tmp_bitmap = bit_copy(bitmap);
+
+	/*
+	 * Create a temporary right-sided cluster and try sliding the left edge
+	 * further left until we have enough nodes.
+	 */
+	/* NOTE(review): this loop only terminates once enough nodes/CPUs are
+	 * collected; it presumes the caller verified that sufficient free
+	 * nodes exist overall — confirm against _job_test_hypercube() */
+	while (0 <= l_temp_max_nodes &&
+		(0 < l_temp_rem_nodes || 0 < l_temp_rem_cpus)) {
+		int cnt, n, new_nodes = 0;
+
+		l_end_index--;
+		if (l_end_index < 0) { /* Handle wrap-around */
+			l_end_index = hypercube_switch_cnt - 1;
+			/* Offset keeps distances monotonic across the wrap */
+			l_distance_offset = -1 *
+				hypercube_switches[dim]
+				[hypercube_switch_cnt - 1]->distance[dim];
+		}
+
+		/* Add nodes from the switch until we've hit our limits */
+		cnt = hypercube_switches[dim][l_end_index]->node_cnt;
+		for (n = 0; n < cnt; n++) {
+			int node = hypercube_switches[dim]
+				[l_end_index]->node_index[n];
+
+			if (!bit_test(avail_bitmap, node) ||
+			    bit_test(tmp_bitmap, node)) {
+				continue;
+			}
+
+			/* The node is unused and available, add it */
+			new_nodes++;
+			bit_set(tmp_bitmap, node);
+			l_temp_max_nodes--;
+			l_temp_rem_nodes--;
+			l_temp_rem_cpus -= _get_avail_cpus(job_ptr, node);
+
+			/* Have we hit our limits? */
+			if ((0 > l_temp_max_nodes) ||
+			    (0 >= l_temp_rem_nodes && 0 >= l_temp_rem_cpus)) {
+				break;
+			}
+		}
+
+		/* Factor in the distance for the new nodes */
+		l_distance = l_distance_offset +
+			hypercube_switches[dim][l_end_index]->distance[dim];
+		l_summed_squares += new_nodes * l_distance * l_distance;
+		l_squared_sums += new_nodes * l_distance;
+		l_leftover_nodes = hypercube_switches[dim][l_end_index]->avail_cnt -
+			new_nodes;
+	}
+
+	bit_free(tmp_bitmap);
+
+	/* Let's see how good this right-sided cluster is */
+	_hypercube_update_variance(
+		dim, -1, l_start_index, l_end_index, node_count, l_temp_max_nodes,
+		l_leftover_nodes, l_summed_squares, l_squared_sums,
+		min_curve, min_direction, min_start_index, min_neighbors,
+		min_extra_nodes, min_variance);
+
+	/*
+	 * OK, we're back to working on the left-sided cluster. Move the right
+	 * side further right to pick up a new switch and add more of the needed
+	 * nodes.
+	 */
+	(*end_index)++;						
+	if (*end_index == hypercube_switch_cnt) { /* Handle wrap-around */
+		*end_index = 0;
+		*distance_offset =
+			hypercube_switches[dim][hypercube_switch_cnt - 1]->
+			distance[dim];
+	}
+}
+
+/* Variance based best-fit cluster algorithm:
+ * 	 Loop through all of the Hilbert Curves that were created.
+ * Each Hilbert Curve is essentially a particular ordering of all the 
+ * switches in the network. For each Hilbert Curve, the algorithm loops
+ * through all clusters of neighboring nodes, but only tests clusters that
+ * either have their leftmost switch completely saturated (all available
+ * nodes for the switch are in the cluster) or their rightmost switch
+ * completely saturated, also called left-saturated clusters and
+ * right-saturated clusters. The algorithm starts at the left (top) of 
+ * the table and works its way to the right (down).
+ * 	  The algorithm starts by adding nodes from the switch at the top of 
+ * the table to the cluster. If after the nodes are added, the cluster
+ * still needs more nodes, the algorithm continues down (right) the table
+ * adding the number of nodes the next switch has available to the 
+ * cluster. It continues adding nodes until it has enough for the cluster.
+ * If the cluster only needs 4 more nodes and the next rightmost switch 
+ * has 8 nodes available, the cluster only adds the 4 needed nodes to the
+ * cluster: called adding a partial. When the algorithm moves to the next
+ * cluster, it will pick up where it left off and will add the remaining 4
+ * nodes on the switch before moving to the next switch in the table. 
+ * 	  Once the algorithm has added enough nodes to the cluster, it 
+ * computes the variance for the cluster of nodes. If this cluster is 
+ * the best-fit cluster found so far, it saves the cluster's information.
+ * To move on to testing the next cluster, first it removes all the nodes 
+ * from the leftmost switch. Then the algorithm repeats the process of 
+ * adding nodes to the cluster from rightmost switch until it has enough.
+ * 	  If the right side of the cluster reaches the bottom of the table,
+ * then it loops back around to the top most switch in the table and
+ * continues. The algorithm continues until the leftmost switch of the
+ * cluster has reached the end of the table. At which point it knows
+ * that it has tested all possible left-saturated clusters. 
+ * 	  Although this algorithm could be run in reverse order on the table
+ * in order to test all right-saturated clusters, it would result in 
+ * redundant calculations. Instead, all right-saturated clusters are
+ * tested during the node adding process of the left-saturated clusters
+ * algorithm. While running the left-saturated clusters algorithm 
+ * described above, anytime nodes are added from the rightmost switch 
+ * resulting in all the available nodes from that switch being in the 
+ * cluster and the cluster still needs more nodes, then create a temporary
+ * cluster equal to the current cluster. 
+ * 	  Use this temporary cluster to test the right-saturated cluster 
+ * starting at the rightmost switch in the cluster and moving left (up the
+ * table). Since the temporary cluster needs more nodes, add nodes by
+ * moving up/left in the table (rather than right, like is done for the
+ * left-saturated clusters). Once the cluster has enough nodes, calculate
+ * its variance and remember it if it is the best fit cluster found so far.
+ * Then erase the temporary cluster and continue with the original cluster
+ * where the algorithm left off. By doing this right-saturated clusters
+ * calculation every time the rightmost switch of a cluster is fully added,
+ * the algorithm tests every possible right-saturated cluster. 
+ * 
+ * 	  Equation used to calculate the variance of a cluster:
+ * Variance = sum(x^2) - sum(x)^2/num(x), where sum(x) is the sum of all
+ * values of x, and num(x) is the number of x values summed
+ * 
+ * *** Important Note: There isn't actually a 'cluster' struct, but rather
+ * a cluster is described by its necessary characteristics, including:
+ * start_index, end_index, summed_squares, squared_sums, and rem_nodes ***
+ */
+/*
+ * Search every Hilbert curve (dimension) for the cluster of available
+ * nodes with the lowest distance variance; see the algorithm description
+ * above.  The best cluster found is returned through min_start_index,
+ * min_direction and min_curve.
+ */
+static void
+_explore_hypercube(struct job_record * job_ptr, bitstr_t * avail_bitmap,
+		    const int64_t * req_summed_squares,
+		    const int64_t * req_squared_sums,
+		    const int max_nodes,
+		    const int rem_nodes, const int rem_cpus, const int node_count,
+		    int * min_start_index,
+		    int * min_direction, int * min_curve)
+{
+	bitstr_t * tmp_bitmap = bit_alloc(bit_size(avail_bitmap));
+	int64_t min_variance = INT64_MAX;
+	int32_t min_extra_nodes = INT32_MAX;
+	int32_t min_neighbors = INT32_MAX;
+	int dim;
+
+	/* Check each dimension for the best cluster of nodes */
+	for (dim = 0; dim < hypercube_dimensions; dim++) {
+		/* (start_index, end_index, summed_squares, squared_sums)
+		 * together describe the current candidate cluster */
+		int64_t summed_squares = req_summed_squares[dim];
+		int64_t squared_sums = req_squared_sums[dim];
+		int64_t distance, distance_offset = 0;
+		int32_t start_index = 0, end_index;
+		int32_t temp_rem_nodes = rem_nodes;
+		int32_t temp_rem_cpus = rem_cpus;
+		int32_t temp_max_nodes = max_nodes;
+
+		/* If this curve wasn't set up then skip it */
+		if (hypercube_switch_table[0].distance[dim] == 0) {continue;}
+
+		/* Move to first switch with available nodes */
+		while (hypercube_switches[dim][start_index]->avail_cnt == 0) {
+			start_index++;
+		}
+		end_index = start_index;
+		bit_clear_all(tmp_bitmap);
+
+		/* Test every switch to see if it's the best starting point */
+		while ((start_index < hypercube_switch_cnt) &&
+		       (start_index >= 0)) {
+			int leftover_nodes = 0;
+
+			/*
+			 * Add new nodes to cluster. If next switch has more nodes
+			 * than needed, only add nodes needed. This is called
+			 * adding a partial.
+			 */
+			while ((0 <= temp_max_nodes) &&
+				(0 < temp_rem_nodes || 0 < temp_rem_cpus)) {
+				int cnt = hypercube_switches[dim][end_index]->
+					node_cnt;
+				int fn = hypercube_switches[dim][end_index]->
+					node_index[0];
+				int ln = hypercube_switches[dim][end_index]->
+					node_index[cnt - 1];
+				int new_nodes = 0;
+				int n;
+
+				/* Add free nodes from the switch */
+				for (n = 0; n < cnt; n++) {
+					int node = hypercube_switches[dim]
+						[end_index]->node_index[n];
+
+					if (!bit_test(avail_bitmap, node) ||
+					    bit_test(tmp_bitmap, node)) {
+						continue;
+					}
+
+					/* Unused and available, add it */
+					new_nodes++;
+					bit_set(tmp_bitmap, node);
+					temp_max_nodes--;
+					temp_rem_nodes--;
+					temp_rem_cpus -= _get_avail_cpus(
+						job_ptr, node);
+
+					/* Do we have enough resources? */
+					if ((0 > temp_max_nodes) ||
+					    (0 >= temp_rem_nodes &&
+					     0 >= temp_rem_cpus)) {
+						break;
+					}
+				}
+
+				/*
+				 * Calculate the inputs to the variance for the
+				 * current cluster
+				 */
+				distance = hypercube_switches[dim]
+					[end_index]->distance[dim] + distance_offset;
+				summed_squares += new_nodes * distance * distance;
+				squared_sums += new_nodes * distance;
+				leftover_nodes = hypercube_switches[dim][end_index]->
+					avail_cnt -
+					bit_set_count_range(tmp_bitmap, fn, ln + 1);
+
+				/* Add nodes from an additional switch */
+				_hypercube_add_nodes(
+					job_ptr, avail_bitmap,
+					dim, start_index, &end_index, node_count,
+					temp_max_nodes, temp_rem_nodes, temp_rem_cpus,
+					leftover_nodes, tmp_bitmap,
+					&distance_offset, summed_squares,
+					squared_sums, min_curve, min_direction,
+					min_start_index, &min_neighbors,
+					&min_extra_nodes, &min_variance);
+			}
+
+			/* Check to see if this is the lowest variance so far */
+			_hypercube_update_variance(
+				dim, 1, start_index, end_index, node_count,
+				temp_max_nodes,
+				leftover_nodes, summed_squares, squared_sums,
+				min_curve, min_direction, min_start_index,
+				&min_neighbors, &min_extra_nodes, &min_variance);
+
+			/*
+			 * We're updating our indices to slide right and have a
+			 * new leftmost switch. Remove the nodes from the current
+			 * leftmost switch.
+			 */
+			while ((temp_rem_nodes <= 0) &&
+				(start_index < hypercube_switch_cnt) &&
+				(start_index >= 0)) {
+				int cnt = hypercube_switches[dim][start_index]->
+					node_cnt;
+				int used = MIN(
+					rem_nodes,
+					hypercube_switches[dim][start_index]->
+					avail_cnt);
+				int n;
+
+				if (hypercube_switches[dim][start_index]->
+				    avail_cnt == 0) {
+					if (start_index == end_index)
+						end_index++;
+					start_index++;
+					continue;
+				}
+
+				distance = hypercube_switches[dim][start_index]->
+					distance[dim];
+				summed_squares -= distance * distance * used;
+				squared_sums -= distance * used;
+
+				/* Free the nodes we added on this switch */
+				for (n = 0; n < cnt; n++) {
+					int node = hypercube_switches[dim]
+						[start_index]->node_index[n];
+
+					if (!bit_test(tmp_bitmap, node))
+						continue;
+
+					bit_clear(tmp_bitmap, node);
+					temp_max_nodes++;
+					temp_rem_nodes++;
+					temp_rem_cpus += _get_avail_cpus(job_ptr,
+									 node);
+				}
+
+				if (start_index == end_index)
+					end_index++;
+				start_index++;
+			}
+
+			/*
+			 * If the cluster had holes with switches
+			 * completely allocated to other jobs, keep sliding
+			 * right until we find a switch with free nodes
+			 */
+			while ((start_index < hypercube_switch_cnt) && 
+				(hypercube_switches[dim][start_index]->
+				 avail_cnt == 0)) {
+				if (start_index == end_index)
+					end_index++;
+				start_index++;
+			}
+		}
+	}
+
+	bit_free(tmp_bitmap);
+}
+
+/* a hypercube topology version of _job_test - 
+ * does most of the real work for select_p_job_test(), which
+ *	pretty much just handles load-leveling and max_share logic.
+ * Returns SLURM_SUCCESS with "bitmap" reduced to the selected nodes,
+ * or EINVAL if the request cannot be satisfied. */
+static int _job_test_hypercube(struct job_record *job_ptr, bitstr_t *bitmap,
+			       uint32_t min_nodes, uint32_t max_nodes,
+			       uint32_t req_nodes)
+{
+	int i, rc = EINVAL;
+	int32_t rem_cpus, rem_nodes, node_count = 0, total_cpus = 0;
+	int32_t alloc_nodes = 0;
+	int64_t *req_summed_squares = xmalloc(
+		hypercube_dimensions * sizeof(int64_t));
+	int64_t *req_squared_sums = xmalloc(
+		hypercube_dimensions * sizeof(int64_t));
+	bitstr_t *req_nodes_bitmap = NULL;
+	bitstr_t *avail_bitmap = NULL;
+	int32_t cur_node_index = -1, node_counter = 0, switch_index;
+	/* min_* describe the best cluster found by _explore_hypercube();
+	 * -1/1234/4321 are "not yet set" sentinel values */
+	int32_t min_start_index = -1, min_direction = 1234, min_curve = 4321;
+
+	for (i = 0; i < hypercube_dimensions; i++) {
+		req_summed_squares[i] = 0;
+		req_squared_sums[i] = 0;
+	}
+
+	rem_cpus = job_ptr->details->min_cpus;
+	node_count = rem_nodes = MAX(req_nodes, min_nodes);
+
+	/* Give up now if there aren't enough hosts */
+	if (bit_set_count(bitmap) < rem_nodes)
+		goto fini;
+
+	/* Grab all of the required nodes if there are any */
+	if (job_ptr->details->req_node_bitmap) {
+		req_nodes_bitmap = bit_copy(job_ptr->details->req_node_bitmap);
+
+		// set avail_bitmap to all available nodes except the required nodes
+		// set bitmap to just the required nodes
+		avail_bitmap = bit_copy(req_nodes_bitmap);
+		bit_not(avail_bitmap);
+		bit_and(avail_bitmap, bitmap );
+		bit_copybits(bitmap, req_nodes_bitmap);
+
+		i = bit_set_count(req_nodes_bitmap);
+		if (i > (int)max_nodes) {
+			info("job %u requires more nodes than currently "
+			     "available (%d>%u)",
+			     job_ptr->job_id, i, max_nodes);
+			FREE_NULL_BITMAP(req_nodes_bitmap);
+			FREE_NULL_BITMAP(avail_bitmap);
+			xfree(req_squared_sums);
+			xfree(req_summed_squares);
+			return EINVAL;
+		}
+		rem_nodes -= i;
+		alloc_nodes += i;
+	} else { // if there are no required nodes, update bitmaps accordingly
+		avail_bitmap = bit_copy(bitmap);
+		bit_nclear(bitmap, 0, node_record_count - 1);
+	}
+
+	/* Calculate node availability for each switch */
+	for (i = 0; i < hypercube_switch_cnt; i++) {
+		const int node_idx = hypercube_switch_table[i].node_index[0];
+		const int cnt = hypercube_switch_table[i].node_cnt;
+
+		/* Add all the nodes on this switch */
+		hypercube_switch_table[i].avail_cnt = bit_set_count_range(
+			avail_bitmap, node_idx, node_idx + cnt);
+
+		/* If the switch has nodes that are required, loop through them */
+		if (req_nodes_bitmap && (hypercube_switch_table[i].avail_cnt != 0) && 
+		    (bit_set_count_range(
+			     req_nodes_bitmap, node_idx, node_idx + cnt) > 0)) {
+			int j;
+
+			/* Check each node on the switch */
+			for (j = 0; j < cnt; j++) {
+				int idx = hypercube_switch_table[i].node_index[j];
+
+				/* If is req'd, add cpus and distance to calculations */
+				if (bit_test(req_nodes_bitmap, idx)) {
+					int k;
+
+					rem_cpus   -= _get_avail_cpus(job_ptr, idx);
+					total_cpus += _get_total_cpus(idx);
+
+					/*
+					 * Add the required nodes data to the
+					 * variance calculations
+					 */
+					for (k = 0; k < hypercube_dimensions; k++) {
+						int distance = hypercube_switch_table[i].
+							distance[k];
+
+						req_summed_squares[k] += distance *
+							distance;
+						req_squared_sums[k] += distance;
+					}
+				}
+			}
+		}
+	}
+
+	// check to see if no more nodes need to be added to the job
+	if ((alloc_nodes >= max_nodes) ||
+	    ((rem_nodes <= 0) && (rem_cpus <= 0))) {
+		goto fini;
+	}
+
+	/* Find the best starting switch and traversal path to get nodes from */
+	if (alloc_nodes < max_nodes)
+		i = max_nodes - alloc_nodes;
+	else
+		i = 0;
+	_explore_hypercube(job_ptr, avail_bitmap, req_summed_squares,
+			   req_squared_sums, i, rem_nodes, rem_cpus,
+			   node_count, &min_start_index, &min_direction,
+			   &min_curve);
+	if (-1 == min_start_index)
+		goto fini;
+
+	/*
+	 * Assigns nodes from the best cluster to the job. Starts at the start
+	 * index switch and keeps adding available nodes until it has as many
+	 * as it needs
+	 */
+	switch_index = min_start_index;
+	node_counter = 0;
+	while ((alloc_nodes < max_nodes) &&
+	       ((rem_nodes > 0) || (rem_cpus > 0))) {
+		int node_index;
+
+		/* If we used up all the nodes in a switch, move to the next */
+		if (node_counter ==
+		    hypercube_switches[min_curve][switch_index]->avail_cnt) {
+			node_counter = 0;
+			cur_node_index = -1;
+			do {
+				// min_direction == 1 moves up the table
+				// min_direction == -1 moves down the table 	
+				switch_index += min_direction;
+				if (switch_index == hypercube_switch_cnt) {
+					switch_index = 0;
+				} else if (switch_index == -1) {
+					switch_index = hypercube_switch_cnt - 1;
+				} else if (switch_index == min_start_index) {
+					/* Wrapped all the way around */
+					goto fini;
+				}
+			} while (hypercube_switches[min_curve][switch_index]->
+				  avail_cnt == 0);
+		}
+
+		/* Find the next usable node in the switch */
+		do {
+			cur_node_index++;
+			node_index = hypercube_switches[min_curve][switch_index]->
+				node_index[cur_node_index];
+		} while (FALSE == bit_test(avail_bitmap, node_index));
+
+		/* Allocate the CPUs from the node */
+		bit_set(bitmap, node_index);
+		rem_cpus   -= _get_avail_cpus(job_ptr, node_index);
+		total_cpus += _get_total_cpus(node_index);
+
+		rem_nodes--;
+		alloc_nodes++;
+		node_counter++;
+	}
+fini:	
+	/* If we allocated sufficient CPUs and nodes, we were successful */
+	if ((rem_cpus <= 0) && (bit_set_count(bitmap) >= min_nodes)) {
+		rc = SLURM_SUCCESS;
+		/* Job's total_cpus is needed for SELECT_MODE_WILL_RUN */
+		job_ptr->total_cpus = total_cpus;
+	} else { 
+		rc = EINVAL;
+		if (alloc_nodes > max_nodes) {
+			info("job %u requires more nodes than allowed",
+			     job_ptr->job_id);
+		}
+	}
+
+	xfree(req_squared_sums);
+	xfree(req_summed_squares);
+	FREE_NULL_BITMAP(req_nodes_bitmap);
+	FREE_NULL_BITMAP(avail_bitmap);
+
+	return rc;
+}
+
+
+/*
+ * _job_test_dfly - A dragonfly topology aware version of _job_test()
+ * NOTE: The logic here is almost identical to that of _eval_nodes_dfly() in
+ *       select/cons_res/job_test.c. Any bug found here is probably also there.
+ * Returns SLURM_SUCCESS with "bitmap" reduced to the selected nodes,
+ * or EINVAL if the request cannot be satisfied.
+ */
+static int _job_test_dfly(struct job_record *job_ptr, bitstr_t *bitmap,
+			  uint32_t min_nodes, uint32_t max_nodes,
+			  uint32_t req_nodes)
+{
+	bitstr_t **switches_bitmap;		/* nodes on this switch */
+	int       *switches_cpu_cnt;		/* total CPUs on switch */
+	uint32_t  *switches_node_cnt;		/* total nodes on switch */
+	uint32_t  *switches_node_use;		/* nodes from switch used */
+
+	bitstr_t  *req_nodes_bitmap   = NULL;
+	int rem_cpus;			/* remaining resources desired */
+	int avail_cpus, total_cpus = 0;
+	uint32_t want_nodes, alloc_nodes = 0;
+	int i, j, rc = SLURM_SUCCESS;
+	int best_fit_inx, first, last;
+	int best_fit_nodes, best_fit_cpus;
+	int best_fit_location = 0;
+	bool sufficient;
+	long time_waiting = 0;
+	int leaf_switch_count = 0;	/* Count of leaf node switches used */
+
+	if (job_ptr->req_switch > 1) {
+		/* Maximum leaf switch count >1 probably makes no sense */
+		info("%s: Resetting job %u leaf switch count from %u to 0",
+		     __func__, job_ptr->job_id, job_ptr->req_switch);
+		job_ptr->req_switch = 0;
+	}
+	if (job_ptr->req_switch) {
+		time_t     time_now;
+		time_now = time(NULL);
+		if (job_ptr->wait4switch_start == 0)
+			job_ptr->wait4switch_start = time_now;
+		time_waiting = time_now - job_ptr->wait4switch_start;
+	}
+
+	rem_cpus = job_ptr->details->min_cpus;
+	if (req_nodes > min_nodes)
+		want_nodes = req_nodes;
+	else
+		want_nodes = min_nodes;
+
+	/* Construct a set of switch array entries,
+	 * use the same indexes as switch_record_table in slurmctld */
+	switches_bitmap   = xmalloc(sizeof(bitstr_t *) * switch_record_cnt);
+	switches_cpu_cnt  = xmalloc(sizeof(int)        * switch_record_cnt);
+	switches_node_cnt = xmalloc(sizeof(uint32_t)   * switch_record_cnt);
+	switches_node_use = xmalloc(sizeof(uint32_t)   * switch_record_cnt);
+	if (job_ptr->details->req_node_bitmap) {
+		req_nodes_bitmap = bit_copy(job_ptr->details->req_node_bitmap);
+		i = bit_set_count(req_nodes_bitmap);
+		if (i > (int)max_nodes) {
+			info("job %u requires more nodes than currently "
+			     "available (%d>%u)",
+			     job_ptr->job_id, i, max_nodes);
+			rc = EINVAL;
+			goto fini;
+		}
+	}
+
+	/* phase 1: make availability bitmaps for switches */
+	sufficient = false;
+	for (i = 0; i < switch_record_cnt; i++) {
+		switches_bitmap[i] = bit_copy(switch_record_table[i].
+					      node_bitmap);
+		bit_and(switches_bitmap[i], bitmap);
+		if (req_nodes_bitmap &&
+		    !bit_super_set(req_nodes_bitmap, switches_bitmap[i]))
+			switches_node_cnt[i] = 0;
+		else {
+			switches_node_cnt[i] = bit_set_count(switches_bitmap[i]);
+			sufficient = true;
+		}
+	}
+	bit_nclear(bitmap, 0, node_record_count - 1);
+
+#if SELECT_DEBUG
+	/* Don't compile this, it slows things down too much */
+	for (i = 0; i < switch_record_cnt; i++) {
+		char *node_names = NULL;
+		if (switches_node_cnt[i])
+			node_names = bitmap2node_name(switches_bitmap[i]);
+		debug("switch=%s nodes=%u:%s speed=%u",
+		      switch_record_table[i].name,
+		      switches_node_cnt[i], node_names,
+		      switch_record_table[i].link_speed);
+		xfree(node_names);
+	}
+#endif
+
+	/* check if requested nodes are available */
+	if (!sufficient) {
+		info("job %u requires nodes not available on any switch",
+		     job_ptr->job_id);
+		rc = EINVAL;
+		goto fini;
+	}
+
+	/* phase 2: calculate CPU resources for each switch */
+	for (i = 0; i < node_record_count; i++) {
+		avail_cpus = _get_avail_cpus(job_ptr, i);
+		for (j = 0; j < switch_record_cnt; j++) {
+			if (bit_test(switches_bitmap[j], i)) {
+				switches_cpu_cnt[j] += avail_cpus;
+			}
+		}
+	}
+
+	/* phase 3 */
+	/* Determine lowest level switch satisfying request with best fit */
+	best_fit_inx = -1;
+	for (j = 0; j < switch_record_cnt; j++) {
+#if SELECT_DEBUG
+		debug5("checking switch %d: nodes %u cpus %d", j,
+		       switches_node_cnt[j], switches_cpu_cnt[j]);
+#endif
+		if ((switches_cpu_cnt[j]  < rem_cpus) ||
+		    (switches_node_cnt[j] < min_nodes))
+			continue;
+		if ((best_fit_inx == -1) ||
+		    (switch_record_table[j].level <
+		     switch_record_table[best_fit_inx].level) ||
+		    ((switch_record_table[j].level ==
+		      switch_record_table[best_fit_inx].level) &&
+		     (switches_node_cnt[j] < switches_node_cnt[best_fit_inx])))
+			best_fit_inx = j;
+	}
+	if (best_fit_inx == -1) {
+		debug("%s: could not find resources for job %u",
+		      __func__, job_ptr->job_id);
+		rc = EINVAL;
+		goto fini;
+	}
+
+	/* phase 4: select resources from already allocated leaves */
+	/* Identify usable leafs (within higher switch having best fit) */
+	for (j = 0; j < switch_record_cnt; j++) {
+		if ((switch_record_table[j].level > 0) ||
+		    (!bit_super_set(switches_bitmap[j],
+				    switches_bitmap[best_fit_inx]))) {
+			switches_node_cnt[j] = 0;
+		} else if (req_nodes_bitmap) {
+			/* node counts were zeroed above, so recount them */
+			switches_node_cnt[j] = bit_set_count(switches_bitmap[j]);
+		}
+	}
+	/* set already allocated nodes and gather additional resources */
+	if (req_nodes_bitmap) {
+		/* Accumulate specific required resources, if any */
+		for (j = 0; j < switch_record_cnt; j++) {
+			if (alloc_nodes > max_nodes)
+				break;
+			if (switches_node_cnt[j] == 0 ||
+			    bit_overlap(req_nodes_bitmap,
+					switches_bitmap[j]) == 0)
+				continue;
+
+			/* Use nodes from this leaf */
+			first = bit_ffs(switches_bitmap[j]);
+			if (first < 0) {
+				switches_node_cnt[j] = 0;
+				continue;
+			}
+			last  = bit_fls(switches_bitmap[j]);
+			for (i = first; i <= last; i++) {
+				if (!bit_test(switches_bitmap[j], i))
+					continue;
+				if (!bit_test(req_nodes_bitmap, i)) {
+					/* node wasn't requested */
+					continue;
+				}
+
+				bit_clear(switches_bitmap[j], i);
+				switches_node_cnt[j]--;
+				avail_cpus = _get_avail_cpus(job_ptr, i);
+				switches_cpu_cnt[j] -= avail_cpus;
+
+				if (bit_test(bitmap, i)) {
+					/* node on multiple leaf switches
+					 * and already selected */
+					continue;
+				}
+
+				switches_node_use[j]++;
+				bit_set(bitmap, i);
+				alloc_nodes++;
+				rem_cpus -= avail_cpus;
+				total_cpus += _get_total_cpus(i);
+			}
+		}
+	}
+
+	/* phase 5 */
+	/* Select resources from leafs on a best-fit or round-robin basis */
+	while ((alloc_nodes <= max_nodes) &&
+	       ((alloc_nodes < want_nodes) || (rem_cpus > 0))) {
+		best_fit_cpus = best_fit_nodes = 0;
+		for (j = 0; j < switch_record_cnt; j++) {
+			if (switches_node_cnt[j] == 0)
+				continue;
+			/* If multiple leaf switches must be used, prefer use
+			 * of leaf switches with fewest number of idle CPUs.
+			 * This results in more leaf switches being used and
+			 * achieves better network bandwidth. */
+			if ((best_fit_nodes == 0) ||
+			    (switches_node_use[best_fit_location] >
+			     switches_node_use[j]) ||
+			    ((switches_node_use[best_fit_location] ==
+			      switches_node_use[j]) &&
+			     (switches_cpu_cnt[j] < best_fit_cpus))) {
+				best_fit_cpus =  switches_cpu_cnt[j];
+				best_fit_nodes = switches_node_cnt[j];
+				best_fit_location = j;
+			}
+		}
+#if SELECT_DEBUG
+		info("%s: found switch %d for allocation: nodes %d cpus %d "
+		       "allocated %u", __func__, best_fit_location,
+		       best_fit_nodes, best_fit_cpus, alloc_nodes);
+#endif
+		if (best_fit_nodes == 0)
+			break;
+
+		/* Use select nodes from this leaf */
+		first = bit_ffs(switches_bitmap[best_fit_location]);
+		if (first < 0) {
+			switches_node_cnt[best_fit_location] = 0;
+			continue;
+		}
+		last  = bit_fls(switches_bitmap[best_fit_location]);
+		for (i = first; i <= last; i++) {
+			if (!bit_test(switches_bitmap[best_fit_location], i))
+				continue;
+
+			if (bit_test(bitmap, i)) {
+				/* node on multiple leaf switches
+				 * and already selected */
+				continue;
+			}
+
+			/* Take one node per pass (round-robin over leaves) */
+			bit_clear(switches_bitmap[best_fit_location], i);
+			switches_node_cnt[best_fit_location]--;
+			switches_node_use[best_fit_location]++;
+			bit_set(bitmap, i);
+			alloc_nodes++;
+			j = _get_avail_cpus(job_ptr, i);
+			switches_cpu_cnt[best_fit_location] -= j;
+			rem_cpus -= j;
+			total_cpus += _get_total_cpus(i);
+			break;
+		}
+		leaf_switch_count++;
+		if (job_ptr->req_switch > 0) {
+			if (time_waiting >= job_ptr->wait4switch) {
+				job_ptr->best_switch = true;
+				debug3("Job=%u Waited %ld sec for switches use=%d",
+					job_ptr->job_id, time_waiting,
+					leaf_switch_count);
+			} else if (leaf_switch_count > job_ptr->req_switch) {
+				/* Allocation is for more than requested number
+				 * of switches */
+				job_ptr->best_switch = false;
+				debug3("Job=%u waited %ld sec for switches=%u "
+					"found=%d wait %u",
+					job_ptr->job_id, time_waiting,
+					job_ptr->req_switch,
+					leaf_switch_count,
+					job_ptr->wait4switch);
+			} else {
+				job_ptr->best_switch = true;
+			}
+		}
+	}
+	if ((alloc_nodes <= max_nodes) && (rem_cpus <= 0) &&
+	    (alloc_nodes >= min_nodes)) {
+		rc = SLURM_SUCCESS;
+	} else
+		rc = EINVAL;
+
+fini:	if (rc == SLURM_SUCCESS) {
+		/* Job's total_cpus is needed for SELECT_MODE_WILL_RUN */
+		job_ptr->total_cpus = total_cpus;
+	} else if (alloc_nodes > max_nodes)
+		info("job %u requires more nodes than allowed",
+		     job_ptr->job_id);
+	FREE_NULL_BITMAP(req_nodes_bitmap);
+	for (i = 0; i < switch_record_cnt; i++)
+		FREE_NULL_BITMAP(switches_bitmap[i]);
+	xfree(switches_bitmap);
+	xfree(switches_cpu_cnt);
+	xfree(switches_node_cnt);
+	xfree(switches_node_use);
+
+	return rc;
+}
+
+
 /*
  * _job_test_topo - A topology aware version of _job_test()
  * NOTE: The logic here is almost identical to that of _eval_nodes_topo() in
@@ -1067,7 +1910,7 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 	int i, j, rc = SLURM_SUCCESS;
 	int best_fit_inx, first, last;
 	int best_fit_nodes, best_fit_cpus;
-	int best_fit_location = 0, best_fit_sufficient;
+	int best_fit_location = 0;
 	bool sufficient;
 	long time_waiting = 0;
 	int leaf_switch_count = 0;	/* Count of leaf node switches used */
@@ -1105,9 +1948,6 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 	}
 
 	/* phase 1: make availability bitmaps for switches */
-#if SELECT_DEBUG
-	debug5("_job_test_topo: phase 1");
-#endif
 	sufficient = false;
 	for (i=0; i<switch_record_cnt; i++) {
 		switches_bitmap[i] = bit_copy(switch_record_table[i].
@@ -1146,9 +1986,6 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 	}
 
 	/* phase 2: accumulate all cpu resources for each switch */
-#if SELECT_DEBUG
-	debug5("_job_test_topo: phase 2");
-#endif
 	for (i = 0; i < node_record_count; i++) {
 		avail_cpus = _get_avail_cpus(job_ptr, i);
 		for (j=0; j<switch_record_cnt; j++) {
@@ -1159,12 +1996,9 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 	}
 
 	/* phase 3 */
-#if SELECT_DEBUG
-	debug5("_job_test_topo: phase 3");
-#endif
 	/* Determine lowest level switch satifying request with best fit */
 	best_fit_inx = -1;
-	for (j=0; j<switch_record_cnt; j++) {
+	for (j = 0; j < switch_record_cnt; j++) {
 #if SELECT_DEBUG
 		debug5("checking switch %d: nodes %u cpus %d", j,
 		       switches_node_cnt[j], switches_cpu_cnt[j]);
@@ -1181,18 +2015,15 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 			best_fit_inx = j;
 	}
 	if (best_fit_inx == -1) {
-		debug("_job_test_topo: could not find resources for job %u",
-		      job_ptr->job_id);
+		debug("%s: could not find resources for job %u",
+		      __func__, job_ptr->job_id);
 		rc = EINVAL;
 		goto fini;
 	}
 
 	/* phase 4: select resources from already allocated leaves */
-#if SELECT_DEBUG
-	debug5("_job_test_topo: phase 4");
-#endif
 	/* Identify usable leafs (within higher switch having best fit) */
-	for (j=0; j<switch_record_cnt; j++) {
+	for (j = 0; j < switch_record_cnt; j++) {
 		if ((switch_record_table[j].level > 0) ||
 		    (!bit_super_set(switches_bitmap[j],
 				    switches_bitmap[best_fit_inx]))) {
@@ -1205,7 +2036,7 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 	/* set already allocated nodes and gather additional resources */
 	if (req_nodes_bitmap) {
 		/* Accumulate specific required resources, if any */
-		for (j=0; j<switch_record_cnt; j++) {
+		for (j = 0; j < switch_record_cnt; j++) {
 			if (alloc_nodes > max_nodes)
 				break;
 			if (switches_node_cnt[j] == 0 ||
@@ -1289,40 +2120,31 @@ static int _job_test_topo(struct job_record *job_ptr, bitstr_t *bitmap,
 	}
 
 	/* phase 5 */
-#if SELECT_DEBUG
-	debug5("_job_test_topo: phase 5");
-#endif
 	/* Select resources from these leafs on a best-fit basis */
 	/* Compute best-switch nodes available array */
 	while ((alloc_nodes <= max_nodes) &&
 	       ((alloc_nodes < want_nodes) || (rem_cpus > 0))) {
-		best_fit_cpus = best_fit_nodes = best_fit_sufficient = 0;
-		i = min_nodes - alloc_nodes; /* use it as a temp. int */
+		best_fit_cpus = best_fit_nodes = 0;
 		for (j=0; j<switch_record_cnt; j++) {
 			if (switches_node_cnt[j] == 0)
 				continue;
-			sufficient = (switches_cpu_cnt[j] >= rem_cpus) &&
-				     ((int)switches_node_cnt[j] >= i);
-			/* If first possibility OR */
-			/* first set large enough for request OR */
-			/* tightest fit (less resource waste) OR */
-			/* nothing yet large enough, but this is biggest */
+			/* If multiple leaf switches must be used, prefer use
+			 * of leaf switches with fewest number of idle CPUs.
+			 * This results in more leaf switches being used and
+			 * achieves better network bandwidth. */
 			if ((best_fit_nodes == 0) ||
-			    (sufficient && (best_fit_sufficient == 0)) ||
-			    (sufficient &&
-			     (switches_cpu_cnt[j] < best_fit_cpus)) ||
-			    (!sufficient &&
-			     (switches_cpu_cnt[j] > best_fit_cpus))) {
+			    (!switches_required[best_fit_location] &&
+			     switches_required[j]) ||
+			    (switches_cpu_cnt[j] < best_fit_cpus)) {
 				best_fit_cpus =  switches_cpu_cnt[j];
 				best_fit_nodes = switches_node_cnt[j];
 				best_fit_location = j;
-				best_fit_sufficient = sufficient;
 			}
 		}
 #if SELECT_DEBUG
-		debug5("found switch %d for allocation: nodes %d cpus %d "
-		       "allocated %u", best_fit_location, best_fit_nodes,
-		       best_fit_cpus, alloc_nodes);
+		debug("%s: found switch %d for allocation: nodes %d cpus %d "
+		       "allocated %u", __func__, best_fit_location,
+		       best_fit_nodes, best_fit_cpus, alloc_nodes);
 #endif
 		if (best_fit_nodes == 0)
 			break;
@@ -1431,7 +2253,7 @@ static int _rm_job_from_nodes(struct cr_record *cr_ptr,
 	}
 
 	if (remove_all && job_ptr->details &&
-	    job_ptr->details->pn_min_memory && (cr_type == CR_MEMORY)) {
+	    job_ptr->details->pn_min_memory && (cr_type & CR_MEMORY)) {
 		if (job_ptr->details->pn_min_memory & MEM_PER_CPU) {
 			job_memory_cpu = job_ptr->details->pn_min_memory &
 				(~MEM_PER_CPU);
@@ -1839,7 +2661,7 @@ static int _rm_job_from_one_node(struct job_record *job_ptr,
 	}
 
 	if (job_ptr->details &&
-	    job_ptr->details->pn_min_memory && (cr_type == CR_MEMORY)) {
+	    job_ptr->details->pn_min_memory && (cr_type & CR_MEMORY)) {
 		if (job_ptr->details->pn_min_memory & MEM_PER_CPU) {
 			job_memory_cpu = job_ptr->details->pn_min_memory &
 				(~MEM_PER_CPU);
@@ -1926,7 +2748,7 @@ static int _add_job_to_nodes(struct cr_record *cr_ptr,
 	}
 
 	if (alloc_all && job_ptr->details &&
-	    job_ptr->details->pn_min_memory && (cr_type == CR_MEMORY)) {
+	    job_ptr->details->pn_min_memory && (cr_type & CR_MEMORY)) {
 		if (job_ptr->details->pn_min_memory & MEM_PER_CPU) {
 			job_memory_cpu = job_ptr->details->pn_min_memory &
 				(~MEM_PER_CPU);
@@ -2022,8 +2844,7 @@ static void _free_cr(struct cr_record *cr_ptr)
 			xfree(part_cr_ptr1);
 			part_cr_ptr1 = part_cr_ptr2;
 		}
-		if (cr_ptr->nodes[i].gres_list)
-			list_destroy(cr_ptr->nodes[i].gres_list);
+		FREE_NULL_LIST(cr_ptr->nodes[i].gres_list);
 	}
 	xfree(cr_ptr->nodes);
 	xfree(cr_ptr->run_job_ids);
@@ -2189,7 +3010,7 @@ static void _init_node_cr(void)
 		job_memory_cpu  = 0;
 		job_memory_node = 0;
 		if (job_ptr->details && job_ptr->details->pn_min_memory &&
-		    (cr_type == CR_MEMORY)) {
+		    (cr_type & CR_MEMORY)) {
 			if (job_ptr->details->pn_min_memory & MEM_PER_CPU) {
 				job_memory_cpu = job_ptr->details->
 					pn_min_memory &
@@ -2616,7 +3437,7 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		list_iterator_destroy(preemptee_iterator);
 	}
 
-	list_destroy(cr_job_list);
+	FREE_NULL_LIST(cr_job_list);
 	_free_cr(exp_cr);
 	FREE_NULL_BITMAP(orig_map);
 	return rc;
@@ -2635,23 +3456,25 @@ static int  _cr_job_list_sort(void *x, void *y)
  */
 extern int init ( void )
 {
+	char *topo_param;
 	int rc = SLURM_SUCCESS;
-#ifdef HAVE_XCPU
-	rc = _init_status_pthread();
-#endif
+
 	cr_type = slurmctld_conf.select_type_param;
 	if (cr_type)
 		verbose("%s loaded with argument %u", plugin_name, cr_type);
 
+	topo_param = slurm_get_topology_param();
+	if (topo_param && strstr(topo_param, "dragonfly"))
+		have_dragonfly = true;
+	xfree(topo_param);
+
 	return rc;
 }
 
 extern int fini ( void )
 {
 	int rc = SLURM_SUCCESS;
-#ifdef HAVE_XCPU
-	rc = _fini_status_pthread();
-#endif
+
 	cr_fini_global_core_data();
 	slurm_mutex_lock(&cr_mutex);
 	_free_cr(cr_ptr);
@@ -2824,29 +3647,7 @@ extern int select_p_job_test(struct job_record *job_ptr, bitstr_t *bitmap,
 extern int select_p_job_begin(struct job_record *job_ptr)
 {
 	int rc = SLURM_SUCCESS;
-#ifdef HAVE_XCPU
-	int i;
-	char clone_path[128];
 
-	xassert(job_ptr);
-	xassert(job_ptr->node_bitmap);
-
-	for (i=0; i<select_node_cnt; i++) {
-		if (bit_test(job_ptr->node_bitmap, i) == 0)
-			continue;
-		snprintf(clone_path, sizeof(clone_path),
-			 "%s/%s/xcpu/clone", XCPU_DIR,
-			 select_node_ptr[i].name);
-		if (chown(clone_path, (uid_t)job_ptr->user_id,
-			  (gid_t)job_ptr->group_id)) {
-			error("chown %s: %m", clone_path);
-			rc = SLURM_ERROR;
-		} else {
-			debug("chown %s to %u", clone_path,
-			      job_ptr->user_id);
-		}
-	}
-#endif
 	slurm_mutex_lock(&cr_mutex);
 	if (cr_ptr == NULL)
 		_init_node_cr();
@@ -2916,20 +3717,6 @@ extern int select_p_job_resized(struct job_record *job_ptr,
 				struct node_record *node_ptr)
 {
 	int rc = SLURM_SUCCESS;
-#ifdef HAVE_XCPU
-	int i = node_ptr - node_record_table_ptr;
-	char clone_path[128];
-
-	if (bit_test(job_ptr->node_bitmap, i) == 0)
-		continue;
-	snprintf(clone_path, sizeof(clone_path), "%s/%s/xcpu/clone", XCPU_DIR,
-		 node_ptr->name);
-	if (chown(clone_path, (uid_t)0, (gid_t)0)) {
-		error("chown %s: %m", clone_path);
-		rc = SLURM_ERROR;
-	} else
-		debug("chown %s to 0", clone_path);
-#endif
 
 	slurm_mutex_lock(&cr_mutex);
 	if (cr_ptr == NULL)
@@ -2951,23 +3738,7 @@ extern int select_p_job_signal(struct job_record *job_ptr, int signal)
 extern int select_p_job_fini(struct job_record *job_ptr)
 {
 	int rc = SLURM_SUCCESS;
-#ifdef HAVE_XCPU
-	int i;
-	char clone_path[128];
 
-	for (i=0; i<select_node_cnt; i++) {
-		if (bit_test(job_ptr->node_bitmap, i) == 0)
-			continue;
-		snprintf(clone_path, sizeof(clone_path), "%s/%s/xcpu/clone",
-			 XCPU_DIR, select_node_ptr[i].name);
-		if (chown(clone_path, (uid_t)0, (gid_t)0)) {
-			error("chown %s: %m", clone_path);
-			rc = SLURM_ERROR;
-		} else {
-			debug("chown %s to 0", clone_path);
-		}
-	}
-#endif
 	slurm_mutex_lock(&cr_mutex);
 	if (cr_ptr == NULL)
 		_init_node_cr();
@@ -3382,8 +4153,8 @@ extern bitstr_t * select_p_resv_test(resv_desc_msg_t *resv_desc_ptr,
 	int i, j;
 	int best_fit_inx, first, last;
 	int best_fit_nodes;
-	int best_fit_location = 0, best_fit_sufficient;
-	bool sufficient;
+	int best_fit_location = 0;
+	bool sufficient, best_fit_sufficient;
 
 	xassert(avail_bitmap);
 	xassert(resv_desc_ptr);
@@ -3415,11 +4186,12 @@ extern bitstr_t * select_p_resv_test(resv_desc_msg_t *resv_desc_ptr,
 		char *node_names = NULL;
 		if (switches_node_cnt[i])
 			node_names = bitmap2node_name(switches_bitmap[i]);
-		debug("switch=%s nodes=%u:%s required:%u speed=%u",
-		      switch_record_table[i].name,
-		      switches_node_cnt[i], node_names,
-		      switches_required[i],
-		      switch_record_table[i].link_speed);
+		info("switch=%s level=%d nodes=%u:%s required:%u speed=%u",
+		     switch_record_table[i].name,
+		     switch_record_table[i].level,
+		     switches_node_cnt[i], node_names,
+		     switches_required[i],
+		     switch_record_table[i].link_speed);
 		xfree(node_names);
 	}
 #endif
diff --git a/src/plugins/select/other/Makefile.in b/src/plugins/select/other/Makefile.in
index dc9048a55..b41a971af 100644
--- a/src/plugins/select/other/Makefile.in
+++ b/src/plugins/select/other/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -241,6 +244,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -290,8 +295,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -310,6 +319,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -353,6 +365,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -376,6 +389,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/select/serial/Makefile.in b/src/plugins/select/serial/Makefile.in
index 41e8626e6..8bdfcfec2 100644
--- a/src/plugins/select/serial/Makefile.in
+++ b/src/plugins/select/serial/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/select/serial/job_test.c b/src/plugins/select/serial/job_test.c
index 517223d25..5b979cc97 100644
--- a/src/plugins/select/serial/job_test.c
+++ b/src/plugins/select/serial/job_test.c
@@ -135,7 +135,8 @@ uint16_t _can_job_run_on_node(struct job_record *job_ptr, bitstr_t *core_map,
 
 	if ((cr_type & CR_MEMORY) && cpus) {
 		req_mem   = job_ptr->details->pn_min_memory & ~MEM_PER_CPU;
-		avail_mem = select_node_record[node_i].real_memory;
+		avail_mem = select_node_record[node_i].real_memory -
+			    select_node_record[node_i].mem_spec_limit;
 		if (!test_only)
 			avail_mem -= node_usage[node_i].alloc_memory;
 		if (req_mem > avail_mem)
@@ -971,5 +972,6 @@ alloc_job:
 		/* memory is per-node */
 		job_res->memory_allocated[0] = save_mem;
 	}
+
 	return error_code;
 }
diff --git a/src/plugins/select/serial/select_serial.c b/src/plugins/select/serial/select_serial.c
index 35756d238..8a6cd56c7 100644
--- a/src/plugins/select/serial/select_serial.c
+++ b/src/plugins/select/serial/select_serial.c
@@ -105,16 +105,13 @@ uint32_t *cr_node_cores_offset;
  * only load select plugins if the plugin_type string has a
  * prefix of "select/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as the node selection API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "Serial Job Resource Selection plugin";
 const char plugin_type[] = "select/serial";
 const uint32_t plugin_id      = 106;
-const uint32_t plugin_version = 120;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 const uint32_t pstate_version = 7;	/* version control on saved state */
 
 uint16_t cr_type = CR_CPU; /* cr_type is overwritten in init() */
@@ -397,9 +394,7 @@ static void _destroy_node_data(struct node_use_record *node_usage,
 	xfree(node_data);
 	if (node_usage) {
 		for (i = 0; i < select_node_cnt; i++) {
-			if (node_usage[i].gres_list) {
-				list_destroy(node_usage[i].gres_list);
-			}
+			FREE_NULL_LIST(node_usage[i].gres_list);
 		}
 		xfree(node_usage);
 	}
@@ -907,6 +902,7 @@ static int _rm_job_from_res(struct part_res_record *part_record_ptr,
 		if (action != 2) {
 			if (job->memory_allocated[n] == 0)
 				continue;	/* no memory allocated */
+
 			if (node_usage[i].alloc_memory <
 			    job->memory_allocated[n]) {
 				error("select/serial: node %s memory is "
@@ -1193,8 +1189,7 @@ top:	orig_map = bit_copy(save_bitmap);
 			}
 			list_iterator_destroy(preemptee_iterator);
 			if (!remove_some_jobs) {
-				list_destroy(*preemptee_job_list);
-				*preemptee_job_list = NULL;
+				FREE_NULL_LIST(*preemptee_job_list);
 			}
 		}
 
@@ -1344,7 +1339,7 @@ static int _will_run_test(struct job_record *job_ptr, bitstr_t *bitmap,
 		list_iterator_destroy(preemptee_iterator);
 	}
 
-	list_destroy(cr_job_list);
+	FREE_NULL_LIST(cr_job_list);
 	_destroy_part_data(future_part);
 	_destroy_node_data(future_usage, NULL);
 	FREE_NULL_BITMAP(orig_map);
@@ -1463,6 +1458,8 @@ extern int select_p_node_init(struct node_record *node_ptr, int node_cnt)
 	select_core_cnt = 0;
 	for (i = 0; i < select_node_cnt; i++) {
 		select_node_record[i].node_ptr = &node_ptr[i];
+		select_node_record[i].mem_spec_limit = node_ptr[i].
+						       mem_spec_limit;
 		if (select_fast_schedule) {
 			struct config_record *config_ptr;
 			config_ptr = node_ptr[i].config_ptr;
@@ -2081,6 +2078,8 @@ extern int select_p_update_node_config(int index)
 
 	select_node_record[index].real_memory = select_node_record[index].
 						node_ptr->real_memory;
+	select_node_record[index].mem_spec_limit = select_node_record[index].
+		node_ptr->mem_spec_limit;
 	return SLURM_SUCCESS;
 }
 
diff --git a/src/plugins/select/serial/select_serial.h b/src/plugins/select/serial/select_serial.h
index 6571c578e..627704367 100644
--- a/src/plugins/select/serial/select_serial.h
+++ b/src/plugins/select/serial/select_serial.h
@@ -89,6 +89,7 @@ struct node_res_record {
 	uint16_t vpus;			/* count of virtual cpus (hyperthreads)
 					 * configured per core */
 	uint32_t real_memory;		/* MB of real memory configured */
+	uint32_t mem_spec_limit;	/* MB of specialized/system memory */
 };
 
 /* per-node resource usage record */
diff --git a/src/plugins/slurmctld/Makefile.am b/src/plugins/slurmctld/Makefile.am
index bdb5bbb92..b71a662e3 100644
--- a/src/plugins/slurmctld/Makefile.am
+++ b/src/plugins/slurmctld/Makefile.am
@@ -1,3 +1,3 @@
 # Makefile for slurmctld plugins
 
-SUBDIRS = dynalloc nonstop
+SUBDIRS = nonstop
diff --git a/src/plugins/slurmctld/Makefile.in b/src/plugins/slurmctld/Makefile.in
index f6bd07e7a..7d39278f5 100644
--- a/src/plugins/slurmctld/Makefile.in
+++ b/src/plugins/slurmctld/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -447,7 +461,7 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = dynalloc nonstop
+SUBDIRS = nonstop
 all: all-recursive
 
 .SUFFIXES:
diff --git a/src/plugins/slurmctld/dynalloc/Makefile.am b/src/plugins/slurmctld/dynalloc/Makefile.am
deleted file mode 100644
index 691891b16..000000000
--- a/src/plugins/slurmctld/dynalloc/Makefile.am
+++ /dev/null
@@ -1,39 +0,0 @@
-# Makefile for dynalloc (resource dynamic allocation) plugin
-
-AUTOMAKE_OPTIONS = foreign
-
-AM_CXXFLAGS = -fexceptions
-
-PLUGIN_FLAGS = -module -avoid-version --export-dynamic
-
-AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
-
-AS_DYNALLOC_SOURCES = \
-	allocate.c	\
-	allocate.h	\
-	allocator.c	\
-	allocator.h	\
-	argv.c		\
-	argv.h		\
-	deallocate.c \
-	deallocate.h \
-	info.c	\
-	info.h	\
-	job_ports_list.c \
-	job_ports_list.h \
-	msg.c	\
-	msg.h	\
-	slurmctld_dynalloc.c
-
-if SLURM_ENABLE_DYNAMIC_ALLOCATION
-
-pkglib_LTLIBRARIES = slurmctld_dynalloc.la
-slurmctld_dynalloc_la_SOURCES = $(AS_DYNALLOC_SOURCES)
-slurmctld_dynalloc_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-
-else
-
-EXTRA_slurmctld_dynalloc_la_SOURCES = $(AS_DYNALLOC_SOURCES)
-
-endif
-
diff --git a/src/plugins/slurmctld/dynalloc/allocate.c b/src/plugins/slurmctld/dynalloc/allocate.c
deleted file mode 100644
index f9d003eea..000000000
--- a/src/plugins/slurmctld/dynalloc/allocate.c
+++ /dev/null
@@ -1,730 +0,0 @@
-/*****************************************************************************\
- *  allocate.c  - dynamic resource allocation
- *****************************************************************************
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Written by Jimmy Cao <Jimmy.Cao@emc.com>, Ralph Castain <rhc@open-mpi.org>
- *  All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://slurm.schedmd.com/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include <signal.h>
-
-#include "slurm/slurm.h"
-#include "slurm/slurm_errno.h"
-
-#include "src/common/log.h"
-#include "src/common/bitstring.h"
-#include "src/common/node_conf.h"
-#include "src/common/xmalloc.h"
-#include "src/common/xstring.h"
-#include "src/common/node_select.h"
-
-#include "src/slurmctld/slurmctld.h"
-#include "src/slurmctld/node_scheduler.h"
-#include "src/slurmctld/locks.h"
-#include "src/slurmctld/state_save.h"
-#include "src/slurmctld/port_mgr.h"
-
-#include "allocate.h"
-#include "info.h"
-#include "constants.h"
-#include "job_ports_list.h"
-
-
-static int _get_nodelist_optional(uint16_t request_node_num,
-				  const char *node_range_list,
-				  char *final_req_node_list);
-
-static int _get_nodelist_mandatory(uint16_t request_node_num,
-				   const char *node_range_list,
-				   char *final_req_node_list);
-
-static int _get_tasks_per_node(
-			const resource_allocation_response_msg_t *alloc,
-		  	const job_desc_msg_t *desc, char *tasks_per_node);
-
-static char *_uint16_array_to_str_xmalloc(int array_len,
-								const uint16_t *array);
-
-static int _setup_job_desc_msg(uint32_t np, uint32_t request_node_num,
-			       char *node_range_list, const char *flag,
-			       time_t timeout, const char *cpu_bind,
-			       uint32_t mem_per_cpu, job_desc_msg_t *job_desc_msg);
-
-/**
- *	select n nodes from the given node_range_list.
- *
- *	optional means trying best to allocate node from
- *	node_range_list, allocation should include all nodes
- *	in the given list that are currently available. If
- *	that isn't enough to meet the request_node_num,
- * 	then take any other nodes that are available to
- * 	fill out the requested number.
- *
- *	IN:
- *		request_node_num: requested node number
- *		node_range_list: specified node range to select from
- *	OUT Parameter:
- *		final_req_node_list
- *	RET OUT
- *		-1 if requested node number is larger than available
- *		0  successful, final_req_node_list is returned
- */
-static int _get_nodelist_optional(uint16_t request_node_num,
-				  const char *node_range_list,
-				  char *final_req_node_list)
-{
-	hostlist_t avail_hl_system = NULL;  //available hostlist in slurm
-	hostlist_t avail_hl_pool = NULL;    //available hostlist in the given node pool
-	hostlist_t hostlist = NULL;
-	char *avail_pool_range = NULL;
-	int avail_pool_num;
-	int extra_needed_num;
-	char *subset = NULL;
-	char *hostname = NULL;
-	char *tmp = NULL;
-	int i;
-
-	/* get all available hostlist in SLURM system */
-	avail_hl_system = get_available_host_list_system_m();
-
-	if (request_node_num > slurm_hostlist_count(avail_hl_system)){
-		slurm_hostlist_destroy(avail_hl_system);
-		return SLURM_FAILURE;
-	}
-
-	avail_hl_pool = choose_available_from_node_list_m(node_range_list);
-	avail_pool_range = slurm_hostlist_ranged_string_malloc(avail_hl_pool);
-	avail_pool_num = slurm_hostlist_count(avail_hl_pool);
-
-	if (request_node_num <= avail_pool_num) {
-		subset = get_hostlist_subset_m(avail_pool_range,request_node_num);
-		strcpy(final_req_node_list, subset);
-		free(subset);
-	} else { /* avail_pool_num < reqeust_node_num <= avail_node_num_system */
-		hostlist = slurm_hostlist_create(avail_pool_range);
-		extra_needed_num = request_node_num - avail_pool_num;
-
-		for (i = 0; i < extra_needed_num; ) {
-			hostname = slurm_hostlist_shift(avail_hl_system);
-			if (slurm_hostlist_find(hostlist, hostname) == -1) {
-				slurm_hostlist_push_host(hostlist, hostname);
-				i++;
-			}
-			free(hostname);
-		}
-
-		tmp = slurm_hostlist_ranged_string_xmalloc(hostlist);
-		strcpy(final_req_node_list, tmp);
-		xfree(tmp);
-		slurm_hostlist_destroy(hostlist);
-	}
-
-	free(avail_pool_range);
-	slurm_hostlist_destroy(avail_hl_system);
-	slurm_hostlist_destroy(avail_hl_pool);
-
-	return SLURM_SUCCESS;
-}
-
-/**
- *	select n nodes from the given node_range_list
- *
- *	mandatory means all nodes must be allocated
- *	from node_range_list
- *
- *	IN:
- *		request_node_num: requested node number
- *		node_range_list: specified node range to select from
- *	OUT Parameter:
- *		final_req_node_list
- *	RET OUT
- *		-1 if requested node number is larger than available
- *		0  successful, final_req_node_list is returned
- */
-static int _get_nodelist_mandatory(uint16_t request_node_num,
-				   const char *node_range_list,
-				   char *final_req_node_list)
-{
-	hostlist_t avail_hl = NULL;
-	char *avail_node_range = NULL;
-	char *subset = NULL;
-	int rc;
-
-	/* select n (request_node_num) available nodes from node_range_list */
-	avail_hl = choose_available_from_node_list_m(node_range_list);
-	avail_node_range = slurm_hostlist_ranged_string_malloc(avail_hl);
-
-	if (request_node_num <= slurm_hostlist_count(avail_hl)) {
-		subset = get_hostlist_subset_m(avail_node_range, request_node_num);
-		strcpy(final_req_node_list, subset);
-
-		free(subset);
-		rc = SLURM_SUCCESS;
-	} else {
-		rc = SLURM_FAILURE;
-	}
-
-	free(avail_node_range);
-	slurm_hostlist_destroy(avail_hl);
-	return rc;
-}
-
-/*
- * Note: the return should be xfree(str)
- */
-static char* _uint16_array_to_str_xmalloc(int array_len,
-								const uint16_t *array)
-{
-	int i;
-	int previous = 0;
-	char *sep = ",";  /* seperator */
-	char *str = xstrdup("");
-
-	if (NULL == array)
-		return str;
-
-	for (i = 0; i < array_len; i++) {
-		if ((i+1 < array_len)
-		    && (array[i] == array[i+1])) {
-				previous++;
-				continue;
-		}
-
-		if (i == array_len-1) /* last time through loop */
-			sep = "";
-		if (0 < previous) {
-			xstrfmtcat(str, "%u(x%u)%s",
-				   array[i], previous+1, sep);
-		} else {
-			xstrfmtcat(str, "%u%s", array[i], sep);
-		}
-		previous = 0;
-	}
-
-	return str;
-}
-
-/**
- *	get tasks_per_nodes
- *
- *	IN:
- *		alloc: resource allocation response
- *		desc: job resource requirement
- *	OUT Parameter:
- *		tasks_per_node
- *	RET OUT
- *		-1 if failed
- *		0  successful, tasks_per_node is returned
- */
-static int _get_tasks_per_node(
-			const resource_allocation_response_msg_t *alloc,
-		  	const job_desc_msg_t *desc, char *tasks_per_node)
-{
-	uint32_t num_tasks = desc->num_tasks;
-	slurm_step_layout_t *step_layout = NULL;
-	uint32_t node_cnt = alloc->node_cnt;
-	char *tmp = NULL;
-	int i;
-
-	/* If no tasks were given we will figure it out here
-	 * by totalling up the cpus and then dividing by the
-	 * number of cpus per task */
-	if (NO_VAL == num_tasks) {
-		num_tasks = 0;
-		for (i = 0; i < alloc->num_cpu_groups; i++) {
-			num_tasks += alloc->cpu_count_reps[i]
-				* alloc->cpus_per_node[i];
-		}
-		if ((int)desc->cpus_per_task > 1
-		   && desc->cpus_per_task != (uint16_t)NO_VAL)
-			num_tasks /= desc->cpus_per_task;
-	}
-
-	if (!(step_layout = slurm_step_layout_create(alloc->node_list,
-							alloc->cpus_per_node,
-							alloc->cpu_count_reps,
-							node_cnt,
-							num_tasks,
-							desc->cpus_per_task,
-							desc->task_dist,
-							desc->plane_size)))
-		return SLURM_FAILURE;
-
-	tmp = _uint16_array_to_str_xmalloc(step_layout->node_cnt, step_layout->tasks);
-
-	slurm_step_layout_destroy(step_layout);
-
-	if (NULL != tmp)
-		strcpy(tasks_per_node, tmp);
-
-	xfree(tmp);
-	return SLURM_SUCCESS;
-}
-
-/**
- *	after initing, setup job_desc_msg_t with specific requirements
- *
- *	IN:
- *		np: number of process to run
- *		request_node_num: the amount of requested node
- *		node_range_list: requested node pool
- *		flag: optional or mandatory
- *		timeout:
- *		cpu_bind: e.g., cores, sockets, threads
- *		mem_per_cpu: memory size per CPU (MB)
- *	OUT Parameter:
- *		job_desc_msg
- *	RET OUT
- *		-1 if failed
- *		0  successful, job_desc_msg is returned
- */
-static int _setup_job_desc_msg(uint32_t np, uint32_t request_node_num,
-			       char *node_range_list, const char *flag,
-			       time_t timeout, const char *cpu_bind,
-			       uint32_t mem_per_cpu, job_desc_msg_t *job_desc_msg)
-{
-	char final_req_node_list[SIZE] = "";
-	int rc;
-	hostlist_t hostlist = NULL;
-
-	job_desc_msg->user_id = getuid();
-	job_desc_msg->group_id = getgid();
-	job_desc_msg->contiguous = 0;
-
-	/* set np */
-	if (0 != np) {
-		job_desc_msg->num_tasks = np;
-		job_desc_msg->min_cpus = np;
-	}
-
-	if (0 != request_node_num) {  /* N != 0 */
-		if (0 != strlen(node_range_list)) {
-			/* N != 0 && node_list != "", select nodes according to flag */
-			if (0 == strcmp(flag, "mandatory")) {
-				rc = _get_nodelist_mandatory(request_node_num,
-						node_range_list, final_req_node_list);
-
-				if (SLURM_SUCCESS == rc) {
-					if (0 != strlen(final_req_node_list))
-						job_desc_msg->req_nodes = final_req_node_list;
-					else
-						job_desc_msg->min_nodes = request_node_num;
-				} else {
-					error ("can not meet mandatory requirement");
-					return SLURM_FAILURE;
-				}
-			} else { /* flag == "optional" */
-				rc = _get_nodelist_optional(request_node_num,
-									node_range_list, final_req_node_list);
-				if (SLURM_SUCCESS == rc) {
-					if (0 != strlen(final_req_node_list))
-						job_desc_msg->req_nodes = final_req_node_list;
-					else
-						job_desc_msg->min_nodes = request_node_num;
-				} else {
-					job_desc_msg->min_nodes = request_node_num;
-				}
-			}
-		} else {
-			/* N != 0 && node_list == "" */
-			job_desc_msg->min_nodes = request_node_num;
-		}
-	} else { /* N == 0 */
-		if (0 != strlen(node_range_list)) {
-			/* N == 0 && node_list != "" */
-			if (0 == strcmp(flag, "optional")) {
-				hostlist = slurm_hostlist_create(node_range_list);
-				request_node_num = slurm_hostlist_count(hostlist);
-				rc = _get_nodelist_optional(request_node_num,
-									node_range_list, final_req_node_list);
-				if (SLURM_SUCCESS == rc) {
-					if (0 != strlen(final_req_node_list))
-						job_desc_msg->req_nodes = final_req_node_list;
-					else
-						job_desc_msg->min_nodes = request_node_num;
-				} else {
-					job_desc_msg->min_nodes = request_node_num;
-				}
-
-				slurm_hostlist_destroy(hostlist);
-			} else {  /* flag == "mandatory" */
-				job_desc_msg->req_nodes = node_range_list;
-			}
-		}
-		/* if N == 0 && node_list == "", do nothing */
-	}
-
-	/* for cgroup */
-	if (mem_per_cpu > 0)
-		job_desc_msg->pn_min_memory =  mem_per_cpu | MEM_PER_CPU;
-
-	if (NULL != cpu_bind || 0 != strlen(cpu_bind)) {
-		if(0 == strcmp(cpu_bind, "cores"))
-			job_desc_msg->cpu_bind_type = CPU_BIND_TO_CORES;
-		else if (0 == strcmp(cpu_bind, "sockets"))
-			job_desc_msg->cpu_bind_type = CPU_BIND_TO_SOCKETS;
-		else if (0 == strcmp(cpu_bind, "threads"))
-			job_desc_msg->cpu_bind_type = CPU_BIND_TO_THREADS;
-	}
-	return SLURM_SUCCESS;
-}
-
-
-/**
- *	select n nodes from the given node_range_list through rpc
- *
- *  if (flag == mandatory), all requested nodes must be allocated
- *  from node_list; else if (flag == optional), try best to allocate
- *  node from node_list, and the allocation should include all
- *  nodes in the given list that are currently available. If that
- *  isn't enough to meet the node_num_request, then take any other
- *  nodes that are available to fill out the requested number.
- *
- *	IN:
- *		np: number of process to run
- *		request_node_num: requested node number
- *		node_range_list: specified node range to select from
- *		flag: optional or mandatory
- *		timeout: timeout
- *		cpu_bind:e.g., cores, threads, sockets
- *		mem_per_cpu: memory size per CPU (MB)
- *	OUT Parameter:
- *		jobid: slurm jobid
- *		reponse_node_list:
- *		tasks_per_node: like 4(x2) 3,2
- *	RET OUT:
- *		-1 if requested node number is larger than available or timeout
- *		0  successful
- */
-int allocate_node_rpc(uint32_t np, uint32_t request_node_num,
-		      char *node_range_list, const char *flag,
-		      time_t timeout, const char *cpu_bind,
-		      uint32_t mem_per_cpu, uint32_t resv_port_cnt,
-		      uint32_t *slurm_jobid, char *reponse_node_list,
-		      char *tasks_per_node, char *resv_ports)
-{
-	job_desc_msg_t job_desc_msg;
-	resource_allocation_response_msg_t *job_alloc_resp_msg = NULL;
-	struct job_record *job_ptr = NULL;
-	struct step_record step;
-	uid_t uid = getuid();
-	int rc, i;
-
-	slurm_init_job_desc_msg (&job_desc_msg);
-	rc = _setup_job_desc_msg(np, request_node_num, node_range_list, flag,
-					 timeout, cpu_bind, mem_per_cpu, &job_desc_msg);
-	if (rc)
-		return SLURM_FAILURE;
-
-	job_alloc_resp_msg = slurm_allocate_resources_blocking(&job_desc_msg,
-							       timeout, NULL);
-	if (!job_alloc_resp_msg) {
-		error("allocate failure, timeout or request too many nodes");
-		return SLURM_FAILURE;
-	}
-
-	/* OUT: slurm_jobid, reponse_node_list, tasks_per_node */
-	*slurm_jobid = job_alloc_resp_msg->job_id;
-	strcpy(reponse_node_list, job_alloc_resp_msg->node_list);
-	_get_tasks_per_node(job_alloc_resp_msg, &job_desc_msg, tasks_per_node);
-
-	info("allocate [ node_list = %s ] to [ job_id = %u ]",
-	     job_alloc_resp_msg->node_list, job_alloc_resp_msg->job_id);
-
-	/* free the allocated resource msg */
-	slurm_free_resource_allocation_response_msg(job_alloc_resp_msg);
-
-	job_ptr = find_job_record(job_alloc_resp_msg->job_id);
-	/**************************\
-	 * 		resv port 		  *
-	\**************************/
-	if (0 == resv_port_cnt)
-		resv_port_cnt = 1;
-	step.resv_port_cnt = resv_port_cnt;
-	step.job_ptr = job_ptr;
-	step.step_node_bitmap = job_ptr->node_bitmap;
-	rc = resv_port_alloc(&step);
-	if (SLURM_SUCCESS != rc) {
-		cancel_job(job_ptr->job_id, uid);
-		xfree(step.resv_ports);
-		xfree(step.resv_port_array);
-		return SLURM_FAILURE;
-	}
-	strcpy(resv_ports, step.resv_ports);
-	for (i = 0; i < step.resv_port_cnt; i++) {
-		info("reserved ports %s for job %u : resv_port_array[%d]=%u",
-				step.resv_ports, step.job_ptr->job_id,
-				i, step.resv_port_array[i]);
-	}
-
-	/* keep slurm_jobid and resv_port_array in a List
-	 * for future release port */
-	append_job_ports_item(job_ptr->job_id, step.resv_port_cnt,
-			step.resv_ports, step.resv_port_array);
-
-	xfree(step.resv_ports);
-	xfree(step.resv_port_array);
-
-#if 0
-	//kill the job, release the resource, just for test
-	if (slurm_kill_job(job_alloc_resp_msg->job_id, SIGKILL, 0)) {
-		 error ("ERROR: kill job %d\n", slurm_get_errno());
-		 return SLURM_FAILURE;
-	}
-#endif
-
-	return SLURM_SUCCESS;
-}
-
-/**
- *	select n nodes from the given node_range_list directly through
- *	"job_allocate" in slurmctld/job_mgr.c
- *
- *  if (flag == mandatory), all requested nodes must be allocated
- *  from node_list; else if (flag == optional), try best to allocate
- *  node from node_list, and the allocation should include all
- *  nodes in the given list that are currently available. If that
- *  isn't enough to meet the node_num_request, then take any other
- *  nodes that are available to fill out the requested number.
- *
- *	IN:
- *		np: number of process to run
- *		request_node_num: requested node number
- *		node_range_list: specified node range to select from
- *		flag: optional or mandatory
- *		timeout: timeout
- *		cpu_bind: cpu bind type, e.g., cores, socket
- *		mem_per_cpu: memory size per cpu (MB)
- *	OUT Parameter:
- *		slurm_jobid: slurm jobid
- *		reponse_node_list:
- *		tasks_per_node: like 4(x2) 3,2
- *	RET OUT:
- *		-1 if requested node number is larger than available or timeout
- *		0  successful, final_req_node_list is returned
- */
-int allocate_node(uint32_t np, uint32_t request_node_num,
-		  char *node_range_list, const char *flag,
-		  time_t timeout, const char *cpu_bind,
-		  uint32_t mem_per_cpu, uint32_t resv_port_cnt,
-		  uint32_t *slurm_jobid, char *reponse_node_list,
-		  char *tasks_per_node, char *resv_ports)
-{
-	int rc, error_code, i;
-	char *err_msg = NULL;
-	resource_allocation_response_msg_t alloc_msg;
-	job_desc_msg_t job_desc_msg;
-	struct job_record *job_ptr = NULL;
-	bool job_waiting = false;
-	uid_t uid = getuid();
-	struct step_record step;
-
-	slurm_init_job_desc_msg (&job_desc_msg);
-	rc = _setup_job_desc_msg(np, request_node_num, node_range_list, flag,
-				 timeout, cpu_bind, mem_per_cpu, &job_desc_msg);
-
-	if (rc)
-		return SLURM_FAILURE;
-
-	/* Locks: Read config, write job, write node, read partition */
-	slurmctld_lock_t job_write_lock = {
-			READ_LOCK, WRITE_LOCK, WRITE_LOCK, READ_LOCK };
-
-	job_desc_msg.immediate = 0;
-	rc = validate_job_create_req(&job_desc_msg, job_desc_msg.user_id,
-				     &err_msg);
-	if (rc) {
-		error("invalid job request: %s", err_msg);
-		xfree(err_msg);
-		return SLURM_FAILURE;
-	}
-
-	lock_slurmctld(job_write_lock);
-	error_code = job_allocate(&job_desc_msg, job_desc_msg.immediate,
-				  false, //will run
-				  NULL, // will_run_response_msg_t
-				  true, //allocate
-				  job_desc_msg.user_id, &job_ptr, NULL);
-	unlock_slurmctld(job_write_lock);
-
-	/* cleanup */
-	xfree(job_desc_msg.partition);
-
-	if ((error_code == ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE) ||
-		(error_code == ESLURM_RESERVATION_NOT_USABLE) ||
-		(error_code == ESLURM_QOS_THRES) ||
-		(error_code == ESLURM_NODE_NOT_AVAIL) ||
-		(error_code == ESLURM_JOB_HELD))
-		job_waiting = true;
-
-	if ((SLURM_SUCCESS == error_code) ||
-	    ((0 == job_desc_msg.immediate) && job_waiting)) {
-		xassert(job_ptr);
-
-		/* note: allocated node list is in 'job_ptr->job_id' */
-		/* not 'job_ptr->alloc_node' */
-
-		if (0 < job_ptr->job_id && NULL == job_ptr->nodes) {
-			/* job is pending, so cancel the job */
-			cancel_job(job_ptr->job_id, uid);
-			return SLURM_FAILURE;
-		} else {  /* allocate successful */
-			strcpy(reponse_node_list, job_ptr->nodes);
-			*slurm_jobid = job_ptr->job_id;
-			info("allocate [ allocated_node_list=%s ] to [ slurm_jobid=%u ]",
-			     job_ptr->nodes, job_ptr->job_id);
-
-			/* transform job_ptr to alloc_msg for further use */
-			if (job_ptr->job_resrcs &&
-			    job_ptr->job_resrcs->cpu_array_cnt) {
-				alloc_msg.num_cpu_groups =
-						job_ptr->job_resrcs->cpu_array_cnt;
-				i = sizeof(uint32_t) * alloc_msg.num_cpu_groups;
-				alloc_msg.cpu_count_reps = xmalloc(i);
-				memcpy(alloc_msg.cpu_count_reps,
-				       job_ptr->job_resrcs->cpu_array_reps, i);
-				i = sizeof(uint16_t) * alloc_msg.num_cpu_groups;
-				alloc_msg.cpus_per_node  = xmalloc(i);
-				memcpy(alloc_msg.cpus_per_node,
-				       job_ptr->job_resrcs->cpu_array_value, i);
-			} else {
-				alloc_msg.num_cpu_groups = 0;
-				alloc_msg.cpu_count_reps = NULL;
-				alloc_msg.cpus_per_node  = NULL;
-			}
-			alloc_msg.error_code     = error_code;
-			alloc_msg.job_id         = job_ptr->job_id;
-			alloc_msg.node_cnt       = job_ptr->node_cnt;
-			alloc_msg.node_list      = xstrdup(job_ptr->nodes);
-			alloc_msg.alias_list     = xstrdup(job_ptr->alias_list);
-			alloc_msg.select_jobinfo =
-					select_g_select_jobinfo_copy(job_ptr->select_jobinfo);
-			if (job_ptr->details) {
-					alloc_msg.pn_min_memory = job_ptr->details->
-								  pn_min_memory;
-			} else {
-					alloc_msg.pn_min_memory = 0;
-			}
-
-			/* to get tasks_per_node */
-			_get_tasks_per_node(&alloc_msg, &job_desc_msg,
-					    tasks_per_node);
-
-			/* cleanup */
-			xfree(alloc_msg.cpu_count_reps);
-			xfree(alloc_msg.cpus_per_node);
-			xfree(alloc_msg.node_list);
-			xfree(alloc_msg.alias_list);
-
-			select_g_select_jobinfo_free(alloc_msg.select_jobinfo);
-			schedule_job_save();	/* has own locks */
-			schedule_node_save();	/* has own locks */
-
-			/**************************\
-			 * 		resv port 		  *
-			\**************************/
-			if (0 == resv_port_cnt)
-				resv_port_cnt = 1;
-			step.resv_port_cnt = resv_port_cnt;
-			step.job_ptr = job_ptr;
-			step.step_node_bitmap = job_ptr->node_bitmap;
-			rc = resv_port_alloc(&step);
-			if (SLURM_SUCCESS != rc) {
-				cancel_job(job_ptr->job_id, uid);
-				xfree(step.resv_ports);
-				xfree(step.resv_port_array);
-				return SLURM_FAILURE;
-			}
-			strcpy(resv_ports, step.resv_ports);
-			for (i = 0; i < step.resv_port_cnt; i++) {
-				info("reserved ports %s for job %u : resv_port_array[%d]=%u",
-						step.resv_ports, step.job_ptr->job_id,
-						i, step.resv_port_array[i]);
-			}
-
-			/* keep slurm_jobid and resv_port_array in a List */
-			append_job_ports_item(job_ptr->job_id, step.resv_port_cnt,
-					step.resv_ports, step.resv_port_array);
-
-			xfree(step.resv_ports);
-			xfree(step.resv_port_array);
-
-#if 0
-			/* only for test */
-			cancel_job(job_ptr->job_id, uid);
-#endif
-			return SLURM_SUCCESS;
-		}
-	} else {
-		return SLURM_FAILURE;
-	}
-}
-
-/**
- *	cancel a job
- *
- *	IN:
- *		job_id: slurm jobid
- *		uid: user id
- *	OUT Parameter:
- *	RET OUT:
- *		-1 failed
- *		0  successful
- */
-int cancel_job(uint32_t job_id, uid_t uid)
-{
-	int rc;
-	/* Locks: Read config, write job, write node */
-	slurmctld_lock_t job_write_lock = {
-		READ_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
-
-	lock_slurmctld(job_write_lock);
-	rc = job_signal(job_id, SIGKILL, 0, uid, false);
-	unlock_slurmctld(job_write_lock);
-
-	if (rc) { /* cancel failure */
-		info("Signal %u JobId=%u by UID=%u: %s",
-				SIGKILL, job_id, uid, slurm_strerror(rc));
-		return SLURM_FAILURE;
-	} else { /* cancel successful */
-		info("sched: Cancel of JobId=%u by UID=%u", job_id, uid);
-		slurmctld_diag_stats.jobs_canceled++;
-
-		/* Below function provides its own locking */
-		schedule_job_save();
-		return SLURM_SUCCESS;
-	}
-}
diff --git a/src/plugins/slurmctld/dynalloc/allocate.h b/src/plugins/slurmctld/dynalloc/allocate.h
deleted file mode 100644
index f4014d67f..000000000
--- a/src/plugins/slurmctld/dynalloc/allocate.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/*****************************************************************************\
- *  allocate.h - dynamic resource allocation
- *****************************************************************************
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Written by Jimmy Cao <Jimmy.Cao@emc.com>, Ralph Castain <rhc@open-mpi.org>
- *  All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://slurm.schedmd.com/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#ifndef DYNALLOC_ALLOCATE_H_
-#define DYNALLOC_ALLOCATE_H_
-
-
-#if HAVE_CONFIG_H
-#  include "config.h"
-#  if HAVE_INTTYPES_H
-#    include <inttypes.h>
-#  else
-#    if HAVE_STDINT_H
-#      include <stdint.h>
-#    endif
-#  endif  /* HAVE_INTTYPES_H */
-#else   /* !HAVE_CONFIG_H */
-#  include <inttypes.h>
-#endif  /*  HAVE_CONFIG_H */
-
-/**
- *	select n nodes from the given node_range_list through rpc
- *
- *  if (flag == mandatory), all requested nodes must be allocated
- *  from node_list; else if (flag == optional), try best to allocate
- *  node from node_list, and the allocation should include all
- *  nodes in the given list that are currently available. If that
- *  isn't enough to meet the node_num_request, then take any other
- *  nodes that are available to fill out the requested number.
- *
- *	IN:
- *		np: number of process to run
- *		request_node_num: requested node number
- *		node_range_list: specified node range to select from
- *		flag: optional or mandatory
- *		timeout: timeout
- *		cpu_bind:e.g., cores, threads, sockets
- *		mem_per_cpu: memory size per CPU (MB)
- *	OUT Parameter:
- *		jobid: slurm jobid
- *		reponse_node_list:
- *		tasks_per_node: like 4(x2) 3,2
- *	RET OUT:
- *		-1 if requested node number is larger than available or timeout
- *		0  successful
- */
-int allocate_node_rpc(uint32_t np, uint32_t request_node_num,
-		      char *node_range_list, const char *flag,
-		      time_t timeout, const char *cpu_bind,
-		      uint32_t mem_per_cpu, uint32_t resv_port_cnt,
-		      uint32_t *slurm_jobid, char *reponse_node_list,
-		      char *tasks_per_node, char *resv_ports);
-
-/**
- *	select n nodes from the given node_range_list directly through
- *	"job_allocate" in slurmctld/job_mgr.c
- *
- *  if (flag == mandatory), all requested nodes must be allocated
- *  from node_list; else if (flag == optional), try best to allocate
- *  node from node_list, and the allocation should include all
- *  nodes in the given list that are currently available. If that
- *  isn't enough to meet the node_num_request, then take any other
- *  nodes that are available to fill out the requested number.
- *
- *	IN:
- *		np: number of process to run
- *		request_node_num: requested node number
- *		node_range_list: specified node range to select from
- *		flag: optional or mandatory
- *		timeout: timeout
- *		cpu_bind: cpu bind type, e.g., cores, socket
- *		mem_per_cpu: memory size per cpu (MB)
- *	OUT Parameter:
- *		slurm_jobid: slurm jobid
- *		reponse_node_list:
- *		tasks_per_node: like 4(x2) 3,2
- *	RET OUT:
- *		-1 if requested node number is larger than available or timeout
- *		0  successful, final_req_node_list is returned
- */
-int allocate_node(uint32_t np, uint32_t request_node_num,
-		  char *node_range_list, const char *flag,
-		  time_t timeout, const char *cpu_bind,
-		  uint32_t mem_per_cpu, uint32_t resv_port_cnt,
-		  uint32_t *slurm_jobid, char *reponse_node_list,
-		  char *tasks_per_node, char *resv_ports);
-
-/**
- *	cancel a job
- *
- *	IN:
- *		job_id: slurm jobid
- *		uid: user id
- *	OUT Parameter:
- *	RET OUT:
- *		-1 failed
- *		0  successful
- */
-extern int cancel_job(uint32_t job_id, uid_t uid);
-
-#endif /* DYNALLOC_ALLOCATE_H_ */
diff --git a/src/plugins/slurmctld/dynalloc/allocator.c b/src/plugins/slurmctld/dynalloc/allocator.c
deleted file mode 100644
index 768dd9a73..000000000
--- a/src/plugins/slurmctld/dynalloc/allocator.c
+++ /dev/null
@@ -1,293 +0,0 @@
-/*****************************************************************************\
- *  allocator.c  - dynamic resource allocation
- *****************************************************************************
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Written by Jimmy Cao <Jimmy.Cao@emc.com>, Ralph Castain <rhc@open-mpi.org>
- *  All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://slurm.schedmd.com/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "allocator.h"
-#include "allocate.h"
-#include "info.h"
-#include "argv.h"
-#include "msg.h"
-#include "constants.h"
-
-
-static void _parse_job_params(const char *cmd, char *orte_jobid,
-					char *return_flag,	size_t *job_timeout);
-
-static void _parse_app_params(const char *cmd, char *appid,
-					uint32_t *np, uint32_t *request_node_num,
-					char *node_range_list, char *flag,
-					char *cpu_bind, uint32_t *mem_per_cpu,
-					uint32_t *resv_port_cnt);
-
-static void _allocate_app_op(const char *msg_app,
-							size_t app_timeout,
-							char *app_resp_msg);
-
-/*
- * Parse the job part of msg(cmd) to obtain job parameters
- *
- *	e.g., if a allocate request is like "allocate jobid=100
- *	return=all timeout=10:app=0 np=5 N=2 node_list=vm2,vm3
- *	flag=mandatory:app=1 N=2", then the job part of msg is
- *	"jobid=100 return=all timeout=10".
- *
- * IN:
- * 	cmd: job part of msg
- * OUT Parameter:
- * 	orte_jobid:
- * 	return_flag:
- * 	job_timeout: timeout of resource allocation for the whole job
- */
-static void _parse_job_params(const char *cmd, char *orte_jobid,
-					char *return_flag,	size_t *job_timeout)
-{
-	char *tmp = NULL;
-	char *p_str = NULL;
-	char *pos = NULL;
-
-	tmp = xstrdup(cmd);
-	p_str = strtok(tmp, " ");
-	while (p_str) {
-		if (strstr(p_str, "jobid")) {
-			pos = strchr(p_str, '=');
-			pos++;  /* step over the = */
-			strcpy(orte_jobid, pos);
-		} else if (strstr(p_str, "return")) {
-			pos = strchr(p_str, '=');
-			pos++;  /* step over the = */
-			strcpy(return_flag, pos);
-		} else if (strstr(p_str, "timeout")) {
-			pos = strchr(p_str, '=');
-			pos++;  /* step over the = */
-			*job_timeout = atol(pos);
-		}
-		p_str = strtok(NULL, " ");
-	}
-
-	/* cleanup */
-	xfree(tmp);
-}
-
-/*
- * Parse the app part of msg(cmd) to obtain app parameters
- *
- *	e.g., if a allocate request is like "allocate jobid=100
- *	return=all timeout=10:app=0 np=5 N=2 node_list=vm2,vm3
- *	flag=mandatory:app=1 N=2", then the app part of msg is
- *	"app=0 np=5 N=2 node_list=vm2,vm3 flag=mandatory:app=1 N=2".
- *
- * IN:
- * 	cmd: app part of msg
- * OUT Parameter:
- * 	appid:
- * 	np: number of process
- * 	request_node_num:
- * 	node_range_list:
- * 	flag: mandatory or optional
- * 	cpu_bind: cpu bind type, e.g., cores
- * 	mem_per_cpu: memory per cpu (MB)
- */
-static void _parse_app_params(const char *cmd, char *appid,
-					uint32_t *np, uint32_t *request_node_num,
-					char *node_range_list, char *flag,
-					char *cpu_bind, uint32_t *mem_per_cpu,
-					uint32_t *resv_port_cnt)
-{
-	char *tmp = NULL;
-	char *p_str = NULL;
-	char *pos = NULL;
-
-	tmp = xstrdup(cmd);
-	p_str = strtok(tmp, " ");
-	while (p_str) {
-		if (strstr(p_str, "app")) {
-			pos = strchr(p_str, '=');
-			pos++;  /* step over the = */
-			strcpy(appid, pos);
-		} else if (strstr(p_str, "np")) {
-			pos = strchr(p_str, '=');
-			pos++;  /* step over the = */
-			*np = strtoul(pos, NULL, 10);
-		} else if (strstr(p_str, "N=")) {
-			pos =  strchr(p_str, '=');
-			pos++;  /* step over the = */
-			*request_node_num = strtoul(pos, NULL, 10);
-		} else  if (strstr(p_str, "node_list")) {
-			pos = strchr(p_str, '=');
-			pos++;  /* step over the = */
-            strcpy(node_range_list, pos);
-		} else  if (strstr(p_str, "flag")) {
-			pos = strchr(p_str, '=');
-			pos++;  /* step over the = */
-            strcpy(flag, pos);
-		} else if (strstr(p_str, "cpu_bind")) {
-			pos = strchr(p_str, '=');
-			pos++;
-			strcpy(cpu_bind, pos);
-		} else if (strstr(p_str, "mem_per_cpu")) {
-			pos = strchr(p_str, '=');
-			pos++;
-			*mem_per_cpu = strtoul(pos, NULL, 10);
-		} else if (strstr(p_str, "resv_port_cnt")) {
-			pos = strchr(p_str, '=');
-			pos++;
-			*resv_port_cnt = strtoul(pos, NULL, 10);
-		}
-		p_str = strtok(NULL, " ");
-	}
-
-	/* cleanup */
-	xfree(tmp);
-}
-
-/*
- * allocate resource for an app
- *
- * IN:
- * 	msg_app: cmd of allocation requirement
- * 	app_timeout:
- * OUT Parameter:
- * 	app_resp_msg: allocation result
- */
-static void _allocate_app_op(const char *msg_app,
-							size_t app_timeout,
-							char *app_resp_msg)
-{
-	char appid[16];
-	uint32_t  np = 0;
-	uint32_t  request_node_num = 0;
-	char node_range_list[SIZE] = "";
-	char flag[16] = "mandatory";  /* if not specified, by default */
-
-	char cpu_bind[32] = "";
-	uint32_t mem_per_cpu = 0;
-	uint32_t resv_port_cnt = 1;
-	/* out params */
-	uint32_t slurm_jobid;
-	char resp_node_list[SIZE];
-	char tasks_per_node[SIZE] = "";
-	char resv_ports[SIZE] = "";
-	int rc;
-
-	_parse_app_params(msg_app, appid, &np, &request_node_num,
-					node_range_list, flag, cpu_bind, &mem_per_cpu, &resv_port_cnt);
-
-	rc = allocate_node(np, request_node_num, node_range_list, flag,
-						app_timeout, cpu_bind, mem_per_cpu, resv_port_cnt,
-						&slurm_jobid, resp_node_list, tasks_per_node, resv_ports);
-
-	if (SLURM_SUCCESS == rc) {
-		sprintf(app_resp_msg,
-				"app=%s slurm_jobid=%u allocated_node_list=%s tasks_per_node=%s resv_ports=%s",
-				appid, slurm_jobid, resp_node_list, tasks_per_node, resv_ports);
-	} else {
-		sprintf(app_resp_msg, "app=%s allocate_failure", appid);
-	}
-}
-
-/*
- * allocate resources for a job.
- *
- * The job will consist of at least one app, e.g., "allocate
- * jobid=100 return=all timeout=10:app=0 np=5 N=2
- * node_list=vm2,vm3 flag=mandatory:app=1 N=2".
- *
- * IN:
- * 	new_fd: send allocation result to socket_fd
- * 	msg: resource requirement cmd
- */
-extern void allocate_job_op(slurm_fd_t new_fd, const char *msg)
-{
-	char orte_jobid[16] = "";
-	char return_flag[16] = "";
-	size_t job_timeout = 15; /* if not specified, by default */
-
-	char send_buf[SIZE];
-	char **app_argv = NULL, **tmp_app_argv;
-	size_t app_timeout;
-	uint32_t app_count = 1;
-	char app_resp_msg[SIZE];
-	char **all_resp_msg_argv = NULL, **tmp_all_resp_msg_argv;
-
-	app_argv = argv_split(msg, ':');
-	/* app_count dose not include the first part (job info) */
-	app_count = argv_count(app_argv) - 1;
-	/* app_argv will be freed */
-	tmp_app_argv = app_argv;
-	while (*tmp_app_argv) {
-		if (strstr(*tmp_app_argv, "allocate")) {
-			_parse_job_params(*tmp_app_argv, orte_jobid,
-								return_flag, &job_timeout);
-		} else if (strstr(*tmp_app_argv, "app")) {
-			app_timeout = job_timeout / app_count;
-
-			_allocate_app_op(*tmp_app_argv, app_timeout, app_resp_msg);
-
-			if (0 == strcmp(return_flag, "all")
-					&& 0 != strlen(app_resp_msg)) {
-				argv_append_nosize(&all_resp_msg_argv, app_resp_msg);
-			} else if (0 != strlen(app_resp_msg)) {
-				/* if return_flag != "all",
-				 * each app's allocation will be sent individually */
-				sprintf(send_buf, "jobid=%s:%s", orte_jobid, app_resp_msg);
-				info("BBB: send to client: %s", send_buf);
-				send_reply(new_fd, send_buf);
-			}
-		}
-		tmp_app_argv++;
-	}
-	/* free app_argv */
-	argv_free(app_argv);
-
-	if (0 == strcmp(return_flag, "all")) {
-		sprintf(send_buf, "jobid=%s", orte_jobid);
-		/* all_resp_msg_argv will be freed */
-		tmp_all_resp_msg_argv = all_resp_msg_argv;
-		while (*tmp_all_resp_msg_argv) {
-			sprintf(send_buf, "%s:%s", send_buf, *tmp_all_resp_msg_argv);
-			tmp_all_resp_msg_argv++;
-		}
-		/* free all_resp_msg_argv */
-		argv_free(all_resp_msg_argv);
-
-		info("BBB: send to client: %s", send_buf);
-		send_reply(new_fd, send_buf);
-	}
-}
diff --git a/src/plugins/slurmctld/dynalloc/argv.c b/src/plugins/slurmctld/dynalloc/argv.c
deleted file mode 100644
index 7e152dfdd..000000000
--- a/src/plugins/slurmctld/dynalloc/argv.c
+++ /dev/null
@@ -1,553 +0,0 @@
-/*****************************************************************************\
- *  argv.c  -
- *****************************************************************************
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Written by Jimmy Cao <Jimmy.Cao@emc.com>, Ralph Castain <rhc@open-mpi.org>
- *  All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://slurm.schedmd.com/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "slurm/slurm.h"
-
-#include "argv.h"
-#include "constants.h"
-
-/*
- * Append a string to the end of a new or existing argv array.
- */
-int argv_append(int *argc, char ***argv, const char *arg)
-{
-	int rc;
-
-	/* add the new element */
-	if (SLURM_SUCCESS != (rc = argv_append_nosize(argv, arg))) {
-		return rc;
-	}
-
-	*argc = argv_count(*argv);
-
-	return SLURM_SUCCESS;
-}
-
-int argv_append_nosize(char ***argv, const char *arg)
-{
-	int argc;
-
-	/* Create new argv. */
-	if (NULL == *argv) {
-		*argv = (char**) malloc(2 * sizeof(char *));
-		if (NULL == *argv) {
-			return SLURM_FAILURE;
-		}
-		argc = 0;
-		(*argv)[0] = NULL;
-		(*argv)[1] = NULL;
-	}
-
-	/* Extend existing argv. */
-	else {
-		/* count how many entries currently exist */
-		argc = argv_count(*argv);
-
-		*argv = (char**) realloc(*argv, (argc + 2) * sizeof(char *));
-		if (NULL == *argv) {
-			return SLURM_FAILURE;
-		}
-	}
-
-	/* Set the newest element to point to a copy of the arg string */
-	(*argv)[argc] = strdup(arg);
-	if (NULL == (*argv)[argc]) {
-		return SLURM_FAILURE;
-	}
-
-	argc = argc + 1;
-	(*argv)[argc] = NULL;
-
-	return SLURM_SUCCESS;
-}
-
-int argv_prepend_nosize(char ***argv, const char *arg)
-{
-	int argc;
-	int i;
-
-	/* Create new argv. */
-	if (NULL == *argv) {
-		*argv = (char**) malloc(2 * sizeof(char *));
-		if (NULL == *argv) {
-			return SLURM_FAILURE;
-		}
-		(*argv)[0] = strdup(arg);
-		(*argv)[1] = NULL;
-	} else {
-		/* count how many entries currently exist */
-		argc = argv_count(*argv);
-
-		*argv = (char**) realloc(*argv, (argc + 2) * sizeof(char *));
-		if (NULL == *argv) {
-			return SLURM_FAILURE;
-		}
-		(*argv)[argc+1] = NULL;
-
-		/* shift all existing elements down 1 */
-		for (i=argc; 0 < i; i--) {
-			(*argv)[i] = (*argv)[i-1];
-		}
-		(*argv)[0] = strdup(arg);
-	}
-
-	return SLURM_SUCCESS;
-}
-
-int argv_append_unique_nosize(char ***argv, const char *arg, bool overwrite)
-{
-	int i;
-
-	/* if the provided array is NULL, then the arg cannot be present,
-	 * so just go ahead and append
-	 */
-	if (NULL == *argv) {
-		return argv_append_nosize(argv, arg);
-	}
-
-	/* see if this arg is already present in the array */
-	for (i=0; NULL != (*argv)[i]; i++) {
-		if (0 == strcmp(arg, (*argv)[i])) {
-			/* already exists - are we authorized to overwrite? */
-			if (overwrite) {
-				free((*argv)[i]);
-				(*argv)[i] = strdup(arg);
-			}
-			return SLURM_SUCCESS;
-		}
-	}
-
-	/* we get here if the arg is not in the array - so add it */
-	return argv_append_nosize(argv, arg);
-}
-
-/*
- * Free a NULL-terminated argv array.
- */
-void argv_free(char **argv)
-{
-	char **p;
-
-	if (NULL == argv)
-		return;
-
-	for (p = argv; NULL != *p; ++p) {
-		free(*p);
-	}
-
-	free(argv);
-}
-
-
-/*
- * Split a string into a NULL-terminated argv array.
- */
-static char **argv_split_inter(const char *src_string, int delimiter,
-		int include_empty)
-{
-	char arg[SIZE];
-	char **argv = NULL;
-	const char *p;
-	char *argtemp;
-	int argc = 0;
-	size_t arglen;
-
-	while (src_string && *src_string) {
-		p = src_string;
-		arglen = 0;
-
-		while (('\0' != *p) && (*p != delimiter)) {
-			++p;
-			++arglen;
-		}
-
-		/* zero length argument, skip */
-		if (src_string == p) {
-			if (include_empty) {
-				arg[0] = '\0';
-				if (SLURM_SUCCESS != argv_append(&argc, &argv, arg))
-					return NULL;
-			}
-		}
-
-		/* tail argument, add straight from the original string */
-		else if ('\0' == *p) {
-			if (SLURM_SUCCESS != argv_append(&argc, &argv, src_string))
-				return NULL;
-			src_string = p;
-			continue;
-		}
-
-		/* long argument, malloc buffer, copy and add */
-		else if (arglen > (SIZE - 1)) {
-			argtemp = (char*) malloc(arglen + 1);
-			if (NULL == argtemp)
-				return NULL;
-
-			strncpy(argtemp, src_string, arglen);
-			argtemp[arglen] = '\0';
-
-			if (SLURM_SUCCESS != argv_append(&argc, &argv, argtemp)) {
-				free(argtemp);
-				return NULL;
-			}
-
-			free(argtemp);
-		}
-
-		/* short argument, copy to buffer and add */
-		else {
-			strncpy(arg, src_string, arglen);
-			arg[arglen] = '\0';
-
-			if (SLURM_SUCCESS != argv_append(&argc, &argv, arg))
-				return NULL;
-		}
-
-		src_string = p + 1;
-	}
-
-	/* All done */
-	return argv;
-}
-
-char **argv_split(const char *src_string, int delimiter)
-{
-	return argv_split_inter(src_string, delimiter, 0);
-}
-
-char **argv_split_with_empty(const char *src_string, int delimiter)
-{
-	return argv_split_inter(src_string, delimiter, 1);
-}
-
-/*
- * Return the length of a NULL-terminated argv array.
- */
-int argv_count(char **argv)
-{
-	char **p;
-	int i;
-
-	if (NULL == argv)
-		return 0;
-
-	for (i = 0, p = argv; *p; i++, p++)
-		continue;
-
-	return i;
-}
-
-/*
- * Join all the elements of an argv array into a single
- * newly-allocated string.
- */
-char *argv_join(char **argv, int delimiter)
-{
-	char **p;
-	char *pp;
-	char *str;
-	size_t str_len = 0;
-	size_t i;
-
-	/* Bozo case */
-	if (NULL == argv || NULL == argv[0]) {
-		return strdup("");
-	}
-
-	/* Find the total string length in argv including delimiters.  The
-     	 last delimiter is replaced by the NULL character. */
-	for (p = argv; *p; ++p) {
-		str_len += strlen(*p) + 1;
-	}
-
-	/* Allocate the string. */
-	if (NULL == (str = (char*) malloc(str_len)))
-		return NULL;
-
-	/* Loop filling in the string. */
-	str[--str_len] = '\0';
-	p = argv;
-	pp = *p;
-
-	for (i = 0; i < str_len; ++i) {
-		if ('\0' == *pp) {
-			/* End of a string, fill in a delimiter
-			 * and go to the next string. */
-			str[i] = (char) delimiter;
-			++p;
-			pp = *p;
-		} else {
-			str[i] = *pp++;
-		}
-	}
-
-	/* All done */
-	return str;
-}
-
-/*
- * Join all the elements of an argv array from within a
- * specified range into a single newly-allocated string.
- */
-char *argv_join_range(char **argv, size_t start, size_t end, int delimiter)
-{
-	char **p;
-	char *pp;
-	char *str;
-	size_t str_len = 0;
-	size_t i;
-
-	/* Bozo case */
-	if (NULL == argv || NULL == argv[0] || (int)start > argv_count(argv)) {
-		return strdup("");
-	}
-
-	/* Find the total string length in argv including delimiters.  The
-	 * last delimiter is replaced by the NULL character. */
-	for (p = &argv[start], i=start; *p && i < end; ++p, ++i) {
-		str_len += strlen(*p) + 1;
-	}
-
-	/* Allocate the string. */
-	if (NULL == (str = (char*) malloc(str_len)))
-		return NULL;
-
-	/* Loop filling in the string. */
-	str[--str_len] = '\0';
-	p = &argv[start];
-	pp = *p;
-
-	for (i = 0; i < str_len; ++i) {
-		if ('\0' == *pp) {
-			/* End of a string, fill in a delimiter and go to the
-			 * next string. */
-			str[i] = (char) delimiter;
-			++p;
-			pp = *p;
-		} else {
-			str[i] = *pp++;
-		}
-	}
-
-	/* All done */
-	return str;
-}
-
-/*
- * Return the number of bytes consumed by an argv array.
- */
-size_t argv_len(char **argv)
-{
-	char **p;
-	size_t length;
-
-	if (NULL == argv)
-		return (size_t) 0;
-
-	length = sizeof(char *);
-
-	for (p = argv; *p; ++p) {
-		length += strlen(*p) + 1 + sizeof(char *);
-	}
-
-	return length;
-}
-
-/*
- * Copy a NULL-terminated argv array.
- */
-char **argv_copy(char **argv)
-{
-	char **dupv = NULL;
-	int dupc = 0;
-
-	if (NULL == argv)
-		return NULL;
-
-	/* create an "empty" list, so that we return something valid if we
-	 * were passed a valid list with no contained elements */
-	dupv = (char**) malloc(sizeof(char*));
-	dupv[0] = NULL;
-
-	while (NULL != *argv) {
-		if (SLURM_SUCCESS != argv_append(&dupc, &dupv, *argv)) {
-			argv_free(dupv);
-			return NULL;
-		}
-
-		++argv;
-	}
-
-	/* All done */
-	return dupv;
-}
-
-int argv_delete(int *argc, char ***argv, int start, int num_to_delete)
-{
-	int i;
-	int count;
-	int suffix_count;
-	char **tmp;
-
-	/* Check for the bozo cases */
-	if (NULL == argv || NULL == *argv || 0 == num_to_delete) {
-		return SLURM_SUCCESS;
-	}
-	count = argv_count(*argv);
-	if (start > count) {
-		return SLURM_SUCCESS;
-	} else if (start < 0 || num_to_delete < 0) {
-		return SLURM_FAILURE;
-	}
-
-	/* Ok, we have some tokens to delete.  Calculate the new length of
-	 * the argv array. */
-	suffix_count = count - (start + num_to_delete);
-	if (suffix_count < 0) {
-		suffix_count = 0;
-	}
-
-	/* Free all items that are being deleted */
-	for (i = start; i < count && i < start + num_to_delete; ++i) {
-		free((*argv)[i]);
-	}
-
-	/* Copy the suffix over the deleted items */
-	for (i = start; i < start + suffix_count; ++i) {
-		(*argv)[i] = (*argv)[i + num_to_delete];
-	}
-
-	/* Add the trailing NULL */
-	(*argv)[i] = NULL;
-
-	/* adjust the argv array */
-	tmp = (char**)realloc(*argv, sizeof(char**) * (i + 1));
-	if (NULL != tmp) *argv = tmp;
-
-	/* adjust the argc */
-	(*argc) -= num_to_delete;
-
-	return SLURM_SUCCESS;
-}
-
-int argv_insert(char ***target, int start, char **source)
-{
-	int i, source_count, target_count;
-	int suffix_count;
-
-	/* Check for the bozo cases */
-	if (NULL == target || NULL == *target || start < 0) {
-		return SLURM_FAILURE;
-	} else if (NULL == source) {
-		return SLURM_SUCCESS;
-	}
-
-	/* Easy case: appending to the end */
-	target_count = argv_count(*target);
-	source_count = argv_count(source);
-	if (start > target_count) {
-		for (i = 0; i < source_count; ++i) {
-			argv_append(&target_count, target, source[i]);
-		}
-	}
-
-	/* Harder: insertting into the middle */
-	else {
-		/* Alloc out new space */
-		*target = (char**) realloc(*target,
-					   sizeof(char *) *
-					   (target_count + source_count + 1));
-
-		/* Move suffix items down to the end */
-		suffix_count = target_count - start;
-		for (i = suffix_count - 1; i >= 0; --i) {
-			(*target)[start + source_count + i] =
-					(*target)[start + i];
-		}
-		(*target)[start + suffix_count + source_count] = NULL;
-
-		/* Strdup in the source argv */
-		for (i = start; i < start + source_count; ++i) {
-			(*target)[i] = strdup(source[i - start]);
-		}
-	}
-
-	/* All done */
-	return SLURM_SUCCESS;
-}
-
-int argv_insert_element(char ***target, int location, char *source)
-{
-	int i, target_count;
-	int suffix_count;
-
-	/* Check for the bozo cases */
-	if (NULL == target || NULL == *target || location < 0) {
-		return SLURM_FAILURE;
-	} else if (NULL == source) {
-		return SLURM_SUCCESS;
-	}
-
-	/* Easy case: appending to the end */
-	target_count = argv_count(*target);
-	if (location > target_count) {
-		argv_append(&target_count, target, source);
-		return SLURM_SUCCESS;
-	}
-
-	/* Alloc out new space */
-	*target = (char**) realloc(*target,
-			sizeof(char*) * (target_count + 2));
-
-	/* Move suffix items down to the end */
-	suffix_count = target_count - location;
-	for (i = suffix_count - 1; i >= 0; --i) {
-		(*target)[location + 1 + i] =
-				(*target)[location + i];
-	}
-	(*target)[location + suffix_count + 1] = NULL;
-
-	/* Strdup in the source */
-	(*target)[location] = strdup(source);
-
-	/* All done */
-	return SLURM_SUCCESS;
-}
diff --git a/src/plugins/slurmctld/dynalloc/argv.h b/src/plugins/slurmctld/dynalloc/argv.h
deleted file mode 100644
index 90335dfac..000000000
--- a/src/plugins/slurmctld/dynalloc/argv.h
+++ /dev/null
@@ -1,317 +0,0 @@
-/*****************************************************************************\
- *  argv.h -
- *****************************************************************************
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Written by Jimmy Cao <Jimmy.Cao@emc.com>, Ralph Castain <rhc@open-mpi.org>
- *  All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://slurm.schedmd.com/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#ifndef DYNALLOC_ARGV_H_
-#define DYNALLOC_ARGV_H_
-
-#if HAVE_CONFIG_H
-#  include "config.h"
-#  if HAVE_INTTYPES_H
-#    include <inttypes.h>
-#  else
-#    if HAVE_STDINT_H
-#      include <stdint.h>
-#    endif
-#  endif  /* HAVE_INTTYPES_H */
-#else   /* !HAVE_CONFIG_H */
-#  include <inttypes.h>
-#endif  /*  HAVE_CONFIG_H */
-
-#ifdef HAVE_SYS_TYPES_H
-#include <sys/types.h>
-#endif
-
-/**
- * Append a string (by value) to an new or existing NULL-terminated
- * argv array.
- *
- * @param argc Pointer to the length of the argv array.  Must not be
- * NULL.
- * @param argv Pointer to an argv array.
- * @param str Pointer to the string to append.
- *
- * @retval DYNALLOC_SUCCESS On success
- * @retval DYNALLOC_ERROR On failure
- *
- * This function adds a string to an argv array of strings by value;
- * it is permissable to pass a string on the stack as the str
- * argument to this function.
- *
- * To add the first entry to an argv array, call this function with
- * (*argv == NULL).  This function will allocate an array of length
- * 2; the first entry will point to a copy of the string passed in
- * arg, the second entry will be set to NULL.
- *
- * If (*argv != NULL), it will be realloc'ed to be 1 (char*) larger,
- * and the next-to-last entry will point to a copy of the string
- * passed in arg.  The last entry will be set to NULL.
- *
- * Just to reinforce what was stated above: the string is copied by
- * value into the argv array; there is no need to keep the original
- * string (i.e., the arg parameter) after invoking this function.
- */
-extern int argv_append(int *argc, char ***argv, const char *arg);
-
-/**
- * Append to an argv-style array, but ignore the size of the array.
- *
- * @param argv Pointer to an argv array.
- * @param str Pointer to the string to append.
- *
- * @retval DYNALLOC_SUCCESS On success
- * @retval DYNALLOC_ERROR On failure
- *
- * This function is identical to the argv_append() function
- * except that it does not take a pointer to an argc (integer
- * representing the size of the array).  This is handy for
- * argv-style arrays that do not have integers that are actively
- * maintaing their sizes.
- */
-extern  int argv_append_nosize(char ***argv, const char *arg);
-
-/**
- * Insert the provided arg at the beginning of the array
- *
- * @param argv Pointer to an argv array
- * @param str Pointer to the string to prepend
- *
- * @retval DYNALLOC_SUCCESS On success
- * @retval DYNALLOC_ERROR On failure
- */
-extern int argv_prepend_nosize(char ***argv, const char *arg);
-
-/**
- * Append to an argv-style array, but only if the provided argument
- * doesn't already exist somewhere in the array. Ignore the size of the array.
- *
- * @param argv Pointer to an argv array.
- * @param str Pointer to the string to append.
- * @param bool Whether or not to overwrite a matching value if found
- *
- * @retval DYNALLOC_SUCCESS On success
- * @retval DYNALLOC_ERROR On failure
- *
- * This function is identical to the argv_append_nosize() function
- * except that it only appends the provided argument if it does not already
- * exist in the provided array, or overwrites it if it is.
- */
-extern  int argv_append_unique_nosize(char ***argv, const char *arg,
-				      bool overwrite);
-
-/**
- * Free a NULL-terminated argv array.
- *
- * @param argv Argv array to free.
- *
- * This function frees an argv array and all of the strings that it
- * contains.  Since the argv parameter is passed by value, it is not
- * set to NULL in the caller's scope upon return.
- *
- * It is safe to invoke this function with a NULL pointer.  It is
- * not safe to invoke this function with a non-NULL-terminated argv
- * array.
- */
-extern  void argv_free(char **argv);
-
-/**
- * Split a string into a NULL-terminated argv array. Do not include empty
- * strings in result array.
- *
- * @param src_string Input string.
- * @param delimiter Delimiter character.
- *
- * @retval argv pointer to new argv array on success
- * @retval NULL on error
- *
- * All strings are insertted into the argv array by value; the
- * newly-allocated array makes no references to the src_string
- * argument (i.e., it can be freed after calling this function
- * without invalidating the output argv).
- */
-extern  char **argv_split(const char *src_string, int delimiter);
-
-/**
- * Split a string into a NULL-terminated argv array. Include empty
- * strings in result array.
- *
- * @param src_string Input string.
- * @param delimiter Delimiter character.
- *
- * @retval argv pointer to new argv array on success
- * @retval NULL on error
- *
- * All strings are insertted into the argv array by value; the
- * newly-allocated array makes no references to the src_string
- * argument (i.e., it can be freed after calling this function
- * without invalidating the output argv).
- */
-extern  char **argv_split_with_empty(const char *src_string, int delimiter) ;
-
-/**
- * Return the length of a NULL-terminated argv array.
- *
- * @param argv The input argv array.
- *
- * @retval 0 If NULL is passed as argv.
- * @retval count Number of entries in the argv array.
- *
- * The argv array must be NULL-terminated.
- */
-extern  int argv_count(char **argv);
-
-/**
- * Join all the elements of an argv array into a single
- * newly-allocated string.
- *
- * @param argv The input argv array.
- * @param delimiter Delimiter character placed between each argv string.
- *
- * @retval new_string Output string on success.
- * @retval NULL On failure.
- *
- * Similar to the Perl join function, this function takes an input
- * argv and joins them into into a single string separated by the
- * delimiter character.
- *
- * It is the callers responsibility to free the returned string.
- */
-extern  char *argv_join(char **argv, int delimiter) ;
-
-extern char *argv_join_range(char **argv, size_t start, size_t end,
-			     int delimiter);
-
-/**
- * Return the number of bytes consumed by an argv array.
- *
- * @param argv The input argv array.
- *
- * Count the number of bytes consumed by a NULL-terminated argv
- * array.  This includes the number of bytes used by each of the
- * strings as well as the pointers used in the argv array.
- */
-extern  size_t argv_len(char **argv);
-
-/**
- * Copy a NULL-terminated argv array.
- *
- * @param argv The input argv array.
- *
- * @retval argv Copied argv array on success.
- * @retval NULL On failure.
- *
- * Copy an argv array, including copying all off its strings.
- * Specifically, the output argv will be an array of the same length
- * as the input argv, and strcmp(argv_in[i], argv_out[i]) will be 0.
- */
-extern char **argv_copy(char **argv);
-
-/**
- * Delete one or more tokens from the middle of an argv.
- *
- * @param argv The argv to delete from
- * @param start The index of the first token to delete
- * @param num_to_delete How many tokens to delete
- *
- * @retval DYNALLOC_SUCCESS Always
- *
- * Delete some tokens from within an existing argv.  The start
- * parameter specifies the first token to delete, and will delete
- * (num_to_delete-1) tokens following it.  argv will be realloc()ed
- * to *argc - num_deleted size.
- *
- * If start is beyond the end of the argv array, this function is
- * a no-op.
- *
- * If num_to_delete runs beyond the end of the argv array, this
- * function will delete all tokens starting with start to the end
- * of the array.
- *
- * All deleted items in the argv array will have their contents
- * free()ed (it is assumed that the argv "owns" the memory that
- * the pointer points to).
- */
-extern  int argv_delete(int *argc, char ***argv,
-                                    int start, int num_to_delete);
-
-/**
- * Insert one argv array into the middle of another
- *
- * @param target The argv to insert tokens into
- * @param start Index where the first token will be placed in target
- * @param source The argv to copy tokens from
- *
- * @retval DYNALLOC_SUCCESS upon success
- * @retval DYNALLOC_BAD_PARAM if any parameters are non-sensical
- *
- * This function takes one arg and inserts it in the middle of
- * another.  The first token in source will be insertted at index
- * start in the target argv; all other tokens will follow it.
- * Similar to argv_append(), the target may be realloc()'ed
- * to accomodate the new storage requirements.
- *
- * The source array is left unaffected -- its contents are copied
- * by value over to the target array (i.e., the strings that
- * source points to are strdup'ed into the new locations in
- * target).
- */
-extern  int argv_insert(char ***target, int start, char **source);
-
-/**
- * Insert one argv element in front of a specific position in an array
- *
- * @param target The argv to insert tokens into
- * @param location Index where the token will be placed in target
- * @param source The token to be inserted
- *
- * @retval DYNALLOC_SUCCESS upon success
- * @retval DYNALLOC_BAD_PARAM if any parameters are non-sensical
- *
- * This function takes one arg and inserts it in the middle of
- * another.  The token will be inserted at the specified index
- * in the target argv; all other tokens will be shifted down.
- * Similar to argv_append(), the target may be realloc()'ed
- * to accomodate the new storage requirements.
- *
- * The source token is left unaffected -- its contents are copied
- * by value over to the target array (i.e., the string that
- * source points to is strdup'ed into the new location in
- * target).
- */
-extern  int argv_insert_element(char ***target, int location, char *source);
-
-
-#endif /* DYNALLOC_ARGV_H_ */
diff --git a/src/plugins/slurmctld/dynalloc/deallocate.c b/src/plugins/slurmctld/dynalloc/deallocate.c
deleted file mode 100644
index 6da065179..000000000
--- a/src/plugins/slurmctld/dynalloc/deallocate.c
+++ /dev/null
@@ -1,167 +0,0 @@
-/*****************************************************************************\
- *  deallocate.c  - complete job resource allocation
- *****************************************************************************
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Written by Jimmy Cao <Jimmy.Cao@emc.com>, Ralph Castain <rhc@open-mpi.org>
- *  All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://slurm.schedmd.com/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "slurm/slurm.h"
-#include "slurm/slurm_errno.h"
-
-#include "src/common/log.h"
-#include "src/slurmctld/locks.h"
-#include "src/slurmctld/port_mgr.h"
-#include "src/slurmctld/state_save.h"
-
-#include "deallocate.h"
-#include "argv.h"
-#include "constants.h"
-#include "job_ports_list.h"
-
-
-/**
- * deallocate the resources for slurm jobs.
- *
- * the deallocate msg can be like "deallocate slurm_jobid=123
- * job_return_code=0:slurm_jobid=124 job_return_code=0"
- *
- * IN:
- *	msg: the deallocate msg
- *
- */
-extern void deallocate(const char *msg)
-{
-	char **jobid_argv = NULL, **tmp_jobid_argv;
-	char *pos = NULL;
-	/* params to complete a job allocation */
-	uint32_t slurm_jobid;
-	uid_t uid = 0;
-	bool job_requeue = false;
-	bool node_fail = false;
-	uint32_t job_return_code = NO_VAL;
-	int  rc = SLURM_SUCCESS;
-	/* Locks: Write job, write node */
-	slurmctld_lock_t job_write_lock = {
-		NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK
-	};
-
-	jobid_argv = argv_split(msg, ':');
-	/* jobid_argv will be freed */
-	tmp_jobid_argv = jobid_argv;
-
-	while (*tmp_jobid_argv) {
-		/* to identify the slurm_job */
-		if (NULL != (pos = strstr(*tmp_jobid_argv, "slurm_jobid="))) {
-			pos = pos + strlen("slurm_jobid=");  /* step over */
-			sscanf(pos, "%u", &slurm_jobid);
-		}
-
-		if (NULL != (pos = strstr(*tmp_jobid_argv,"job_return_code="))){
-			pos = pos + strlen("job_return_code=");  /* step over*/
-			sscanf(pos, "%u", &job_return_code);
-		}
-
-		lock_slurmctld(job_write_lock);
-		rc = job_complete(slurm_jobid, uid, job_requeue,
-				  node_fail, job_return_code);
-		unlock_slurmctld(job_write_lock);
-
-		/* return result */
-		if (rc) {
-			info("deallocate JobId=%u: %s ",
-			     slurm_jobid, slurm_strerror(rc));
-		} else {
-			debug2("deallocate JobId=%u ", slurm_jobid);
-			(void) schedule_job_save();	/* Has own locking */
-			(void) schedule_node_save();	/* Has own locking */
-		}
-
-		/* deallocate port */
-		deallocate_port(slurm_jobid);
-
-		/*step to the next */
-		tmp_jobid_argv++;
-	}
-	/* free app_argv */
-	argv_free(jobid_argv);
-}
-
-/**
- * deallocate the ports for a slurm job.
- *
- * deallocate the ports and remove the entry from List.
- *
- * IN:
- *	slurm_jobid: slurm jobid
- *
- */
-extern void deallocate_port(uint32_t slurm_jobid)
-{
-	job_ports_t *item = NULL;
-	ListIterator it = NULL;
-	struct job_record *job_ptr = NULL;
-	struct step_record step;
-
-	if (NULL == job_ports_list)
-		return;
-
-	it = list_iterator_create(job_ports_list);
-	item = (job_ports_t *) list_find(it, find_job_ports_item_func,
-					 &slurm_jobid);
-	if (NULL == item) {
-		info ("slurm_jobid = %u not found in List.", slurm_jobid);
-		return;
-	}
-
-	job_ptr = find_job_record(slurm_jobid);
-	step.job_ptr = job_ptr;
-	step.step_node_bitmap = job_ptr->node_bitmap;
-	step.step_id = 0;
-	step.resv_port_cnt = item->port_cnt;
-	step.resv_ports =item->resv_ports;
-	step.resv_port_array = xmalloc(sizeof(int) * step.resv_port_cnt);
-	memcpy(step.resv_port_array, item->port_array,
-					sizeof(int) * step.resv_port_cnt);
-	/* call resv_port_free in port_mgr.c */
-	resv_port_free(&step);
-
-	/* delete the item from list and automatically
-	 * call 'free_job_ports_item_func' */
-	list_delete_item (it);
-	/* destroy iterator */
-	list_iterator_destroy(it);
-}
diff --git a/src/plugins/slurmctld/dynalloc/info.c b/src/plugins/slurmctld/dynalloc/info.c
deleted file mode 100644
index b79027df3..000000000
--- a/src/plugins/slurmctld/dynalloc/info.c
+++ /dev/null
@@ -1,263 +0,0 @@
-/*****************************************************************************\
- *  info.c - get nodes information in slurm
- *****************************************************************************
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Written by Jimmy Cao <Jimmy.Cao@emc.com>, Ralph Castain <rhc@open-mpi.org>
- *  All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://slurm.schedmd.com/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#define _GNU_SOURCE
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "slurm/slurm.h"
-#include "slurm/slurm_errno.h"
-#include "src/common/log.h"
-#include "src/common/node_conf.h"
-#include "src/common/slurm_protocol_api.h"
-#include "src/common/xmalloc.h"
-
-#include "src/slurmctld/locks.h"
-
-#include "info.h"
-
-static uint16_t fast_schedule = (uint16_t) NO_VAL;
-
-/**
- *	get total number of nodes and slots in slurm.
- *
- *	IN:
- *	OUT Parameter:
- *		nodes: number of nodes in slurm
- *		slots: number of slots in slurm
- */
-void get_total_nodes_slots (uint16_t *nodes, uint16_t *slots)
-{
-	int i;
-	struct node_record *node_ptr;
-	/* Locks: Read node */
-	slurmctld_lock_t node_read_lock = {
-					NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
-
-	if (fast_schedule == (uint16_t) NO_VAL)
-		fast_schedule = slurm_get_fast_schedule();
-
-	*slots = 0;
-	lock_slurmctld(node_read_lock);
-	*nodes = node_record_count;
-	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
-	     i++, node_ptr++) {
-		if (fast_schedule == 2)
-			(*slots) += node_ptr->config_ptr->cpus;
-		else
-			(*slots) += node_ptr->cpus;
-	}
-	unlock_slurmctld(node_read_lock);
-}
-
-/**
- *	get number of available nodes and slots in slurm.
- *
- *	IN:
- *	OUT Parameter:
- *		nodes: number of available nodes in slurm
- *		slots: number of available slots in slurm
- */
-void get_free_nodes_slots (uint16_t *nodes, uint16_t *slots)
-{
-	int i;
-	struct node_record *node_ptr;
-	/* Locks: Read node */
-	slurmctld_lock_t node_read_lock = {
-					NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
-
-	if (fast_schedule == (uint16_t) NO_VAL)
-		fast_schedule = slurm_get_fast_schedule();
-
-	*nodes = 0;
-	*slots = 0;
-	lock_slurmctld(node_read_lock);
-	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
-	     i++, node_ptr++) {
-		if (IS_NODE_IDLE(node_ptr)) {
-			(*nodes) ++;
-			if (fast_schedule == 2)
-				(*slots) += node_ptr->config_ptr->cpus;
-			else
-				(*slots) += node_ptr->cpus;
-		}
-	}
-	unlock_slurmctld(node_read_lock);
-}
-
-/**
- *	get available node list in slurm.
- *
- *	IN:
- *	OUT Parameter:
- *	RET OUT:
- *		hostlist_t: available node list in slurm
- *
- *	Note: the return result should be slurm_hostlist_destroy(hostlist)
- */
-hostlist_t get_available_host_list_system_m(void)
-{
-	int i;
-	struct node_record *node_ptr;
-	hostlist_t hostlist = NULL;
-
-	/* Locks: Read node */
-	slurmctld_lock_t node_read_lock = {
-					NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
-
-	hostlist = slurm_hostlist_create(NULL);
-	lock_slurmctld(node_read_lock);
-	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
-	     i++, node_ptr++) {
-		if (IS_NODE_IDLE(node_ptr)) {
-			 slurm_hostlist_push_host(hostlist, node_ptr->name);
-		}
-	}
-	unlock_slurmctld(node_read_lock);
-
-	return hostlist;
-}
-
-/**
- *	get the range of available node list in slurm.
- *
- *	IN:
- *	OUT Parameter:
- *	RET OUT:
- *		a string indicating the range of available node list in slurm
- *
- *	Note: the return result should be free(str)
- */
-char* get_available_host_list_range_sytem_m(void)
-{
-	hostlist_t hostlist = NULL;
-	char *range = NULL;
-
-	hostlist = get_available_host_list_system_m();
-	range = slurm_hostlist_ranged_string_malloc (hostlist);
-	slurm_hostlist_destroy(hostlist);
-	return range;
-}
-
-/**
- *	get available node list within a given node list range
- *
- *	IN:
- *		node_list: the given node list range
- *	OUT Parameter:
- *	RET OUT
- *		available node list
- *
- * Note: the return result should be slurm_hostlist_destroy(hostlist)
- */
-hostlist_t choose_available_from_node_list_m(const char *node_list)
-{
-	char *hostname = NULL;
-	hostlist_t given_hl = NULL;
-	hostlist_t avail_hl_system = NULL;
-	hostlist_t result_hl = NULL;
-
-	given_hl = slurm_hostlist_create (node_list);
-	avail_hl_system  = get_available_host_list_system_m();
-	result_hl = slurm_hostlist_create(NULL);
-
-	while ((hostname = slurm_hostlist_shift(given_hl))) {
-		if (-1 != slurm_hostlist_find (avail_hl_system, hostname)) {
-			slurm_hostlist_push_host(result_hl, hostname);
-		}
-		/* Note: to free memory after slurm_hostlist_shift(),
-		 * 	remember to use free(str), not xfree(str)
-		 */
-		free(hostname);
-	}
-
-	slurm_hostlist_destroy(given_hl);
-	slurm_hostlist_destroy(avail_hl_system);
-	return result_hl;
-}
-
-/**
- *	get a subset node range with node_num nodes from a host_name_list
- *
- *	IN:
- *		host_name_list: the given host_name_list
- *		node_num: the number of host to choose
- *	OUT Parameter:
- *	RET OUT
- *		the subset node range, NULL if the node number of subset is
- *		larger than the node number in host_name_list
- *
- *	Note: the return should be free(str)
- */
-char* get_hostlist_subset_m(const char *host_name_list, uint16_t node_num)
-{
-	hostlist_t hostlist = NULL;
-	hostlist_t temp_hl = NULL;
-	int sum;
-	char *hostname = NULL;
-	char *range = NULL;
-	int i;
-
-	if(NULL == host_name_list)
-		return NULL;
-
-	hostlist = slurm_hostlist_create(host_name_list);
-	sum = slurm_hostlist_count(hostlist);
-
-	if (sum < node_num) {
-		error ("node_num > sum of host in hostlist");
-		slurm_hostlist_destroy(hostlist);
-		return NULL;
-	}
-
-	temp_hl = slurm_hostlist_create(NULL);
-
-	for (i = 0; i < node_num; i++) {
-		hostname = slurm_hostlist_shift(hostlist);
-		if (NULL != hostname) {
-			slurm_hostlist_push_host(temp_hl, hostname);
-			free(hostname);
-		}
-	}
-
-	range = slurm_hostlist_ranged_string_malloc(temp_hl);
-
-	slurm_hostlist_destroy(temp_hl);
-	slurm_hostlist_destroy(hostlist);
-	return range;
-}
diff --git a/src/plugins/slurmctld/dynalloc/info.h b/src/plugins/slurmctld/dynalloc/info.h
deleted file mode 100644
index e09b9aebb..000000000
--- a/src/plugins/slurmctld/dynalloc/info.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/*****************************************************************************\
- *  info.h - get nodes information in slurm
- *****************************************************************************
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Written by Jimmy Cao <Jimmy.Cao@emc.com>, Ralph Castain <rhc@open-mpi.org>
- *  All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://slurm.schedmd.com/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#ifndef DYNALLOC_INFO_H_
-#define DYNALLOC_INFO_H_
-
-#if HAVE_CONFIG_H
-#  include "config.h"
-#  if HAVE_INTTYPES_H
-#    include <inttypes.h>
-#  else
-#    if HAVE_STDINT_H
-#      include <stdint.h>
-#    endif
-#  endif  /* HAVE_INTTYPES_H */
-#else   /* !HAVE_CONFIG_H */
-#  include <inttypes.h>
-#endif  /*  HAVE_CONFIG_H */
-
-#include "slurm/slurm.h"
-
-/**
- *	get total number of nodes and slots in slurm.
- *
- *	IN:
- *	OUT Parameter:
- *		nodes: number of nodes in slurm
- *		slots: number of slots in slurm
- */
-extern void get_total_nodes_slots(uint16_t *nodes, uint16_t *slots);
-
-/**
- *	get number of available nodes and slots in slurm.
- *
- *	IN:
- *	OUT Parameter:
- *		nodes: number of available nodes in slurm
- *		slots: number of available slots in slurm
- */
-extern void get_free_nodes_slots(uint16_t *nodes, uint16_t *slots);
-
-/**
- *	get available node list in slurm.
- *
- *	IN:
- *	OUT Parameter:
- *	RET OUT:
- *		hostlist_t: available node list in slurm
- *
- *	Note: the return result should be slurm_hostlist_destroy(hostlist)
- */
-extern hostlist_t get_available_host_list_system_m(void);
-
-/**
- *	get the range of available node list in slurm.
- *
- *	IN:
- *	OUT Parameter:
- *	RET OUT:
- *		a string indicating the range of available node list in slurm
- *
- *	Note: the return result should be free(str)
- */
-extern char* get_available_host_list_range_sytem_m(void);
-
-/**
- *	get available node list within a given node list range
- *
- *	IN:
- *		node_list: the given node list range
- *	OUT Parameter:
- *	RET OUT
- *		available node list
- *
- * Note: the return result should be slurm_hostlist_destroy(hostlist)
- */
-extern hostlist_t choose_available_from_node_list_m(const char *node_list);
-
-/**
- *	get a subset node range with node_num nodes from a host_name_list
- *
- *	IN:
- *		host_name_list: the given host_name_list
- *		node_num: the number of host to choose
- *	OUT Parameter:
- *	RET OUT
- *		the subset node range, NULL if the node number of subset is
- *		larger than the node number in host_name_list
- *
- *	Note: the return should be free(str)
- */
-extern char* get_hostlist_subset_m(const char *host_name_list, uint16_t node_num);
-
-#endif /* DYNALLOC_INFO_H_ */
diff --git a/src/plugins/slurmctld/dynalloc/job_ports_list.c b/src/plugins/slurmctld/dynalloc/job_ports_list.c
deleted file mode 100644
index 7fd2efafc..000000000
--- a/src/plugins/slurmctld/dynalloc/job_ports_list.c
+++ /dev/null
@@ -1,112 +0,0 @@
-/*****************************************************************************\
- *  job_ports_list.c - keep the pair of (slurm_jobid, resv_ports) for future release
- *****************************************************************************
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Written by Jimmy Cao <Jimmy.Cao@emc.com>, Ralph Castain <rhc@open-mpi.org>
- *  All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://slurm.schedmd.com/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-#include "src/common/xmalloc.h"
-#include "src/common/xstring.h"
-
-#include "job_ports_list.h"
-
-List job_ports_list = NULL;
-
-extern void append_job_ports_item(uint32_t slurm_jobid, uint16_t port_cnt,
-				  char *resv_ports, int *port_array)
-{
-	job_ports_t *item = NULL;
-
-	if (NULL == job_ports_list)
-		job_ports_list = list_create(free_job_ports_item_func);
-
-	item = xmalloc(sizeof(job_ports_t));
-	item->slurm_jobid = slurm_jobid;
-	item->port_cnt = port_cnt;
-	item->resv_ports = xstrdup(resv_ports);
-	item->port_array = xmalloc(sizeof(int) * port_cnt);
-	memcpy(item->port_array, port_array, sizeof(int)*port_cnt);
-	list_append (job_ports_list, item);
-}
-
-extern void free_job_ports_item_func(void *voiditem)
-{
-	job_ports_t *item = (job_ports_t *) voiditem;
-	if (item) {
-		xfree(item->resv_ports);
-		xfree(item->port_array);
-		xfree(item);
-	}
-}
-
-extern int find_job_ports_item_func(void *voiditem, void *key)
-{
-	job_ports_t *item = NULL;
-	uint32_t *jobid = NULL;
-
-	item = (job_ports_t *)voiditem;
-	jobid = (uint32_t *)key;
-
-	if (item->slurm_jobid == *jobid)
-		return 1;
-	else
-		return 0;
-}
-
-
-extern void print_list()
-{
-	int i, j;
-	ListIterator it = NULL;
-	job_ports_t *item = NULL;
-
-	info("count = %d", list_count (job_ports_list));
-
-	/* create iterator! */
-	it = list_iterator_create (job_ports_list);
-	/* list_next until NULL */
-	j = 0;
-	while ( NULL != (item = (job_ports_t*)list_next(it)) ) {
-		info("j = %d", j++);
-		info("item->slurm_jobid = %u", item->slurm_jobid);
-		info("item->port_cnt = %d", item->port_cnt);
-		info("item->resv_ports = %s", item->resv_ports);
-		for (i = 0; i < item->port_cnt; i++) {
-			info("item->port_array[i] = %d", item->port_array[i]);
-		}
-	}
-	list_iterator_destroy(it);
-}
diff --git a/src/plugins/slurmctld/dynalloc/msg.c b/src/plugins/slurmctld/dynalloc/msg.c
index 6c01cb092..6cabdd737 100644
--- a/src/plugins/slurmctld/dynalloc/msg.c
+++ b/src/plugins/slurmctld/dynalloc/msg.c
@@ -1,5 +1,5 @@
 /*****************************************************************************\
- *  msg.c - Message/communcation manager for dynalloc (resource dynamic
+ *  msg.c - Message/communication manager for dynalloc (resource dynamic
  *  allocation) plugin
  *****************************************************************************
  *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
diff --git a/src/plugins/slurmctld/nonstop/Makefile.in b/src/plugins/slurmctld/nonstop/Makefile.in
index f9aa0a5e0..671b97b92 100644
--- a/src/plugins/slurmctld/nonstop/Makefile.in
+++ b/src/plugins/slurmctld/nonstop/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -276,6 +279,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -325,8 +330,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -345,6 +354,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -388,6 +400,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -411,6 +424,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/slurmctld/nonstop/do_work.c b/src/plugins/slurmctld/nonstop/do_work.c
index 6a59c0ed3..42b55b1bc 100644
--- a/src/plugins/slurmctld/nonstop/do_work.c
+++ b/src/plugins/slurmctld/nonstop/do_work.c
@@ -470,10 +470,7 @@ extern void init_job_db(void)
 extern void term_job_db(void)
 {
 	pthread_mutex_lock(&job_fail_mutex);
-	if (job_fail_list) {
-		list_destroy(job_fail_list);
-		job_fail_list = NULL;
-	}
+	FREE_NULL_LIST(job_fail_list);
 	pthread_mutex_unlock(&job_fail_mutex);
 }
 
@@ -1753,7 +1750,7 @@ static void _send_event_callbacks(void)
 				      callback_jobid);
 				goto io_fini;
 			}
-			sent = _slurm_msg_sendto_timeout(fd,
+			sent = slurm_msg_sendto_timeout(fd,
 					(char *) &callback_flags,
 					sizeof(uint32_t), 0, 100000);
 			while ((slurm_shutdown_msg_conn(fd) < 0) &&
@@ -1787,7 +1784,7 @@ static void *_state_thread(void *no_data)
 
 	last_save_time = last_callback_time = time(NULL);
 	while (!thread_shutdown) {
-		sleep(1);
+		usleep(200000);
 
 		now = time(NULL);
 		if (difftime(now, last_callback_time) >= NONSTOP_EVENT_PERIOD) {
diff --git a/src/plugins/slurmctld/nonstop/msg.c b/src/plugins/slurmctld/nonstop/msg.c
index ea6a2865d..771c28564 100644
--- a/src/plugins/slurmctld/nonstop/msg.c
+++ b/src/plugins/slurmctld/nonstop/msg.c
@@ -341,7 +341,7 @@ static void *_msg_thread(void *no_data)
 			_proc_msg(new_fd, msg, cli_addr);
 			xfree(msg);
 		}
-		slurm_close_accepted_conn(new_fd);
+		slurm_close(new_fd);
 	}
 	debug("slurmctld/nonstop: message engine shutdown");
 	if (sock_fd > 0)
@@ -389,7 +389,7 @@ extern void term_msg_thread(void)
 		fd = slurm_open_stream(&addr, true);
 		if (fd != -1) {
 			/* we don't care if the open failed */
-			slurm_close_stream(fd);
+			slurm_close(fd);
 		}
 
 		debug2("waiting for slurmctld/nonstop thread to exit");
diff --git a/src/plugins/slurmctld/nonstop/nonstop.c b/src/plugins/slurmctld/nonstop/nonstop.c
index ce6b70511..9043f033f 100644
--- a/src/plugins/slurmctld/nonstop/nonstop.c
+++ b/src/plugins/slurmctld/nonstop/nonstop.c
@@ -62,12 +62,12 @@
  * only load authentication plugins if the plugin_type string has a prefix
  * of "auth/".
  *
- * plugin_version   - Specifies the version number of the plugin. This would
- * typically be the same for all plugins.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char	plugin_name[]	= "Slurmctld Fault Tolerance plugin";
 const char	plugin_type[]	= "slurmctld/nonstop";
-const uint32_t	plugin_version	= 100;
+const uint32_t	plugin_version	= SLURM_VERSION_NUMBER;
 
 extern int init(void)
 {
diff --git a/src/plugins/slurmd/Makefile.in b/src/plugins/slurmd/Makefile.in
index 6caff2463..ff0489771 100644
--- a/src/plugins/slurmd/Makefile.in
+++ b/src/plugins/slurmd/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/switch/Makefile.in b/src/plugins/switch/Makefile.in
index ac9ec9196..c1b61188c 100644
--- a/src/plugins/switch/Makefile.in
+++ b/src/plugins/switch/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/switch/cray/Makefile.in b/src/plugins/switch/cray/Makefile.in
index 7bd75c662..b038b47e0 100644
--- a/src/plugins/switch/cray/Makefile.in
+++ b/src/plugins/switch/cray/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -276,6 +279,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -325,8 +330,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -345,6 +354,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -388,6 +400,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -411,6 +424,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/switch/cray/switch_cray.c b/src/plugins/switch/cray/switch_cray.c
index bfe74055f..65df561f0 100644
--- a/src/plugins/switch/cray/switch_cray.c
+++ b/src/plugins/switch/cray/switch_cray.c
@@ -91,15 +91,12 @@ uint64_t debug_flags = 0;
  * of how this plugin satisfies that application.  SLURM will only load
  * a switch plugin if the plugin_type string has a prefix of "switch/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[] = "switch CRAY plugin";
 const char plugin_type[] = "switch/cray";
-const uint32_t plugin_version = 100;
+const uint32_t plugin_version = SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
diff --git a/src/plugins/switch/generic/Makefile.in b/src/plugins/switch/generic/Makefile.in
index a0ab1c5dd..0d87797e7 100644
--- a/src/plugins/switch/generic/Makefile.in
+++ b/src/plugins/switch/generic/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/switch/generic/switch_generic.c b/src/plugins/switch/generic/switch_generic.c
index 486e1a192..c7b472c3b 100644
--- a/src/plugins/switch/generic/switch_generic.c
+++ b/src/plugins/switch/generic/switch_generic.c
@@ -120,15 +120,12 @@ typedef struct sw_gen_libstate {
  * of how this plugin satisfies that application.  SLURM will only load
  * a switch plugin if the plugin_type string has a prefix of "switch/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "switch generic plugin";
 const char plugin_type[]        = "switch/generic";
-const uint32_t plugin_version   = 110;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 uint64_t debug_flags = 0;
 pthread_mutex_t	global_lock = PTHREAD_MUTEX_INITIALIZER;
diff --git a/src/plugins/switch/none/Makefile.in b/src/plugins/switch/none/Makefile.in
index e38e340f7..8c45f7171 100644
--- a/src/plugins/switch/none/Makefile.in
+++ b/src/plugins/switch/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/switch/none/switch_none.c b/src/plugins/switch/none/switch_none.c
index 64c54180f..20defcc37 100644
--- a/src/plugins/switch/none/switch_none.c
+++ b/src/plugins/switch/none/switch_none.c
@@ -68,15 +68,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a switch plugin if the plugin_type string has a prefix of "switch/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "switch NONE plugin";
 const char plugin_type[]        = "switch/none";
-const uint32_t plugin_version   = 110;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
diff --git a/src/plugins/switch/nrt/Makefile.in b/src/plugins/switch/nrt/Makefile.in
index 57fa2a413..d557789ad 100644
--- a/src/plugins/switch/nrt/Makefile.in
+++ b/src/plugins/switch/nrt/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -321,6 +324,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -370,8 +375,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -390,6 +399,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -433,6 +445,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -456,6 +469,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/switch/nrt/libpermapi/Makefile.in b/src/plugins/switch/nrt/libpermapi/Makefile.in
index 719ef274d..4cd25f9d2 100644
--- a/src/plugins/switch/nrt/libpermapi/Makefile.in
+++ b/src/plugins/switch/nrt/libpermapi/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/switch/nrt/libpermapi/shr_64.c b/src/plugins/switch/nrt/libpermapi/shr_64.c
index c45f33170..c8cd4d611 100644
--- a/src/plugins/switch/nrt/libpermapi/shr_64.c
+++ b/src/plugins/switch/nrt/libpermapi/shr_64.c
@@ -348,7 +348,7 @@ static void _agent_proc_connect(slurm_fd_t fe_comm_socket,uint32_t fe_auth_key)
 	}
 
 fini:	if (fe_comm_conn >= 0)
-		slurm_close_accepted_conn(fe_comm_conn);
+		slurm_close(fe_comm_conn);
 	if (buffer)
 		free_buf(buffer);
 }
@@ -1915,7 +1915,8 @@ int pe_rm_submit_job(rmhandle_t resource_mgr, job_command_t job_cmd,
 		 * So we need to set up the arbitrary distribution of it. */
 		int hostfile_count = 0;
 		char **names = pe_job_req->host_names;
-		opt.distribution = SLURM_DIST_ARBITRARY;
+		opt.distribution &= SLURM_DIST_STATE_FLAGS;
+		opt.distribution |= SLURM_DIST_ARBITRARY;
 		while (names && *names) {
 			if (opt.nodelist)
 				xstrfmtcat(opt.nodelist, ",%s",
diff --git a/src/plugins/switch/nrt/nrt.c b/src/plugins/switch/nrt/nrt.c
index c0fb737c3..7daffc7f4 100644
--- a/src/plugins/switch/nrt/nrt.c
+++ b/src/plugins/switch/nrt/nrt.c
@@ -4066,12 +4066,9 @@ _unpack_libstate(slurm_nrt_libstate_t *lp, Buf buffer)
 	/* Validate state version */
 	safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer);
 	debug3("Version string in job_state header is %s", ver_str);
-	if (ver_str) {
-		if (!strcmp(ver_str, NRT_STATE_VERSION))
-			safe_unpack16(&protocol_version, buffer);
-		else
-			protocol_version = SLURM_2_6_PROTOCOL_VERSION;
-	}
+	if (ver_str && !strcmp(ver_str, NRT_STATE_VERSION))
+		safe_unpack16(&protocol_version, buffer);
+
 	if (protocol_version == (uint16_t) NO_VAL) {
 		error("******************************************************");
 		error("Can not recover switch/nrt state, incompatible version");
diff --git a/src/plugins/switch/nrt/switch_nrt.c b/src/plugins/switch/nrt/switch_nrt.c
index 099be9a88..303634d85 100644
--- a/src/plugins/switch/nrt/switch_nrt.c
+++ b/src/plugins/switch/nrt/switch_nrt.c
@@ -122,15 +122,12 @@ static slurm_errtab_t slurm_errtab[] = {
  * of how this plugin satisfies that application.  SLURM will only load
  * a switch plugin if the plugin_type string has a prefix of "switch/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "switch NRT plugin";
 const char plugin_type[]        = "switch/nrt";
-const uint32_t plugin_version   = 110;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 uint64_t debug_flags = 0;
 
diff --git a/src/plugins/task/Makefile.in b/src/plugins/task/Makefile.in
index 00f5e5d17..d7f4bc059 100644
--- a/src/plugins/task/Makefile.in
+++ b/src/plugins/task/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/task/affinity/Makefile.in b/src/plugins/task/affinity/Makefile.in
index 619bc000c..32a9c92ad 100644
--- a/src/plugins/task/affinity/Makefile.in
+++ b/src/plugins/task/affinity/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -285,6 +288,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -334,8 +339,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -354,6 +363,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -397,6 +409,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -420,6 +433,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/task/affinity/affinity.h b/src/plugins/task/affinity/affinity.h
index 5e8c7adff..d8b44da48 100644
--- a/src/plugins/task/affinity/affinity.h
+++ b/src/plugins/task/affinity/affinity.h
@@ -125,5 +125,6 @@ uint16_t slurm_get_numa_node(uint16_t cpuid);
 /*** from schedutils.c ***/
 int	char_to_val(int c);
 int	str_to_cpuset(cpu_set_t *mask, const char* str);
+int	str_to_cnt(const char* str);
 char *	cpuset_to_str(const cpu_set_t *mask, char *str);
 int	val_to_char(int v);
diff --git a/src/plugins/task/affinity/cpuset.c b/src/plugins/task/affinity/cpuset.c
index c40cfd2e1..470fc78b6 100644
--- a/src/plugins/task/affinity/cpuset.c
+++ b/src/plugins/task/affinity/cpuset.c
@@ -57,6 +57,30 @@ static void _cpuset_to_cpustr(const cpu_set_t *mask, char *str)
 	}
 }
 
+static void _cpuset_to_memsstr(const cpu_set_t *mask, char *str,
+			       int cpu_cnt, int mem_cnt)
+{
+	int cpu_per_mem, i, n, nlast = -1;
+	char tmp[16];
+
+	/* Count of CPUs per memory locality */
+	cpu_per_mem = (cpu_cnt + mem_cnt - 1) / mem_cnt;
+
+	str[0] = '\0';
+	for (i = 0; i < CPU_SETSIZE; i++) {
+		if (!CPU_ISSET(i, mask))
+			continue;
+		n = i / cpu_per_mem;
+		if (nlast != n) {
+			snprintf(tmp, sizeof(tmp), "%d", n);
+			nlast = n;
+			if (str[0])
+				strcat(str, ",");
+			strcat(str, tmp);
+		}
+	}
+}
+
 int	slurm_build_cpuset(char *base, char *path, uid_t uid, gid_t gid)
 {
 	char file_path[PATH_MAX], mstr[16];
@@ -166,6 +190,7 @@ int	slurm_set_cpuset(char *base, char *path, pid_t pid, size_t size,
 			 const cpu_set_t *mask)
 {
 	int fd, rc;
+	int cpu_cnt = 0, mem_cnt = 0;
 	char file_path[PATH_MAX];
 	char mstr[1 + CPU_SETSIZE * 4];
 
@@ -174,6 +199,17 @@ int	slurm_set_cpuset(char *base, char *path, pid_t pid, size_t size,
 		return SLURM_ERROR;
 	}
 
+	/* Read "cpus" contents from parent directory for CPU count */
+	snprintf(file_path, sizeof(file_path), "%s/%scpus",
+		 base, cpuset_prefix);
+	fd = open(file_path, O_RDONLY);
+	if (fd >= 0) {
+		rc = read(fd, mstr, sizeof(mstr));
+		close(fd);
+		if (rc > 0)
+			cpu_cnt = str_to_cnt(mstr);
+	}
+
 	/* Set "cpus" per user request */
 	snprintf(file_path, sizeof(file_path), "%s/%scpus",
 		 path, cpuset_prefix);
@@ -204,6 +240,10 @@ int	slurm_set_cpuset(char *base, char *path, pid_t pid, size_t size,
 			error("read(%s): %m", file_path);
 			return SLURM_ERROR;
 		}
+		if (rc > 0)
+			mem_cnt = str_to_cnt(mstr);
+		if ((cpu_cnt > 1) && (mem_cnt > 1))
+			 _cpuset_to_memsstr(mask, mstr, cpu_cnt, mem_cnt);
 		snprintf(file_path, sizeof(file_path), "%s/%smems",
 			 path, cpuset_prefix);
 		fd = open(file_path, O_CREAT | O_WRONLY, 0700);
diff --git a/src/plugins/task/affinity/dist_tasks.c b/src/plugins/task/affinity/dist_tasks.c
index 373c6376b..cd4e45c6c 100644
--- a/src/plugins/task/affinity/dist_tasks.c
+++ b/src/plugins/task/affinity/dist_tasks.c
@@ -425,8 +425,9 @@ void lllp_distribution(launch_tasks_request_msg_t *req, uint32_t node_id)
 	if (!(req->cpu_bind_type & bind_entity)) {
 		/* No bind unit (sockets, cores) specified by user,
 		 * pick something reasonable */
-		uint16_t task_plugin_param = slurm_get_task_plugin_param();
+		uint32_t task_plugin_param = slurm_get_task_plugin_param();
 		bool auto_def_set = false;
+		int spec_thread_cnt = 0;
 		int max_tasks = req->tasks_to_launch[(int)node_id] *
 			req->cpus_per_task;
 		char *avail_mask = _alloc_mask(req,
@@ -437,12 +438,21 @@ void lllp_distribution(launch_tasks_request_msg_t *req, uint32_t node_id)
 		      "nodes:%d sockets:%d:%d cores:%d:%d threads:%d",
 		      max_tasks, whole_nodes, whole_sockets ,part_sockets,
 		      whole_cores, part_cores, whole_threads);
-
-		if ((max_tasks == whole_sockets) && (part_sockets == 0)) {
+		if ((req->job_core_spec != (uint16_t) NO_VAL) &&
+		    (req->job_core_spec &  CORE_SPEC_THREAD)  &&
+		    (req->job_core_spec != CORE_SPEC_THREAD)) {
+			spec_thread_cnt = req->job_core_spec &
+					  (~CORE_SPEC_THREAD);
+		}
+		if (((max_tasks == whole_sockets) && (part_sockets == 0)) ||
+		    (spec_thread_cnt &&
+		     (max_tasks == (whole_sockets + part_sockets)))) {
 			req->cpu_bind_type |= CPU_BIND_TO_SOCKETS;
 			goto make_auto;
 		}
-		if ((max_tasks == whole_cores) && (part_cores == 0)) {
+		if (((max_tasks == whole_cores) && (part_cores == 0)) ||
+		    (spec_thread_cnt &&
+		     (max_tasks == (whole_cores + part_cores)))) {
 			req->cpu_bind_type |= CPU_BIND_TO_CORES;
 			goto make_auto;
 		}
@@ -455,6 +465,14 @@ void lllp_distribution(launch_tasks_request_msg_t *req, uint32_t node_id)
 			auto_def_set = true;
 			req->cpu_bind_type |= CPU_BIND_TO_THREADS;
 			goto make_auto;
+		} else if (task_plugin_param & CPU_AUTO_BIND_TO_CORES) {
+			auto_def_set = true;
+			req->cpu_bind_type |= CPU_BIND_TO_CORES;
+			goto make_auto;
+		} else if (task_plugin_param & CPU_AUTO_BIND_TO_SOCKETS) {
+			auto_def_set = true;
+			req->cpu_bind_type |= CPU_BIND_TO_SOCKETS;
+			goto make_auto;
 		}
 
 		if (avail_mask) {
@@ -481,7 +499,7 @@ void lllp_distribution(launch_tasks_request_msg_t *req, uint32_t node_id)
 		     req->job_id, buf_type, req->task_dist);
 	}
 
-	switch (req->task_dist) {
+	switch (req->task_dist & SLURM_DIST_STATE_BASE) {
 	case SLURM_DIST_BLOCK_BLOCK:
 	case SLURM_DIST_CYCLIC_BLOCK:
 	case SLURM_DIST_PLANE:
@@ -647,6 +665,26 @@ static char *_alloc_mask(launch_tasks_request_msg_t *req,
 		(*whole_node_cnt)++;
 	FREE_NULL_BITMAP(alloc_bitmap);
 
+	if ((req->job_core_spec != (uint16_t) NO_VAL) &&
+	    (req->job_core_spec &  CORE_SPEC_THREAD)  &&
+	    (req->job_core_spec != CORE_SPEC_THREAD)) {
+		int spec_thread_cnt;
+		spec_thread_cnt = req->job_core_spec & (~CORE_SPEC_THREAD);
+		for (t = threads - 1;
+		     ((t > 0) && (spec_thread_cnt > 0)); t--) {
+			for (c = cores - 1;
+			     ((c > 0) && (spec_thread_cnt > 0)); c--) {
+				for (s = sockets - 1;
+				     ((s >= 0) && (spec_thread_cnt > 0)); s--) {
+					i = s * cores + c;
+					i = (i * threads) + t;
+					bit_clear(alloc_mask, i);
+					spec_thread_cnt--;
+				}
+			}
+		}
+	}
+
 	/* translate abstract masks to actual hardware layout */
 	_lllp_map_abstract_masks(1, &alloc_mask);
 
@@ -680,6 +718,7 @@ static bitstr_t *_get_avail_map(launch_tasks_request_msg_t *req,
 	int job_node_id;
 	int start;
 	char *str;
+	int spec_thread_cnt = 0;
 
 	*hw_sockets = conf->sockets;
 	*hw_cores   = conf->cores;
@@ -739,6 +778,29 @@ static bitstr_t *_get_avail_map(launch_tasks_request_msg_t *req,
 		}
 	}
 
+	if ((req->job_core_spec != (uint16_t) NO_VAL) &&
+	    (req->job_core_spec &  CORE_SPEC_THREAD)  &&
+	    (req->job_core_spec != CORE_SPEC_THREAD)) {
+		spec_thread_cnt = req->job_core_spec & (~CORE_SPEC_THREAD);
+	}
+	if (spec_thread_cnt) {
+		/* Skip specialized threads as needed */
+		int i, t, c, s;
+		for (t = conf->threads - 1;
+		     ((t >= 0) && (spec_thread_cnt > 0)); t--) {
+			for (c = conf->cores - 1;
+			     ((c >= 0) && (spec_thread_cnt > 0)); c--) {
+				for (s = conf->sockets - 1;
+				     ((s >= 0) && (spec_thread_cnt > 0)); s--) {
+					i = s * conf->cores + c;
+					i = (i * conf->threads) + t;
+					bit_clear(hw_map, i);
+					spec_thread_cnt--;
+				}
+			}
+		}
+	}
+
 	str = (char *)bit_fmt_hexmask(hw_map);
 	debug3("task/affinity: job %u.%u CPU final mask for local node: %s",
 		req->job_id, req->job_step_id, str);
@@ -750,9 +812,9 @@ static bitstr_t *_get_avail_map(launch_tasks_request_msg_t *req,
 }
 
 /* helper function for _expand_masks() */
-static void _blot_mask(bitstr_t *mask, uint16_t blot)
+static void _blot_mask(bitstr_t *mask, bitstr_t *avail_map, uint16_t blot)
 {
-	uint16_t i, size = 0;
+	uint16_t i, j, size = 0;
 	int prev = -1;
 
 	if (!mask)
@@ -763,7 +825,10 @@ static void _blot_mask(bitstr_t *mask, uint16_t blot)
 			/* fill in this blot */
 			uint16_t start = (i / blot) * blot;
 			if (start != prev) {
-				bit_nset(mask, start, start+blot-1);
+				for (j = start; j < start + blot; j++) {
+					if (bit_test(avail_map, j))
+						bit_set(mask, j);
+				}
 				prev = start;
 			}
 		}
@@ -816,7 +881,7 @@ static void _expand_masks(uint16_t cpu_bind_type, const uint32_t maxtasks,
 		if (hw_threads < 2)
 			return;
 		for (i = 0; i < maxtasks; i++) {
-			_blot_mask(masks[i], hw_threads);
+			_blot_mask(masks[i], avail_map, hw_threads);
 		}
 		return;
 	}
@@ -951,9 +1016,11 @@ static int _task_layout_lllp_cyclic(launch_tasks_request_msg_t *req,
 			//info("setting %d %d", taskcount, bit);
 			bit_set(masks[taskcount], bit);
 
-			if (!already_switched
-			    && ((req->task_dist == SLURM_DIST_CYCLIC_CFULL) ||
-				(req->task_dist == SLURM_DIST_BLOCK_CFULL))) {
+			if (!already_switched &&
+			    (((req->task_dist & SLURM_DIST_STATE_BASE) ==
+			     SLURM_DIST_CYCLIC_CFULL) ||
+			    ((req->task_dist & SLURM_DIST_STATE_BASE) ==
+			     SLURM_DIST_BLOCK_CFULL))) {
 				/* This means we are laying out cpus
 				 * within a task cyclically as well. */
 				s = (s + 1) % hw_sockets;
diff --git a/src/plugins/task/affinity/schedutils.c b/src/plugins/task/affinity/schedutils.c
index 7f3a3b61a..37dec3552 100644
--- a/src/plugins/task/affinity/schedutils.c
+++ b/src/plugins/task/affinity/schedutils.c
@@ -119,6 +119,35 @@ int str_to_cpuset(cpu_set_t *mask, const char* str)
 	return 0;
 }
 
+int str_to_cnt(const char* str)
+{
+	int len = strlen(str);
+	const char *ptr = str + len - 1;
+	int cnt = 0;
+
+	/* skip 0x, it's all hex anyway */
+	if (len > 1 && !memcmp(str, "0x", 2L))
+		str += 2;
+
+	while (ptr >= str) {
+		char val = char_to_val(*ptr);
+		if (val == (char) -1)
+			return -1;
+		if (val & 1)
+			cnt++;
+		if (val & 2)
+			cnt++;
+		if (val & 4)
+			cnt++;
+		if (val & 8)
+			cnt++;
+		len--;
+		ptr--;
+	}
+
+	return cnt;
+}
+
 char * cpuset_to_str(const cpu_set_t *mask, char *str)
 {
 	int base;
diff --git a/src/plugins/task/affinity/task_affinity.c b/src/plugins/task/affinity/task_affinity.c
index c020e99eb..b5a4db787 100644
--- a/src/plugins/task/affinity/task_affinity.c
+++ b/src/plugins/task/affinity/task_affinity.c
@@ -77,15 +77,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "task affinity plugin";
 const char plugin_type[]        = "task/affinity";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
diff --git a/src/plugins/task/cgroup/Makefile.in b/src/plugins/task/cgroup/Makefile.in
index 48b46e647..8b46d7d93 100644
--- a/src/plugins/task/cgroup/Makefile.in
+++ b/src/plugins/task/cgroup/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/task/cgroup/task_cgroup.c b/src/plugins/task/cgroup/task_cgroup.c
index dc05e425c..2f4c10723 100644
--- a/src/plugins/task/cgroup/task_cgroup.c
+++ b/src/plugins/task/cgroup/task_cgroup.c
@@ -79,15 +79,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum versions for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "Tasks containment using linux cgroup";
 const char plugin_type[]        = "task/cgroup";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 static bool use_cpuset  = false;
 static bool use_memory  = false;
diff --git a/src/plugins/task/cgroup/task_cgroup_cpuset.c b/src/plugins/task/cgroup/task_cgroup_cpuset.c
index 0ffbf91ee..15f5956ef 100644
--- a/src/plugins/task/cgroup/task_cgroup_cpuset.c
+++ b/src/plugins/task/cgroup/task_cgroup_cpuset.c
@@ -3,8 +3,8 @@
  *****************************************************************************
  *  Copyright (C) 2009 CEA/DAM/DIF
  *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
- *  Portions copyright (C) 2012 Bull
- *  Written by Martin Perry <martin.perry@bull.com>
+ *  Portions copyright (C) 2012,2015 Bull/Atos
+ *  Written by Martin Perry <martin.perry@atos.net>
  *
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://slurm.schedmd.com/>.
@@ -46,16 +46,16 @@
 #include <sched.h>
 #include <sys/types.h>
 
-#include "slurm/slurm_errno.h"
-#include "slurm/slurm.h"
-#include "src/slurmd/slurmstepd/slurmstepd_job.h"
-#include "src/slurmd/slurmd/slurmd.h"
-
-#include "src/common/cpu_frequency.h"
-#include "src/common/slurm_resource_info.h"
+#include <slurm/slurm.h>
+#include <slurm/slurm_errno.h>
 #include "src/common/bitstring.h"
+#include "src/common/cpu_frequency.h"
 #include "src/common/proc_args.h"
+#include "src/common/slurm_resource_info.h"
 #include "src/common/xstring.h"
+#include "src/slurmd/common/xcpuinfo.h"
+#include "src/slurmd/slurmstepd/slurmstepd_job.h"
+#include "src/slurmd/slurmd/slurmd.h"
 
 #include "task_cgroup.h"
 
@@ -108,6 +108,9 @@ static inline int hwloc_bitmap_isequal(
 
 # endif
 
+hwloc_obj_type_t obj_types[3] = {HWLOC_OBJ_SOCKET, HWLOC_OBJ_CORE,
+			         HWLOC_OBJ_PU};
+
 static uint16_t bind_mode = CPU_BIND_NONE   | CPU_BIND_MASK   |
 			    CPU_BIND_RANK   | CPU_BIND_MAP    |
 			    CPU_BIND_LDMASK | CPU_BIND_LDRANK |
@@ -291,7 +294,10 @@ static int _xcgroup_cpuset_init(xcgroup_t* cg)
 	return XCGROUP_SUCCESS;
 }
 
-void slurm_chkaffinity(cpu_set_t *mask, stepd_step_rec_t *job, int statval)
+#ifdef HAVE_HWLOC
+
+static void
+_slurm_chkaffinity(cpu_set_t *mask, stepd_step_rec_t *job, int statval)
 {
 	char *bind_type, *action, *status, *units;
 	char mstr[1 + CPU_SETSIZE / 4];
@@ -357,7 +363,6 @@ void slurm_chkaffinity(cpu_set_t *mask, stepd_step_rec_t *job, int statval)
 			status);
 }
 
-#ifdef HAVE_HWLOC
 /*
  * Get sched cpuset for ldom
  *
@@ -602,14 +607,14 @@ static void _add_hwloc_cpuset(
 
 	/* if requested binding overlaps the granularity */
 	/* use the ancestor cpuset instead of the object one */
-	if (hwloc_compare_types(hwtype,req_hwtype) > 0) {
+	if (hwloc_compare_types(hwtype, req_hwtype) > 0) {
 
 		/* Get the parent object of req_hwtype or the */
 		/* one just above if not found (meaning of >0)*/
 		/* (useful for ldoms binding with !NUMA nodes)*/
 		pobj = obj->parent;
 		while (pobj != NULL &&
-		       hwloc_compare_types(pobj->type, req_hwtype) > 0)
+			hwloc_compare_types(pobj->type, req_hwtype) > 0)
 			pobj = pobj->parent;
 
 		if (pobj != NULL) {
@@ -626,8 +631,9 @@ static void _add_hwloc_cpuset(
 			hwloc_bitmap_or(cpuset, cpuset, obj->allowed_cpuset);
 		}
 
-	} else
+	} else {
 		hwloc_bitmap_or(cpuset, cpuset, obj->allowed_cpuset);
+	}
 }
 
 static int _task_cgroup_cpuset_dist_cyclic(
@@ -636,17 +642,49 @@ static int _task_cgroup_cpuset_dist_cyclic(
 	hwloc_bitmap_t cpuset)
 {
 	hwloc_obj_t obj;
-	uint32_t *obj_idx;
-	uint32_t i, j, sock_idx, sock_loop, ntskip, npdist, nsockets;
+	uint32_t  s_ix;		/* socket index */
+	uint32_t *c_ixc;	/* core index by socket (current taskid) */
+	uint32_t *c_ixn;	/* core index by socket (next taskid) */
+	uint32_t *t_ix;		/* thread index by core by socket */
+	uint32_t npus, ncores, nsockets;
 	uint32_t taskid = job->envtp->localid;
+	int spec_thread_cnt = 0;
+	bitstr_t *spec_threads = NULL;
+
+	uint32_t obj_idxs[3], nthreads, cps,
+		 tpc, i, j, sock_loop, ntskip, npdist;;
+	bool core_cyclic, core_fcyclic, sock_fcyclic, core_block;
 
-	if (bind_verbose)
-		info("task/cgroup: task[%u] using %s distribution "
-		     "(task_dist=%u)", taskid,
-		     format_task_dist_states(job->task_dist), job->task_dist);
 	nsockets = (uint32_t) hwloc_get_nbobjs_by_type(topology,
 						       HWLOC_OBJ_SOCKET);
-	obj_idx = xmalloc(nsockets * sizeof(uint32_t));
+	ncores = (uint32_t) hwloc_get_nbobjs_by_type(topology,
+						       HWLOC_OBJ_CORE);
+	nthreads = (uint32_t) hwloc_get_nbobjs_by_type(topology,
+						       HWLOC_OBJ_PU);
+	cps = ncores/nsockets;
+	tpc = nthreads/ncores;
+
+	sock_fcyclic = (job->task_dist & SLURM_DIST_SOCKMASK) ==
+		SLURM_DIST_SOCKCFULL ? true : false;
+	core_block = (job->task_dist & SLURM_DIST_COREMASK) ==
+		SLURM_DIST_COREBLOCK ? true : false;
+	core_cyclic = (job->task_dist & SLURM_DIST_COREMASK) ==
+		SLURM_DIST_CORECYCLIC ? true : false;
+	core_fcyclic = (job->task_dist & SLURM_DIST_COREMASK) ==
+		SLURM_DIST_CORECFULL ? true : false;
+
+	if (bind_verbose) {
+		info("task/cgroup: task[%u] using %s distribution "
+		     "(task_dist=0x%x)", taskid,
+		     format_task_dist_states(job->task_dist), job->task_dist);
+	}
+
+	npus = (uint32_t) hwloc_get_nbobjs_by_type(topology,
+						   HWLOC_OBJ_PU);
+
+	t_ix = xmalloc(ncores * sizeof(uint32_t));
+	c_ixc = xmalloc(nsockets * sizeof(uint32_t));
+	c_ixn = xmalloc(nsockets * sizeof(uint32_t));
 
 	if (hwloc_compare_types(hwtype, HWLOC_OBJ_CORE) >= 0) {
 		/* cores or threads granularity */
@@ -657,56 +695,129 @@ static int _task_cgroup_cpuset_dist_cyclic(
 		ntskip = taskid;
 		npdist = 1;
 	}
+	if ((job->job_core_spec != (uint16_t) NO_VAL) &&
+	    (job->job_core_spec &  CORE_SPEC_THREAD)  &&
+	    (job->job_core_spec != CORE_SPEC_THREAD)){
+		/* Skip specialized threads as needed */
+		int i, t, c, s;
+		int cores = ncores / nsockets;
+		int threads = npus / cores;
+		spec_thread_cnt = job->job_core_spec & (~CORE_SPEC_THREAD);
+		spec_threads = bit_alloc(npus);
+		for (t = threads - 1;
+		     ((t >= 0) && (spec_thread_cnt > 0)); t--) {
+			for (c = cores - 1;
+			     ((c >= 0) && (spec_thread_cnt > 0)); c--) {
+				for (s = nsockets - 1;
+				     ((s >= 0) && (spec_thread_cnt > 0)); s--) {
+					i = s * cores + c;
+					i = (i * threads) + t;
+					bit_set(spec_threads, i);
+					spec_thread_cnt--;
+				}
+			}
+		}
+		if (hwtype == HWLOC_OBJ_PU) {
+			for (i = 0; i <= ntskip && i < npus; i++) {
+				if (bit_test(spec_threads, i))
+					ntskip++;
+			};
+		}
+	}
 
 	/* skip objs for lower taskids, then add them to the
 	   current task cpuset. To prevent infinite loop, check
 	   that we do not loop more than npdist times around the available
 	   sockets, which is the worst scenario we should afford here. */
-	i = 0; j = 0;
-	sock_idx = 0;
-	sock_loop = 0;
-	while (i < ntskip + 1 && sock_loop < npdist + 1) {
+	i = j = s_ix = sock_loop = 0;
+	while (i < ntskip + 1 && (sock_loop/tpc) < npdist + 1) {
 		/* fill one or multiple sockets using block mode, unless
 		   otherwise stated in the job->task_dist field */
-		while ((sock_idx < nsockets) && (j < npdist)) {
+		while ((s_ix < nsockets) && (j < npdist)) {
+			if (c_ixc[s_ix] == cps)
+				c_ixc[s_ix] = 0;
 			obj = hwloc_get_obj_below_by_type(
-				topology, HWLOC_OBJ_SOCKET, sock_idx,
-				hwtype, obj_idx[sock_idx]);
+				topology, HWLOC_OBJ_SOCKET, s_ix,
+				hwtype, c_ixc[s_ix]);
 			if (obj != NULL) {
-				obj_idx[sock_idx]++;
-				j++;
-				if (i == ntskip)
-					_add_hwloc_cpuset(hwtype, req_hwtype,
-							  obj, taskid,
-							  bind_verbose, cpuset);
-				if ((j < npdist) &&
-				    ((job->task_dist ==
-				      SLURM_DIST_CYCLIC_CFULL) ||
-				     (job->task_dist ==
-				      SLURM_DIST_BLOCK_CFULL)))
-					sock_idx++;
-			} else {
-				sock_idx++;
-			}
+				if (hwloc_compare_types(hwtype, HWLOC_OBJ_PU)
+									>= 0) {
+					/* granularity is thread */
+					obj_idxs[0]=s_ix;
+					obj_idxs[1]=c_ixc[s_ix];
+					obj_idxs[2]=t_ix[(s_ix*cps)+c_ixc[s_ix]];
+					obj = hwloc_get_obj_below_array_by_type(
+						topology, 3, obj_types, obj_idxs);
+					if (obj != NULL) {
+						t_ix[(s_ix*cps)+c_ixc[s_ix]]++;
+						j++;
+						if (i == ntskip)
+							_add_hwloc_cpuset(hwtype,
+							req_hwtype, obj, taskid,
+							bind_verbose, cpuset);
+						if (j < npdist) {
+							if (core_cyclic) {
+								c_ixn[s_ix] =
+								c_ixc[s_ix] + 1;
+							} else if (core_fcyclic){
+								c_ixc[s_ix]++;
+								c_ixn[s_ix] =
+								c_ixc[s_ix];
+							}
+							if (sock_fcyclic)
+								s_ix++;
+						}
+					} else {
+						c_ixc[s_ix]++;
+						if (c_ixc[s_ix] == cps)
+							s_ix++;
+					}
+				} else {
+					/* granularity is core or larger */
+					c_ixc[s_ix]++;
+					j++;
+					if (i == ntskip)
+						_add_hwloc_cpuset(hwtype,
+							req_hwtype, obj, taskid,
+						  	bind_verbose, cpuset);
+					if ((j < npdist) && (sock_fcyclic))
+						s_ix++;
+				}
+			} else
+				s_ix++;
 		}
-		/* if it succeed, switch to the next task, starting
-		   with the next available socket, otherwise, loop back
-		   from the first socket trying to find available slots. */
+		/* if it succeeds, switch to the next task, starting
+		 * with the next available socket, otherwise, loop back
+		 * from the first socket trying to find available slots. */
 		if (j == npdist) {
-			i++; j = 0;
-			sock_idx++; // no validity check, handled by the while
+			i++;
+			j = 0;
+			if (!core_block)
+				c_ixn[s_ix] = c_ixc[s_ix] + 1;
+			memcpy(c_ixc, c_ixn, nsockets * sizeof(uint32_t));
+			s_ix++; // no validity check, handled by the while
 			sock_loop = 0;
 		} else {
 			sock_loop++;
-			sock_idx = 0;
+			s_ix = 0;
 		}
 	}
+	xfree(t_ix);
+	xfree(c_ixc);
+	xfree(c_ixn);
+
+	if (spec_threads) {
+		for (i = 0; i < npus; i++) {
+			if (bit_test(spec_threads, i)) {
+				hwloc_bitmap_clr(cpuset, i);
+			}
+		};
+		FREE_NULL_BITMAP(spec_threads);
+	}
 
-	xfree(obj_idx);
-
-	/* should never happened in normal scenario */
+	/* should never happen in normal scenario */
 	if (sock_loop > npdist) {
-		error("task/cgroup: task[%u] infinite loop broken while trying"
+		error("task/cgroup: task[%u] infinite loop broken while trying "
 		      "to provision compute elements using %s", taskid,
 		      format_task_dist_states(job->task_dist));
 		return XCGROUP_ERROR;
@@ -720,28 +831,141 @@ static int _task_cgroup_cpuset_dist_block(
 	stepd_step_rec_t *job, int bind_verbose, hwloc_bitmap_t cpuset)
 {
 	hwloc_obj_t obj;
-	uint32_t i, pfirst,plast;
+	uint32_t core_loop, ntskip, npdist;
+	uint32_t i, j, pfirst, plast;
 	uint32_t taskid = job->envtp->localid;
 	int hwdepth;
+	uint32_t npus, ncores, nsockets;
+	int spec_thread_cnt = 0;
+	bitstr_t *spec_threads = NULL;
+
+	uint32_t *thread_idx;
+	uint32_t core_idx;
+	bool core_fcyclic, core_block;
+
+	nsockets = (uint32_t) hwloc_get_nbobjs_by_type(topology,
+						       HWLOC_OBJ_SOCKET);
+	ncores = (uint32_t) hwloc_get_nbobjs_by_type(topology,
+						     HWLOC_OBJ_CORE);
+	npus = (uint32_t) hwloc_get_nbobjs_by_type(topology,
+						   HWLOC_OBJ_PU);
+
+	core_block = (job->task_dist & SLURM_DIST_COREMASK) ==
+		SLURM_DIST_COREBLOCK ? true : false;
+	core_fcyclic = (job->task_dist & SLURM_DIST_COREMASK) ==
+		SLURM_DIST_CORECFULL ? true : false;
 
-	if (bind_verbose)
+	thread_idx = xmalloc(ncores * sizeof(uint32_t));
+
+	if (bind_verbose) {
 		info("task/cgroup: task[%u] using block distribution, "
-		     "task_dist %u", taskid, job->task_dist);
-	if (hwloc_compare_types(hwtype,HWLOC_OBJ_CORE) >= 0) {
+		     "task_dist 0x%x", taskid, job->task_dist);
+	}
+
+	if ((hwloc_compare_types(hwtype, HWLOC_OBJ_PU) == 0) && !core_block) {
+		thread_idx = xmalloc(ncores * sizeof(uint32_t));
+		ntskip = taskid;
+		npdist = job->cpus_per_task;
+
+		i = 0; j = 0;
+		core_idx = 0;
+		core_loop = 0;
+		while (i < ntskip + 1 && core_loop < npdist + 1) {
+			while ((core_idx < ncores) && (j < npdist)) {
+				obj = hwloc_get_obj_below_by_type(
+					topology, HWLOC_OBJ_CORE, core_idx,
+					hwtype, thread_idx[core_idx]);
+				if (obj != NULL) {
+					thread_idx[core_idx]++;
+					j++;
+					if (i == ntskip)
+						_add_hwloc_cpuset(hwtype,
+							req_hwtype, obj, taskid,
+							bind_verbose, cpuset);
+					if ((j < npdist) && core_fcyclic)
+						core_idx++;
+				} else {
+					core_idx++;
+				}
+			}
+			if (j == npdist) {
+				i++; j = 0;
+				core_idx++; // no validity check, handled by the while
+				core_loop = 0;
+			} else {
+				core_loop++;
+				core_idx = 0;
+			}
+		}
+		xfree(thread_idx);
+
+		/* should never happen in normal scenario */
+		if (core_loop > npdist) {
+			error("task/cgroup: task[%u] infinite loop broken while "
+			      "trying to provision compute elements using %s",
+			      taskid, format_task_dist_states(job->task_dist));
+			return XCGROUP_ERROR;
+		} else
+			return XCGROUP_SUCCESS;
+	}
+
+	if (hwloc_compare_types(hwtype, HWLOC_OBJ_CORE) >= 0) {
 		/* cores or threads granularity */
-		pfirst = taskid *  job->cpus_per_task ;
+		pfirst = taskid * job->cpus_per_task ;
 		plast = pfirst + job->cpus_per_task - 1;
 	} else {
 		/* sockets or ldoms granularity */
 		pfirst = taskid;
 		plast = pfirst;
 	}
-	hwdepth = hwloc_get_type_depth(topology,hwtype);
+
+	hwdepth = hwloc_get_type_depth(topology, hwtype);
+	if ((job->job_core_spec != (uint16_t) NO_VAL) &&
+	    (job->job_core_spec &  CORE_SPEC_THREAD)  &&
+	    (job->job_core_spec != CORE_SPEC_THREAD)  &&
+	    (nsockets != 0)) {
+		/* Skip specialized threads as needed */
+		int i, t, c, s;
+		int cores = MAX(1, (ncores / nsockets));
+		int threads = npus / cores;
+		spec_thread_cnt = job->job_core_spec & (~CORE_SPEC_THREAD);
+		spec_threads = bit_alloc(npus);
+		for (t = threads - 1;
+		     ((t >= 0) && (spec_thread_cnt > 0)); t--) {
+			for (c = cores - 1;
+			     ((c >= 0) && (spec_thread_cnt > 0)); c--) {
+				for (s = nsockets - 1;
+				     ((s >= 0) && (spec_thread_cnt > 0)); s--) {
+					i = s * cores + c;
+					i = (i * threads) + t;
+					bit_set(spec_threads, i);
+					spec_thread_cnt--;
+				}
+			}
+		}
+		if (hwtype == HWLOC_OBJ_PU) {
+			for (i = 0; i <= pfirst && i < npus; i++) {
+				if (bit_test(spec_threads, i))
+					pfirst++;
+			};
+		}
+	}
+
 	for (i = pfirst; i <= plast && i < nobj ; i++) {
 		obj = hwloc_get_obj_by_depth(topology, hwdepth, (int)i);
 		_add_hwloc_cpuset(hwtype, req_hwtype, obj, taskid,
 			    bind_verbose, cpuset);
 	}
+
+	if (spec_threads) {
+		for (i = 0; i < npus; i++) {
+			if (bit_test(spec_threads, i)) {
+				hwloc_bitmap_clr(cpuset, i);
+			}
+		};
+		FREE_NULL_BITMAP(spec_threads);
+	}
+
 	return XCGROUP_SUCCESS;
 }
 
@@ -907,7 +1131,16 @@ again:
 				     "%s/step_batch", job_cgroup_path)
 			    >= PATH_MAX) {
 				error("task/cgroup: unable to build job step"
-				      " %u.batch cpuset cg relative path: %m",
+				      " %u_batch cpuset cg relative path: %m",
+				      jobid);
+				return SLURM_ERROR;
+			}
+		} else if (stepid == SLURM_EXTERN_CONT) {
+			if (snprintf(jobstep_cgroup_path, PATH_MAX,
+				     "%s/step_exter", job_cgroup_path)
+			    >= PATH_MAX) {
+				error("task/cgroup: unable to build job step"
+				      " %u_exter cpuset cg relative path: %m",
 				      jobid);
 				return SLURM_ERROR;
 			}
@@ -1132,6 +1365,13 @@ extern int task_cgroup_cpuset_set_task_affinity(stepd_step_rec_t *job)
 	uint32_t jntasks = job->node_tasks;
 	uint32_t jnpus;
 
+	/* Allocate and initialize hwloc objects */
+	hwloc_topology_init(&topology);
+	hwloc_topology_load(topology);
+	cpuset = hwloc_bitmap_alloc();
+
+	int spec_threads = 0;
+
 	if (job->batch) {
 		jnpus = job->cpus;
 		job->cpus_per_task = job->cpus;
@@ -1143,11 +1383,6 @@ extern int task_cgroup_cpuset_set_task_affinity(stepd_step_rec_t *job)
 	    (bind_type & CPU_BIND_VERBOSE))
 		bind_verbose = 1 ;
 
-	/* Allocate and initialize hwloc objects */
-	hwloc_topology_init(&topology);
-	hwloc_topology_load(topology);
-	cpuset = hwloc_bitmap_alloc();
-
 	if ( hwloc_get_type_depth(topology, HWLOC_OBJ_NODE) >
 	     hwloc_get_type_depth(topology, HWLOC_OBJ_SOCKET) ) {
 		/* One socket contains multiple NUMA-nodes
@@ -1221,10 +1456,16 @@ extern int task_cgroup_cpuset_set_task_affinity(stepd_step_rec_t *job)
 						       socket_or_node);
 	nldoms = (uint32_t) hwloc_get_nbobjs_by_type(topology,
 						     HWLOC_OBJ_NODE);
+	//info("PU:%d CORE:%d SOCK:%d LDOM:%d", npus, ncores, nsockets, nldoms);
 
 	hwtype = HWLOC_OBJ_MACHINE;
 	nobj = 1;
-	if (npus >= jnpus || bind_type & CPU_BIND_TO_THREADS) {
+	if ((job->job_core_spec != (uint16_t) NO_VAL) &&
+	    (job->job_core_spec &  CORE_SPEC_THREAD)  &&
+	    (job->job_core_spec != CORE_SPEC_THREAD)) {
+		spec_threads = job->job_core_spec & (~CORE_SPEC_THREAD);
+	}
+	if (npus >= (jnpus + spec_threads) || bind_type & CPU_BIND_TO_THREADS) {
 		hwtype = HWLOC_OBJ_PU;
 		nobj = npus;
 	}
@@ -1292,12 +1533,13 @@ extern int task_cgroup_cpuset_set_task_affinity(stepd_step_rec_t *job)
 				error("task/cgroup: task[%u] unable to set "
 				      "mask 0x%s", taskid,
 				      cpuset_to_str(&ts, mstr));
+				error("sched_setaffinity rc = %d", rc);
 				fstatus = SLURM_ERROR;
 			} else if (bind_verbose) {
 				info("task/cgroup: task[%u] mask 0x%s",
 				     taskid, cpuset_to_str(&ts, mstr));
 			}
-			slurm_chkaffinity(&ts, job, rc);
+			_slurm_chkaffinity(&ts, job, rc);
 		}
 	} else {
 		/* Bind the detected object to the taskid, respecting the
@@ -1306,52 +1548,25 @@ extern int task_cgroup_cpuset_set_task_affinity(stepd_step_rec_t *job)
 		char *str;
 
 		if (bind_verbose) {
-			info("task/cgroup: task[%u] using %s granularity",
-			     taskid,hwloc_obj_type_string(hwtype));
+			info("task/cgroup: task[%u] using %s granularity dist %u",
+			     taskid, hwloc_obj_type_string(hwtype),
+			     job->task_dist);
 		}
 
-		/* There are two "distributions,"  controlled by the
-		 * -m option of srun and friends. The first is the
-		 * distribution of tasks to nodes.  The second is the
-		 * distribution of allocated cpus to tasks for
-		 * binding.  This code is handling the second
-		 * distribution.  Here's how the values get set, based
-		 * on the value of -m
-		 *
-		 * SLURM_DIST_CYCLIC = srun -m cyclic
-		 * SLURM_DIST_BLOCK = srun -m block
-		 * SLURM_DIST_CYCLIC_CYCLIC = srun -m cyclic:cyclic
-		 * SLURM_DIST_BLOCK_CYCLIC = srun -m block:cyclic
-		 *
-		 * In the first two cases, the user only specified the
-		 * first distribution.  The second distribution
-		 * defaults to cyclic.  In the second two cases, the
-		 * user explicitly requested a second distribution of
-		 * cyclic.  So all these four cases correspond to a
-		 * second distribution of cyclic.   So we want to call
-		 * _task_cgroup_cpuset_dist_cyclic.
-		 *
-		 * If the user explicitly specifies a second
-		 * distribution of block, or if
-		 * CR_CORE_DEFAULT_DIST_BLOCK is configured and the
-		 * user does not explicitly specify a second
-		 * distribution of cyclic, the second distribution is
-		 * block, and we need to call
-		 * _task_cgroup_cpuset_dist_block. In these cases,
-		 * task_dist would be set to SLURM_DIST_CYCLIC_BLOCK
-		 * or SLURM_DIST_BLOCK_BLOCK.
+		/* See srun man page for detailed information on --distribution
+		 * option.
 		 *
 		 * You can see the equivalent code for the
 		 * task/affinity plugin in
 		 * src/plugins/task/affinity/dist_tasks.c, around line 368
 		 */
-		switch (job->task_dist) {
+		switch (job->task_dist & SLURM_DIST_NODESOCKMASK) {
 		case SLURM_DIST_BLOCK_BLOCK:
 		case SLURM_DIST_CYCLIC_BLOCK:
 		case SLURM_DIST_PLANE:
 			/* tasks are distributed in blocks within a plane */
-			_task_cgroup_cpuset_dist_block(
-				topology, hwtype, req_hwtype,
+			_task_cgroup_cpuset_dist_block(topology,
+				hwtype, req_hwtype,
 				nobj, job, bind_verbose, cpuset);
 			break;
 		case SLURM_DIST_ARBITRARY:
@@ -1360,8 +1575,8 @@ extern int task_cgroup_cpuset_set_task_affinity(stepd_step_rec_t *job)
 		case SLURM_DIST_UNKNOWN:
 			if (slurm_get_select_type_param()
 			    & CR_CORE_DEFAULT_DIST_BLOCK) {
-				_task_cgroup_cpuset_dist_block(
-					topology, hwtype, req_hwtype,
+				_task_cgroup_cpuset_dist_block(topology,
+					hwtype, req_hwtype,
 					nobj, job, bind_verbose, cpuset);
 				break;
 			}
@@ -1369,8 +1584,8 @@ extern int task_cgroup_cpuset_set_task_affinity(stepd_step_rec_t *job)
 			   default dist block.
 			*/
 		default:
-			_task_cgroup_cpuset_dist_cyclic(
-				topology, hwtype, req_hwtype,
+			_task_cgroup_cpuset_dist_cyclic(topology,
+				hwtype, req_hwtype,
 				job, bind_verbose, cpuset);
 			break;
 		}
@@ -1389,7 +1604,7 @@ extern int task_cgroup_cpuset_set_task_affinity(stepd_step_rec_t *job)
 				info("task/cgroup: task[%u] set taskset '%s'",
 				     taskid, str);
 			}
-			slurm_chkaffinity(&ts, job, rc);
+			_slurm_chkaffinity(&ts, job, rc);
 		} else {
 			error("task/cgroup: task[%u] unable to build "
 			      "taskset '%s'",taskid,str);
diff --git a/src/plugins/task/cgroup/task_cgroup_devices.c b/src/plugins/task/cgroup/task_cgroup_devices.c
index 19588e7e6..327e2f1dd 100644
--- a/src/plugins/task/cgroup/task_cgroup_devices.c
+++ b/src/plugins/task/cgroup/task_cgroup_devices.c
@@ -43,14 +43,14 @@
 #include <glob.h>
 #include <sys/types.h>
 #include <sys/stat.h>
-#include <slurm/slurm_errno.h>
 #include <slurm/slurm.h>
-#include "src/slurmd/slurmstepd/slurmstepd_job.h"
-#include "src/slurmd/slurmd/slurmd.h"
-
+#include <slurm/slurm_errno.h>
 #include "src/common/xstring.h"
 #include "src/common/gres.h"
 #include "src/common/list.h"
+#include "src/slurmd/common/xcpuinfo.h"
+#include "src/slurmd/slurmd/slurmd.h"
+#include "src/slurmd/slurmstepd/slurmstepd_job.h"
 
 #include "task_cgroup.h"
 
@@ -95,7 +95,8 @@ extern int task_cgroup_devices_init(slurm_cgroup_conf_t *slurm_cgroup_conf)
 		goto error;
 	}
 
-	(void) gres_plugin_node_config_load(cpunum, conf->node_name);
+	(void) gres_plugin_node_config_load(cpunum, conf->node_name, NULL);
+
 
 	strcpy(cgroup_allowed_devices_file,
 	       slurm_cgroup_conf->allowed_devices_file);
@@ -194,6 +195,14 @@ extern int task_cgroup_devices_create(stepd_step_rec_t *job)
 				error("task/cgroup: unable to build "
 				      "step batch devices cg path : %m");
 
+			}
+		} else if (stepid == SLURM_EXTERN_CONT) {
+			cc = snprintf(jobstep_cgroup_path, PATH_MAX,
+				      "%s/step_extern", job_cgroup_path);
+			if (cc >= PATH_MAX) {
+				error("task/cgroup: unable to build "
+				      "step extern devices cg path : %m");
+
 			}
 		} else {
 
diff --git a/src/plugins/task/cgroup/task_cgroup_memory.c b/src/plugins/task/cgroup/task_cgroup_memory.c
index 2038ef745..098dfa585 100644
--- a/src/plugins/task/cgroup/task_cgroup_memory.c
+++ b/src/plugins/task/cgroup/task_cgroup_memory.c
@@ -287,6 +287,12 @@ static int memcg_initialize (xcgroup_ns_t *ns, xcgroup_t *cg,
 		mlb = mls;
 	xcgroup_set_uint64_param (cg, "memory.limit_in_bytes", mlb);
 
+	/*
+	 * Also constrain kernel memory (if available).
+	 * See https://lwn.net/Articles/516529/
+	 */
+	xcgroup_set_uint64_param (cg, "memory.kmem.limit_in_bytes", mlb);
+
 	/* this limit has to be set only if ConstrainSwapSpace is set to yes */
 	if ( constrain_swap_space ) {
 		xcgroup_set_uint64_param (cg, "memory.memsw.limit_in_bytes",
@@ -356,7 +362,14 @@ extern int task_cgroup_memory_create(stepd_step_rec_t *job)
 				      "step batch memory cg path : %m");
 
 			}
+		} else if (stepid == SLURM_EXTERN_CONT) {
+			cc = snprintf(jobstep_cgroup_path, PATH_MAX,
+				      "%s/step_extern", job_cgroup_path);
+			if (cc >= PATH_MAX) {
+				error("task/cgroup: unable to build "
+				      "step extern memory cg path : %m");
 
+			}
 		} else {
 			if (snprintf(jobstep_cgroup_path, PATH_MAX, "%s/step_%u",
 				     job_cgroup_path,stepid) >= PATH_MAX) {
diff --git a/src/plugins/task/cray/Makefile.in b/src/plugins/task/cray/Makefile.in
index 022b358c2..e4b610150 100644
--- a/src/plugins/task/cray/Makefile.in
+++ b/src/plugins/task/cray/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -272,6 +275,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -321,8 +326,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -341,6 +350,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -384,6 +396,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -407,6 +420,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/task/cray/task_cray.c b/src/plugins/task/cray/task_cray.c
index 5f7f6e8ba..75e8d49f0 100644
--- a/src/plugins/task/cray/task_cray.c
+++ b/src/plugins/task/cray/task_cray.c
@@ -90,15 +90,12 @@ static uint64_t debug_flags = 0;
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "task CRAY plugin";
 const char plugin_type[]        = "task/cray";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 #ifdef HAVE_NATIVE_CRAY
 #ifdef HAVE_NUMA
@@ -441,6 +438,15 @@ extern int task_p_post_step (stepd_step_rec_t *job)
 			CRAY_ERR("snprintf failed. Return code: %d", rc);
 			return SLURM_ERROR;
 		}
+	} else if (job->stepid == SLURM_EXTERN_CONT) {
+		// Container for PAM to use for externally launched processes
+		rc = snprintf(path, sizeof(path),
+			      "/dev/cpuset/slurm/uid_%d/job_%"
+			      PRIu32 "/step_extern", job->uid, job->jobid);
+		if (rc < 0) {
+			CRAY_ERR("snprintf failed. Return code: %d", rc);
+			return SLURM_ERROR;
+		}
 	} else {
 		// Normal Job Step
 
diff --git a/src/plugins/task/none/Makefile.in b/src/plugins/task/none/Makefile.in
index 7ee49298f..7d89ee7ff 100644
--- a/src/plugins/task/none/Makefile.in
+++ b/src/plugins/task/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -272,6 +275,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -321,8 +326,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -341,6 +350,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -384,6 +396,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -407,6 +420,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/task/none/task_none.c b/src/plugins/task/none/task_none.c
index 8f8bf0935..f2dce35e5 100644
--- a/src/plugins/task/none/task_none.c
+++ b/src/plugins/task/none/task_none.c
@@ -70,15 +70,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "task NONE plugin";
 const char plugin_type[]        = "task/none";
-const uint32_t plugin_version   = 100;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
diff --git a/src/plugins/topology/3d_torus/Makefile.in b/src/plugins/topology/3d_torus/Makefile.in
index 9d4717e76..d588eeec8 100644
--- a/src/plugins/topology/3d_torus/Makefile.in
+++ b/src/plugins/topology/3d_torus/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/topology/3d_torus/topology_3d_torus.c b/src/plugins/topology/3d_torus/topology_3d_torus.c
index 930abc8dd..c5191aa45 100644
--- a/src/plugins/topology/3d_torus/topology_3d_torus.c
+++ b/src/plugins/topology/3d_torus/topology_3d_torus.c
@@ -71,15 +71,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "topology 3d_torus plugin";
 const char plugin_type[]        = "topology/3d_torus";
-const uint32_t plugin_version   = 101;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 extern void nodes_to_hilbert_curve(void);
 
diff --git a/src/plugins/topology/Makefile.am b/src/plugins/topology/Makefile.am
index c08e32f6d..869033c97 100644
--- a/src/plugins/topology/Makefile.am
+++ b/src/plugins/topology/Makefile.am
@@ -1,3 +1,3 @@
 # Makefile for topology plugins
 
-SUBDIRS = 3d_torus node_rank none tree
+SUBDIRS = 3d_torus hypercube node_rank none tree
diff --git a/src/plugins/topology/Makefile.in b/src/plugins/topology/Makefile.in
index 9550f6200..5fd339ff9 100644
--- a/src/plugins/topology/Makefile.in
+++ b/src/plugins/topology/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -447,7 +461,7 @@ target_vendor = @target_vendor@
 top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
-SUBDIRS = 3d_torus node_rank none tree
+SUBDIRS = 3d_torus hypercube node_rank none tree
 all: all-recursive
 
 .SUFFIXES:
diff --git a/src/plugins/topology/hypercube/Makefile.am b/src/plugins/topology/hypercube/Makefile.am
new file mode 100644
index 000000000..3c04be0fc
--- /dev/null
+++ b/src/plugins/topology/hypercube/Makefile.am
@@ -0,0 +1,12 @@
+# Makefile for topology/hypercube plugin
+
+AUTOMAKE_OPTIONS = foreign
+
+PLUGIN_FLAGS = -module -avoid-version --export-dynamic
+
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
+
+pkglib_LTLIBRARIES = topology_hypercube.la
+
+topology_hypercube_la_SOURCES = topology_hypercube.c
+topology_hypercube_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
diff --git a/src/plugins/slurmctld/dynalloc/Makefile.in b/src/plugins/topology/hypercube/Makefile.in
similarity index 88%
rename from src/plugins/slurmctld/dynalloc/Makefile.in
rename to src/plugins/topology/hypercube/Makefile.in
index 9b911a7d3..192ad15a7 100644
--- a/src/plugins/slurmctld/dynalloc/Makefile.in
+++ b/src/plugins/topology/hypercube/Makefile.in
@@ -14,7 +14,7 @@
 
 @SET_MAKE@
 
-# Makefile for dynalloc (resource dynamic allocation) plugin
+# Makefile for topology/hypercube plugin
 
 VPATH = @srcdir@
 am__is_gnu_make = test -n '$(MAKEFILE_LIST)' && test -n '$(MAKELEVEL)'
@@ -81,7 +81,7 @@ POST_UNINSTALL = :
 build_triplet = @build@
 host_triplet = @host@
 target_triplet = @target@
-subdir = src/plugins/slurmctld/dynalloc
+subdir = src/plugins/topology/hypercube
 DIST_COMMON = $(srcdir)/Makefile.in $(srcdir)/Makefile.am \
 	$(top_srcdir)/auxdir/depcomp
 ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -160,30 +163,17 @@ am__uninstall_files_from_dir = { \
   }
 am__installdirs = "$(DESTDIR)$(pkglibdir)"
 LTLIBRARIES = $(pkglib_LTLIBRARIES)
-slurmctld_dynalloc_la_LIBADD =
-am__slurmctld_dynalloc_la_SOURCES_DIST = allocate.c allocate.h \
-	allocator.c allocator.h argv.c argv.h deallocate.c \
-	deallocate.h info.c info.h job_ports_list.c job_ports_list.h \
-	msg.c msg.h slurmctld_dynalloc.c
-am__objects_1 = allocate.lo allocator.lo argv.lo deallocate.lo info.lo \
-	job_ports_list.lo msg.lo slurmctld_dynalloc.lo
-@SLURM_ENABLE_DYNAMIC_ALLOCATION_TRUE@am_slurmctld_dynalloc_la_OBJECTS =  \
-@SLURM_ENABLE_DYNAMIC_ALLOCATION_TRUE@	$(am__objects_1)
-am__EXTRA_slurmctld_dynalloc_la_SOURCES_DIST = allocate.c allocate.h \
-	allocator.c allocator.h argv.c argv.h deallocate.c \
-	deallocate.h info.c info.h job_ports_list.c job_ports_list.h \
-	msg.c msg.h slurmctld_dynalloc.c
-slurmctld_dynalloc_la_OBJECTS = $(am_slurmctld_dynalloc_la_OBJECTS)
+topology_hypercube_la_LIBADD =
+am_topology_hypercube_la_OBJECTS = topology_hypercube.lo
+topology_hypercube_la_OBJECTS = $(am_topology_hypercube_la_OBJECTS)
 AM_V_lt = $(am__v_lt_@AM_V@)
 am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
 am__v_lt_0 = --silent
 am__v_lt_1 = 
-slurmctld_dynalloc_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \
+topology_hypercube_la_LINK = $(LIBTOOL) $(AM_V_lt) --tag=CC \
 	$(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link $(CCLD) \
-	$(AM_CFLAGS) $(CFLAGS) $(slurmctld_dynalloc_la_LDFLAGS) \
+	$(AM_CFLAGS) $(CFLAGS) $(topology_hypercube_la_LDFLAGS) \
 	$(LDFLAGS) -o $@
-@SLURM_ENABLE_DYNAMIC_ALLOCATION_TRUE@am_slurmctld_dynalloc_la_rpath =  \
-@SLURM_ENABLE_DYNAMIC_ALLOCATION_TRUE@	-rpath $(pkglibdir)
 AM_V_P = $(am__v_P_@AM_V@)
 am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
 am__v_P_0 = false
@@ -218,10 +208,8 @@ AM_V_CCLD = $(am__v_CCLD_@AM_V@)
 am__v_CCLD_ = $(am__v_CCLD_@AM_DEFAULT_V@)
 am__v_CCLD_0 = @echo "  CCLD    " $@;
 am__v_CCLD_1 = 
-SOURCES = $(slurmctld_dynalloc_la_SOURCES) \
-	$(EXTRA_slurmctld_dynalloc_la_SOURCES)
-DIST_SOURCES = $(am__slurmctld_dynalloc_la_SOURCES_DIST) \
-	$(am__EXTRA_slurmctld_dynalloc_la_SOURCES_DIST)
+SOURCES = $(topology_hypercube_la_SOURCES)
+DIST_SOURCES = $(topology_hypercube_la_SOURCES)
 am__can_run_installinfo = \
   case $$AM_UPDATE_INFO_DIR in \
     n|no|NO) false;; \
@@ -288,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -337,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -357,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -400,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -423,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -485,30 +484,11 @@ top_build_prefix = @top_build_prefix@
 top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
-AM_CXXFLAGS = -fexceptions
 PLUGIN_FLAGS = -module -avoid-version --export-dynamic
 AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/common
-AS_DYNALLOC_SOURCES = \
-	allocate.c	\
-	allocate.h	\
-	allocator.c	\
-	allocator.h	\
-	argv.c		\
-	argv.h		\
-	deallocate.c \
-	deallocate.h \
-	info.c	\
-	info.h	\
-	job_ports_list.c \
-	job_ports_list.h \
-	msg.c	\
-	msg.h	\
-	slurmctld_dynalloc.c
-
-@SLURM_ENABLE_DYNAMIC_ALLOCATION_TRUE@pkglib_LTLIBRARIES = slurmctld_dynalloc.la
-@SLURM_ENABLE_DYNAMIC_ALLOCATION_TRUE@slurmctld_dynalloc_la_SOURCES = $(AS_DYNALLOC_SOURCES)
-@SLURM_ENABLE_DYNAMIC_ALLOCATION_TRUE@slurmctld_dynalloc_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
-@SLURM_ENABLE_DYNAMIC_ALLOCATION_FALSE@EXTRA_slurmctld_dynalloc_la_SOURCES = $(AS_DYNALLOC_SOURCES)
+pkglib_LTLIBRARIES = topology_hypercube.la
+topology_hypercube_la_SOURCES = topology_hypercube.c
+topology_hypercube_la_LDFLAGS = $(SO_LDFLAGS) $(PLUGIN_FLAGS)
 all: all-am
 
 .SUFFIXES:
@@ -522,9 +502,9 @@ $(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__confi
 	      exit 1;; \
 	  esac; \
 	done; \
-	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/slurmctld/dynalloc/Makefile'; \
+	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign src/plugins/topology/hypercube/Makefile'; \
 	$(am__cd) $(top_srcdir) && \
-	  $(AUTOMAKE) --foreign src/plugins/slurmctld/dynalloc/Makefile
+	  $(AUTOMAKE) --foreign src/plugins/topology/hypercube/Makefile
 .PRECIOUS: Makefile
 Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
 	@case '$?' in \
@@ -579,8 +559,8 @@ clean-pkglibLTLIBRARIES:
 	  rm -f $${locs}; \
 	}
 
-slurmctld_dynalloc.la: $(slurmctld_dynalloc_la_OBJECTS) $(slurmctld_dynalloc_la_DEPENDENCIES) $(EXTRA_slurmctld_dynalloc_la_DEPENDENCIES) 
-	$(AM_V_CCLD)$(slurmctld_dynalloc_la_LINK) $(am_slurmctld_dynalloc_la_rpath) $(slurmctld_dynalloc_la_OBJECTS) $(slurmctld_dynalloc_la_LIBADD) $(LIBS)
+topology_hypercube.la: $(topology_hypercube_la_OBJECTS) $(topology_hypercube_la_DEPENDENCIES) $(EXTRA_topology_hypercube_la_DEPENDENCIES) 
+	$(AM_V_CCLD)$(topology_hypercube_la_LINK) -rpath $(pkglibdir) $(topology_hypercube_la_OBJECTS) $(topology_hypercube_la_LIBADD) $(LIBS)
 
 mostlyclean-compile:
 	-rm -f *.$(OBJEXT)
@@ -588,14 +568,7 @@ mostlyclean-compile:
 distclean-compile:
 	-rm -f *.tab.c
 
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/allocate.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/allocator.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/argv.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/deallocate.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_ports_list.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/msg.Plo@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmctld_dynalloc.Plo@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/topology_hypercube.Plo@am__quote@
 
 .c.o:
 @am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
diff --git a/src/plugins/topology/hypercube/topology_hypercube.c b/src/plugins/topology/hypercube/topology_hypercube.c
new file mode 100644
index 000000000..a70d8feb8
--- /dev/null
+++ b/src/plugins/topology/hypercube/topology_hypercube.c
@@ -0,0 +1,1440 @@
+/*****************************************************************************\
+ *  topology_hypercube.c - Build configuration information for hypercube
+ *			   switch topology
+ *  Copyright (C) 2009 Lawrence Livermore National Security.
+ *  Copyright (C) 2014 Silicon Graphics International Corp. All rights reserved.
+ *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+ *  Written by Morris Jette <jette1@llnl.gov>
+ *  CODE-OCEC-09-009. All rights reserved.
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if     HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <limits.h>
+#include <time.h>
+#include <math.h>
+
+#include "slurm/slurm_errno.h"
+#include "src/common/bitstring.h"
+#include "src/common/log.h"
+#include "src/common/slurm_topology.h"
+#include "src/common/xstring.h"
+#include "src/slurmctld/slurmctld.h"
+
+#include "src/common/node_conf.h"
+
+
+/*
+ * These variables are required by the generic plugin interface.  If they
+ * are not found in the plugin, the plugin loader will ignore it.
+ *
+ * plugin_name - a string giving a human-readable description of the
+ * plugin.  There is no maximum length, but the symbol must refer to
+ * a valid string.
+ *
+ * plugin_type - a string suggesting the type of the plugin or its
+ * applicability to a particular form of data or method of data handling.
+ * If the low-level plugin API is used, the contents of this string are
+ * unimportant and may be anything.  SLURM uses the higher-level plugin
+ * interface which requires this string to be of the form
+ *
+ *      <application>/<method>
+ *
+ * where <application> is a description of the intended application of
+ * the plugin (e.g., "task" for task control) and <method> is a description
+ * of how this plugin satisfies that application.  SLURM will only load
+ * a task plugin if the plugin_type string has a prefix of "task/".
+ *
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
+ */
+const char plugin_name[]        = "topology hypercube plugin";
+const char plugin_type[]        = "topology/hypercube";
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
+
+typedef struct slurm_conf_switches {
+	char *switch_name;	/* name of this switch */
+	char *nodes;	/* names of nodes connected this switch */
+	char *switches;	/* names of switches connected to this switch */
+	uint32_t link_speed;		/* link speed, arbitrary units */
+} slurm_conf_switches_t;
+
+static s_p_hashtbl_t *conf_hashtbl = NULL;
+static char* topo_conf = NULL;
+
+typedef struct switch_data_struct switch_data;
+struct switch_data_struct {
+	char *name;			/* switch name */
+	bitstr_t *node_bitmap;	/* bitmap of nodes connectwed to switch */	
+	int *coordinates; /* coordinates of switch within hypercube topology */
+	int *orig_coordinates;/*original switch coordinates in hypercube topology*/
+	uint32_t link_speed;		/* link speed, arbitrary units */
+	
+	switch_data **sw_conns; /* pointers to connected switches */
+	int *sw_conn_speed; /* speed of connection to connected switches */
+	int sw_conn_cnt; /* number of switches connected to this switch */
+	char *switches;   /* name of direct descendant switches */
+	
+	struct node_record **node_conns; /* pointers to connected nodes */
+	int *node_index; /* index of connected nodes in node_record_table */
+	int node_conn_cnt; /* number of nodes connected to this switch */
+	char *nodes;			/* name of direct descendant nodes */
+	
+	int rack_number; /* the number of the rack this switch is located in */
+	int iru_number; /* the number of the IRU this switch is located in */
+	int switch_number; /* the switch number for this switch within its IRU */
+	
+	int rank; /* the hilbert rank for this switch */
+	int index; /* the index of the switch within the switch record table */
+	int distance; /* distance between to start switch in ranked switch table */
+};
+
+static switch_data *switch_data_table = NULL; 
+static int switch_data_cnt = 0; /* size of switch_data_table */
+
+
+#define switch_time_same_iru 1024
+#define switch_time_same_rack 2048
+#define switch_time_diff_rack 4096
+#define switch_time_unlinked 10000
+
+#define default_link_speed 256
+
+
+/* Topology functions sorted by group */
+//////////////////////////////////////////////////////////////////////////////
+//// Data Parsing and Switch Record Table Building Related Functions ////
+static void _validate_switches(void);
+extern int  _read_topo_file(slurm_conf_switches_t **ptr_array[]);
+static int  _parse_switches(void **dest, slurm_parser_enum_t type,
+				const char *key, const char *value,
+				const char *line, char **leftover);
+static int  _node_name2bitmap(char *node_names, bitstr_t **bitmap, 
+				  hostlist_t *invalid_hostlist);
+static int _parse_connected_nodes(switch_data *sw_record);
+static void _update_switch_connections(void);
+static int _parse_connected_switches(switch_data *sw_record);
+static int _parse_link_speed(char **sw_name);
+static int _char2int(char coord);
+static int _get_connection_time(const switch_data *sw_ptr1,
+				const switch_data *sw_ptr2);
+static void _resize_switch_connections(switch_data *sw_record, 
+				       int conns_space, int conn_count );
+static void _update_location_info(switch_data *switch_ptr);
+//////////////////////////////////////////////////////////////////////////////
+//// Coordinate Related Functions ////
+static int _coordinate_switches(void);
+static void _zero_coordinates(void);
+static int _find_new_switches(switch_data **switch_table, int record_count);
+static int _get_switch_index(switch_data **switch_table,
+			     int record_count, const switch_data *switch_ptr);
+static void _or_coordinates(const switch_data *src_ptr,switch_data *dest_ptr);
+static void _copy_coordinate(const switch_data *src_switch_ptr,
+			     switch_data *dest_switch_ptr);
+//////////////////////////////////////////////////////////////////////////////
+//// Hilbert Curve, Switch Ranking and Distance Related Functions ////
+static void _build_hypercube_switch_table( int num_curves);
+static void _transform_coordinates( int curve_num );
+static void _generate_hilbert_integers(void);
+			  // ( position [n], # bits, dimension )
+static void _axes_to_transpose(unsigned int* X, int b, int n); 
+static void _sort_switches_by_rank( int curve_num );
+static void _create_sorted_switch_distances(int curve_num,
+					    switch_data **ranked_switch_table);
+static int _get_switch_distance(const switch_data *sw_ptr1,
+				const switch_data *sw_ptr2);
+//////////////////////////////////////////////////////////////////////////////
+//// String Creation and Printing Related Function ////
+static void _print_switch_data_table(void);
+static void _print_hypercube_switch_table( int num_curves );
+static void _print_sorted_hilbert_curves( int num_curves );
+static char *_print_switch_str(switch_data *switch_ptr, int print,char *offset);
+static char *_create_coordinate_str(switch_data *switch_ptr);
+static char *_create_connection_str(switch_data *switch_ptr);
+static char *_create_conn_node_str(switch_data *switch_ptr);
+//////////////////////////////////////////////////////////////////////////////
+//// Memory Freeing and Allocating Functions ////
+static void _destroy_switches(void *ptr);
+static void _free_switch_data_table(void);
+static void _free_hypercube_switch_table(void);
+//////////////////////////////////////////////////////////////////////////////
+//////////////////////////////////////////////////////////////////////////////
+
+
+/*
+ * init() is called when the plugin is loaded, before any other functions
+ *	are called.  Put global initialization here.
+ */
+extern int init(void)
+{
+	verbose("%s loaded", plugin_name);
+	return SLURM_SUCCESS;
+}
+
+/*
+ * fini() is called when the plugin is removed. Clear any allocated
+ *	storage here.
+ */
+extern int fini(void)
+{
+	_free_hypercube_switch_table();
+	_free_switch_data_table();
+
+	xfree(topo_conf);
+	return SLURM_SUCCESS;
+}
+
+/*
+ * topo_build_config - build or rebuild system topology information
+ *	after a system startup or reconfiguration.
+ */
+extern int topo_build_config(void)
+{
+	return SLURM_SUCCESS;
+}
+
+/*
+ * topo_generate_node_ranking  - Reads in topology.conf file and the switch 
+ * connection infomation for the Hypercube network topology. Use Hilbert Curves
+ * to sort switches into multiple 1 dimensional tables which are used in the 
+ * select plugin to find the best-fit cluster of nodes for a job. 
+ */
+extern bool topo_generate_node_ranking(void)
+{
+	int i;
+
+	// Reads in topology.conf and parses it into switch_data_table struct
+	_validate_switches();
+
+	// Sets coordinates for switches in accordance with the hypercube topology
+	_coordinate_switches();
+
+	// Prints out all of the switch information for the network
+	_print_switch_data_table();
+
+	int num_curves = hypercube_dimensions;
+
+	// Copy needed data from switch_data_table to hypercube_switch_table
+	_build_hypercube_switch_table(num_curves);
+
+	for (i = 0; i < num_curves; i++) {
+		/* Apply a linear transformation to the switches coordinates so to
+		 * produce a unique mapping from switch data to Hilbert curve */
+		_transform_coordinates(i);
+
+		// Creates Hilbert integers for each of the switches in the topology
+		_generate_hilbert_integers();
+
+		// Sort switches by their Hilbert integer ranks
+		_sort_switches_by_rank(i);
+	}
+
+	// Prints out all of the hypercube switch information for the network
+	_print_hypercube_switch_table(num_curves);
+
+	// Prints Hypercube switch tables sorted by Hilbert Curve Integers
+	_print_sorted_hilbert_curves(num_curves);
+
+	// Free the old switch data table since it is no longer needed
+	_free_switch_data_table();
+
+	// Return false to prevent SLURM from doing additional node ordering 
+	return false;
+}
+
+/*
+ * topo_get_node_addr - build node address
+ */
+extern int topo_get_node_addr(char* node_name, char** paddr, char** ppattern)
+{
+	*paddr = xstrdup(node_name);
+	*ppattern = xstrdup("node");
+	return SLURM_SUCCESS;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+//// Data Parsing and Switch Record Table Building Related Functions ////
+
+/* Reads in topology.conf and parses it into switch_data_table struct */
+static void _validate_switches(void)
+{
+	slurm_conf_switches_t *ptr, **ptr_array;
+	int i, j;
+	switch_data *switch_ptr, *prior_ptr;
+	hostlist_t invalid_hl = NULL;
+
+	_free_switch_data_table();
+
+	// Read the data from the topopolgy file into slurm_conf_switches_t struct
+	switch_data_cnt = _read_topo_file(&ptr_array);
+	if (switch_data_cnt == 0) {
+		error("No switches configured");
+		s_p_hashtbl_destroy(conf_hashtbl);
+		return;
+	}
+
+	switch_data_table = xmalloc(sizeof(switch_data) * switch_data_cnt);
+	switch_ptr = switch_data_table;
+
+	// loops through all the conf_switches found in config file 
+	// parses data into switch_data structs to build the record_table
+	for (i = 0; i < switch_data_cnt; i++, switch_ptr++) {
+		switch_data_table[i].index = i;
+		ptr = ptr_array[i];
+		switch_ptr->name = xstrdup(ptr->switch_name);
+
+		/* See if switch name has already been defined. */
+		prior_ptr = switch_data_table;
+		for (j = 0; j < i; j++, prior_ptr++) {
+			if (strcmp(switch_ptr->name, prior_ptr->name) == 0) {
+				fatal("Switch (%s) has already been defined",
+				      prior_ptr->name);
+			}
+		}
+
+		switch_ptr->link_speed = ptr->link_speed;
+
+		if (ptr->nodes) {
+			switch_ptr->nodes = xstrdup(ptr->nodes);
+			if (_node_name2bitmap(ptr->nodes,
+					      &switch_ptr->node_bitmap, 
+					      &invalid_hl)) {
+				fatal("Invalid node name (%s) in switch config (%s)",
+				      ptr->nodes, ptr->switch_name);
+			}
+
+			switch_ptr->node_conn_cnt =
+				_parse_connected_nodes(switch_ptr);
+			if (switch_ptr->node_conn_cnt < 1) {
+				error("Switch %s does not have any nodes "
+				      "connected to it",
+				      switch_ptr->name);
+			}
+		}
+
+		if (ptr->switches) {
+			switch_ptr->switches = xstrdup(ptr->switches);
+		} else if (!ptr->nodes) {
+			fatal("Switch configuration (%s) lacks children",
+			      ptr->switch_name);
+		}
+
+		_update_location_info(switch_ptr);
+	}
+
+	/* Loops through updating and verifying all the switch's connections */
+	_update_switch_connections();
+
+	s_p_hashtbl_destroy(conf_hashtbl);
+}
+
+
+/* Return count of switch configuration entries read */
+extern int  _read_topo_file(slurm_conf_switches_t **ptr_array[])
+{
+	static s_p_options_t switch_options[] = {
+		{"SwitchName", S_P_ARRAY, _parse_switches, _destroy_switches},
+		{NULL}
+	};
+	int count;
+	slurm_conf_switches_t **ptr;
+
+	debug("Reading the topology.conf file");
+	if (!topo_conf)
+		topo_conf = get_extra_conf_path("topology.conf");
+
+	conf_hashtbl = s_p_hashtbl_create(switch_options);
+	if (s_p_parse_file(conf_hashtbl, NULL, topo_conf, false) ==
+		SLURM_ERROR) {
+		fatal("something wrong with opening/reading %s: %m",
+			  topo_conf);
+	}
+
+	if (s_p_get_array((void ***)&ptr, &count, "SwitchName", conf_hashtbl))
+		*ptr_array = ptr;
+	else {
+		*ptr_array = NULL;
+		count = 0;
+	}
+	return count;
+}
+
+
+/* parses switches found in topology.config and builds conf_switches */
+static int  _parse_switches(void **dest, slurm_parser_enum_t type,
+				const char *key, const char *value,
+				const char *line, char **leftover)
+{
+	s_p_hashtbl_t *tbl;
+	slurm_conf_switches_t *s;
+	static s_p_options_t _switch_options[] = {
+		{"LinkSpeed", S_P_UINT32},
+		{"Nodes", S_P_STRING},
+		{"Switches", S_P_STRING},
+		{NULL}
+	};
+
+	tbl = s_p_hashtbl_create(_switch_options);
+	s_p_parse_line(tbl, *leftover, leftover);
+
+	s = xmalloc(sizeof(slurm_conf_switches_t));
+	s->switch_name = xstrdup(value);
+	if (!s_p_get_uint32(&s->link_speed, "LinkSpeed", tbl))
+		s->link_speed = 1;
+	s_p_get_string(&s->nodes, "Nodes", tbl);
+	s_p_get_string(&s->switches, "Switches", tbl);
+	s_p_hashtbl_destroy(tbl);
+
+	if (!s->nodes && !s->switches) {
+		error("switch %s has neither child switches nor nodes",
+		      s->switch_name);
+		_destroy_switches(s);
+		return -1;
+	}
+
+	*dest = (void *)s;
+
+	return 1;
+}
+
+
+/* _node_name2bitmap - given a node name regular expression, build a bitmap
+ *	representation, any invalid hostnames are added to a hostlist
+ * IN node_names  - set of node namess
+ * OUT bitmap     - set to bitmap, may not have all bits set on error
+ * IN/OUT invalid_hostlist - hostlist of invalid host names, initialize to NULL
+ * RET 0 if no error, otherwise EINVAL
+ * NOTE: call FREE_NULL_BITMAP(bitmap) and hostlist_destroy(invalid_hostlist)
+ *       to free memory when variables are no longer required	*/
+static int _node_name2bitmap(char *node_names, bitstr_t **bitmap, 
+			     hostlist_t *invalid_hostlist)
+{
+	char *this_node_name;
+	bitstr_t *my_bitmap;
+	hostlist_t host_list;
+
+	my_bitmap = (bitstr_t *) bit_alloc(node_record_count);
+	*bitmap = my_bitmap;
+
+	if (node_names == NULL) {
+		error("_node_name2bitmap: node_names is NULL");
+		return EINVAL;
+	}
+
+	if ( (host_list = hostlist_create(node_names)) == NULL) {
+		/* likely a badly formatted hostlist */
+		error("_node_name2bitmap: hostlist_create(%s) error", 
+			  node_names);
+		return EINVAL;
+	}
+
+	while ( (this_node_name = hostlist_shift(host_list)) ) {
+		struct node_record *node_ptr;
+		node_ptr = find_node_record(this_node_name);
+		if (node_ptr) {
+			bit_set(my_bitmap, 
+				(bitoff_t) (node_ptr - node_record_table_ptr));
+		} else {
+			fatal("Node \"%s\" specified in topology.conf but "
+			      "SLURM has no record of node. Verify that node "
+			      "\"%s\" is specified in slurm.conf",
+			      this_node_name, this_node_name);
+		}
+		free(this_node_name);
+	}
+	hostlist_destroy(host_list);
+
+	return SLURM_SUCCESS;
+}
+
+
+/* parses a switch's node list string and adds pointers to the 
+	connected nodes' data structs */
+static int _parse_connected_nodes(switch_data *sw_record)
+{
+	int max_nodes = 256; 
+	sw_record->node_conns = xmalloc(max_nodes * sizeof(struct node_record*));
+	sw_record->node_index = xmalloc(max_nodes * sizeof(int));
+	char * node_name = strtok(sw_record->nodes," ,");
+	int i, conn_count = 0;
+	struct node_record **tmp_node_conns;
+	int *tmp_node_index;
+
+	// loops through all of the node names in the node name string
+	while (node_name != NULL) {
+		if (conn_count == max_nodes){
+			fatal("%s has +%d node connections which is more than expected",
+			      sw_record->name, conn_count);
+		}
+
+		// look up node struct and add pointer to it in switch's struct
+		struct node_record *node_ptr = find_node_record(node_name);
+		if (node_ptr) {
+			sw_record->node_conns[conn_count] = node_ptr;
+			sw_record->node_index[conn_count] = (int) 
+				(node_ptr - node_record_table_ptr);
+			conn_count++;
+		} else {
+			fatal("Node \"%s\" connected to switch %s specified in "
+			      "topology.conf but SLURM has no record of node. "
+			      "Verify that node \"%s\" is specified in "
+			      "slurm.conf",
+			      node_name, sw_record->name,node_name);
+		}
+
+		node_name = strtok (NULL, " ,.-");
+	}
+
+	/* Ensure that node_index[] is in sorted order */
+	for (i = 0; i < conn_count; i++) {
+		int min_val = sw_record->node_index[i];
+		int min_idx = i;
+		int j;
+
+		for (j = i + 1; j < conn_count; j++) {
+			if (min_val > sw_record->node_index[j]) {
+				min_val = sw_record->node_index[j];
+				min_idx = j;
+			}
+		}
+
+		if (min_idx != i) {
+			struct node_record * trec = sw_record->node_conns[i];
+			int tidx = sw_record->node_index[i];
+
+			sw_record->node_conns[i] = sw_record->node_conns[min_idx];
+			sw_record->node_conns[min_idx] = trec;
+
+			sw_record->node_index[i] = sw_record->node_index[min_idx];
+			sw_record->node_index[min_idx] = tidx;
+		}
+	}
+
+	tmp_node_conns = xrealloc(sw_record->node_conns, 
+				  conn_count * sizeof(struct node_record *));
+	tmp_node_index = xrealloc(sw_record->node_index,
+				  conn_count * sizeof(int));
+
+	if ((tmp_node_conns != NULL) && (tmp_node_index != NULL)) {
+		sw_record->node_conns = tmp_node_conns;
+		sw_record->node_index = tmp_node_index;
+	} else {
+		fatal("Error (re)allocating memory for nodes for %s",
+		      sw_record->name);
+	}
+
+	return conn_count;
+}
+
+
+/* Loops through all the switches and updates and verifies their connections */
+static void _update_switch_connections(void)
+{
+	// after all of the switch structs have been built, loop through
+	//again and set all of the switch connections to point to each other
+	switch_data * switch_ptr = switch_data_table;
+	int i;
+
+	for (i = 0; i < switch_data_cnt; i++, switch_ptr++) {
+		switch_ptr->sw_conn_cnt = _parse_connected_switches(switch_ptr);
+
+		if (switch_ptr->sw_conn_cnt > hypercube_dimensions) {
+			hypercube_dimensions = switch_ptr->sw_conn_cnt;
+		}
+	}
+
+	// Malloc space for coordinates
+	switch_ptr = switch_data_table;
+
+	for (i = 0; i < switch_data_cnt; i++, switch_ptr++) {
+		switch_ptr->coordinates = xmalloc(
+			sizeof(int) * hypercube_dimensions);
+		switch_ptr->orig_coordinates = xmalloc(
+			sizeof(int) * hypercube_dimensions);
+#if 0
+		if (switch_ptr->sw_conn_cnt < hypercube_dimensions) {
+			error(
+"Switch %s is only connected to %d switches in %d-dimension hypercube topology",
+				switch_ptr->name, 
+				switch_ptr->sw_conn_cnt, hypercube_dimensions);
+		}
+#endif
+	}
+}
+
+
+/* parses a switch's switch list string and adds pointers to the 
+	connected switches' data structs */
+static int _parse_connected_switches(switch_data *sw_record)
+{
+	int conns_space = 64;
+	char * sw_name = strtok(sw_record->switches, ",-");
+	int conn_count = 0;
+	int link_speed;
+
+	sw_record->sw_conns = xmalloc(conns_space * sizeof(struct switch_data*));
+	sw_record->sw_conn_speed = xmalloc(conns_space * sizeof(int));
+
+	// loops through all of the switch names in the switch name string
+	while (sw_name != NULL) {
+		switch_data *ptr = switch_data_table;
+		int i;
+
+		if (conn_count == conns_space) {
+			fatal("%s has +%d connections which is more than "
+			      "allocated space for",
+			      sw_record->name, conn_count);
+		}
+
+		// look up node struct and add pointer to it in switch's struct
+		for (i = 0; i < switch_data_cnt; i++, ptr++) {
+			if (strcmp(ptr->name, sw_name) == 0) {
+				sw_record->sw_conns[conn_count] = ptr;
+				break;
+			}
+		}
+
+		if (i == switch_data_cnt) {
+			fatal("Could not find switch record for %s in switch "
+			      "connection list", sw_name);
+		}
+		sw_name = strtok (NULL, ",-");
+
+		// parses the link speed for this switch connection 
+		link_speed = _parse_link_speed(&sw_name);
+		if (link_speed < 1) {
+			fatal("Invalid switch speed of %s between switches "
+			      "%s and %s",
+			      sw_name, sw_record->name, ptr->name);
+		}
+
+		// creates final connection speed by dividing the 
+		// connection time between the two switches by the link_speed
+		sw_record->sw_conn_speed[conn_count] = 
+			_get_connection_time(sw_record, ptr) / link_speed;
+		conn_count++;
+	}
+	
+	// resize memory allocated for switch connections to right size
+	_resize_switch_connections( sw_record, conns_space, conn_count );
+	
+	return conn_count;
+}
+
+
+// Parses the link speed for this switch connection 
+static int _parse_link_speed(char **sw_name)
+{
+	int link_speed = 0;
+	
+	if (_char2int(*sw_name[0]) > -1) {
+		//if there is a link speed for this connection
+		int counter = 0;
+
+		while (_char2int((*sw_name)[counter]) > -1) {
+			link_speed = link_speed * 10 +
+				     _char2int((*sw_name)[counter]);
+			counter++;
+		}
+
+		if (link_speed < 1) {
+			return link_speed;
+		}
+			
+		*sw_name = strtok(NULL, ",-");
+	} else {
+		link_speed = default_link_speed;
+	}
+
+	return link_speed;
+}
+
+
+// returns the integer value for a number character
+static int _char2int(char coord)
+{
+	if ((coord >= '0') && (coord <= '9')) {
+		return (coord - '0');
+	}
+
+	return -1;
+}
+
+
+// returns the connection time for switches based on their locations
+static int _get_connection_time(const switch_data *sw_ptr1,
+				const switch_data *sw_ptr2)
+{
+	if (sw_ptr1->rack_number == sw_ptr2->rack_number){
+		if (sw_ptr1->iru_number == sw_ptr2->iru_number) {
+			return switch_time_same_iru;
+		} else {
+			return switch_time_same_rack;
+		}
+	} else {
+		return switch_time_diff_rack;
+	}
+} 
+
+
+// resize memory allocated for switch connections to right size
+static void _resize_switch_connections(switch_data * sw_record, 
+				       int conns_space, int conn_count)
+{
+
+	// resize switch connections if there are less than originally allocated for
+	if (conn_count < conns_space) {
+		switch_data **tmp_sw_conns = xrealloc(
+			sw_record->sw_conns, 
+			conn_count * sizeof(struct switch_data*));
+		int * tmp_sw_conn_speed = xrealloc(
+			sw_record->sw_conn_speed, 
+			conn_count * sizeof(int));
+
+		if ((tmp_sw_conns != NULL) && (tmp_sw_conn_speed != NULL)) {
+			sw_record->sw_conns = tmp_sw_conns;
+			sw_record->sw_conn_speed = tmp_sw_conn_speed;
+		} else {
+			fatal("Error (re)allocating memory for connected "
+			      "switches for switch %s", sw_record->name);
+		}
+	}
+}
+
+
+// extracts a switch's location from its name ( Rack, IRU, and Server number)
+static void _update_location_info(switch_data * switch_ptr)
+{
+	char *name = switch_ptr->name;
+	int name_len = strlen(name);
+	uint32_t sw_num[3] = {0, 0, 0}; // numbers store rack, IRU & switch numbers
+	char name_char[3] = {'r', 'i', 's'};
+	int i, j = 0; 
+
+	// loop through all characters in servers name extracting numbers
+	for (i = 0; i < 3; i++) {
+		if ((name_char[i] != name[j]) || (_char2int(name[j + 1]) < 0)) {
+			fatal("switch %s lacks valid naming syntax", name);
+		}
+
+		j++;
+		while ((_char2int(name[j]) > -1) && (j < name_len)) {
+			sw_num[i] = sw_num[i] * 10 + _char2int(name[j]);
+			if (sw_num[i] > 1023) {
+				fatal("switch %s has %c value that exceeds "
+				      "limit (%d>1023)",
+				      name, name_char[i], sw_num[i]);
+			}
+
+			j++;
+		}
+	}
+
+	if (j < name_len) {
+		fatal("switch %s lacks valid naming syntax", name);
+	}
+	switch_ptr->rack_number = sw_num[0];
+	switch_ptr->iru_number = sw_num[1];
+	switch_ptr->switch_number = sw_num[2];
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+//// Coordinate Related Functions ////
+
+/*
+ * Sets coordinates for the switches in accordance with the hypercube topology
+ * - First, it picks one switch to be the starting point of the coordinate 
+ * system and assigns it all zero coordinates
+ * - Second, move outwards from starting switch, by assigning coordinates to 
+ * all of the switches connected to the starting switch. Each of these
+ * secondary switches has zeros for coordinates except has a 1 in 1 of its 
+ * dimensions, with each switch having a 1 in a different dimension.
+ * - Lastly, continue to move out from the secondary switches by finding others
+ *  switches that they are connected to, giving them coordinates, & repeating
+ */
+static int _coordinate_switches(void)
+{
+	int counter, j;
+
+	// create a temp record_table that will store all switches that 
+	// have been assigned coordinates
+	switch_data ** coordinated_switch_data_table =
+		xmalloc(sizeof(struct switch_data*) * switch_data_cnt);
+	int coordinated_switch_data_count = 0;
+	switch_data *switch_ptr = NULL;
+
+	_zero_coordinates();
+
+	// Find origin node and add to coordinated_switch_data_table
+	counter = 0;
+	switch_ptr = &switch_data_table[counter];
+	while (switch_ptr->sw_conn_cnt < hypercube_dimensions) {
+		switch_ptr = &switch_data_table[++counter];
+	}	
+
+	coordinated_switch_data_table[coordinated_switch_data_count] = switch_ptr;
+	coordinated_switch_data_count++;
+	
+	/* Add 1st round of switches to coordinate system and assign coordinates */
+	for (j = 0; j < switch_ptr->sw_conn_cnt; j++) {
+		switch_ptr->sw_conns[j]->coordinates[j] = 1;
+		coordinated_switch_data_table[coordinated_switch_data_count] =
+			switch_ptr->sw_conns[j];
+		coordinated_switch_data_count++;
+	}
+
+	// while there are still switches without coordinates continue to loop
+	while (coordinated_switch_data_count < switch_data_cnt) {
+		coordinated_switch_data_count = _find_new_switches(
+				coordinated_switch_data_table,
+				coordinated_switch_data_count);
+	}
+
+	debug("Finished calculating coordinates for switches");
+	xfree(coordinated_switch_data_table);
+
+	return 1;
+}
+
+
+/* Sets all of the coordinates in the switches equal to zero */
+static void _zero_coordinates(void)
+{
+	int i, j;
+
+	for (i = 0; i < switch_data_cnt; i++) {
+		for (j = 0; j < hypercube_dimensions; j++) {
+			switch_data_table[i].coordinates[j] = 0;
+		}
+	}
+}
+
+
+/*
+ * Finds & adds neighboring switch to coordinated table & gives them coordinates
+ * - In order for a switch to be given coordinates, it has to be connected 
+ * to two switches that already have coordinates. When a neighboring switch is
+ * found without coordinates it is added to a temp list. Then once that switch
+ * is found by another neighboring switch, the new switch is added to the 
+ * coordinated switch list and given coordinates equal to the OR of the 
+ * coordinates of the two neighboring switches that found it.
+ * - If the program cannot find any more uncoordinated switches with two 
+ * coordinated neighbors, but there are still switches that need coordinates,
+ * then the program resorts to coordinating switches based on only 1 neighbor
+ */
+static int _find_new_switches(switch_data **switch_table, int record_count)
+{
+	switch_data **temp_record_table = xmalloc( 
+		sizeof(struct switch_data*) * switch_data_cnt);
+	int i, j, temp_record_count = 0, old_record_count = record_count;
+	switch_data *switch_ptr;
+
+	// loop through all of the switches with coordinates
+	for (i = 0; i < record_count; i++) {
+		switch_ptr = switch_table[i];
+
+		// loop through all of the switches that a switch is connected to 
+		for (j = 0; j < switch_ptr->sw_conn_cnt; j++) {
+			int index = _get_switch_index(
+				temp_record_table, 
+				temp_record_count, switch_ptr->sw_conns[j]);
+
+			/*
+			 * If this is an uncoordinated switch and it was on the 
+			 * temp_record_table, meaning that it was already found by
+			 * one neighboring switch, then give it coordinates and
+			 * add it to the switch_table
+			 */
+			if (index > -1) {
+				_or_coordinates(switch_ptr, switch_ptr->sw_conns[j]);
+				switch_table[record_count] =
+					switch_ptr->sw_conns[j];
+				record_count++;
+				temp_record_table[index] = NULL;
+			}
+
+			/*
+			 * If the switch was not already on the temp_record_table,
+			 * but it doesn't have coordinates, then add it to the
+			 * temp_record_table
+			 */
+			else if (_get_switch_index(switch_table, record_count, 
+						   switch_ptr->sw_conns[j]) < 0) {
+				_copy_coordinate(switch_ptr, switch_ptr->sw_conns[j]);
+				temp_record_table[temp_record_count] =
+					switch_ptr->sw_conns[j];
+				temp_record_count++;
+			}
+		}
+	}
+
+	// if there are no more uncoordinated switches with 2 coordinated neighbors
+	if (record_count == old_record_count) {
+		if (temp_record_count == 0) {
+			fatal("Could not coordinate all switches listed."
+			      "Please recheck switch connections in "
+			      "topology.conf file");
+		}
+
+		// Add switches that only have 1 coordinated neighbor to switch_table
+		for (i = 0; i < temp_record_count; i++) {
+			switch_ptr = temp_record_table[i];
+			if (switch_ptr != NULL) {
+				switch_table[record_count] = temp_record_table[i];
+				switch_table[record_count]->coordinates[j] = 1;
+				record_count++;
+				temp_record_table[i] = NULL;
+			}
+		}
+	}
+
+	xfree(temp_record_table);
+	return record_count;
+}
+
+
+/* Return index of a given switch name or -1 if not found */
+static int _get_switch_index(switch_data ** switch_table,
+			     int record_count, const switch_data * switch_ptr)
+{
+	int i;
+
+	for (i = 0; i < record_count; i++) {
+		const switch_data * ptr = switch_table[i];
+
+		if (ptr != NULL) {
+			if (strcmp(ptr->name, switch_ptr->name) == 0) {
+				return i;
+			}
+		}
+	}
+
+	return -1;
+}
+
+
+/* Bitwise OR the coordinates of the 1st switch into those of the 2nd switch */
+static void _or_coordinates(const switch_data *src_ptr, switch_data *dest_ptr)
+{
+	int i;
+
+	for (i = 0; i < hypercube_dimensions; i++) {
+		dest_ptr->coordinates[i] = src_ptr->coordinates[i] | 
+			dest_ptr->coordinates[i];
+	}
+}
+
+
+/* Copy the coordinates of the first (source) switch to the second switch */
+static void _copy_coordinate(const switch_data *src_switch_ptr,
+				 switch_data *dest_switch_ptr)
+{
+	int i;
+
+	for (i = 0; i < hypercube_dimensions; i++) {
+		dest_switch_ptr->coordinates[i] =
+			src_switch_ptr->coordinates[i];
+	}
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+//// Hilbert Curve, Switch Ranking and Distance Related Functions ////
+
+
+/*
+ * Allocate memory for hypercube_switch_table and hypercube_switches and
+ * copy the needed fields from switch_data_table into hypercube_switch_table
+ */
+static void _build_hypercube_switch_table(int num_curves)
+{
+	int i, j;
+
+	_free_hypercube_switch_table();
+	hypercube_switch_cnt = switch_data_cnt;
+	hypercube_switch_table = 
+		xmalloc(sizeof(struct hypercube_switch) * switch_data_cnt);
+	
+	// copy index, name, node bitmap/count and node indices per switch
+	for (i = 0; i < switch_data_cnt; i++ ) {
+		hypercube_switch_table[i].switch_index =
+			switch_data_table[i].index;
+		hypercube_switch_table[i].switch_name = xmalloc(
+			sizeof(char) * (strlen(switch_data_table[i].name) + 1));
+			
+		strcpy(hypercube_switch_table[i].switch_name, 
+			switch_data_table[i].name);
+		hypercube_switch_table[i].node_bitmap = 
+			bit_copy(switch_data_table[i].node_bitmap);
+		hypercube_switch_table[i].node_cnt =
+			switch_data_table[i].node_conn_cnt;
+		hypercube_switch_table[i].avail_cnt = 0;
+		hypercube_switch_table[i].node_index = xmalloc(
+			sizeof(int) * hypercube_switch_table[i].node_cnt);
+
+		for (j = 0; j < hypercube_switch_table[i].node_cnt; j++) {
+			hypercube_switch_table[i].node_index[j] =
+				switch_data_table[i].node_index[j];
+		}
+
+		hypercube_switch_table[i].distance = xmalloc(
+			sizeof(int32_t) * num_curves);
+		assert(num_curves >= hypercube_dimensions);
+		for (j = 0; j < hypercube_dimensions; j++) {
+			hypercube_switch_table[i].distance[j] = 0;
+		}
+	}
+
+	// allocate space for the pointers to each of the different curves
+	hypercube_switches =
+		xmalloc(sizeof(struct hypercube_switch **) * num_curves);
+}
+
+
+/* Apply a linear transformation to the switch coordinates so as to produce
+	a unique mapping from switch data to each Hilbert curve */
+static void _transform_coordinates(int curve_num)
+{
+	int i, j, dim;
+
+	// if it is the first curve, set up orig_coordinates struct
+	// and copy coordinates to orig_coordinates to be stored 
+	if (curve_num == 0) {
+		for (i = 0; i < switch_data_cnt; i++) {
+			for (j = 0; j < hypercube_dimensions; j++) {
+				switch_data_table[i].orig_coordinates[j] =
+					switch_data_table[i].coordinates[j];
+			}
+		}
+		return;
+	}
+
+	// copy the original coordinates to the temp coordinates of the switch
+	// and center the coordinates around the origin of coordinate system
+	for (i = 0; i < switch_data_cnt; i++) {
+		for (j = 0; j < hypercube_dimensions; j++) {
+			switch_data_table[i].coordinates[j] =
+				2 * switch_data_table[i].orig_coordinates[j] - 1;
+		}
+	}
+
+	// apply a 90-degree rotation in the (curve_num, dim) plane;
+	// NOTE(review): indexes coordinates[curve_num] directly — assumes
+	// curve_num < hypercube_dimensions, confirm against caller
+	dim = (curve_num + 1 ) % hypercube_dimensions;
+	for (i = 0; i < switch_data_cnt; i++) {
+		int temp = switch_data_table[i].coordinates[curve_num];
+
+		switch_data_table[i].coordinates[curve_num] = 
+			switch_data_table[i].coordinates[dim];
+		switch_data_table[i].coordinates[dim] = -1 * temp;
+	}
+
+	// uncenter the coordinates back to the range [0,1]
+	for (i = 0; i < switch_data_cnt; i++) {
+		for (j = 0; j < hypercube_dimensions; j++) {
+			switch_data_table[i].coordinates[j] = 
+				(switch_data_table[i].coordinates[j] + 1 ) / 2;
+		}
+	}
+}
+
+
+/*
+ * Create a Hilbert integer (rank) for each switch in the topology.
+ * The Hilbert Curve algorithm and AxestoTranspose function are taken from
+ * the torus topology plugin, slightly modified for the hypercube topology.
+ */
+static void _generate_hilbert_integers(void)
+{
+	switch_data * switch_ptr = switch_data_table;
+	int counter, switch_rank;
+	int i, j;
+	unsigned int hilbert[hypercube_dimensions];
+	
+	for (i = 0; i < switch_data_cnt; i++, switch_ptr++) {
+		for (j = 0; j < hypercube_dimensions; j++) {
+			hilbert[j] = switch_ptr->coordinates[j];
+		}
+
+		/*
+		 * Gray encode switch coordinates, then interleave the low bit
+		 * of each transposed axis (high axis first) into the rank
+		 */
+		_axes_to_transpose(hilbert, 1, hypercube_dimensions);
+
+		for (j = hypercube_dimensions - 1, counter = 0, switch_rank = 0;
+		     j >= 0; j--, counter++) {
+			switch_rank += (hilbert[j] & 1) << counter;
+		}
+		switch_ptr->rank = switch_rank;
+	}
+}
+
+
+/* Runs Hilbert Curve Algorithm on switch coordinates to create Gray code
+ * that can be used to make the Hilbert Integer for the switch */
+//	x = coordinate array of size n, b = bits per coordinate, n = dimensions
+static void _axes_to_transpose(unsigned int * x, int b, int n) 
+{
+	unsigned int p, q, t;
+	int i;
+
+	// Inverse undo
+	for (q = 1 << (b - 1); q > 1; q >>= 1) {
+		p = q - 1;
+		if (x[0] & q) {
+			x[0] ^= p; // invert
+		}
+
+		for (i = 1; i < n; i++) {
+			if (x[i] & q) {
+				x[0] ^= p; // invert
+			} else { // exchange
+				t = (x[0] ^ x[i]) & p;
+				x[0] ^= t;
+				x[i] ^= t;
+			}
+		}
+	}
+
+	// Gray encode (inverse of decode)
+	for (i = 1; i < n; i++) {
+		x[i] ^= x[i-1];
+	}
+	t = x[n-1];
+	for (i = 1; i < b; i <<= 1) {
+		x[n-1] ^= x[n-1] >> i;
+	}
+	t ^= x[n-1];
+	for (i = n - 2; i >= 0; i--) {
+		x[i] ^= t;
+	}
+}
+
+
+/*
+ * Selection-sort switches by their Hilbert integer ranks (O(n^2))
+ */
+static void _sort_switches_by_rank(int curve_num)
+{
+	int i, j, min_inx;
+	uint32_t min_val;
+	switch_data ** ranked_switch_table = xmalloc( 
+		sizeof(struct switch_data*) * switch_data_cnt);
+
+	for (i = 0; i < switch_data_cnt; i++) {
+		ranked_switch_table[i] = &switch_data_table[i];
+	}
+
+	/* Selection sort the pointer table by ascending Hilbert rank */
+	for (i = 0; i < switch_data_cnt; i++) {
+		min_val = ranked_switch_table[i]->rank;
+		min_inx = i;
+		for (j = i + 1; j < switch_data_cnt; j++) {
+			if (ranked_switch_table[j]->rank < min_val) {
+				min_val = ranked_switch_table[j]->rank;
+				min_inx = j;
+			}
+		}
+
+		if (min_inx != i) {	// swap records 
+			switch_data * sw_record_tmp = ranked_switch_table[i];
+
+			ranked_switch_table[i] = ranked_switch_table[min_inx];
+			ranked_switch_table[min_inx] = sw_record_tmp; 
+		}
+	}
+
+	// replace each switch's Hilbert rank by its sorted position
+	for (i = 0; i < switch_data_cnt; i++) {
+		ranked_switch_table[i]->rank = i;
+	}
+
+	_create_sorted_switch_distances(curve_num, ranked_switch_table);
+
+	xfree(ranked_switch_table);
+}
+
+
+// Compute cumulative link distances along the ranked switch ordering
+static void _create_sorted_switch_distances(
+	int curve_num, switch_data **ranked_switch_table)
+{
+	int i, total_distance = 0;
+
+	/* First entry's distance: link from the last switch back to the first
+	 * (the curve wraps around) */
+	total_distance += _get_switch_distance(
+		ranked_switch_table[0], 
+		ranked_switch_table[switch_data_cnt - 1]);
+	ranked_switch_table[0]->distance = total_distance;
+
+	/* Keep adding up so we have distance back to [0] */
+	for (i = 1; i < switch_data_cnt; i++) {
+		total_distance += _get_switch_distance(
+			ranked_switch_table[i], 
+			ranked_switch_table[i - 1]);
+		ranked_switch_table[i]->distance = total_distance;
+	}
+
+	/* Copy distances to hypercube_switch_table and add sorted pointers */
+	hypercube_switches[curve_num] = 
+		xmalloc(sizeof(struct hypercube_switch *) * switch_data_cnt);
+
+	for (i = 0; i < switch_data_cnt; i++ ) {
+		int index = ranked_switch_table[i]->index;
+		
+		hypercube_switch_table[index].distance[curve_num] = 
+			ranked_switch_table[i]->distance;
+		hypercube_switches[curve_num][i] =
+			&hypercube_switch_table[index];
+	}
+}
+	
+
+/* Returns the connection distance (link speed value) between two neighboring switches in the ranked table */
+static int _get_switch_distance(const switch_data *sw_ptr1,
+				const switch_data *sw_ptr2)
+{
+	int i;
+
+	for (i = 0; i < sw_ptr1->sw_conn_cnt; i++) {
+		if (sw_ptr1->sw_conns[i] == sw_ptr2) {
+			return sw_ptr1->sw_conn_speed[i];
+		}
+	}
+
+	/*
+	 * The switches are not linked in the Hilbert path of this machine.
+	 * We return a really big number to indicate this.
+	 */
+	return switch_time_unlinked;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+//// String Creation and Printing Related Function ////
+
+/* Logs (via debug) the switch string for every switch in switch_data_table */
+static void _print_switch_data_table(void)
+{
+	switch_data *switch_ptr = switch_data_table;
+	int i;
+
+	debug("Switch record table has %d switch records in it",
+	      switch_data_cnt);
+	for (i = 0; i < switch_data_cnt; i++, switch_ptr++) {
+		_print_switch_str(switch_ptr, 1, "    ");
+	}
+}
+
+
+/* Logs distances and node indices of all hypercube switches. NOTE(review): sprintf() with the destination buffer also passed as a source ("%s...", distances) is undefined behavior, the fixed 512-byte buffers can overflow, and the name is read from switch_data_table[i] rather than hypercube_switch_table[i] — confirm upstream */
+static void _print_hypercube_switch_table( int num_curves )
+{
+	char distances[512], nodes[512];
+	int i, j;
+
+	debug("Hypercube table has %d switch records in it",
+	      hypercube_switch_cnt);
+	for (i = 0; i < hypercube_switch_cnt; i++ ) {
+		strcpy(distances, "Distances: ");
+		for ( j = 0; j < num_curves; j++ ){
+			if (hypercube_switch_table[i].distance[j]) {
+				sprintf(distances, "%s%d, ", distances, 
+					hypercube_switch_table[i].distance[j]);
+			} else
+				break;
+		}
+		strcpy(nodes, "Node Index: ");
+		for ( j = 0; j < hypercube_switch_table[i].node_cnt; j++ ) {
+			sprintf(nodes, "%s%d, ", nodes,
+				hypercube_switch_table[i].node_index[j]);
+		}
+		debug("    %s: %d - %s %s", switch_data_table[i].name,
+		      i, distances,nodes);
+	}
+}
+
+
+/* Logs hypercube switch tables sorted by Hilbert curve integers. NOTE(review): sprintf() with the destination also used as a source argument is undefined behavior, and s[256] can overflow for many curves — confirm upstream */
+static void _print_sorted_hilbert_curves( int num_curves )
+{
+	int i, j;
+	char s[256];
+
+	debug("Hilbert Curves Ranking Created for %d Hilbert Curves",
+	      num_curves);
+	for ( i = 0 ; i < hypercube_switch_cnt ; i++ ) {
+		strcpy(s, "-- ");
+		for ( j = 0 ; j < num_curves ; j++ ) {
+			sprintf(s,"%s%7s -%4d,  ", s,
+				hypercube_switches[j][i]->switch_name,
+				hypercube_switches[j][i]->switch_index);
+		}
+		debug("%s", s);
+	}
+}
+
+
+/* Builds a string of a switch's name, coordinates and connections; when print==1 the string is logged via debug() and NULL returned, otherwise the caller owns (xfree) the returned string */
+static char *_print_switch_str(switch_data *switch_ptr, int print, char *offset)
+{
+//XXX overrun possibility
+	char *str = xmalloc(sizeof(char) * 1024);
+	char *coordinates = _create_coordinate_str(switch_ptr);
+	char *connections = _create_connection_str(switch_ptr);
+	char *conn_nodes = _create_conn_node_str(switch_ptr);
+
+	sprintf(str, "%s%s -- coordinates: %s -- connections:%s -- nodes:%s",
+		offset, switch_ptr->name, coordinates, connections, conn_nodes);
+	xfree(coordinates);
+	xfree(connections);
+	xfree(conn_nodes);
+
+	if (print == 1) {
+		debug("%s", str);
+		xfree(str);
+		return NULL;
+	}
+	return str;
+}
+
+
+/* Returns an xmalloc'd string "(c0,c1,...)" of a switch's coordinates. NOTE(review): buf[5] only fits 3 digits plus comma and NUL per coordinate — confirm the coordinate value range */
+static char *_create_coordinate_str(switch_data *switch_ptr)
+{
+	int i;
+	char *str = xmalloc( sizeof(char) * 1024);
+
+	strcpy(str,"(");
+	for (i = 0; i < hypercube_dimensions; i++) {
+		char buf[5];
+		sprintf(buf, "%d,",switch_ptr->coordinates[i]);
+		strcat(str, buf);
+	}
+	str[strlen(str)-1] = ')';
+	return str;
+}
+
+
+/* Returns an xmalloc'd "name-speed,..." string of a switch's connections. NOTE(review): if sw_conn_cnt == 0, str[strlen(str)-1] writes one byte before the buffer — confirm switches always have connections */
+static char *_create_connection_str(switch_data *switch_ptr)
+{
+	int i;
+	char *str = xmalloc(sizeof(char) * 1024);
+
+	strcpy(str,"");
+	for (i = 0; i < switch_ptr->sw_conn_cnt; i++) {
+		char buf[64];
+		sprintf(buf, "%s-%d,", switch_ptr->sw_conns[i]->name,
+			switch_ptr->sw_conn_speed[i] );
+		strcat(str, buf);
+	}
+
+	str[strlen(str)-1] = '\0';
+	return str;
+}
+
+
+/* Returns an xmalloc'd comma-separated string of the connected node names. NOTE(review): if node_conn_cnt == 0, str[strlen(str)-1] writes one byte before the buffer — confirm switches always have nodes */
+static char *_create_conn_node_str(switch_data *switch_ptr)
+{
+	int i;
+	char *str = xmalloc( sizeof(char) * 1024);
+
+	strcpy(str,"");
+	for (i = 0; i < switch_ptr->node_conn_cnt; i++) {
+		char buf[64];
+		sprintf(buf, "%s,",switch_ptr->node_conns[i]->name);
+		strcat(str, buf);
+	}
+	str[strlen(str)-1] = '\0';
+	return str;
+}
+
+
+//////////////////////////////////////////////////////////////////////////////
+//// Memory Freeing and Allocating Functions ////
+
+/* Free all memory associated with a slurm_conf_switches_t structure (List destructor callback) */
+static void _destroy_switches(void *ptr)
+{
+	slurm_conf_switches_t *s = (slurm_conf_switches_t *)ptr;
+	xfree(s->nodes);
+	xfree(s->switch_name);
+	xfree(s->switches);
+	xfree(ptr);
+}
+
+
+/* Free all memory associated with the switch_data_table structure */
+static void _free_switch_data_table(void)
+{
+	int i;
+
+	if (switch_data_table) {
+		for (i = 0; i < switch_data_cnt; i++) {
+			xfree(switch_data_table[i].name);
+			xfree(switch_data_table[i].nodes);
+			xfree(switch_data_table[i].switches);
+			xfree(switch_data_table[i].coordinates);
+			xfree(switch_data_table[i].orig_coordinates);
+			xfree(switch_data_table[i].sw_conns);
+			xfree(switch_data_table[i].sw_conn_speed);
+			xfree(switch_data_table[i].node_conns);
+			xfree(switch_data_table[i].node_index);
+			FREE_NULL_BITMAP(switch_data_table[i].node_bitmap);
+		}
+		xfree(switch_data_table);
+	}
+}
+
+/* Free all memory associated with the hypercube_switch_table structure */
+static void _free_hypercube_switch_table(void)
+{
+	int i;
+
+	if (hypercube_switch_table) {
+		for (i = 0; i < hypercube_switch_cnt ; i++) {
+			xfree(hypercube_switch_table[i].switch_name);
+			xfree(hypercube_switch_table[i].node_index);
+			xfree(hypercube_switch_table[i].distance);
+			FREE_NULL_BITMAP(hypercube_switch_table[i].node_bitmap);
+		}
+		xfree(hypercube_switch_table);
+	}
+}
diff --git a/src/plugins/topology/node_rank/Makefile.in b/src/plugins/topology/node_rank/Makefile.in
index 8d24b9d24..bd2f081f5 100644
--- a/src/plugins/topology/node_rank/Makefile.in
+++ b/src/plugins/topology/node_rank/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/topology/node_rank/topology_node_rank.c b/src/plugins/topology/node_rank/topology_node_rank.c
index f5ac057bb..beaa92db3 100644
--- a/src/plugins/topology/node_rank/topology_node_rank.c
+++ b/src/plugins/topology/node_rank/topology_node_rank.c
@@ -77,15 +77,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum versions for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "topology node_rank plugin";
 const char plugin_type[]        = "topology/node_rank";
-const uint32_t plugin_version   = 101;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
diff --git a/src/plugins/topology/none/Makefile.in b/src/plugins/topology/none/Makefile.in
index 5e26efa00..fc2f72294 100644
--- a/src/plugins/topology/none/Makefile.in
+++ b/src/plugins/topology/none/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/topology/none/topology_none.c b/src/plugins/topology/none/topology_none.c
index e1460f638..dfc17728d 100644
--- a/src/plugins/topology/none/topology_none.c
+++ b/src/plugins/topology/none/topology_none.c
@@ -69,15 +69,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "topology NONE plugin";
 const char plugin_type[]        = "topology/none";
-const uint32_t plugin_version   = 101;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 /*
  * init() is called when the plugin is loaded, before any other functions
diff --git a/src/plugins/topology/tree/Makefile.in b/src/plugins/topology/tree/Makefile.in
index 62b118a11..012161c1a 100644
--- a/src/plugins/topology/tree/Makefile.in
+++ b/src/plugins/topology/tree/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -273,6 +276,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -322,8 +327,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -342,6 +351,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -385,6 +397,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -408,6 +421,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/plugins/topology/tree/topology_tree.c b/src/plugins/topology/tree/topology_tree.c
index 1b422af04..d9c32cccc 100644
--- a/src/plugins/topology/tree/topology_tree.c
+++ b/src/plugins/topology/tree/topology_tree.c
@@ -74,15 +74,12 @@
  * of how this plugin satisfies that application.  SLURM will only load
  * a task plugin if the plugin_type string has a prefix of "task/".
  *
- * plugin_version - an unsigned 32-bit integer giving the version number
- * of the plugin.  If major and minor revisions are desired, the major
- * version number may be multiplied by a suitable magnitude constant such
- * as 100 or 1000.  Various SLURM versions will likely require a certain
- * minimum version for their plugins as this API matures.
+ * plugin_version - an unsigned 32-bit integer containing the Slurm version
+ * (major.minor.micro combined into a single number).
  */
 const char plugin_name[]        = "topology tree plugin";
 const char plugin_type[]        = "topology/tree";
-const uint32_t plugin_version   = 101;
+const uint32_t plugin_version   = SLURM_VERSION_NUMBER;
 
 typedef struct slurm_conf_switches {
 	uint32_t link_speed;	/* link speed, arbitrary units */
diff --git a/src/sacct/Makefile.in b/src/sacct/Makefile.in
index c065aff31..d6d3f3286 100644
--- a/src/sacct/Makefile.in
+++ b/src/sacct/Makefile.in
@@ -102,6 +102,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -110,10 +111,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -126,7 +129,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/sacct/options.c b/src/sacct/options.c
index 747ac2c55..7a93a1984 100644
--- a/src/sacct/options.c
+++ b/src/sacct/options.c
@@ -37,15 +37,18 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
+#include "src/common/parse_time.h"
 #include "src/common/proc_args.h"
 #include "src/common/read_config.h"
-#include "src/common/parse_time.h"
+#include "src/common/slurm_time.h"
 #include "src/common/uid.h"
 #include "sacct.h"
 #include <time.h>
 
 /* getopt_long options, integers but not characters */
-#define OPT_LONG_NAME	0x100
+#define OPT_LONG_NAME	   0x100
+#define OPT_LONG_DELIMITER 0x101
+#define OPT_LONG_NOCONVERT 0x102
 
 void _help_fields_msg(void);
 void _help_msg(void);
@@ -61,6 +64,7 @@ List print_fields_list = NULL;
 ListIterator print_fields_itr = NULL;
 int field_count = 0;
 List g_qos_list = NULL;
+List g_tres_list = NULL;
 
 void _help_fields_msg(void)
 {
@@ -182,7 +186,8 @@ static int _addto_id_char_list(List char_list, char *names, bool gid)
 /* returns number of objects added to list */
 static int _addto_state_char_list(List char_list, char *names)
 {
-	int i=0, start=0, c;
+	int i=0, start=0;
+	uint32_t c;
 	char *name = NULL, *tmp_char = NULL;
 	ListIterator itr = NULL;
 	char quote_c = '\0';
@@ -213,7 +218,7 @@ static int _addto_state_char_list(List char_list, char *names)
 					name = xmalloc((i-start+1));
 					memcpy(name, names+start, (i-start));
 					c = job_state_num(name);
-					if (c == -1)
+					if (c == NO_VAL)
 						fatal("unrecognized job "
 						      "state value");
 					xfree(name);
@@ -267,167 +272,10 @@ static int _addto_state_char_list(List char_list, char *names)
 	return count;
 }
 
-/* returns number of objects added to list */
-static int _addto_step_list(List step_list, char *names)
-{
-	int i=0, start=0;
-	char *name = NULL, *dot = NULL;
-	slurmdb_selected_step_t *selected_step = NULL;
-	slurmdb_selected_step_t *curr_step = NULL;
-
-	ListIterator itr = NULL;
-	char quote_c = '\0';
-	int quote = 0;
-	int count = 0;
-
-	if (!step_list) {
-		error("No list was given to fill in");
-		return 0;
-	}
-
-	itr = list_iterator_create(step_list);
-	if (names) {
-		if (names[i] == '\"' || names[i] == '\'') {
-			quote_c = names[i];
-			quote = 1;
-			i++;
-		}
-		start = i;
-		while(names[i]) {
-			//info("got %d - %d = %d", i, start, i-start);
-			if (quote && names[i] == quote_c)
-				break;
-			else if (names[i] == '\"' || names[i] == '\'')
-				names[i] = '`';
-			else if (names[i] == ',') {
-				if ((i-start) > 0) {
-					char *dot = NULL;
-					name = xmalloc((i-start+1));
-					memcpy(name, names+start, (i-start));
-
-					selected_step = xmalloc(
-						sizeof(slurmdb_selected_step_t));
-					dot = strstr(name, ".");
-					if (dot == NULL) {
-						debug2("No jobstep requested");
-						selected_step->stepid = NO_VAL;
-					} else {
-						*dot++ = 0;
-						/* can't use NO_VAL
-						 * since that means all */
-						if (!strcmp(dot, "batch"))
-							selected_step->stepid =
-								INFINITE;
-						else
-							selected_step->stepid =
-								atoi(dot);
-					}
-
-					dot = strstr(name, "_");
-					if (dot == NULL) {
-						debug2("No jobarray requested");
-						selected_step->array_task_id =
-							NO_VAL;
-					} else {
-						*dot++ = 0;
-						/* INFINITE means give
-						 * me all the tasks of
-						 * the array */
-						if (!dot)
-							selected_step->
-								array_task_id =
-								INFINITE;
-						else
-							selected_step->
-								array_task_id =
-								atoi(dot);
-					}
-
-					selected_step->jobid = atoi(name);
-					xfree(name);
-
-					while((curr_step = list_next(itr))) {
-						if ((curr_step->jobid
-						    == selected_step->jobid)
-						   && (curr_step->stepid
-						       == selected_step->
-						       stepid))
-							break;
-					}
-
-					if (!curr_step) {
-						list_append(step_list,
-							    selected_step);
-						count++;
-					} else
-						slurmdb_destroy_selected_step(
-							selected_step);
-					list_iterator_reset(itr);
-				}
-				i++;
-				start = i;
-			}
-			i++;
-		}
-		if ((i-start) > 0) {
-			name = xmalloc((i-start)+1);
-			memcpy(name, names+start, (i-start));
-
-			selected_step =
-				xmalloc(sizeof(slurmdb_selected_step_t));
-
-			dot = strstr(name, ".");
-			if (dot == NULL) {
-				debug2("No jobstep requested");
-				selected_step->stepid = NO_VAL;
-			} else {
-				*dot++ = 0;
-				/* can't use NO_VAL since that means all */
-				if (!strcmp(dot, "batch"))
-					selected_step->stepid = INFINITE;
-				else
-					selected_step->stepid = atoi(dot);
-			}
-			dot = strstr(name, "_");
-			if (dot == NULL) {
-				debug2("No jobarray requested");
-				selected_step->array_task_id =
-					NO_VAL;
-			} else {
-				*dot++ = 0;
-				/* INFINITE means give me all the tasks of
-				 * the array */
-				if (dot[0])
-					selected_step->array_task_id =
-						atoi(dot);
-			}
-
-			selected_step->jobid = atoi(name);
-			xfree(name);
-
-			while((curr_step = list_next(itr))) {
-				if ((curr_step->jobid == selected_step->jobid)
-				   && (curr_step->stepid
-				       == selected_step->stepid))
-					break;
-			}
-
-			if (!curr_step) {
-				list_append(step_list, selected_step);
-				count++;
-			} else
-				slurmdb_destroy_selected_step(
-					selected_step);
-		}
-	}
-	list_iterator_destroy(itr);
-	return count;
-}
-
 void _help_msg(void)
 {
-	printf("\
-sacct [<OPTION>]                                                            \n\
+    printf("\
+sacct [<OPTION>]                                                            \n \
     Valid <OPTION> values are:                                              \n\
      -a, --allusers:                                                        \n\
 	           Display jobs for all users. By default, only the         \n\
@@ -439,6 +287,11 @@ sacct [<OPTION>]                                                            \n\
      -b, --brief:                                                           \n\
 	           Equivalent to '--format=jobstep,state,error'.            \n\
      -c, --completion: Use job completion instead of accounting data.       \n\
+     --delimiter:                                                           \n\
+                 ASCII characters used to separate the fields when\n\
+                 specifying the  -p  or  -P options. The default\n\
+                 delimiter is a '|'. This options is ignored if\n\
+                 -p or -P options are not specified.\n\
      -D, --duplicates:                                                      \n\
 	           If SLURM job ids are reset, some job numbers will        \n\
 	           probably appear more than once refering to different jobs.\n\
@@ -485,7 +338,8 @@ sacct [<OPTION>]                                                            \n\
                              maxrsstask,averss,maxpages,maxpagesnode,       \n\
                              maxpagestask,avepages,mincpu,mincpunode,       \n\
                              mincputask,avecpu,ntasks,alloccpus,elapsed,    \n\
-                             state,exitcode,avecpufreq,consumedenergy,      \n\
+                             state,exitcode,avecpufreq,reqcpufreqmin,       \n\
+                             reqcpufreqmax,reqcpufreqgov,consumedenergy,    \n\
                              maxdiskread,maxdiskreadnode,maxdiskreadtask,   \n\
                              avediskread,maxdiskwrite,maxdiskwritenode,     \n\
                              maxdiskwritetask,avediskread,allocgres,reqgres \n\
@@ -498,10 +352,13 @@ sacct [<OPTION>]                                                            \n\
      -n, --noheader:                                                        \n\
 	           No header will be added to the beginning of output.      \n\
                    The default is to print a header.                        \n\
+     --noconvert:                                                           \n\
+		   Don't convert units from their original type             \n\
+		   (e.g. 2048M won't be converted to 2G).                   \n\
      -N, --nodelist:                                                        \n\
                    Display jobs that ran on any of these nodes,             \n\
                    can be one or more using a ranged string.                \n\
-         --name:                                                            \n\
+     --name:                                                                \n\
                    Display jobs that have any of these name(s).             \n\
      -o, --format:                                                          \n\
 	           Comma separated list of fields. (use \"--helpformat\"    \n\
@@ -563,13 +420,13 @@ void _init_params()
 	memset(&params, 0, sizeof(sacct_parameters_t));
 	params.job_cond = xmalloc(sizeof(slurmdb_job_cond_t));
 	params.job_cond->without_usage_truncation = 1;
+	params.convert_flags = CONVERT_NUM_UNIT_EXACT;
 }
 
 int get_data(void)
 {
 	slurmdb_job_rec_t *job = NULL;
 	slurmdb_step_rec_t *step = NULL;
-
 	ListIterator itr = NULL;
 	ListIterator itr_step = NULL;
 	slurmdb_job_cond_t *job_cond = params.job_cond;
@@ -598,7 +455,6 @@ int get_data(void)
 		itr_step = list_iterator_create(job->steps);
 		while((step = list_next(itr_step)) != NULL) {
 			/* now aggregate the aggregatable */
-			job->alloc_cpus = MAX(job->alloc_cpus, step->ncpus);
 
 			if (step->state < JOB_COMPLETE)
 				continue;
@@ -647,6 +503,7 @@ void parse_command_line(int argc, char **argv)
                 {"allocations",    no_argument,       0,    'X'},
                 {"brief",          no_argument,       0,    'b'},
                 {"completion",     no_argument,       0,    'c'},
+                {"delimiter",      required_argument, 0,    OPT_LONG_DELIMITER},
                 {"duplicates",     no_argument,       0,    'D'},
                 {"helpformat",     no_argument,       0,    'e'},
                 {"help-fields",    no_argument,       0,    'e'},
@@ -666,6 +523,7 @@ void parse_command_line(int argc, char **argv)
                 {"cluster",        required_argument, 0,    'M'},
                 {"clusters",       required_argument, 0,    'M'},
                 {"nodelist",       required_argument, 0,    'N'},
+                {"noconvert",      no_argument,       0,    OPT_LONG_NOCONVERT},
                 {"noheader",       no_argument,       0,    'n'},
                 {"fields",         required_argument, 0,    'o'},
                 {"format",         required_argument, 0,    'o'},
@@ -714,6 +572,9 @@ void parse_command_line(int argc, char **argv)
 		case 'c':
 			params.opt_completion = 1;
 			break;
+		case OPT_LONG_DELIMITER:
+			fields_delimiter = optarg;
+			break;
 		case 'C':
 			/* 'C' is deprecated since 'M' is cluster on
 			   everything else.
@@ -785,7 +646,7 @@ void parse_command_line(int argc, char **argv)
 			if (!job_cond->step_list)
 				job_cond->step_list = list_create(
 					slurmdb_destroy_selected_step);
-			_addto_step_list(job_cond->step_list, optarg);
+			slurm_addto_step_list(job_cond->step_list, optarg);
 			break;
 		case 'k':
 			job_cond->timelimit_min = time_str2mins(optarg);
@@ -805,6 +666,9 @@ void parse_command_line(int argc, char **argv)
 		case 'l':
 			long_output = true;
 			break;
+		case OPT_LONG_NOCONVERT:
+			params.convert_flags |= CONVERT_NUM_UNIT_NO;
+			break;
 		case 'n':
 			print_fields_have_header = 0;
 			break;
@@ -941,7 +805,8 @@ void parse_command_line(int argc, char **argv)
 		*/
 		if (!job_cond->state_list
 		    || !list_count(job_cond->state_list)) {
-			if (!localtime_r(&job_cond->usage_start, &start_tm)) {
+			if (!slurm_localtime_r(&job_cond->usage_start,
+					       &start_tm)) {
 				error("Couldn't get localtime from %ld",
 				      (long)job_cond->usage_start);
 				return;
@@ -950,16 +815,16 @@ void parse_command_line(int argc, char **argv)
 			start_tm.tm_min = 0;
 			start_tm.tm_hour = 0;
 			start_tm.tm_isdst = -1;
-			job_cond->usage_start = mktime(&start_tm);
+			job_cond->usage_start = slurm_mktime(&start_tm);
 		}
 	}
 
 	if (verbosity > 0) {
 		char start_char[25], end_char[25];
 
-		slurm_ctime_r(&job_cond->usage_start, start_char);
+		slurm_ctime2_r(&job_cond->usage_start, start_char);
 		if (job_cond->usage_end)
-			slurm_ctime_r(&job_cond->usage_end, end_char);
+			slurm_ctime2_r(&job_cond->usage_end, end_char);
 		else
 			sprintf(end_char, "Now");
 		info("Jobs eligible from %s - %s", start_char, end_char);
@@ -1009,8 +874,7 @@ void parse_command_line(int argc, char **argv)
 	if (all_clusters) {
 		if (job_cond->cluster_list
 		   && list_count(job_cond->cluster_list)) {
-			list_destroy(job_cond->cluster_list);
-			job_cond->cluster_list = NULL;
+			FREE_NULL_LIST(job_cond->cluster_list);
 		}
 		debug2("Clusters requested:\tall");
 	} else if (job_cond->cluster_list
@@ -1043,9 +907,9 @@ void parse_command_line(int argc, char **argv)
 		all_users = 1;
 
 	if (all_users) {
-		if (job_cond->userid_list && list_count(job_cond->userid_list)) {
-			list_destroy(job_cond->userid_list);
-			job_cond->userid_list = NULL;
+		if (job_cond->userid_list &&
+		    list_count(job_cond->userid_list)) {
+			FREE_NULL_LIST(job_cond->userid_list);
 		}
 		debug2("Userids requested:\tall");
 	} else if (job_cond->userid_list && list_count(job_cond->userid_list)) {
@@ -1327,12 +1191,10 @@ void sacct_fini()
 {
 	if (print_fields_itr)
 		list_iterator_destroy(print_fields_itr);
-	if (print_fields_list)
-		list_destroy(print_fields_list);
-	if (jobs)
-		list_destroy(jobs);
-	if (g_qos_list)
-		list_destroy(g_qos_list);
+	FREE_NULL_LIST(print_fields_list);
+	FREE_NULL_LIST(jobs);
+	FREE_NULL_LIST(g_qos_list);
+	FREE_NULL_LIST(g_tres_list);
 
 	if (params.opt_completion)
 		g_slurm_jobcomp_fini();
diff --git a/src/sacct/print.c b/src/sacct/print.c
index b57dbf38b..a359d5413 100644
--- a/src/sacct/print.c
+++ b/src/sacct/print.c
@@ -112,7 +112,8 @@ static void _print_small_double(
 		return;
 
 	if (dub > 1)
-		convert_num_unit((double)dub, outbuf, buf_size, units);
+		convert_num_unit((double)dub, outbuf, buf_size, units,
+				 params.convert_flags);
 	else if (dub > 0)
 		snprintf(outbuf, buf_size, "%.2fM", dub);
 	else
@@ -212,6 +213,8 @@ void print_fields(type_t type, void *object)
 	struct	group *gr = NULL;
 	char outbuf[FORMAT_STRING_SIZE];
 	bool got_stats = false;
+	int cpu_tres_rec_count = 0;
+	int step_cpu_tres_rec_count = 0;
 
 	switch(type) {
 	case JOB:
@@ -224,14 +227,32 @@ void print_fields(type_t type, void *object)
 		*/
 		if (!step)
 			job->track_steps = 1;
+		else
+			step_cpu_tres_rec_count =
+				slurmdb_find_tres_count_in_string(
+					step->tres_alloc_str, TRES_CPU);
+
 		if (job->stats.cpu_min != NO_VAL)
 			got_stats = true;
+
 		job_comp = NULL;
+
+		cpu_tres_rec_count = slurmdb_find_tres_count_in_string(
+			job->tres_alloc_str, TRES_CPU);
 		break;
 	case JOBSTEP:
 		job = step->job_ptr;
+
 		if (step->stats.cpu_min != NO_VAL)
 			got_stats = true;
+
+		if (!(step_cpu_tres_rec_count =
+		      slurmdb_find_tres_count_in_string(
+			      step->tres_alloc_str, TRES_CPU)))
+			step_cpu_tres_rec_count =
+				slurmdb_find_tres_count_in_string(
+					job->tres_alloc_str, TRES_CPU);
+
 		job_comp = NULL;
 		break;
 	case JOBCOMP:
@@ -246,21 +267,26 @@ void print_fields(type_t type, void *object)
 	while((field = list_next(print_fields_itr))) {
 		char *tmp_char = NULL, id[FORMAT_STRING_SIZE];
 		int tmp_int = NO_VAL, tmp_int2 = NO_VAL;
-		double tmp_dub = (double)NO_VAL;
-		uint32_t tmp_uint32 = (uint32_t)NO_VAL;
-		uint64_t tmp_uint64 = (uint64_t)NO_VAL;
+		double tmp_dub = (double)NO_VAL; /* don't use NO_VAL64
+						    unless we can
+						    confirm the values
+						    coming in are
+						    NO_VAL64 */
+		uint32_t tmp_uint32 = NO_VAL;
+		uint64_t tmp_uint64 = NO_VAL64;
 
 		memset(&outbuf, 0, sizeof(outbuf));
 		switch(field->type) {
 		case PRINT_ALLOC_CPUS:
 			switch(type) {
 			case JOB:
-				tmp_int = job->alloc_cpus;
+				tmp_int = cpu_tres_rec_count;
+
 				// we want to use the step info
 				if (!step)
 					break;
 			case JOBSTEP:
-				tmp_int = step->ncpus;
+				tmp_int = step_cpu_tres_rec_count;
 				break;
 			case JOBCOMP:
 			default:
@@ -323,7 +349,9 @@ void print_fields(type_t type, void *object)
 			if (!fuzzy_equal(tmp_dub, NO_VAL))
 				convert_num_unit2((double)tmp_dub,
 						  outbuf, sizeof(outbuf),
-						  UNIT_KILO, 1000, false);
+						  UNIT_KILO, 1000,
+						  params.convert_flags &
+						  (~CONVERT_NUM_UNIT_EXACT));
 
 			field->print_routine(field,
 					     outbuf,
@@ -432,9 +460,10 @@ void print_fields(type_t type, void *object)
 				}
 			}
 			if (!fuzzy_equal(tmp_dub, NO_VAL))
-				convert_num_unit((double)tmp_dub,
-						 outbuf, sizeof(outbuf),
-						 UNIT_KILO);
+				convert_num_unit((double)tmp_dub, outbuf,
+						 sizeof(outbuf),
+						 UNIT_KILO,
+						 params.convert_flags);
 
 			field->print_routine(field,
 					     outbuf,
@@ -456,9 +485,10 @@ void print_fields(type_t type, void *object)
 				}
 			}
 			if (!fuzzy_equal(tmp_dub, NO_VAL))
-				convert_num_unit((double)tmp_dub,
-						 outbuf, sizeof(outbuf),
-						 UNIT_KILO);
+				convert_num_unit((double)tmp_dub, outbuf,
+						 sizeof(outbuf),
+						 UNIT_KILO,
+						 params.convert_flags);
 
 			field->print_routine(field,
 					     outbuf,
@@ -481,9 +511,10 @@ void print_fields(type_t type, void *object)
 			}
 
 			if (!fuzzy_equal(tmp_dub, NO_VAL))
-				convert_num_unit((double)tmp_dub,
-						 outbuf, sizeof(outbuf),
-						 UNIT_KILO);
+				convert_num_unit((double)tmp_dub, outbuf,
+						 sizeof(outbuf),
+						 UNIT_KILO,
+						 params.convert_flags);
 
 			field->print_routine(field,
 					     outbuf,
@@ -555,7 +586,9 @@ void print_fields(type_t type, void *object)
 			if (!fuzzy_equal(tmp_dub, NO_VAL))
 				convert_num_unit2((double)tmp_dub,
 						  outbuf, sizeof(outbuf),
-						  UNIT_NONE, 1000, false);
+						  UNIT_NONE, 1000,
+						  params.convert_flags &
+						  (~CONVERT_NUM_UNIT_EXACT));
 
 			field->print_routine(field,
 					     outbuf,
@@ -585,11 +618,11 @@ void print_fields(type_t type, void *object)
 			switch(type) {
 			case JOB:
 				tmp_uint64 = (uint64_t)job->elapsed
-					* (uint64_t)job->alloc_cpus;
+					* (uint64_t)cpu_tres_rec_count;
 				break;
 			case JOBSTEP:
 				tmp_uint64 = (uint64_t)step->elapsed
-					* (uint64_t)step->ncpus;
+					* (uint64_t)step_cpu_tres_rec_count;
 				break;
 			case JOBCOMP:
 				break;
@@ -604,11 +637,11 @@ void print_fields(type_t type, void *object)
 			switch(type) {
 			case JOB:
 				tmp_uint64 = (uint64_t)job->elapsed
-					* (uint64_t)job->alloc_cpus;
+					* (uint64_t)cpu_tres_rec_count;
 				break;
 			case JOBSTEP:
 				tmp_uint64 = (uint64_t)step->elapsed
-					* (uint64_t)step->ncpus;
+					* (uint64_t)step_cpu_tres_rec_count;
 				break;
 			case JOBCOMP:
 				break;
@@ -790,18 +823,22 @@ void print_fields(type_t type, void *object)
 						 job->jobid);
 			}
 
-			switch(type) {
+			switch (type) {
 			case JOB:
 				tmp_char = xstrdup(id);
 				break;
 			case JOBSTEP:
-				if (step->stepid == NO_VAL)
+				if (step->stepid == SLURM_BATCH_SCRIPT) {
 					tmp_char = xstrdup_printf(
 						"%s.batch", id);
-				else
+				} else if (step->stepid == SLURM_EXTERN_CONT) {
+					tmp_char = xstrdup_printf(
+						"%s.extern", id);
+				} else {
 					tmp_char = xstrdup_printf(
 						"%s.%u",
 						id, step->stepid);
+				}
 				break;
 			case JOBCOMP:
 				tmp_char = xstrdup_printf("%u",
@@ -816,20 +853,25 @@ void print_fields(type_t type, void *object)
 			xfree(tmp_char);
 			break;
 		case PRINT_JOBIDRAW:
-			switch(type) {
+			switch (type) {
 			case JOB:
 				tmp_char = xstrdup_printf("%u", job->jobid);
 				break;
 			case JOBSTEP:
-				if (step->stepid == NO_VAL)
+				if (step->stepid == SLURM_BATCH_SCRIPT) {
 					tmp_char = xstrdup_printf(
 						"%u.batch",
 						step->job_ptr->jobid);
-				else
+				} else if (step->stepid == SLURM_EXTERN_CONT) {
+					tmp_char = xstrdup_printf(
+						"%u.extern",
+						step->job_ptr->jobid);
+				} else {
 					tmp_char = xstrdup_printf(
 						"%u.%u",
 						step->job_ptr->jobid,
 						step->stepid);
+				}
 				break;
 			case JOBCOMP:
 				tmp_char = xstrdup_printf("%u",
@@ -1051,9 +1093,11 @@ void print_fields(type_t type, void *object)
 					break;
 				}
 				if (tmp_uint64 != (uint64_t)NO_VAL)
-					convert_num_unit((double)tmp_uint64,
-							 outbuf, sizeof(outbuf),
-							 UNIT_KILO);
+					convert_num_unit(
+						(double)tmp_uint64,
+						outbuf, sizeof(outbuf),
+						UNIT_KILO,
+						params.convert_flags);
 			}
 
 			field->print_routine(field,
@@ -1127,9 +1171,11 @@ void print_fields(type_t type, void *object)
 					break;
 				}
 				if (tmp_uint64 != (uint64_t)NO_VAL)
-					convert_num_unit((double)tmp_uint64,
-							 outbuf, sizeof(outbuf),
-							 UNIT_KILO);
+					convert_num_unit(
+						(double)tmp_uint64,
+						outbuf, sizeof(outbuf),
+						UNIT_KILO,
+						params.convert_flags);
 			}
 
 			field->print_routine(field,
@@ -1205,9 +1251,11 @@ void print_fields(type_t type, void *object)
 				}
 
 				if (tmp_uint64 != (uint64_t)NO_VAL)
-					convert_num_unit((double)tmp_uint64,
-							 outbuf, sizeof(outbuf),
-							 UNIT_KILO);
+					convert_num_unit(
+						(double)tmp_uint64,
+						outbuf, sizeof(outbuf),
+						UNIT_KILO,
+						params.convert_flags);
 			}
 
 			field->print_routine(field,
@@ -1383,8 +1431,9 @@ void print_fields(type_t type, void *object)
 				tmp_int = hostlist_count(hl);
 				hostlist_destroy(hl);
 			}
-			convert_num_unit((double)tmp_int,
-					 outbuf, sizeof(outbuf), UNIT_NONE);
+			convert_num_unit((double)tmp_int, outbuf,
+					 sizeof(outbuf), UNIT_NONE,
+					 params.convert_flags);
 			field->print_routine(field,
 					     outbuf,
 					     (curr_inx == field_count));
@@ -1393,7 +1442,7 @@ void print_fields(type_t type, void *object)
 			switch(type) {
 			case JOB:
 				if (!job->track_steps && !step)
-					tmp_int = job->alloc_cpus;
+					tmp_int = cpu_tres_rec_count;
 				// we want to use the step info
 				if (!step)
 					break;
@@ -1496,7 +1545,45 @@ void print_fields(type_t type, void *object)
 					     tmp_int,
 					     (curr_inx == field_count));
 			break;
-		case PRINT_REQ_CPUFREQ:
+		case PRINT_REQ_CPUFREQ_MIN:
+			switch (type) {
+			case JOB:
+				if (!job->track_steps && !step)
+					tmp_dub = NO_VAL;
+				// we want to use the step info
+				if (!step)
+					break;
+			case JOBSTEP:
+				tmp_dub = step->req_cpufreq_min;
+				break;
+			default:
+				break;
+			}
+			cpu_freq_to_string(outbuf, sizeof(outbuf), tmp_dub);
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_REQ_CPUFREQ_MAX:
+			switch (type) {
+			case JOB:
+				if (!job->track_steps && !step)
+					tmp_dub = NO_VAL;
+				// we want to use the step info
+				if (!step)
+					break;
+			case JOBSTEP:
+				tmp_dub = step->req_cpufreq_max;
+				break;
+			default:
+				break;
+			}
+			cpu_freq_to_string(outbuf, sizeof(outbuf), tmp_dub);
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_REQ_CPUFREQ_GOV:
 			switch (type) {
 			case JOB:
 				if (!job->track_steps && !step)
@@ -1505,7 +1592,7 @@ void print_fields(type_t type, void *object)
 				if (!step)
 					break;
 			case JOBSTEP:
-				tmp_dub = step->req_cpufreq;
+				tmp_dub = step->req_cpufreq_gov;
 				break;
 			default:
 				break;
@@ -1521,7 +1608,7 @@ void print_fields(type_t type, void *object)
 				tmp_int = job->req_cpus;
 				break;
 			case JOBSTEP:
-				tmp_int = step->ncpus;
+				tmp_int = step_cpu_tres_rec_count;
 				break;
 			case JOBCOMP:
 
@@ -1573,7 +1660,8 @@ void print_fields(type_t type, void *object)
 				}
 				convert_num_unit((double)tmp_uint32,
 						 outbuf, sizeof(outbuf),
-						 UNIT_MEGA);
+						 UNIT_MEGA,
+						 params.convert_flags);
 				if (per_cpu)
 					sprintf(outbuf+strlen(outbuf), "c");
 				else
@@ -1603,8 +1691,8 @@ void print_fields(type_t type, void *object)
 				break;
 			}
 			field->print_routine(field,
-						tmp_char,
-						(curr_inx == field_count));
+					     tmp_char,
+					     (curr_inx == field_count));
 			break;
 		case PRINT_RESERVATION_ID:
 			switch(type) {
@@ -1627,8 +1715,8 @@ void print_fields(type_t type, void *object)
 			if (tmp_uint32 == (uint32_t)NO_VAL)
 				tmp_uint32 = NO_VAL;
 			field->print_routine(field,
-						tmp_uint32,
-						(curr_inx == field_count));
+					     tmp_uint32,
+					     (curr_inx == field_count));
 			break;
 		case PRINT_RESV:
 			switch(type) {
@@ -1860,6 +1948,66 @@ void print_fields(type_t type, void *object)
 			}
 			tmp_char = _elapsed_time(tmp_int, tmp_int2);
 
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_TRESA:
+			switch(type) {
+			case JOB:
+				tmp_char = job->tres_alloc_str;
+				break;
+			case JOBSTEP:
+				tmp_char = step->tres_alloc_str;
+				break;
+			case JOBCOMP:
+			default:
+				tmp_char = NULL;
+				break;
+			}
+
+			if (!g_tres_list) {
+				slurmdb_tres_cond_t tres_cond;
+				memset(&tres_cond, 0,
+				       sizeof(slurmdb_tres_cond_t));
+				tres_cond.with_deleted = 1;
+				g_tres_list = slurmdb_tres_get(
+					acct_db_conn, &tres_cond);
+			}
+
+			tmp_char = slurmdb_make_tres_string_from_simple(
+				tmp_char, g_tres_list);
+
+			field->print_routine(field,
+					     tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_TRESR:
+			switch(type) {
+			case JOB:
+				tmp_char = job->tres_req_str;
+				break;
+			case JOBSTEP:
+			case JOBCOMP:
+			default:
+				tmp_char = NULL;
+				break;
+			}
+
+			if (!g_tres_list) {
+				slurmdb_tres_cond_t tres_cond;
+				memset(&tres_cond, 0,
+				       sizeof(slurmdb_tres_cond_t));
+				tres_cond.with_deleted = 1;
+				g_tres_list = slurmdb_tres_get(
+					acct_db_conn, &tres_cond);
+			}
+
+			tmp_char = slurmdb_make_tres_string_from_simple(
+				tmp_char, g_tres_list);
+
 			field->print_routine(field,
 					     tmp_char,
 					     (curr_inx == field_count));
diff --git a/src/sacct/sacct.c b/src/sacct/sacct.c
index 20f79212b..c7a70b4a6 100644
--- a/src/sacct/sacct.c
+++ b/src/sacct/sacct.c
@@ -47,6 +47,8 @@ print_field_t fields[] = {
 	{10, "AllocCPUS", print_fields_uint, PRINT_ALLOC_CPUS},
 	{12, "AllocGRES", print_fields_str, PRINT_ALLOC_GRES},
 	{10, "Account", print_fields_str, PRINT_ACCOUNT},
+	{10, "TRESAlloc", print_fields_str, PRINT_TRESA},
+	{10, "TRESReq", print_fields_str, PRINT_TRESR},
 	{7,  "AssocID", print_fields_uint, PRINT_ASSOCID},
 	{10, "AveCPU", print_fields_str, PRINT_AVECPU},
 	{10, "AveCPUFreq", print_fields_str, PRINT_ACT_CPUFREQ},
@@ -100,7 +102,10 @@ print_field_t fields[] = {
 	{10, "Partition", print_fields_str, PRINT_PARTITION},
 	{10, "QOS", print_fields_str, PRINT_QOS},
 	{6,  "QOSRAW", print_fields_uint, PRINT_QOSRAW},
-	{10, "ReqCPUFreq", print_fields_str, PRINT_REQ_CPUFREQ},
+	{10, "ReqCPUFreq", print_fields_str, PRINT_REQ_CPUFREQ_MAX}, /* vestigial */
+	{13, "ReqCPUFreqMin", print_fields_str, PRINT_REQ_CPUFREQ_MIN},
+	{13, "ReqCPUFreqMax", print_fields_str, PRINT_REQ_CPUFREQ_MAX},
+	{13, "ReqCPUFreqGov", print_fields_str, PRINT_REQ_CPUFREQ_GOV},
 	{8,  "ReqCPUS", print_fields_uint, PRINT_REQ_CPUS},
 	{12, "ReqGRES", print_fields_str, PRINT_REQ_GRES},
 	{10, "ReqMem", print_fields_str, PRINT_REQ_MEM},
diff --git a/src/sacct/sacct.h b/src/sacct/sacct.h
index 02dd2784a..5bc804d5a 100644
--- a/src/sacct/sacct.h
+++ b/src/sacct/sacct.h
@@ -69,7 +69,7 @@
 #define BRIEF_COMP_FIELDS "jobid,uid,state"
 #define DEFAULT_FIELDS "jobid,jobname,partition,account,alloccpus,state,exitcode"
 #define DEFAULT_COMP_FIELDS "jobid,uid,jobname,partition,nnodes,nodelist,state,end"
-#define LONG_FIELDS "jobid,jobidraw,jobname,partition,maxvmsize,maxvmsizenode,maxvmsizetask,avevmsize,maxrss,maxrssnode,maxrsstask,averss,maxpages,maxpagesnode,maxpagestask,avepages,mincpu,mincpunode,mincputask,avecpu,ntasks,alloccpus,elapsed,state,exitcode,avecpufreq,reqcpufreq,reqmem,consumedenergy,maxdiskread,maxdiskreadnode,maxdiskreadtask,avediskread,maxdiskwrite,maxdiskwritenode,maxdiskwritetask,avediskwrite,allocgres,reqgres"
+#define LONG_FIELDS "jobid,jobidraw,jobname,partition,maxvmsize,maxvmsizenode,maxvmsizetask,avevmsize,maxrss,maxrssnode,maxrsstask,averss,maxpages,maxpagesnode,maxpagestask,avepages,mincpu,mincpunode,mincputask,avecpu,ntasks,alloccpus,elapsed,state,exitcode,avecpufreq,reqcpufreqmin,reqcpufreqmax,reqcpufreqgov,reqmem,consumedenergy,maxdiskread,maxdiskreadnode,maxdiskreadtask,avediskread,maxdiskwrite,maxdiskwritenode,maxdiskwritetask,avediskwrite,allocgres,reqgres,tres"
 
 #define LONG_COMP_FIELDS "jobid,uid,jobname,partition,nnodes,nodelist,state,start,end,timelimit"
 
@@ -95,6 +95,8 @@ typedef enum {
 		PRINT_ACCOUNT,
 		PRINT_ALLOC_CPUS,
 		PRINT_ALLOC_GRES,
+		PRINT_TRESA,
+		PRINT_TRESR,
 		PRINT_ASSOCID,
 		PRINT_AVECPU,
 		PRINT_ACT_CPUFREQ,
@@ -146,7 +148,9 @@ typedef enum {
 		PRINT_PRIO,
 		PRINT_QOS,
 		PRINT_QOSRAW,
-		PRINT_REQ_CPUFREQ,
+		PRINT_REQ_CPUFREQ_MIN,
+		PRINT_REQ_CPUFREQ_MAX,
+		PRINT_REQ_CPUFREQ_GOV,
 		PRINT_REQ_CPUS,
 		PRINT_REQ_GRES,
 		PRINT_REQ_MEM,
@@ -166,10 +170,11 @@ typedef enum {
 		PRINT_USER,
 		PRINT_USERCPU,
 		PRINT_WCKEY,
-		PRINT_WCKEYID,
+		PRINT_WCKEYID
 } sacct_print_types_t;
 
 typedef struct {
+	uint32_t convert_flags;
 	slurmdb_job_cond_t *job_cond;
 	int opt_completion;	/* --completion */
 	int opt_dup;		/* --duplicates; +1 = explicitly set */
@@ -186,11 +191,11 @@ extern print_field_t fields[];
 extern sacct_parameters_t params;
 
 extern List jobs;
-
 extern List print_fields_list;
 extern ListIterator print_fields_itr;
 extern int field_count;
 extern List g_qos_list;
+extern List g_tres_list;
 
 /* process.c */
 char *find_hostname(uint32_t pos, char *hosts);
diff --git a/src/sacctmgr/Makefile.am b/src/sacctmgr/Makefile.am
index 480acd7f1..468992c59 100644
--- a/src/sacctmgr/Makefile.am
+++ b/src/sacctmgr/Makefile.am
@@ -20,13 +20,15 @@ sacctmgr_SOURCES =	\
 	event_functions.c	\
 	file_functions.c	\
 	job_functions.c		\
+	reservation_functions.c	\
 	resource_functions.c	\
 	sacctmgr.c sacctmgr.h	\
 	qos_functions.c		\
 	txn_functions.c		\
 	user_functions.c	\
 	wckey_functions.c	\
-	problem_functions.c
+	problem_functions.c     \
+	tres_function.c
 
 sacctmgr_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
 
diff --git a/src/sacctmgr/Makefile.in b/src/sacctmgr/Makefile.in
index 945b8f464..9ef8a26db 100644
--- a/src/sacctmgr/Makefile.in
+++ b/src/sacctmgr/Makefile.in
@@ -101,6 +101,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -109,10 +110,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -125,7 +128,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -139,10 +142,11 @@ am_sacctmgr_OBJECTS = account_functions.$(OBJEXT) \
 	config_functions.$(OBJEXT) cluster_functions.$(OBJEXT) \
 	common.$(OBJEXT) event_functions.$(OBJEXT) \
 	file_functions.$(OBJEXT) job_functions.$(OBJEXT) \
-	resource_functions.$(OBJEXT) sacctmgr.$(OBJEXT) \
-	qos_functions.$(OBJEXT) txn_functions.$(OBJEXT) \
-	user_functions.$(OBJEXT) wckey_functions.$(OBJEXT) \
-	problem_functions.$(OBJEXT)
+	reservation_functions.$(OBJEXT) resource_functions.$(OBJEXT) \
+	sacctmgr.$(OBJEXT) qos_functions.$(OBJEXT) \
+	txn_functions.$(OBJEXT) user_functions.$(OBJEXT) \
+	wckey_functions.$(OBJEXT) problem_functions.$(OBJEXT) \
+	tres_function.$(OBJEXT)
 sacctmgr_OBJECTS = $(am_sacctmgr_OBJECTS)
 am__DEPENDENCIES_1 =
 sacctmgr_DEPENDENCIES = $(top_builddir)/src/db_api/libslurmdb.o \
@@ -256,6 +260,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -305,8 +311,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -325,6 +335,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -368,6 +381,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -391,6 +405,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -468,13 +483,15 @@ sacctmgr_SOURCES = \
 	event_functions.c	\
 	file_functions.c	\
 	job_functions.c		\
+	reservation_functions.c	\
 	resource_functions.c	\
 	sacctmgr.c sacctmgr.h	\
 	qos_functions.c		\
 	txn_functions.c		\
 	user_functions.c	\
 	wckey_functions.c	\
-	problem_functions.c
+	problem_functions.c     \
+	tres_function.c
 
 sacctmgr_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
 all: all-am
@@ -582,8 +599,10 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/job_functions.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/problem_functions.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/qos_functions.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/reservation_functions.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/resource_functions.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sacctmgr.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/tres_function.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/txn_functions.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/user_functions.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/wckey_functions.Po@am__quote@
diff --git a/src/sacctmgr/account_functions.c b/src/sacctmgr/account_functions.c
index ac08a613d..274521082 100644
--- a/src/sacctmgr/account_functions.c
+++ b/src/sacctmgr/account_functions.c
@@ -49,7 +49,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int a_set = 0;
 	int u_set = 0;
 	int end = 0;
-	slurmdb_association_cond_t *assoc_cond = NULL;
+	slurmdb_assoc_cond_t *assoc_cond = NULL;
 	int command_len = 0;
 	int option = 0;
 
@@ -61,7 +61,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 
 	if (!acct_cond->assoc_cond) {
 		acct_cond->assoc_cond =
-			xmalloc(sizeof(slurmdb_association_cond_t));
+			xmalloc(sizeof(slurmdb_assoc_cond_t));
 	}
 
 	assoc_cond = acct_cond->assoc_cond;
@@ -141,7 +141,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if (slurm_addto_char_list(acct_cond->organization_list,
 						 argv[i]+end))
 				u_set = 1;
-		} else if (!(a_set = sacctmgr_set_association_cond(
+		} else if (!(a_set = sacctmgr_set_assoc_cond(
 				    assoc_cond, argv[i], argv[i]+end,
 				    command_len, option))) {
 			exit_code=1;
@@ -167,7 +167,7 @@ static int _set_rec(int *start, int argc, char *argv[],
 		    List acct_list,
 		    List cluster_list,
 		    slurmdb_account_rec_t *acct,
-		    slurmdb_association_rec_t *assoc)
+		    slurmdb_assoc_rec_t *assoc)
 {
 	int i;
 	int u_set = 0;
@@ -233,15 +233,14 @@ static int _set_rec(int *start, int argc, char *argv[],
 			uint32_t usage;
 			if (!assoc)
 				continue;
-			assoc->usage = xmalloc(sizeof(
-						assoc_mgr_association_usage_t));
+			assoc->usage = xmalloc(sizeof(slurmdb_assoc_usage_t));
 			if (get_uint(argv[i]+end, &usage,
 				     "RawUsage") == SLURM_SUCCESS) {
 				assoc->usage->usage_raw = usage;
 				a_set = 1;
 			}
 		} else if (!assoc ||
-			  (assoc && !(a_set = sacctmgr_set_association_rec(
+			  (assoc && !(a_set = sacctmgr_set_assoc_rec(
 					      assoc, argv[i], argv[i]+end,
 					      command_len, option)))) {
 			exit_code=1;
@@ -289,8 +288,7 @@ static int _isdefault_old(List acct_list)
 		rc = 1;
 	}
 
-	if (ret_list)
-		list_destroy(ret_list);
+	FREE_NULL_LIST(ret_list);
 
 	return rc;
 }
@@ -302,7 +300,7 @@ static int _isdefault(int cond_set, List acct_list, List assoc_list)
 	ListIterator itr2 = NULL;
 	char *acct;
 	char *output = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 
 	if (!acct_list || !list_count(acct_list)
 	    || !assoc_list || !list_count(assoc_list))
@@ -359,8 +357,8 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 	int i=0;
 	ListIterator itr = NULL, itr_c = NULL;
 	slurmdb_account_rec_t *acct = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
-	slurmdb_association_cond_t assoc_cond;
+	slurmdb_assoc_rec_t *assoc = NULL;
+	slurmdb_assoc_cond_t assoc_cond;
 	List name_list = list_create(slurm_destroy_char);
 	List cluster_list = list_create(slurm_destroy_char);
 	char *cluster = NULL;
@@ -374,10 +372,10 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 	int limit_set = 0;
 	slurmdb_account_rec_t *start_acct =
 		xmalloc(sizeof(slurmdb_account_rec_t));
-	slurmdb_association_rec_t *start_assoc =
-		xmalloc(sizeof(slurmdb_association_rec_t));
+	slurmdb_assoc_rec_t *start_assoc =
+		xmalloc(sizeof(slurmdb_assoc_rec_t));
 
-	slurmdb_init_association_rec(start_assoc, 0);
+	slurmdb_init_assoc_rec(start_assoc, 0);
 
 	for (i=0; i<argc; i++) {
 		int command_len = strlen(argv[i]);
@@ -391,9 +389,9 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 		return SLURM_ERROR;
 
 	if (!name_list || !list_count(name_list)) {
-		list_destroy(name_list);
-		list_destroy(cluster_list);
-		slurmdb_destroy_association_rec(start_assoc);
+		FREE_NULL_LIST(name_list);
+		FREE_NULL_LIST(cluster_list);
+		slurmdb_destroy_assoc_rec(start_assoc);
 		slurmdb_destroy_account_rec(start_acct);
 		exit_code=1;
 		fprintf(stderr, " Need name of account to add.\n");
@@ -401,7 +399,7 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 	} else {
 		slurmdb_account_cond_t account_cond;
 		memset(&account_cond, 0, sizeof(slurmdb_account_cond_t));
-		memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+		memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 
 		assoc_cond.acct_list = name_list;
 		account_cond.assoc_cond = &assoc_cond;
@@ -414,9 +412,9 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 		exit_code=1;
 		fprintf(stderr, " Problem getting accounts from database.  "
 			"Contact your admin.\n");
-		list_destroy(name_list);
-		list_destroy(cluster_list);
-		slurmdb_destroy_association_rec(start_assoc);
+		FREE_NULL_LIST(name_list);
+		FREE_NULL_LIST(cluster_list);
+		slurmdb_destroy_assoc_rec(start_assoc);
 		slurmdb_destroy_account_rec(start_acct);
 		return SLURM_ERROR;
 	}
@@ -433,11 +431,11 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 			fprintf(stderr,
 				" Problem getting clusters from database.  "
 				"Contact your admin.\n");
-			list_destroy(name_list);
-			list_destroy(cluster_list);
-			slurmdb_destroy_association_rec(start_assoc);
+			FREE_NULL_LIST(name_list);
+			FREE_NULL_LIST(cluster_list);
+			slurmdb_destroy_assoc_rec(start_assoc);
 			slurmdb_destroy_account_rec(start_acct);
-			list_destroy(local_account_list);
+			FREE_NULL_LIST(local_account_list);
 			return SLURM_ERROR;
 		}
 
@@ -447,11 +445,11 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 				"  Can't add accounts, no cluster "
 				"defined yet.\n"
 				" Please contact your administrator.\n");
-			list_destroy(name_list);
-			list_destroy(cluster_list);
-			slurmdb_destroy_association_rec(start_assoc);
+			FREE_NULL_LIST(name_list);
+			FREE_NULL_LIST(cluster_list);
+			slurmdb_destroy_assoc_rec(start_assoc);
 			slurmdb_destroy_account_rec(start_acct);
-			list_destroy(local_account_list);
+			FREE_NULL_LIST(local_account_list);
 			return SLURM_ERROR;
 		}
 		if (!cluster_list)
@@ -464,21 +462,21 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 			list_append(cluster_list, xstrdup(cluster_rec->name));
 		}
 		list_iterator_destroy(itr_c);
-		list_destroy(tmp_list);
+		FREE_NULL_LIST(tmp_list);
 	} else if (sacctmgr_validate_cluster_list(cluster_list)
 		   != SLURM_SUCCESS) {
-		slurmdb_destroy_association_rec(start_assoc);
+		slurmdb_destroy_assoc_rec(start_assoc);
 		slurmdb_destroy_account_rec(start_acct);
-		list_destroy(local_account_list);
+		FREE_NULL_LIST(local_account_list);
 
 		return SLURM_ERROR;
 	}
 
 
 	acct_list = list_create(slurmdb_destroy_account_rec);
-	assoc_list = list_create(slurmdb_destroy_association_rec);
+	assoc_list = list_create(slurmdb_destroy_assoc_rec);
 
-	memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+	memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 
 	assoc_cond.acct_list = list_create(NULL);
 	itr = list_iterator_create(name_list);
@@ -488,18 +486,18 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 	list_append(assoc_cond.acct_list, start_assoc->parent_acct);
 
 	assoc_cond.cluster_list = cluster_list;
-	local_assoc_list = acct_storage_g_get_associations(
+	local_assoc_list = acct_storage_g_get_assocs(
 		db_conn, my_uid, &assoc_cond);
-	list_destroy(assoc_cond.acct_list);
+	FREE_NULL_LIST(assoc_cond.acct_list);
 	if (!local_assoc_list) {
 		exit_code=1;
 		fprintf(stderr, " Problem getting associations from database.  "
 			"Contact your admin.\n");
-		list_destroy(name_list);
-		list_destroy(cluster_list);
-		slurmdb_destroy_association_rec(start_assoc);
+		FREE_NULL_LIST(name_list);
+		FREE_NULL_LIST(cluster_list);
+		slurmdb_destroy_assoc_rec(start_assoc);
 		slurmdb_destroy_account_rec(start_acct);
-		list_destroy(local_account_list);
+		FREE_NULL_LIST(local_account_list);
 		return SLURM_ERROR;
 	}
 
@@ -517,7 +515,7 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 		if (!sacctmgr_find_account_from_list(local_account_list, name)) {
 			acct = xmalloc(sizeof(slurmdb_account_rec_t));
 			acct->assoc_list =
-				list_create(slurmdb_destroy_association_rec);
+				list_create(slurmdb_destroy_assoc_rec);
 			acct->name = xstrdup(name);
 			if (start_acct->description)
 				acct->description =
@@ -558,31 +556,16 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 				continue;
 			}
 
-			assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-			slurmdb_init_association_rec(assoc, 0);
+			assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+			slurmdb_init_assoc_rec(assoc, 0);
 			assoc->acct = xstrdup(name);
 			assoc->cluster = xstrdup(cluster);
 			assoc->def_qos_id = start_assoc->def_qos_id;
+
 			assoc->parent_acct = xstrdup(start_assoc->parent_acct);
 			assoc->shares_raw = start_assoc->shares_raw;
 
-			assoc->grp_cpu_mins = start_assoc->grp_cpu_mins;
-			assoc->grp_cpu_run_mins = start_assoc->grp_cpu_run_mins;
-			assoc->grp_cpus = start_assoc->grp_cpus;
-			assoc->grp_jobs = start_assoc->grp_jobs;
-			assoc->grp_mem = start_assoc->grp_mem;
-			assoc->grp_nodes = start_assoc->grp_nodes;
-			assoc->grp_submit_jobs = start_assoc->grp_submit_jobs;
-			assoc->grp_wall = start_assoc->grp_wall;
-
-			assoc->max_cpu_mins_pj = start_assoc->max_cpu_mins_pj;
-			assoc->max_cpus_pj = start_assoc->max_cpus_pj;
-			assoc->max_jobs = start_assoc->max_jobs;
-			assoc->max_nodes_pj = start_assoc->max_nodes_pj;
-			assoc->max_submit_jobs = start_assoc->max_submit_jobs;
-			assoc->max_wall_pj = start_assoc->max_wall_pj;
-
-			assoc->qos_list = copy_char_list(start_assoc->qos_list);
+			slurmdb_copy_assoc_rec_limits(assoc, start_assoc);
 
 			if (acct)
 				list_append(acct->assoc_list, assoc);
@@ -598,8 +581,8 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 		list_iterator_destroy(itr_c);
 	}
 	list_iterator_destroy(itr);
-	list_destroy(local_account_list);
-	list_destroy(local_assoc_list);
+	FREE_NULL_LIST(local_account_list);
+	FREE_NULL_LIST(local_assoc_list);
 
 
 	if (!list_count(acct_list) && !list_count(assoc_list)) {
@@ -648,7 +631,7 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 
 	if (rc == SLURM_SUCCESS) {
 		if (list_count(assoc_list))
-			rc = acct_storage_g_add_associations(db_conn, my_uid,
+			rc = acct_storage_g_add_assocs(db_conn, my_uid,
 							     assoc_list);
 	} else {
 		exit_code=1;
@@ -676,12 +659,12 @@ extern int sacctmgr_add_account(int argc, char *argv[])
 	}
 
 end_it:
-	list_destroy(name_list);
-	list_destroy(cluster_list);
-	list_destroy(acct_list);
-	list_destroy(assoc_list);
+	FREE_NULL_LIST(name_list);
+	FREE_NULL_LIST(cluster_list);
+	FREE_NULL_LIST(acct_list);
+	FREE_NULL_LIST(assoc_list);
 
-	slurmdb_destroy_association_rec(start_assoc);
+	slurmdb_destroy_assoc_rec(start_assoc);
 	slurmdb_destroy_account_rec(start_acct);
 	return rc;
 }
@@ -696,7 +679,7 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 	ListIterator itr = NULL;
 	ListIterator itr2 = NULL;
 	slurmdb_account_rec_t *acct = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 
 	int field_count = 0;
 
@@ -718,7 +701,7 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 
 	if (exit_code) {
 		slurmdb_destroy_account_cond(acct_cond);
-		list_destroy(format_list);
+		FREE_NULL_LIST(format_list);
 		return SLURM_ERROR;
 	} else if (!list_count(format_list)) {
 		slurm_addto_char_list(format_list, "Acc,Des,O");
@@ -739,18 +722,18 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 				 "when querying with the withassoc option.\n"
 				 "Are you sure you want to continue?")) {
 			printf("Aborted\n");
-			list_destroy(format_list);
+			FREE_NULL_LIST(format_list);
 			slurmdb_destroy_account_cond(acct_cond);
 			return SLURM_SUCCESS;
 		}
 	}
 
 	print_fields_list = sacctmgr_process_format_list(format_list);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	if (exit_code) {
 		slurmdb_destroy_account_cond(acct_cond);
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 
@@ -760,7 +743,7 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 	if (!acct_list) {
 		exit_code=1;
 		fprintf(stderr, " Problem with query.\n");
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 
@@ -806,7 +789,7 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 							 field_count));
 						break;
 					default:
-						sacctmgr_print_association_rec(
+						sacctmgr_print_assoc_rec(
 							assoc, field, NULL,
 							(curr_inx ==
 							 field_count));
@@ -868,8 +851,8 @@ extern int sacctmgr_list_account(int argc, char *argv[])
 
 	list_iterator_destroy(itr2);
 	list_iterator_destroy(itr);
-	list_destroy(acct_list);
-	list_destroy(print_fields_list);
+	FREE_NULL_LIST(acct_list);
+	FREE_NULL_LIST(print_fields_list);
 
 	return rc;
 }
@@ -880,14 +863,14 @@ extern int sacctmgr_modify_account(int argc, char *argv[])
 	slurmdb_account_cond_t *acct_cond =
 		xmalloc(sizeof(slurmdb_account_cond_t));
 	slurmdb_account_rec_t *acct = xmalloc(sizeof(slurmdb_account_rec_t));
-	slurmdb_association_rec_t *assoc =
-		xmalloc(sizeof(slurmdb_association_rec_t));
+	slurmdb_assoc_rec_t *assoc =
+		xmalloc(sizeof(slurmdb_assoc_rec_t));
 
 	int i=0;
 	int cond_set = 0, prev_set = 0, rec_set = 0, set = 0;
 	List ret_list = NULL;
 
-	slurmdb_init_association_rec(assoc, 0);
+	slurmdb_init_assoc_rec(assoc, 0);
 
 	for (i=0; i<argc; i++) {
 		int command_len = strlen(argv[i]);
@@ -909,14 +892,14 @@ extern int sacctmgr_modify_account(int argc, char *argv[])
 	if (exit_code) {
 		slurmdb_destroy_account_cond(acct_cond);
 		slurmdb_destroy_account_rec(acct);
-		slurmdb_destroy_association_rec(assoc);
+		slurmdb_destroy_assoc_rec(assoc);
 		return SLURM_ERROR;
 	} else if (!rec_set) {
 		exit_code=1;
 		fprintf(stderr, " You didn't give me anything to set\n");
 		slurmdb_destroy_account_cond(acct_cond);
 		slurmdb_destroy_account_rec(acct);
-		slurmdb_destroy_association_rec(assoc);
+		slurmdb_destroy_assoc_rec(assoc);
 		return SLURM_ERROR;
 	} else if (!cond_set) {
 		if (!commit_check("You didn't set any conditions with 'WHERE'.\n"
@@ -924,7 +907,7 @@ extern int sacctmgr_modify_account(int argc, char *argv[])
 			printf("Aborted\n");
 			slurmdb_destroy_account_cond(acct_cond);
 			slurmdb_destroy_account_rec(acct);
-			slurmdb_destroy_association_rec(assoc);
+			slurmdb_destroy_assoc_rec(assoc);
 			return SLURM_SUCCESS;
 		}
 	}
@@ -939,7 +922,7 @@ extern int sacctmgr_modify_account(int argc, char *argv[])
 
 		slurmdb_destroy_account_cond(acct_cond);
 		slurmdb_destroy_account_rec(acct);
-		slurmdb_destroy_association_rec(assoc);
+		slurmdb_destroy_assoc_rec(assoc);
 		return rc;
 	}
 
@@ -975,8 +958,7 @@ extern int sacctmgr_modify_account(int argc, char *argv[])
 			rc = SLURM_ERROR;
 		}
 
-		if (ret_list)
-			list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 	}
 
 assoc_start:
@@ -1003,7 +985,7 @@ assoc_start:
 			}
 		}
 
-		ret_list = acct_storage_g_modify_associations(
+		ret_list = acct_storage_g_modify_assocs(
 			db_conn, my_uid, acct_cond->assoc_cond, assoc);
 
 		if (ret_list && list_count(ret_list)) {
@@ -1038,8 +1020,7 @@ assoc_start:
 			rc = SLURM_ERROR;
 		}
 
-		if (ret_list)
-			list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 	}
 
 assoc_end:
@@ -1055,7 +1036,7 @@ assoc_end:
 	}
 	slurmdb_destroy_account_cond(acct_cond);
 	slurmdb_destroy_account_rec(acct);
-	slurmdb_destroy_association_rec(assoc);
+	slurmdb_destroy_assoc_rec(assoc);
 
 	return rc;
 }
@@ -1122,7 +1103,7 @@ extern int sacctmgr_delete_account(int argc, char *argv[])
 	}
 
 	acct_cond->assoc_cond->only_defs = 1;
-	local_assoc_list = acct_storage_g_get_associations(
+	local_assoc_list = acct_storage_g_get_assocs(
 		db_conn, my_uid, acct_cond->assoc_cond);
 	acct_cond->assoc_cond->only_defs = 0;
 
@@ -1131,7 +1112,7 @@ extern int sacctmgr_delete_account(int argc, char *argv[])
 		ret_list = acct_storage_g_remove_accounts(
 			db_conn, my_uid, acct_cond);
 	} else if (cond_set & 2) {
-		ret_list = acct_storage_g_remove_associations(
+		ret_list = acct_storage_g_remove_assocs(
 			db_conn, my_uid, acct_cond->assoc_cond);
 	}
 	rc = errno;
@@ -1200,10 +1181,8 @@ extern int sacctmgr_delete_account(int argc, char *argv[])
 
 end_it:
 
-	if (ret_list)
-		list_destroy(ret_list);
-	if (local_assoc_list)
-		list_destroy(local_assoc_list);
+	FREE_NULL_LIST(ret_list);
+	FREE_NULL_LIST(local_assoc_list);
 
 	return rc;
 }
diff --git a/src/sacctmgr/archive_functions.c b/src/sacctmgr/archive_functions.c
index 9995b14fe..091af2179 100644
--- a/src/sacctmgr/archive_functions.c
+++ b/src/sacctmgr/archive_functions.c
@@ -545,7 +545,7 @@ extern int sacctmgr_archive_load(int argc, char *argv[])
 			fatal("getcwd failed: %m");
 
 		if ((fullpath = search_path(cwd, arch_rec->archive_file,
-					    true, mode))) {
+					    true, mode, false))) {
 			xfree(arch_rec->archive_file);
 			arch_rec->archive_file = fullpath;
 		}
diff --git a/src/sacctmgr/association_functions.c b/src/sacctmgr/association_functions.c
index d6b81bde9..3679c780e 100644
--- a/src/sacctmgr/association_functions.c
+++ b/src/sacctmgr/association_functions.c
@@ -41,7 +41,7 @@
 #include "src/sacctmgr/sacctmgr.h"
 
 static int _set_cond(int *start, int argc, char *argv[],
-		     slurmdb_association_cond_t *assoc_cond,
+		     slurmdb_assoc_cond_t *assoc_cond,
 		     List format_list)
 {
 	int i, end = 0;
@@ -119,7 +119,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 			if (format_list)
 				slurm_addto_char_list(format_list,
 						      argv[i]+end);
-		} else if (!(set = sacctmgr_set_association_cond(
+		} else if (!(set = sacctmgr_set_assoc_cond(
 				    assoc_cond, argv[i], argv[i]+end,
 				    command_len, option)) || exit_code) {
 			exit_code = 1;
@@ -133,18 +133,18 @@ static int _set_cond(int *start, int argc, char *argv[],
 }
 
 extern bool sacctmgr_check_default_qos(uint32_t qos_id,
-				       slurmdb_association_cond_t *assoc_cond)
+				       slurmdb_assoc_cond_t *assoc_cond)
 {
 	char *object = NULL;
 	ListIterator itr;
-	slurmdb_association_rec_t *assoc;
+	slurmdb_assoc_rec_t *assoc;
 	List no_access_list = NULL;
 	List assoc_list = NULL;
 
 	if (qos_id == NO_VAL)
 		return true;
 
-	assoc_list = acct_storage_g_get_associations(
+	assoc_list = acct_storage_g_get_assocs(
 		db_conn, my_uid, assoc_cond);
 	if (!assoc_list) {
 		fprintf(stderr, "Couldn't get a list back for checking qos.\n");
@@ -210,7 +210,7 @@ extern bool sacctmgr_check_default_qos(uint32_t qos_id,
 		}
 	}
 	list_iterator_destroy(itr);
-	list_destroy(assoc_list);
+	FREE_NULL_LIST(assoc_list);
 
 	if (!no_access_list)
 		return true;
@@ -224,13 +224,13 @@ extern bool sacctmgr_check_default_qos(uint32_t qos_id,
 	while ((object = list_next(itr)))
 		fprintf(stderr, "%s\n", object);
 	list_iterator_destroy(itr);
-	list_destroy(no_access_list);
+	FREE_NULL_LIST(no_access_list);
 
 	return 0;
 }
 
 
-extern int sacctmgr_set_association_cond(slurmdb_association_cond_t *assoc_cond,
+extern int sacctmgr_set_assoc_cond(slurmdb_assoc_cond_t *assoc_cond,
 					 char *type, char *value,
 					 int command_len, int option)
 {
@@ -286,115 +286,6 @@ extern int sacctmgr_set_association_cond(slurmdb_association_cond_t *assoc_cond,
 			set = 1;
 		else
 			exit_code = 1;
-	} else if (!strncasecmp (type, "FairShare", MAX(command_len, 1))
-		   || !strncasecmp (type, "Shares", MAX(command_len, 1))) {
-		if (!assoc_cond->fairshare_list)
-			assoc_cond->fairshare_list =
-				list_create(slurm_destroy_char);
-
-		if (slurm_addto_char_list(assoc_cond->fairshare_list, value))
-			set = 1;
-	} else if (!strncasecmp (type, "GrpCPUMins", MAX(command_len, 7))) {
-		if (!assoc_cond->grp_cpu_mins_list)
-			assoc_cond->grp_cpu_mins_list =
-				list_create(slurm_destroy_char);
-		if (slurm_addto_char_list(assoc_cond->grp_cpu_mins_list, value))
-			set = 1;
-	} else if (!strncasecmp (type, "GrpCPURunMins", MAX(command_len, 7))) {
-		if (!assoc_cond->grp_cpu_run_mins_list)
-			assoc_cond->grp_cpu_run_mins_list =
-				list_create(slurm_destroy_char);
-		if (slurm_addto_char_list(assoc_cond->grp_cpu_run_mins_list,
-					 value))
-			set = 1;
-	} else if (!strncasecmp (type, "GrpCpus", MAX(command_len, 7))) {
-		if (!assoc_cond->grp_cpus_list)
-			assoc_cond->grp_cpus_list =
-				list_create(slurm_destroy_char);
-		if (slurm_addto_char_list(assoc_cond->grp_cpus_list,
-					 value))
-			set = 1;
-	} else if (!strncasecmp (type, "GrpJobs", MAX(command_len, 4))) {
-		if (!assoc_cond->grp_jobs_list)
-			assoc_cond->grp_jobs_list =
-				list_create(slurm_destroy_char);
-		if (slurm_addto_char_list(assoc_cond->grp_jobs_list,
-					 value))
-			set = 1;
-	} else if (!strncasecmp (type, "GrpMemory", MAX(command_len, 4))) {
-		if (!assoc_cond->grp_mem_list)
-			assoc_cond->grp_mem_list =
-				list_create(slurm_destroy_char);
-		if (slurm_addto_char_list(assoc_cond->grp_mem_list,
-					 value))
-			set = 1;
-	} else if (!strncasecmp (type, "GrpNodes", MAX(command_len, 4))) {
-		if (!assoc_cond->grp_nodes_list)
-			assoc_cond->grp_nodes_list =
-				list_create(slurm_destroy_char);
-		if (slurm_addto_char_list(assoc_cond->grp_nodes_list,
-					 value))
-			set = 1;
-	} else if (!strncasecmp (type, "GrpSubmitJobs", MAX(command_len, 4))) {
-		if (!assoc_cond->grp_submit_jobs_list)
-			assoc_cond->grp_submit_jobs_list =
-				list_create(slurm_destroy_char);
-		if (slurm_addto_char_list(assoc_cond->grp_submit_jobs_list,
-					 value))
-			set = 1;
-	} else if (!strncasecmp (type, "GrpWall", MAX(command_len, 4))) {
-		if (!assoc_cond->grp_wall_list)
-			assoc_cond->grp_wall_list =
-				list_create(slurm_destroy_char);
-		if (slurm_addto_char_list(assoc_cond->grp_wall_list, value))
-			set = 1;
-	} else if (!strncasecmp (type, "MaxCPUMinsPerJob",
-				 MAX(command_len, 7))) {
-		if (!assoc_cond->max_cpu_mins_pj_list)
-			assoc_cond->max_cpu_mins_pj_list =
-				list_create(slurm_destroy_char);
-		if (slurm_addto_char_list(assoc_cond->max_cpu_mins_pj_list,
-					 value))
-			set = 1;
-	} else if (!strncasecmp (type, "MaxCPURunMins", MAX(command_len, 7))) {
-		if (!assoc_cond->max_cpu_run_mins_list)
-			assoc_cond->max_cpu_run_mins_list =
-				list_create(slurm_destroy_char);
-		if (slurm_addto_char_list(assoc_cond->max_cpu_run_mins_list,
-					 value))
-			set = 1;
-	} else if (!strncasecmp (type, "MaxCpusPerJob", MAX(command_len, 7))) {
-		if (!assoc_cond->max_cpus_pj_list)
-			assoc_cond->max_cpus_pj_list =
-				list_create(slurm_destroy_char);
-		if (slurm_addto_char_list(assoc_cond->max_cpus_pj_list, value))
-			set = 1;
-	} else if (!strncasecmp (type, "MaxJobs", MAX(command_len, 4))) {
-		if (!assoc_cond->max_jobs_list)
-			assoc_cond->max_jobs_list =
-				list_create(slurm_destroy_char);
-		if (slurm_addto_char_list(assoc_cond->max_jobs_list, value))
-			set = 1;
-	} else if (!strncasecmp (type, "MaxNodesPerJob", MAX(command_len, 4))) {
-		if (!assoc_cond->max_nodes_pj_list)
-			assoc_cond->max_nodes_pj_list =
-				list_create(slurm_destroy_char);
-		if (slurm_addto_char_list(assoc_cond->max_nodes_pj_list, value))
-			set = 1;
-	} else if (!strncasecmp (type, "MaxSubmitJobs", MAX(command_len, 4))) {
-		if (!assoc_cond->max_submit_jobs_list)
-			assoc_cond->max_submit_jobs_list =
-				list_create(slurm_destroy_char);
-		if (slurm_addto_char_list(assoc_cond->max_submit_jobs_list,
-					 value))
-			set = 1;
-	} else if (!strncasecmp (type, "MaxWallDurationPerJob",
-				 MAX(command_len, 4))) {
-		if (!assoc_cond->max_wall_pj_list)
-			assoc_cond->max_wall_pj_list =
-				list_create(slurm_destroy_char);
-		if (slurm_addto_char_list(assoc_cond->max_wall_pj_list, value))
-			set = 1;
 	} else if (!strncasecmp (type, "Partitions", MAX(command_len, 3))) {
 		if (!assoc_cond->partition_list)
 			assoc_cond->partition_list =
@@ -415,8 +306,8 @@ extern int sacctmgr_set_association_cond(slurmdb_association_cond_t *assoc_cond,
 			g_qos_list = acct_storage_g_get_qos(
 				db_conn, my_uid, NULL);
 
-		if (slurmdb_addto_qos_char_list(assoc_cond->qos_list, g_qos_list,
-					       value, option))
+		if (slurmdb_addto_qos_char_list(assoc_cond->qos_list,
+						g_qos_list, value, option))
 			set = 1;
 	} else if (!strncasecmp (type, "Users", MAX(command_len, 1))) {
 		if (!assoc_cond->user_list)
@@ -428,12 +319,15 @@ extern int sacctmgr_set_association_cond(slurmdb_association_cond_t *assoc_cond,
 	return set;
 }
 
-extern int sacctmgr_set_association_rec(slurmdb_association_rec_t *assoc,
-					char *type, char *value,
-					int command_len, int option)
+extern int sacctmgr_set_assoc_rec(slurmdb_assoc_rec_t *assoc,
+				  char *type, char *value,
+				  int command_len, int option)
 {
 	int set = 0;
 	uint32_t mins = NO_VAL;
+	uint64_t tmp64;
+	char *tmp_char = NULL;
+	uint32_t tres_flags = TRES_STR_FLAG_SORT_ID | TRES_STR_FLAG_REPLACE;
 
 	if (!assoc)
 		return set;
@@ -468,34 +362,118 @@ extern int sacctmgr_set_association_rec(slurmdb_association_rec_t *assoc,
 			set = 1;
 		}
 	} else if (!strncasecmp(type, "GrpCPUMins", MAX(command_len, 7))) {
-		if (get_uint64(value, &assoc->grp_cpu_mins,
-			       "GrpCPUMins") == SLURM_SUCCESS)
+		if (get_uint64(value, &tmp64,
+			       "GrpCPUMins") == SLURM_SUCCESS) {
 			set = 1;
+			tmp_char = xstrdup_printf(
+				"%d=%"PRIu64, TRES_CPU, tmp64);
+
+			slurmdb_combine_tres_strings(
+				&assoc->grp_tres_mins, tmp_char,
+				tres_flags);
+			xfree(tmp_char);
+		}
 	} else if (!strncasecmp(type, "GrpCPURunMins", MAX(command_len, 7))) {
-		if (get_uint64(value, &assoc->grp_cpu_run_mins,
-			       "GrpCPURunMins") == SLURM_SUCCESS)
-			set = 1;
+		if (get_uint64(value, &tmp64,
+			       "GrpCPURunMins") == SLURM_SUCCESS) {
+			set = 1;
+			tmp_char = xstrdup_printf(
+				"%d=%"PRIu64, TRES_CPU, tmp64);
+			slurmdb_combine_tres_strings(
+				&assoc->grp_tres_run_mins, tmp_char,
+				tres_flags);
+			xfree(tmp_char);
+		}
 	} else if (!strncasecmp(type, "GrpCpus", MAX(command_len, 7))) {
-		if (get_uint(value, &assoc->grp_cpus,
-			     "GrpCpus") == SLURM_SUCCESS)
-			set = 1;
+		if (get_uint64(value, &tmp64,
+			       "GrpCpus") == SLURM_SUCCESS) {
+			set = 1;
+			tmp_char = xstrdup_printf(
+				"%d=%"PRIu64, TRES_CPU, tmp64);
+			slurmdb_combine_tres_strings(
+				&assoc->grp_tres, tmp_char,
+				tres_flags);
+			xfree(tmp_char);
+		}
 	} else if (!strncasecmp(type, "GrpJobs", MAX(command_len, 4))) {
 		if (get_uint(value, &assoc->grp_jobs,
 			     "GrpJobs") == SLURM_SUCCESS)
 			set = 1;
 	} else if (!strncasecmp(type, "GrpMemory", MAX(command_len, 4))) {
-		if (get_uint(value, &assoc->grp_mem,
-			     "GrpMemory") == SLURM_SUCCESS)
-			set = 1;
+		if (get_uint64(value, &tmp64,
+			       "GrpMemory") == SLURM_SUCCESS) {
+			set = 1;
+			tmp_char = xstrdup_printf(
+				"%d=%"PRIu64, TRES_MEM, tmp64);
+			slurmdb_combine_tres_strings(
+				&assoc->grp_tres, tmp_char,
+				tres_flags);
+			xfree(tmp_char);
+		}
 	} else if (!strncasecmp(type, "GrpNodes", MAX(command_len, 4))) {
-		if (get_uint(value, &assoc->grp_nodes,
-			     "GrpNodes") == SLURM_SUCCESS)
-			set = 1;
+		if (get_uint64(value, &tmp64,
+			       "GrpNodes") == SLURM_SUCCESS) {
+			set = 1;
+			tmp_char = xstrdup_printf(
+				"%d=%"PRIu64, TRES_NODE, tmp64);
+			slurmdb_combine_tres_strings(
+				&assoc->grp_tres, tmp_char,
+				tres_flags);
+			xfree(tmp_char);
+		}
 	} else if (!strncasecmp(type, "GrpSubmitJobs",
 				MAX(command_len, 4))) {
 		if (get_uint(value, &assoc->grp_submit_jobs,
 			     "GrpSubmitJobs") == SLURM_SUCCESS)
 			set = 1;
+	} else if (!strncasecmp(type, "GrpTRES", MAX(command_len, 7))) {
+		if (!g_tres_list) {
+			slurmdb_tres_cond_t tres_cond;
+			memset(&tres_cond, 0, sizeof(slurmdb_tres_cond_t));
+			tres_cond.with_deleted = 1;
+			g_tres_list = slurmdb_tres_get(db_conn, &tres_cond);
+		}
+
+		if ((tmp_char = slurmdb_format_tres_str(
+			     value, g_tres_list, 1))) {
+			slurmdb_combine_tres_strings(
+				&assoc->grp_tres, tmp_char,
+				tres_flags);
+			set = 1;
+			xfree(tmp_char);
+		}
+	} else if (!strncasecmp(type, "GrpTRESMins", MAX(command_len, 8))) {
+		if (!g_tres_list) {
+			slurmdb_tres_cond_t tres_cond;
+			memset(&tres_cond, 0, sizeof(slurmdb_tres_cond_t));
+			tres_cond.with_deleted = 1;
+			g_tres_list = slurmdb_tres_get(db_conn, &tres_cond);
+		}
+
+		if ((tmp_char = slurmdb_format_tres_str(
+			     value, g_tres_list, 1))) {
+			slurmdb_combine_tres_strings(
+				&assoc->grp_tres_mins, tmp_char,
+				tres_flags);
+			set = 1;
+			xfree(tmp_char);
+		}
+	} else if (!strncasecmp(type, "GrpTRESRunMins", MAX(command_len, 8))) {
+		if (!g_tres_list) {
+			slurmdb_tres_cond_t tres_cond;
+			memset(&tres_cond, 0, sizeof(slurmdb_tres_cond_t));
+			tres_cond.with_deleted = 1;
+			g_tres_list = slurmdb_tres_get(db_conn, &tres_cond);
+		}
+
+		if ((tmp_char = slurmdb_format_tres_str(
+			     value, g_tres_list, 1))) {
+			slurmdb_combine_tres_strings(
+				&assoc->grp_tres_run_mins, tmp_char,
+				tres_flags);
+			set = 1;
+			xfree(tmp_char);
+		}
 	} else if (!strncasecmp(type, "GrpWall", MAX(command_len, 4))) {
 		mins = time_str2mins(value);
 		if (mins != NO_VAL) {
@@ -507,29 +485,120 @@ extern int sacctmgr_set_association_rec(slurmdb_association_rec_t *assoc,
 		}
 	} else if (!strncasecmp(type, "MaxCPUMinsPerJob",
 				MAX(command_len, 7))) {
-		if (get_uint64(value, &assoc->max_cpu_mins_pj,
-			       "MaxCPUMins") == SLURM_SUCCESS)
-			set = 1;
+		if (get_uint64(value, &tmp64,
+			       "MaxCPUMinsPerJob") == SLURM_SUCCESS) {
+			set = 1;
+			tmp_char = xstrdup_printf(
+				"%d=%"PRIu64, TRES_CPU, tmp64);
+			slurmdb_combine_tres_strings(
+				&assoc->max_tres_mins_pj, tmp_char,
+				tres_flags);
+			xfree(tmp_char);
+		}
 	} else if (!strncasecmp(type, "MaxCPURunMins", MAX(command_len, 7))) {
-		if (get_uint64(value, &assoc->max_cpu_run_mins,
-			       "MaxCPURunMins") == SLURM_SUCCESS)
-			set = 1;
+		if (get_uint64(value, &tmp64,
+			       "MaxCPURunMins") == SLURM_SUCCESS) {
+			set = 1;
+			tmp_char = xstrdup_printf(
+				"%d=%"PRIu64, TRES_CPU, tmp64);
+			slurmdb_combine_tres_strings(
+				&assoc->max_tres_run_mins, tmp_char,
+				tres_flags);
+			xfree(tmp_char);
+		}
 	} else if (!strncasecmp(type, "MaxCpusPerJob", MAX(command_len, 7))) {
-		if (get_uint(value, &assoc->max_cpus_pj,
-			     "MaxCpus") == SLURM_SUCCESS)
-			set = 1;
+		if (get_uint64(value, &tmp64,
+			       "MaxCpusPerJob") == SLURM_SUCCESS) {
+			set = 1;
+			tmp_char = xstrdup_printf(
+				"%d=%"PRIu64, TRES_CPU, tmp64);
+			slurmdb_combine_tres_strings(
+				&assoc->max_tres_pj, tmp_char,
+				tres_flags);
+			xfree(tmp_char);
+		}
 	} else if (!strncasecmp(type, "MaxJobs", MAX(command_len, 4))) {
 		if (get_uint(value, &assoc->max_jobs,
 			     "MaxJobs") == SLURM_SUCCESS)
 			set = 1;
 	} else if (!strncasecmp(type, "MaxNodesPerJob", MAX(command_len, 4))) {
-		if (get_uint(value, &assoc->max_nodes_pj,
-			     "MaxNodes") == SLURM_SUCCESS)
-			set = 1;
+		if (get_uint64(value, &tmp64,
+			       "MaxNodes") == SLURM_SUCCESS) {
+			set = 1;
+			tmp_char = xstrdup_printf(
+				"%d=%"PRIu64, TRES_NODE, tmp64);
+			slurmdb_combine_tres_strings(
+				&assoc->max_tres_pj, tmp_char,
+				tres_flags);
+			xfree(tmp_char);
+		}
 	} else if (!strncasecmp(type, "MaxSubmitJobs", MAX(command_len, 4))) {
 		if (get_uint(value, &assoc->max_submit_jobs,
 			     "MaxSubmitJobs") == SLURM_SUCCESS)
 			set = 1;
+	} else if (!strncasecmp(type, "MaxTRESPerJob", MAX(command_len, 7))) {
+		if (!g_tres_list) {
+			slurmdb_tres_cond_t tres_cond;
+			memset(&tres_cond, 0, sizeof(slurmdb_tres_cond_t));
+			tres_cond.with_deleted = 1;
+			g_tres_list = slurmdb_tres_get(db_conn, &tres_cond);
+		}
+		if ((tmp_char = slurmdb_format_tres_str(
+			     value, g_tres_list, 1))) {
+			slurmdb_combine_tres_strings(
+				&assoc->max_tres_pj, tmp_char,
+				tres_flags);
+			set = 1;
+			xfree(tmp_char);
+		}
+	} else if (!strncasecmp(type, "MaxTRESPerNode", MAX(command_len, 11))) {
+		if (!g_tres_list) {
+			slurmdb_tres_cond_t tres_cond;
+			memset(&tres_cond, 0, sizeof(slurmdb_tres_cond_t));
+			tres_cond.with_deleted = 1;
+			g_tres_list = slurmdb_tres_get(db_conn, &tres_cond);
+		}
+		if ((tmp_char = slurmdb_format_tres_str(
+			     value, g_tres_list, 1))) {
+			slurmdb_combine_tres_strings(
+				&assoc->max_tres_pn, tmp_char,
+				tres_flags);
+			set = 1;
+			xfree(tmp_char);
+		}
+	} else if (!strncasecmp(type, "MaxTRESMinsPerJob",
+				MAX(command_len, 8))) {
+		if (!g_tres_list) {
+			slurmdb_tres_cond_t tres_cond;
+			memset(&tres_cond, 0, sizeof(slurmdb_tres_cond_t));
+			tres_cond.with_deleted = 1;
+			g_tres_list = slurmdb_tres_get(db_conn, &tres_cond);
+		}
+
+		if ((tmp_char = slurmdb_format_tres_str(
+			     value, g_tres_list, 1))) {
+			slurmdb_combine_tres_strings(
+				&assoc->max_tres_mins_pj, tmp_char,
+				tres_flags);
+			set = 1;
+			xfree(tmp_char);
+		}
+	} else if (!strncasecmp(type, "MaxTRESRunMins", MAX(command_len, 8))) {
+		if (!g_tres_list) {
+			slurmdb_tres_cond_t tres_cond;
+			memset(&tres_cond, 0, sizeof(slurmdb_tres_cond_t));
+			tres_cond.with_deleted = 1;
+			g_tres_list = slurmdb_tres_get(db_conn, &tres_cond);
+		}
+
+		if ((tmp_char = slurmdb_format_tres_str(
+			     value, g_tres_list, 1))) {
+			slurmdb_combine_tres_strings(
+				&assoc->max_tres_run_mins, tmp_char,
+				tres_flags);
+			set = 1;
+			xfree(tmp_char);
+		}
 	} else if (!strncasecmp(type, "MaxWallDurationPerJob",
 				MAX(command_len, 4))) {
 		mins = time_str2mins(value);
@@ -562,9 +631,9 @@ extern int sacctmgr_set_association_rec(slurmdb_association_rec_t *assoc,
 	return set;
 }
 
-extern void sacctmgr_print_association_rec(slurmdb_association_rec_t *assoc,
-					   print_field_t *field, List tree_list,
-					   bool last)
+extern void sacctmgr_print_assoc_rec(slurmdb_assoc_rec_t *assoc,
+				     print_field_t *field, List tree_list,
+				     bool last)
 {
 	char *print_acct = NULL;
 	char *tmp_char = NULL;
@@ -615,22 +684,48 @@ extern void sacctmgr_print_association_rec(slurmdb_association_rec_t *assoc,
 			field->print_routine(field, assoc->shares_raw, last);
 		break;
 	case PRINT_GRPCM:
-		field->print_routine(field, assoc->grp_cpu_mins, last);
+		field->print_routine(field,
+				     slurmdb_find_tres_count_in_string(
+					     assoc->grp_tres_mins, TRES_CPU),
+				     last);
 		break;
 	case PRINT_GRPCRM:
-		field->print_routine(field, assoc->grp_cpu_run_mins, last);
+		field->print_routine(field,
+				     slurmdb_find_tres_count_in_string(
+					     assoc->grp_tres_run_mins,
+					     TRES_CPU),
+				     last);
 		break;
 	case PRINT_GRPC:
-		field->print_routine(field, assoc->grp_cpus, last);
+		field->print_routine(field,
+				     slurmdb_find_tres_count_in_string(
+					     assoc->grp_tres,
+					     TRES_CPU),
+				     last);
+		break;
+	case PRINT_GRPTM:
+		field->print_routine(field, assoc->grp_tres_mins, last);
+		break;
+	case PRINT_GRPTRM:
+		field->print_routine(field, assoc->grp_tres_run_mins, last);
+		break;
+	case PRINT_GRPT:
+		field->print_routine(field, assoc->grp_tres, last);
 		break;
 	case PRINT_GRPJ:
 		field->print_routine(field, assoc->grp_jobs, last);
 		break;
 	case PRINT_GRPMEM:
-		field->print_routine(field, assoc->grp_mem, last);
+		field->print_routine(field,
+				     slurmdb_find_tres_count_in_string(
+					     assoc->grp_tres, TRES_MEM),
+				     last);
 		break;
 	case PRINT_GRPN:
-		field->print_routine(field, assoc->grp_nodes, last);
+		field->print_routine(field,
+				     slurmdb_find_tres_count_in_string(
+					     assoc->grp_tres, TRES_NODE),
+				     last);
 		break;
 	case PRINT_GRPS:
 		field->print_routine(field, assoc->grp_submit_jobs, last);
@@ -645,19 +740,44 @@ extern void sacctmgr_print_association_rec(slurmdb_association_rec_t *assoc,
 		field->print_routine(field, assoc->lft, last);
 		break;
 	case PRINT_MAXCM:
-		field->print_routine(field, assoc->max_cpu_mins_pj, last);
+		field->print_routine(field,
+				     slurmdb_find_tres_count_in_string(
+					     assoc->max_tres_mins_pj, TRES_CPU),
+				     last);
 		break;
 	case PRINT_MAXCRM:
-		field->print_routine(field, assoc->max_cpu_run_mins, last);
+		field->print_routine(field,
+				     slurmdb_find_tres_count_in_string(
+					     assoc->max_tres_run_mins,
+					     TRES_CPU),
+				     last);
 		break;
 	case PRINT_MAXC:
-		field->print_routine(field, assoc->max_cpus_pj, last);
+		field->print_routine(field,
+				     slurmdb_find_tres_count_in_string(
+					     assoc->max_tres_pj, TRES_CPU),
+				     last);
+		break;
+	case PRINT_MAXTM:
+		field->print_routine(field, assoc->max_tres_mins_pj, last);
+		break;
+	case PRINT_MAXTRM:
+		field->print_routine(field, assoc->max_tres_run_mins, last);
+		break;
+	case PRINT_MAXT:
+		field->print_routine(field, assoc->max_tres_pj, last);
+		break;
+	case PRINT_MAXTN:
+		field->print_routine(field, assoc->max_tres_pn, last);
 		break;
 	case PRINT_MAXJ:
 		field->print_routine(field, assoc->max_jobs, last);
 		break;
 	case PRINT_MAXN:
-		field->print_routine(field, assoc->max_nodes_pj, last);
+		field->print_routine(field,
+				     slurmdb_find_tres_count_in_string(
+					     assoc->max_tres_pj, TRES_NODE),
+				     last);
 		break;
 	case PRINT_MAXS:
 		field->print_routine(field, assoc->max_submit_jobs, last);
@@ -696,13 +816,13 @@ extern void sacctmgr_print_association_rec(slurmdb_association_rec_t *assoc,
 	}
 }
 
-extern int sacctmgr_list_association(int argc, char *argv[])
+extern int sacctmgr_list_assoc(int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
-	slurmdb_association_cond_t *assoc_cond =
-		xmalloc(sizeof(slurmdb_association_cond_t));
+	slurmdb_assoc_cond_t *assoc_cond =
+		xmalloc(sizeof(slurmdb_assoc_cond_t));
 	List assoc_list = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	int i=0;
 	ListIterator itr = NULL;
 	ListIterator itr2 = NULL;
@@ -725,36 +845,36 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 	}
 
 	if (exit_code) {
-		slurmdb_destroy_association_cond(assoc_cond);
-		list_destroy(format_list);
+		slurmdb_destroy_assoc_cond(assoc_cond);
+		FREE_NULL_LIST(format_list);
 		return SLURM_ERROR;
 	} else if (!list_count(format_list)) {
 		slurm_addto_char_list(format_list, "Cluster,Account,User,Part");
 		if (!assoc_cond->without_parent_limits)
 			slurm_addto_char_list(format_list,
-					      "Share,GrpJ,GrpN,GrpCPUs,GrpMEM,"
-					      "GrpS,GrpWall,GrpCPUMins,MaxJ,"
-					      "MaxN,MaxCPUs,MaxS,MaxW,"
-					      "MaxCPUMins,QOS,DefaultQOS,GrpCPURunMins");
+					      "Share,GrpJ,GrpTRES,"
+					      "GrpS,GrpWall,GrpTRESMins,MaxJ,"
+					      "MaxTRES,MaxTRESPerN,MaxS,MaxW,"
+					      "MaxTRESMins,QOS,DefaultQOS,"
+					      "GrpTRESRunMins");
 	}
 	print_fields_list = sacctmgr_process_format_list(format_list);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	if (exit_code) {
-		slurmdb_destroy_association_cond(assoc_cond);
-		list_destroy(print_fields_list);
+		slurmdb_destroy_assoc_cond(assoc_cond);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 
-	assoc_list = acct_storage_g_get_associations(db_conn, my_uid,
-						     assoc_cond);
-	slurmdb_destroy_association_cond(assoc_cond);
+	assoc_list = acct_storage_g_get_assocs(db_conn, my_uid, assoc_cond);
+	slurmdb_destroy_assoc_cond(assoc_cond);
 
 	if (!assoc_list) {
 		exit_code=1;
 		fprintf(stderr, " Error with request: %s\n",
 			slurm_strerror(errno));
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 
@@ -778,7 +898,7 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 			last_cluster = assoc->cluster;
 		}
 		while((field = list_next(itr2))) {
-			sacctmgr_print_association_rec(
+			sacctmgr_print_assoc_rec(
 				assoc, field, tree_list,
 				(curr_inx == field_count));
 			curr_inx++;
@@ -787,24 +907,23 @@ extern int sacctmgr_list_association(int argc, char *argv[])
 		printf("\n");
 	}
 
-	if (tree_list)
-		list_destroy(tree_list);
+	FREE_NULL_LIST(tree_list);
 
 	list_iterator_destroy(itr2);
 	list_iterator_destroy(itr);
-	list_destroy(assoc_list);
-	list_destroy(print_fields_list);
+	FREE_NULL_LIST(assoc_list);
+	FREE_NULL_LIST(print_fields_list);
 	tree_display = 0;
 	return rc;
 }
 
-/* extern int sacctmgr_modify_association(int argc, char *argv[]) */
+/* extern int sacctmgr_modify_assoc(int argc, char *argv[]) */
 /* { */
 /* 	int rc = SLURM_SUCCESS; */
 /* 	return rc; */
 /* } */
 
-/* extern int sacctmgr_delete_association(int argc, char *argv[]) */
+/* extern int sacctmgr_delete_assoc(int argc, char *argv[]) */
 /* { */
 /* 	int rc = SLURM_SUCCESS; */
 /* 	return rc; */
diff --git a/src/sacctmgr/cluster_functions.c b/src/sacctmgr/cluster_functions.c
index 1fc182436..5cd63cb0b 100644
--- a/src/sacctmgr/cluster_functions.c
+++ b/src/sacctmgr/cluster_functions.c
@@ -144,7 +144,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 
 static int _set_rec(int *start, int argc, char *argv[],
 		    List name_list,
-		    slurmdb_association_rec_t *assoc,
+		    slurmdb_assoc_rec_t *assoc,
 		    uint16_t *classification)
 {
 	int i;
@@ -188,15 +188,19 @@ static int _set_rec(int *start, int argc, char *argv[],
 					set = 1;
 			}
 		} else if (!strncasecmp(argv[i], "GrpCPURunMins",
-					 MAX(command_len, 7))) {
+					 MAX(command_len, 7)) ||
+			   !strncasecmp(argv[i], "GrpTRESRunMins",
+					MAX(command_len, 8))) {
 			exit_code=1;
-			fprintf(stderr, "GrpCPURunMins is not a valid option "
+			fprintf(stderr, "GrpTRESRunMins is not a valid option "
 				"for the root association of a cluster.\n");
 			break;
 		} else if (!strncasecmp(argv[i], "GrpCPUMins",
-					 MAX(command_len, 7))) {
+					 MAX(command_len, 7)) ||
+			   !strncasecmp(argv[i], "GrpTRESMins",
+					MAX(command_len, 8))) {
 			exit_code=1;
-			fprintf(stderr, "GrpCPUMins is not a valid option "
+			fprintf(stderr, "GrpTRESMins is not a valid option "
 				"for the root association of a cluster.\n");
 			break;
 		} else if (!strncasecmp(argv[i], "GrpWall",
@@ -205,7 +209,7 @@ static int _set_rec(int *start, int argc, char *argv[],
 			fprintf(stderr, "GrpWall is not a valid option "
 				"for the root association of a cluster.\n");
 		} else if (!assoc ||
-			  (assoc && !(set = sacctmgr_set_association_rec(
+			  (assoc && !(set = sacctmgr_set_assoc_rec(
 					      assoc, argv[i], argv[i]+end,
 					      command_len, option)))) {
 			exit_code=1;
@@ -228,14 +232,14 @@ extern int sacctmgr_add_cluster(int argc, char *argv[])
 	slurmdb_cluster_rec_t *cluster = NULL;
 	List name_list = list_create(slurm_destroy_char);
 	List cluster_list = NULL;
-	slurmdb_association_rec_t start_assoc;
+	slurmdb_assoc_rec_t start_assoc;
 
 	int limit_set = 0;
 	ListIterator itr = NULL, itr_c = NULL;
 	char *name = NULL;
 	uint16_t class = 0;
 
-	slurmdb_init_association_rec(&start_assoc, 0);
+	slurmdb_init_assoc_rec(&start_assoc, 0);
 
 	for (i=0; i<argc; i++) {
 		int command_len = strlen(argv[i]);
@@ -246,10 +250,10 @@ extern int sacctmgr_add_cluster(int argc, char *argv[])
 				      name_list, &start_assoc, &class);
 	}
 	if (exit_code) {
-		list_destroy(name_list);
+		FREE_NULL_LIST(name_list);
 		return SLURM_ERROR;
 	} else if (!list_count(name_list)) {
-		list_destroy(name_list);
+		FREE_NULL_LIST(name_list);
 		exit_code=1;
 		fprintf(stderr, " Need name of cluster to add.\n");
 		return SLURM_ERROR;
@@ -289,9 +293,9 @@ extern int sacctmgr_add_cluster(int argc, char *argv[])
 		}
 		list_iterator_destroy(itr);
 		list_iterator_destroy(itr_c);
-		list_destroy(temp_list);
+		FREE_NULL_LIST(temp_list);
 		if (!list_count(name_list)) {
-			list_destroy(name_list);
+			FREE_NULL_LIST(name_list);
 			return SLURM_ERROR;
 		}
 	}
@@ -315,8 +319,8 @@ extern int sacctmgr_add_cluster(int argc, char *argv[])
 		cluster->name = xstrdup(name);
 		cluster->classification = class;
 		cluster->root_assoc =
-			xmalloc(sizeof(slurmdb_association_rec_t));
-		slurmdb_init_association_rec(cluster->root_assoc, 0);
+			xmalloc(sizeof(slurmdb_assoc_rec_t));
+		slurmdb_init_assoc_rec(cluster->root_assoc, 0);
 		printf("  Name          = %s\n", cluster->name);
 		if (cluster->classification)
 			printf("  Classification= %s\n",
@@ -325,33 +329,16 @@ extern int sacctmgr_add_cluster(int argc, char *argv[])
 		cluster->root_assoc->def_qos_id = start_assoc.def_qos_id;
 		cluster->root_assoc->shares_raw = start_assoc.shares_raw;
 
-		cluster->root_assoc->grp_cpus = start_assoc.grp_cpus;
-		cluster->root_assoc->grp_jobs = start_assoc.grp_jobs;
-		cluster->root_assoc->grp_mem = start_assoc.grp_mem;
-		cluster->root_assoc->grp_nodes = start_assoc.grp_nodes;
-		cluster->root_assoc->grp_submit_jobs =
-			start_assoc.grp_submit_jobs;
-
-		cluster->root_assoc->max_cpu_mins_pj =
-			start_assoc.max_cpu_mins_pj;
-		cluster->root_assoc->max_cpus_pj = start_assoc.max_cpus_pj;
-		cluster->root_assoc->max_jobs = start_assoc.max_jobs;
-		cluster->root_assoc->max_nodes_pj = start_assoc.max_nodes_pj;
-		cluster->root_assoc->max_submit_jobs =
-			start_assoc.max_submit_jobs;
-		cluster->root_assoc->max_wall_pj = start_assoc.max_wall_pj;
-
-		cluster->root_assoc->qos_list =
-			copy_char_list(start_assoc.qos_list);
+		slurmdb_copy_assoc_rec_limits(
+			cluster->root_assoc, &start_assoc);
 	}
 	list_iterator_destroy(itr);
-	list_destroy(name_list);
+	FREE_NULL_LIST(name_list);
 
 	if (limit_set) {
 		printf(" Default Limits\n");
 		sacctmgr_print_assoc_limits(&start_assoc);
-		if (start_assoc.qos_list)
-			list_destroy(start_assoc.qos_list);
+		FREE_NULL_LIST(start_assoc.qos_list);
 	}
 
 	if (!list_count(cluster_list)) {
@@ -384,7 +371,7 @@ extern int sacctmgr_add_cluster(int argc, char *argv[])
 	}
 
 end_it:
-	list_destroy(cluster_list);
+	FREE_NULL_LIST(cluster_list);
 
 	return rc;
 }
@@ -420,7 +407,7 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 
 	if (exit_code) {
 		slurmdb_destroy_cluster_cond(cluster_cond);
-		list_destroy(format_list);
+		FREE_NULL_LIST(format_list);
 		return SLURM_ERROR;
 	}
 
@@ -429,18 +416,19 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 				      "Cl,Controlh,Controlp,RPC");
 		if (!without_limits)
 			slurm_addto_char_list(format_list,
-					      "Fa,GrpJ,GrpN,GrpS,MaxJ,MaxN,"
-					      "MaxS,MaxW,QOS,DefaultQOS");
+					      "Fa,GrpJ,GrpTRES,GrpS,MaxJ,"
+					      "MaxTRES,MaxS,MaxW,QOS,"
+					      "DefaultQOS");
 	}
 
 	cluster_cond->with_deleted = with_deleted;
 
 	print_fields_list = sacctmgr_process_format_list(format_list);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	if (exit_code) {
 		slurmdb_destroy_cluster_cond(cluster_cond);
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 
@@ -451,7 +439,7 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 	if (!cluster_list) {
 		exit_code=1;
 		fprintf(stderr, " Problem with query.\n");
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 
@@ -461,9 +449,9 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 
 	field_count = list_count(print_fields_list);
 
-	while((cluster = list_next(itr))) {
+	while ((cluster = list_next(itr))) {
 		int curr_inx = 1;
-		slurmdb_association_rec_t *assoc = cluster->root_assoc;
+
 		/* set up the working cluster rec so nodecnt's and node names
 		 * are handled correctly */
 		working_cluster_rec = cluster;
@@ -491,36 +479,22 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 							     classification),
 						     (curr_inx == field_count));
 				break;
-			case PRINT_CPUS:
-			{
-				char tmp_char[9];
-				convert_num_unit((float)cluster->cpu_count,
-						 tmp_char, sizeof(tmp_char),
-						 UNIT_NONE);
+			case PRINT_TRES:
+				if (!g_tres_list) {
+					slurmdb_tres_cond_t tres_cond;
+					memset(&tres_cond, 0,
+					       sizeof(slurmdb_tres_cond_t));
+					tres_cond.with_deleted = 1;
+					g_tres_list = slurmdb_tres_get(
+						db_conn, &tres_cond);
+				}
+
+				tmp_char = slurmdb_make_tres_string_from_simple(
+					cluster->tres_str, g_tres_list);
 				field->print_routine(field,
 						     tmp_char,
 						     (curr_inx == field_count));
-				break;
-			}
-			case PRINT_DQOS:
-				if (!g_qos_list) {
-					g_qos_list = acct_storage_g_get_qos(
-						db_conn,
-						my_uid,
-						NULL);
-				}
-				tmp_char = slurmdb_qos_str(g_qos_list,
-							   assoc->def_qos_id);
-				field->print_routine(
-					field,
-					tmp_char,
-					(curr_inx == field_count));
-				break;
-			case PRINT_FAIRSHARE:
-				field->print_routine(
-					field,
-					assoc->shares_raw,
-					(curr_inx == field_count));
+				xfree(tmp_char);
 				break;
 			case PRINT_FLAGS:
 			{
@@ -533,64 +507,6 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 				xfree(tmp_char);
 				break;
 			}
-			case PRINT_GRPC:
-				field->print_routine(field,
-						     assoc->grp_cpus,
-						     (curr_inx == field_count));
-				break;
-			case PRINT_GRPJ:
-				field->print_routine(field,
-						     assoc->grp_jobs,
-						     (curr_inx == field_count));
-				break;
-			case PRINT_GRPMEM:
-				field->print_routine(field,
-						     assoc->grp_mem,
-						     (curr_inx == field_count));
-				break;
-			case PRINT_GRPN:
-				field->print_routine(field,
-						     assoc->grp_nodes,
-						     (curr_inx == field_count));
-				break;
-			case PRINT_GRPS:
-				field->print_routine(field,
-						     assoc->grp_submit_jobs,
-						     (curr_inx == field_count));
-				break;
-			case PRINT_MAXCM:
-				field->print_routine(
-					field,
-					assoc->max_cpu_mins_pj,
-					(curr_inx == field_count));
-				break;
-			case PRINT_MAXC:
-				field->print_routine(field,
-						     assoc->max_cpus_pj,
-						     (curr_inx == field_count));
-				break;
-			case PRINT_MAXJ:
-				field->print_routine(field,
-						     assoc->max_jobs,
-						     (curr_inx == field_count));
-				break;
-			case PRINT_MAXN:
-				field->print_routine(field,
-						     assoc->max_nodes_pj,
-						     (curr_inx == field_count));
-				break;
-			case PRINT_MAXS:
-				field->print_routine(field,
-						     assoc->max_submit_jobs,
-						     (curr_inx == field_count));
-				break;
-			case PRINT_MAXW:
-				field->print_routine(
-					field,
-					assoc->max_wall_pj,
-					(curr_inx == field_count));
-				break;
-
 			case PRINT_NODECNT:
 			{
 				hostlist_t hl = hostlist_create(cluster->nodes);
@@ -611,21 +527,6 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 					cluster->nodes,
 					(curr_inx == field_count));
 				break;
-			case PRINT_QOS:
-				if (!g_qos_list)
-					g_qos_list = acct_storage_g_get_qos(
-						db_conn, my_uid, NULL);
-
-				field->print_routine(field,
-						     g_qos_list,
-						     assoc->qos_list,
-						     (curr_inx == field_count));
-				break;
-			case PRINT_QOS_RAW:
-				field->print_routine(field,
-						     assoc->qos_list,
-						     (curr_inx == field_count));
-				break;
 			case PRINT_RPC_VERSION:
 				field->print_routine(
 					field,
@@ -639,9 +540,10 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 					(curr_inx == field_count));
 				break;
 			default:
-				field->print_routine(
-					field, NULL,
-					(curr_inx == field_count));
+				sacctmgr_print_assoc_rec(cluster->root_assoc,
+							 field, NULL,
+							 (curr_inx ==
+							  field_count));
 				break;
 			}
 			curr_inx++;
@@ -654,8 +556,8 @@ extern int sacctmgr_list_cluster(int argc, char *argv[])
 
 	list_iterator_destroy(itr2);
 	list_iterator_destroy(itr);
-	list_destroy(cluster_list);
-	list_destroy(print_fields_list);
+	FREE_NULL_LIST(cluster_list);
+	FREE_NULL_LIST(print_fields_list);
 
 	return rc;
 }
@@ -664,16 +566,16 @@ extern int sacctmgr_modify_cluster(int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
 	int i=0;
-	slurmdb_association_rec_t *assoc =
-		xmalloc(sizeof(slurmdb_association_rec_t));
-	slurmdb_association_cond_t *assoc_cond =
-		xmalloc(sizeof(slurmdb_association_cond_t));
+	slurmdb_assoc_rec_t *assoc =
+		xmalloc(sizeof(slurmdb_assoc_rec_t));
+	slurmdb_assoc_cond_t *assoc_cond =
+		xmalloc(sizeof(slurmdb_assoc_cond_t));
 	int cond_set = 0, prev_set = 0, rec_set = 0, set = 0;
 	List ret_list = NULL;
 	uint16_t class_rec = 0;
 	slurmdb_cluster_cond_t cluster_cond;
 
-	slurmdb_init_association_rec(assoc, 0);
+	slurmdb_init_assoc_rec(assoc, 0);
 
 	assoc_cond->cluster_list = list_create(slurm_destroy_char);
 	assoc_cond->acct_list = list_create(NULL);
@@ -738,8 +640,7 @@ extern int sacctmgr_modify_cluster(int argc, char *argv[])
 		/* we are only looking for the clusters returned from
 		   this query, so we free the cluster_list and replace
 		   it */
-		if (assoc_cond->cluster_list)
-			list_destroy(assoc_cond->cluster_list);
+		FREE_NULL_LIST(assoc_cond->cluster_list);
 		assoc_cond->cluster_list = temp_list;
 	}
 
@@ -754,7 +655,7 @@ extern int sacctmgr_modify_cluster(int argc, char *argv[])
 
 	list_append(assoc_cond->acct_list, "root");
 	notice_thread_init();
-	ret_list = acct_storage_g_modify_associations(
+	ret_list = acct_storage_g_modify_assocs(
 		db_conn, my_uid, assoc_cond, assoc);
 
 	if (ret_list && list_count(ret_list)) {
@@ -776,8 +677,7 @@ extern int sacctmgr_modify_cluster(int argc, char *argv[])
 		rc = SLURM_ERROR;
 	}
 
-	if (ret_list)
-		list_destroy(ret_list);
+	FREE_NULL_LIST(ret_list);
 
 	if (class_rec) {
 		slurmdb_cluster_rec_t cluster_rec;
@@ -809,8 +709,7 @@ extern int sacctmgr_modify_cluster(int argc, char *argv[])
 			rc = SLURM_ERROR;
 		}
 
-		if (ret_list)
-			list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 	}
 
 	notice_thread_fini();
@@ -824,8 +723,8 @@ extern int sacctmgr_modify_cluster(int argc, char *argv[])
 		}
 	}
 end_it:
-	slurmdb_destroy_association_cond(assoc_cond);
-	slurmdb_destroy_association_rec(assoc);
+	slurmdb_destroy_assoc_cond(assoc_cond);
+	slurmdb_destroy_assoc_rec(assoc);
 
 	return rc;
 }
@@ -891,7 +790,7 @@ extern int sacctmgr_delete_cluster(int argc, char *argv[])
 			while((object = list_next(itr))) {
 				fprintf(stderr,"  %s\n", object);
 			}
-			list_destroy(ret_list);
+			FREE_NULL_LIST(ret_list);
 			acct_storage_g_commit(db_conn, 0);
 			return rc;
 		}
@@ -916,8 +815,7 @@ extern int sacctmgr_delete_cluster(int argc, char *argv[])
 		rc = SLURM_ERROR;
 	}
 
-	if (ret_list)
-		list_destroy(ret_list);
+	FREE_NULL_LIST(ret_list);
 
 	return rc;
 }
@@ -927,8 +825,8 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 	slurmdb_user_cond_t user_cond;
 	slurmdb_user_rec_t *user = NULL;
 	slurmdb_hierarchical_rec_t *slurmdb_hierarchical_rec = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
-	slurmdb_association_cond_t assoc_cond;
+	slurmdb_assoc_rec_t *assoc = NULL;
+	slurmdb_assoc_cond_t assoc_cond;
 	List assoc_list = NULL;
 	List acct_list = NULL;
 	List user_list = NULL;
@@ -1029,7 +927,7 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 	user_cond.with_wckeys = 1;
 	user_cond.with_assocs = 1;
 
-	memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+	memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 	assoc_cond.without_parent_limits = 1;
 	assoc_cond.with_raw_qos = 1;
 	assoc_cond.cluster_list = list_create(NULL);
@@ -1074,7 +972,7 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 	xfree(user_name);
 
 	/* assoc_cond is set up above */
-	assoc_list = acct_storage_g_get_associations(db_conn, my_uid,
+	assoc_list = acct_storage_g_get_assocs(db_conn, my_uid,
 						     &assoc_cond);
 	FREE_NULL_LIST(assoc_cond.cluster_list);
 	if (!assoc_list) {
@@ -1118,7 +1016,7 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 		    "(root is created by default)...\n"
 		    "# Parent - 'root'\n"
 		    "# Account - 'cs':MaxNodesPerJob=5:MaxJobs=4:"
-		    "MaxCPUMins=20:FairShare=399:"
+		    "MaxTRESMins=cpu=20:FairShare=399:"
 		    "MaxWallDuration=40:Description='Computer Science':"
 		    "Organization='LC'\n"
 		    "# Any of the options after a ':' can be left out and "
@@ -1129,13 +1027,13 @@ extern int sacctmgr_dump_cluster (int argc, char *argv[])
 		    "fashion...\n"
 		    "# Parent - 'cs'\n"
 		    "# Account - 'test':MaxNodesPerJob=1:MaxJobs=1:"
-		    "MaxCPUMins=1:FairShare=1:"
+		    "MaxTRESMins=cpu=1:FairShare=1:"
 		    "MaxWallDuration=1:"
 		    "Description='Test Account':Organization='Test'\n"
 		    "# To add users to a account add a line like this after a "
 		    "Parent - 'line'\n"
 		    "# User - 'lipari':MaxNodesPerJob=2:MaxJobs=3:"
-		    "MaxCPUMins=4:FairShare=1:"
+		    "MaxTRESMins=cpu=4:FairShare=1:"
 		    "MaxWallDurationPerJob=1\n") < 0) {
 		exit_code = 1;
 		fprintf(stderr, "Can't write to file");
diff --git a/src/sacctmgr/common.c b/src/sacctmgr/common.c
index 60431cd28..8ca874a32 100644
--- a/src/sacctmgr/common.c
+++ b/src/sacctmgr/common.c
@@ -195,6 +195,16 @@ static print_field_t *_get_print_field(char *object)
 		field->name = xstrdup("% Allowed");
 		field->len = 10;
 		field->print_routine = print_fields_uint16;
+	} else if (!strncasecmp("Associations", object, MAX(command_len, 2))) {
+		field->type = PRINT_ASSOC_NAME;
+		field->name = xstrdup("Assocs");
+		field->len = 10;
+		field->print_routine = print_fields_str;
+	} else if (!strncasecmp("TRES", object, MAX(command_len, 2))) {
+		field->type = PRINT_TRES;
+		field->name = xstrdup("TRES");
+		field->len = 20;
+		field->print_routine = print_fields_str;
 	} else if (!strncasecmp("Classification", object,
 				MAX(command_len, 3))) {
 		field->type = PRINT_CLASS;
@@ -207,7 +217,7 @@ static print_field_t *_get_print_field(char *object)
 		field->name = xstrdup("Cluster Nodes");
 		field->len = 20;
 		field->print_routine = print_fields_str;
-	} else if (!strncasecmp("Cluster", object, MAX(command_len, 2))) {
+	} else if (!strncasecmp("Clusters", object, MAX(command_len, 2))) {
 		field->type = PRINT_CLUSTER;
 		field->name = xstrdup("Cluster");
 		field->len = 10;
@@ -242,10 +252,11 @@ static print_field_t *_get_print_field(char *object)
 		field->name = xstrdup("# Used");
 		field->len = 10;
 		field->print_routine = print_fields_uint32;
-	} else if (!strncasecmp("CPUCount", object, MAX(command_len, 2))) {
+	} else if (!strncasecmp("CPUCount", object,
+				MAX(command_len, 2))) {
 		field->type = PRINT_CPUS;
-		field->name = xstrdup("CPUCount");
-		field->len = 9;
+		field->name = xstrdup("CPU Cnt");
+		field->len = 7;
 		field->print_routine = print_fields_str;
 	} else if (!strncasecmp("DefaultAccount", object,
 				MAX(command_len, 8))) {
@@ -273,11 +284,6 @@ static print_field_t *_get_print_field(char *object)
 		field->name = xstrdup("Duration");
 		field->len = 13;
 		field->print_routine = print_fields_time_from_secs;
-	} else if (!strncasecmp("End", object, MAX(command_len, 2))) {
-		field->type = PRINT_END;
-		field->name = xstrdup("End");
-		field->len = 19;
-		field->print_routine = print_fields_date;
 	} else if (!strncasecmp("EventRaw", object, MAX(command_len, 6))) {
 		field->type = PRINT_EVENTRAW;
 		field->name = xstrdup("EventRaw");
@@ -298,6 +304,11 @@ static print_field_t *_get_print_field(char *object)
 		field->name = xstrdup("GraceTime");
 		field->len = 10;
 		field->print_routine = print_fields_time_from_secs;
+	} else if (!strncasecmp("GrpCPUs", object, MAX(command_len, 6))) {
+		field->type = PRINT_GRPC;
+		field->name = xstrdup("GrpCPUs");
+		field->len = 8;
+		field->print_routine = print_fields_uint64;
 	} else if (!strncasecmp("GrpCPUMins", object, MAX(command_len, 7))) {
 		field->type = PRINT_GRPCM;
 		field->name = xstrdup("GrpCPUMins");
@@ -308,11 +319,22 @@ static print_field_t *_get_print_field(char *object)
 		field->name = xstrdup("GrpCPURunMins");
 		field->len = 13;
 		field->print_routine = print_fields_uint64;
-	} else if (!strncasecmp("GrpCPUs", object, MAX(command_len, 7))) {
-		field->type = PRINT_GRPC;
-		field->name = xstrdup("GrpCPUs");
-		field->len = 8;
-		field->print_routine = print_fields_uint;
+	} else if (!strncasecmp("GrpTRES", object, MAX(command_len, 7))) {
+		field->type = PRINT_GRPT;
+		field->name = xstrdup("GrpTRES");
+		field->len = 13;
+		field->print_routine = sacctmgr_print_tres;
+	} else if (!strncasecmp("GrpTRESMins", object, MAX(command_len, 7))) {
+		field->type = PRINT_GRPTM;
+		field->name = xstrdup("GrpTRESMins");
+		field->len = 13;
+		field->print_routine = sacctmgr_print_tres;
+	} else if (!strncasecmp("GrpTRESRunMins",
+				object, MAX(command_len, 7))) {
+		field->type = PRINT_GRPTRM;
+		field->name = xstrdup("GrpTRESRunMins");
+		field->len = 13;
+		field->print_routine = sacctmgr_print_tres;
 	} else if (!strncasecmp("GrpJobs", object, MAX(command_len, 4))) {
 		field->type = PRINT_GRPJ;
 		field->name = xstrdup("GrpJobs");
@@ -374,13 +396,43 @@ static print_field_t *_get_print_field(char *object)
 		field->type = PRINT_MAXC;
 		field->name = xstrdup("MaxCPUs");
 		field->len = 8;
-		field->print_routine = print_fields_uint;
+		field->print_routine = print_fields_uint64;
 	} else if (!strncasecmp("MaxCPUsPerUser", object,
 				MAX(command_len, 11))) {
 		field->type = PRINT_MAXCU;
 		field->name = xstrdup("MaxCPUsPU");
 		field->len = 9;
 		field->print_routine = print_fields_uint;
+	} else if (!strncasecmp("MaxTRESPerJob",
+				object, MAX(command_len, 7))) {
+		field->type = PRINT_MAXT;
+		field->name = xstrdup("MaxTRES");
+		field->len = 13;
+		field->print_routine = sacctmgr_print_tres;
+	} else if (!strncasecmp("MaxTRESPerNode",
+				object, MAX(command_len, 11))) {
+		field->type = PRINT_MAXTN;
+		field->name = xstrdup("MaxTRESPerNode");
+		field->len = 14;
+		field->print_routine = sacctmgr_print_tres;
+	} else if (!strncasecmp("MaxTRESMinsPerJob", object,
+				MAX(command_len, 8))) {
+		field->type = PRINT_MAXTM;
+		field->name = xstrdup("MaxTRESMins");
+		field->len = 13;
+		field->print_routine = sacctmgr_print_tres;
+	} else if (!strncasecmp("MaxTRESRunMinsPerUser",
+				object, MAX(command_len, 8))) {
+		field->type = PRINT_MAXTRM;
+		field->name = xstrdup("MaxTRESRunMinsPU");
+		field->len = 15;
+		field->print_routine = sacctmgr_print_tres;
+	} else if (!strncasecmp("MaxTRESPerUser", object,
+				MAX(command_len, 11))) {
+		field->type = PRINT_MAXTU;
+		field->name = xstrdup("MaxTRESPU");
+		field->len = 13;
+		field->print_routine = sacctmgr_print_tres;
 	} else if (!strncasecmp("MaxJobs", object, MAX(command_len, 4))) {
 		field->type = PRINT_MAXJ;
 		field->name = xstrdup("MaxJobs");
@@ -426,6 +478,11 @@ static print_field_t *_get_print_field(char *object)
 		field->name = xstrdup("MinCPUs");
 		field->len = 8;
 		field->print_routine = print_fields_uint;
+	} else if (!strncasecmp("MinTRESPerJob", object, MAX(command_len, 7))) {
+		field->type = PRINT_MINT;
+		field->name = xstrdup("MinTRES");
+		field->len = 13;
+		field->print_routine = sacctmgr_print_tres;
 	} else if (!strncasecmp("Name", object, MAX(command_len, 2))) {
 		field->type = PRINT_NAME;
 		field->name = xstrdup("Name");
@@ -472,7 +529,7 @@ static print_field_t *_get_print_field(char *object)
 		field->name = xstrdup("PreemptMode");
 		field->len = 11;
 		field->print_routine = print_fields_str;
-	/* Preempt needs to follow PreemptMode */
+		/* Preempt needs to follow PreemptMode */
 	} else if (!strncasecmp("Preempt", object, MAX(command_len, 7))) {
 		field->type = PRINT_PREE;
 		field->name = xstrdup("Preempt");
@@ -498,6 +555,12 @@ static print_field_t *_get_print_field(char *object)
 		field->name = xstrdup("QOS_RAW");
 		field->len = 10;
 		field->print_routine = print_fields_char_list;
+	} else if (!strncasecmp("Reason", object,
+				MAX(command_len, 1))) {
+		field->type = PRINT_REASON;
+		field->name = xstrdup("Reason");
+		field->len = 30;
+		field->print_routine = print_fields_str;
 	} else if (!strncasecmp("RGT", object, MAX(command_len, 1))) {
 		field->type = PRINT_RGT;
 		field->name = xstrdup("RGT");
@@ -519,11 +582,40 @@ static print_field_t *_get_print_field(char *object)
 		field->name = xstrdup("Share");
 		field->len = 9;
 		field->print_routine = print_fields_uint;
+	} else if (!strncasecmp("StateRaw", object,
+				MAX(command_len, 6))) {
+		field->type = PRINT_STATERAW;
+		field->name = xstrdup("StateRaw");
+		field->len = 8;
+		field->print_routine = print_fields_uint;
+	} else if (!strncasecmp("State", object, MAX(command_len, 1))) {
+		field->type = PRINT_STATE;
+		field->name = xstrdup("State");
+		field->len = 6;
+		field->print_routine = print_fields_str;
 	} else if (!strncasecmp("TimeStamp", object, MAX(command_len, 2))) {
 		field->type = PRINT_TS;
 		field->name = xstrdup("Time");
 		field->len = 19;
 		field->print_routine = print_fields_date;
+	} else if (!strncasecmp("TimeStart", object, MAX(command_len, 7)) ||
+		   !strncasecmp("Start", object, MAX(command_len, 3))) {
+		field->type = PRINT_TIMESTART;
+		field->name = xstrdup("TimeStart");
+		field->len = 19;
+		field->print_routine = print_fields_date;
+	} else if (!strncasecmp("TimeEnd", object, MAX(command_len, 5)) ||
+		   !strncasecmp("End", object, MAX(command_len, 2))) {
+		field->type = PRINT_TIMEEND;
+		field->name = xstrdup("TimeEnd");
+		field->len = 19;
+		field->print_routine = print_fields_date;
+	} else if (!strncasecmp("TRES", object,
+				MAX(command_len, 2))) {
+		field->type = PRINT_TRES;
+		field->name = xstrdup("TRES");
+		field->len = 20;
+		field->print_routine = print_fields_str;
 	} else if (!strncasecmp("Type", object, MAX(command_len, 2))) {
 		field->type = PRINT_TYPE;
 		field->name = xstrdup("Type");
@@ -629,7 +721,7 @@ extern int commit_check(char *warning)
 	return 0;
 }
 
-extern int sacctmgr_remove_assoc_usage(slurmdb_association_cond_t *assoc_cond)
+extern int sacctmgr_remove_assoc_usage(slurmdb_assoc_cond_t *assoc_cond)
 {
 	List update_list = NULL;
 	List local_assoc_list = NULL;
@@ -640,7 +732,7 @@ extern int sacctmgr_remove_assoc_usage(slurmdb_association_cond_t *assoc_cond)
 	char *account = NULL;
 	char *cluster = NULL;
 	char *user = NULL;
-	slurmdb_association_rec_t* rec = NULL;
+	slurmdb_assoc_rec_t* rec = NULL;
 	slurmdb_cluster_rec_t* cluster_rec = NULL;
 	slurmdb_update_object_t* update_obj = NULL;
 	slurmdb_cluster_cond_t cluster_cond;
@@ -667,7 +759,7 @@ extern int sacctmgr_remove_assoc_usage(slurmdb_association_cond_t *assoc_cond)
 		return rc;
 	}
 
-	local_assoc_list = acct_storage_g_get_associations(
+	local_assoc_list = acct_storage_g_get_assocs(
 		db_conn, my_uid, assoc_cond);
 
 	slurmdb_init_cluster_cond(&cluster_cond, 0);
@@ -697,7 +789,7 @@ extern int sacctmgr_remove_assoc_usage(slurmdb_association_cond_t *assoc_cond)
 		if (itr3) {
 			while ((user = list_next(itr3))) {
 				while ((account = list_next(itr2))) {
-					rec = sacctmgr_find_association_from_list(
+					rec = sacctmgr_find_assoc_from_list(
 						local_assoc_list,
 						user, account, cluster, "*");
 					if (!rec) {
@@ -718,7 +810,7 @@ extern int sacctmgr_remove_assoc_usage(slurmdb_association_cond_t *assoc_cond)
 			list_iterator_reset(itr3);
 		} else {
 			while ((account = list_next(itr2))) {
-				rec = sacctmgr_find_association_from_list(
+				rec = sacctmgr_find_assoc_from_list(
 					local_assoc_list,
 					NULL, account, cluster, "*");
 				if (!rec) {
@@ -742,7 +834,7 @@ extern int sacctmgr_remove_assoc_usage(slurmdb_association_cond_t *assoc_cond)
 				cluster_rec->control_port,
 				cluster_rec->rpc_version);
 		}
-		list_destroy(update_list);
+		FREE_NULL_LIST(update_list);
 	}
 end_it:
 	list_iterator_destroy(itr);
@@ -750,8 +842,8 @@ end_it:
 	if (itr3)
 		list_iterator_destroy(itr3);
 
-	list_destroy(local_assoc_list);
-	list_destroy(local_cluster_list);
+	FREE_NULL_LIST(local_assoc_list);
+	FREE_NULL_LIST(local_cluster_list);
 
 	return rc;
 }
@@ -854,12 +946,12 @@ end_it:
 	return rc;
 }
 
-extern slurmdb_association_rec_t *sacctmgr_find_account_base_assoc(
+extern slurmdb_assoc_rec_t *sacctmgr_find_account_base_assoc(
 	char *account, char *cluster)
 {
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	char *temp = "root";
-	slurmdb_association_cond_t assoc_cond;
+	slurmdb_assoc_cond_t assoc_cond;
 	List assoc_list = NULL;
 
 	if (!cluster)
@@ -868,7 +960,7 @@ extern slurmdb_association_rec_t *sacctmgr_find_account_base_assoc(
 	if (account)
 		temp = account;
 
-	memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+	memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 	assoc_cond.acct_list = list_create(NULL);
 	list_append(assoc_cond.cluster_list, temp);
 	assoc_cond.cluster_list = list_create(NULL);
@@ -876,22 +968,22 @@ extern slurmdb_association_rec_t *sacctmgr_find_account_base_assoc(
 	assoc_cond.user_list = list_create(NULL);
 	list_append(assoc_cond.user_list, "");
 
-	assoc_list = acct_storage_g_get_associations(db_conn, my_uid,
+	assoc_list = acct_storage_g_get_assocs(db_conn, my_uid,
 						     &assoc_cond);
 
-	list_destroy(assoc_cond.acct_list);
-	list_destroy(assoc_cond.cluster_list);
-	list_destroy(assoc_cond.user_list);
+	FREE_NULL_LIST(assoc_cond.acct_list);
+	FREE_NULL_LIST(assoc_cond.cluster_list);
+	FREE_NULL_LIST(assoc_cond.user_list);
 
 	if (assoc_list)
 		assoc = list_pop(assoc_list);
 
-	list_destroy(assoc_list);
+	FREE_NULL_LIST(assoc_list);
 
 	return assoc;
 }
 
-extern slurmdb_association_rec_t *sacctmgr_find_root_assoc(char *cluster)
+extern slurmdb_assoc_rec_t *sacctmgr_find_root_assoc(char *cluster)
 {
 	return sacctmgr_find_account_base_assoc(NULL, cluster);
 }
@@ -900,14 +992,14 @@ extern slurmdb_user_rec_t *sacctmgr_find_user(char *name)
 {
 	slurmdb_user_rec_t *user = NULL;
 	slurmdb_user_cond_t user_cond;
-	slurmdb_association_cond_t assoc_cond;
+	slurmdb_assoc_cond_t assoc_cond;
 	List user_list = NULL;
 
 	if (!name)
 		return NULL;
 
 	memset(&user_cond, 0, sizeof(slurmdb_user_cond_t));
-	memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+	memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 	assoc_cond.user_list = list_create(NULL);
 	list_append(assoc_cond.user_list, name);
 	user_cond.assoc_cond = &assoc_cond;
@@ -915,12 +1007,12 @@ extern slurmdb_user_rec_t *sacctmgr_find_user(char *name)
 	user_list = acct_storage_g_get_users(db_conn, my_uid,
 					     &user_cond);
 
-	list_destroy(assoc_cond.user_list);
+	FREE_NULL_LIST(assoc_cond.user_list);
 
 	if (user_list)
 		user = list_pop(user_list);
 
-	list_destroy(user_list);
+	FREE_NULL_LIST(user_list);
 
 	return user;
 }
@@ -929,14 +1021,14 @@ extern slurmdb_account_rec_t *sacctmgr_find_account(char *name)
 {
 	slurmdb_account_rec_t *account = NULL;
 	slurmdb_account_cond_t account_cond;
-	slurmdb_association_cond_t assoc_cond;
+	slurmdb_assoc_cond_t assoc_cond;
 	List account_list = NULL;
 
 	if (!name)
 		return NULL;
 
 	memset(&account_cond, 0, sizeof(slurmdb_account_cond_t));
-	memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+	memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 	assoc_cond.acct_list = list_create(NULL);
 	list_append(assoc_cond.acct_list, name);
 	account_cond.assoc_cond = &assoc_cond;
@@ -944,12 +1036,12 @@ extern slurmdb_account_rec_t *sacctmgr_find_account(char *name)
 	account_list = acct_storage_g_get_accounts(db_conn, my_uid,
 						   &account_cond);
 
-	list_destroy(assoc_cond.acct_list);
+	FREE_NULL_LIST(assoc_cond.acct_list);
 
 	if (account_list)
 		account = list_pop(account_list);
 
-	list_destroy(account_list);
+	FREE_NULL_LIST(account_list);
 
 	return account;
 }
@@ -970,22 +1062,22 @@ extern slurmdb_cluster_rec_t *sacctmgr_find_cluster(char *name)
 	cluster_list = acct_storage_g_get_clusters(db_conn, my_uid,
 						   &cluster_cond);
 
-	list_destroy(cluster_cond.cluster_list);
+	FREE_NULL_LIST(cluster_cond.cluster_list);
 
 	if (cluster_list)
 		cluster = list_pop(cluster_list);
 
-	list_destroy(cluster_list);
+	FREE_NULL_LIST(cluster_list);
 
 	return cluster;
 }
 
-extern slurmdb_association_rec_t *sacctmgr_find_association_from_list(
+extern slurmdb_assoc_rec_t *sacctmgr_find_assoc_from_list(
 	List assoc_list, char *user, char *account,
 	char *cluster, char *partition)
 {
 	ListIterator itr = NULL;
-	slurmdb_association_rec_t * assoc = NULL;
+	slurmdb_assoc_rec_t * assoc = NULL;
 
 	if (!assoc_list)
 		return NULL;
@@ -1017,11 +1109,11 @@ extern slurmdb_association_rec_t *sacctmgr_find_association_from_list(
 	return assoc;
 }
 
-extern slurmdb_association_rec_t *sacctmgr_find_account_base_assoc_from_list(
+extern slurmdb_assoc_rec_t *sacctmgr_find_account_base_assoc_from_list(
 	List assoc_list, char *account, char *cluster)
 {
 	ListIterator itr = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	char *temp = "root";
 
 	if (!cluster || !assoc_list)
@@ -1249,7 +1341,7 @@ extern int get_uint64(char *in_value, uint64_t *out_value, char *type)
 	xfree(meat);
 
 	if (num < 0)
-		*out_value = INFINITE;		/* flag to clear */
+		*out_value = INFINITE64; /* flag to clear */
 	else
 		*out_value = (uint64_t) num;
 	return SLURM_SUCCESS;
@@ -1377,26 +1469,6 @@ end_it:
 	return count;
 }
 
-extern List copy_char_list(List char_list)
-{
-	List ret_list = NULL;
-	char *tmp_char = NULL;
-	ListIterator itr = NULL;
-
-	if (!char_list || !list_count(char_list))
-		return NULL;
-
-	itr = list_iterator_create(char_list);
-	ret_list = list_create(slurm_destroy_char);
-
-	while((tmp_char = list_next(itr)))
-		list_append(ret_list, xstrdup(tmp_char));
-
-	list_iterator_destroy(itr);
-
-	return ret_list;
-}
-
 extern void sacctmgr_print_coord_list(
 	print_field_t *field, List value, int last)
 {
@@ -1490,8 +1562,46 @@ extern void sacctmgr_print_qos_bitstr(print_field_t *field, List qos_list,
 	xfree(print_this);
 }
 
-extern void sacctmgr_print_assoc_limits(slurmdb_association_rec_t *assoc)
+extern void sacctmgr_print_tres(print_field_t *field, char *tres_simple_str,
+				int last)
 {
+	int abs_len = abs(field->len);
+	char *print_this;
+
+	if (!g_tres_list) {
+		slurmdb_tres_cond_t tres_cond;
+		memset(&tres_cond, 0, sizeof(slurmdb_tres_cond_t));
+		tres_cond.with_deleted = 1;
+		g_tres_list = slurmdb_tres_get(db_conn, &tres_cond);
+	}
+
+	print_this = slurmdb_make_tres_string_from_simple(
+		tres_simple_str, g_tres_list);
+
+	if (!print_this)
+		print_this = xstrdup("");
+
+	if (print_fields_parsable_print == PRINT_FIELDS_PARSABLE_NO_ENDING
+	    && last)
+		printf("%s", print_this);
+	else if (print_fields_parsable_print)
+		printf("%s|", print_this);
+	else {
+		if (strlen(print_this) > abs_len)
+			print_this[abs_len-1] = '+';
+
+		if (field->len == abs_len)
+			printf("%*.*s ", abs_len, abs_len, print_this);
+		else
+			printf("%-*.*s ", abs_len, abs_len, print_this);
+	}
+	xfree(print_this);
+}
+
+extern void sacctmgr_print_assoc_limits(slurmdb_assoc_rec_t *assoc)
+{
+	char *tmp_char;
+
 	if (!assoc)
 		return;
 
@@ -1500,38 +1610,36 @@ extern void sacctmgr_print_assoc_limits(slurmdb_association_rec_t *assoc)
 	else if (assoc->shares_raw != NO_VAL)
 		printf("  Fairshare     = %u\n", assoc->shares_raw);
 
-	if (assoc->grp_cpu_mins == INFINITE)
-		printf("  GrpCPUMins    = NONE\n");
-	else if (assoc->grp_cpu_mins != NO_VAL)
-		printf("  GrpCPUMins    = %"PRIu64"\n",
-		       assoc->grp_cpu_mins);
-
-	if (assoc->grp_cpus == INFINITE)
-		printf("  GrpCPUs       = NONE\n");
-	else if (assoc->grp_cpus != NO_VAL)
-		printf("  GrpCPUs       = %u\n", assoc->grp_cpus);
-
 	if (assoc->grp_jobs == INFINITE)
 		printf("  GrpJobs       = NONE\n");
 	else if (assoc->grp_jobs != NO_VAL)
 		printf("  GrpJobs       = %u\n", assoc->grp_jobs);
 
-	if (assoc->grp_mem == INFINITE)
-		printf("  GrpMemory     = NONE\n");
-	else if (assoc->grp_mem != NO_VAL)
-		printf("  GrpMemory     = %u\n", assoc->grp_mem);
-
-	if (assoc->grp_nodes == INFINITE)
-		printf("  GrpNodes      = NONE\n");
-	else if (assoc->grp_nodes != NO_VAL)
-		printf("  GrpNodes      = %u\n", assoc->grp_nodes);
-
 	if (assoc->grp_submit_jobs == INFINITE)
 		printf("  GrpSubmitJobs = NONE\n");
 	else if (assoc->grp_submit_jobs != NO_VAL)
 		printf("  GrpSubmitJobs = %u\n",
 		       assoc->grp_submit_jobs);
 
+	if (assoc->grp_tres) {
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			assoc->grp_tres, g_tres_list);
+		printf("  GrpTRES       = %s\n", tmp_char);
+		xfree(tmp_char);
+	}
+	if (assoc->grp_tres_mins) {
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			assoc->grp_tres_mins, g_tres_list);
+		printf("  GrpTRESMins   = %s\n", tmp_char);
+		xfree(tmp_char);
+	}
+	if (assoc->grp_tres_run_mins) {
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			assoc->grp_tres_run_mins, g_tres_list);
+		printf("  GrpTRESRunMins= %s\n", tmp_char);
+		xfree(tmp_char);
+	}
+
 	if (assoc->grp_wall == INFINITE)
 		printf("  GrpWall       = NONE\n");
 	else if (assoc->grp_wall != NO_VAL) {
@@ -1541,33 +1649,42 @@ extern void sacctmgr_print_assoc_limits(slurmdb_association_rec_t *assoc)
 		printf("  GrpWall       = %s\n", time_buf);
 	}
 
-	if (assoc->max_cpu_mins_pj == (uint64_t)INFINITE)
-		printf("  MaxCPUMins    = NONE\n");
-	else if (assoc->max_cpu_mins_pj != (uint64_t)NO_VAL)
-		printf("  MaxCPUMins    = %"PRIu64"\n",
-		       assoc->max_cpu_mins_pj);
-
-	if (assoc->max_cpus_pj == INFINITE)
-		printf("  MaxCPUs       = NONE\n");
-	else if (assoc->max_cpus_pj != NO_VAL)
-		printf("  MaxCPUs       = %u\n", assoc->max_cpus_pj);
-
 	if (assoc->max_jobs == INFINITE)
 		printf("  MaxJobs       = NONE\n");
 	else if (assoc->max_jobs != NO_VAL)
 		printf("  MaxJobs       = %u\n", assoc->max_jobs);
 
-	if (assoc->max_nodes_pj == INFINITE)
-		printf("  MaxNodes      = NONE\n");
-	else if (assoc->max_nodes_pj != NO_VAL)
-		printf("  MaxNodes      = %u\n", assoc->max_nodes_pj);
-
 	if (assoc->max_submit_jobs == INFINITE)
 		printf("  MaxSubmitJobs = NONE\n");
 	else if (assoc->max_submit_jobs != NO_VAL)
 		printf("  MaxSubmitJobs = %u\n",
 		       assoc->max_submit_jobs);
 
+	if (assoc->max_tres_pj) {
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			assoc->max_tres_pj, g_tres_list);
+		printf("  MaxTRES       = %s\n", tmp_char);
+		xfree(tmp_char);
+	}
+	if (assoc->max_tres_pn) {
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			assoc->max_tres_pn, g_tres_list);
+		printf("  MaxTRESPerNode= %s\n", tmp_char);
+		xfree(tmp_char);
+	}
+	if (assoc->max_tres_mins_pj) {
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			assoc->max_tres_mins_pj, g_tres_list);
+		printf("  MaxTRESMins   = %s\n", tmp_char);
+		xfree(tmp_char);
+	}
+	if (assoc->max_tres_run_mins) {
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			assoc->max_tres_run_mins, g_tres_list);
+		printf("  MaxTRESRUNMins= %s\n", tmp_char);
+		xfree(tmp_char);
+	}
+
 	if (assoc->max_wall_pj == INFINITE)
 		printf("  MaxWall       = NONE\n");
 	else if (assoc->max_wall_pj != NO_VAL) {
@@ -1592,6 +1709,8 @@ extern void sacctmgr_print_assoc_limits(slurmdb_association_rec_t *assoc)
 
 extern void sacctmgr_print_qos_limits(slurmdb_qos_rec_t *qos)
 {
+	char *tmp_char;
+
 	if (!qos)
 		return;
 
@@ -1609,38 +1728,36 @@ extern void sacctmgr_print_qos_limits(slurmdb_qos_rec_t *qos)
 	else if (qos->grace_time != NO_VAL)
 		printf("  GraceTime      = %d\n", qos->grace_time);
 
-	if (qos->grp_cpu_mins == INFINITE)
-		printf("  GrpCPUMins     = NONE\n");
-	else if (qos->grp_cpu_mins != NO_VAL)
-		printf("  GrpCPUMins     = %"PRIu64"\n",
-		       qos->grp_cpu_mins);
-
-	if (qos->grp_cpus == INFINITE)
-		printf("  GrpCPUs        = NONE\n");
-	else if (qos->grp_cpus != NO_VAL)
-		printf("  GrpCPUs        = %u\n", qos->grp_cpus);
-
 	if (qos->grp_jobs == INFINITE)
 		printf("  GrpJobs        = NONE\n");
 	else if (qos->grp_jobs != NO_VAL)
 		printf("  GrpJobs        = %u\n", qos->grp_jobs);
 
-	if (qos->grp_mem == INFINITE)
-		printf("  GrpMemory      = NONE\n");
-	else if (qos->grp_mem != NO_VAL)
-		printf("  GrpMemory      = %u\n", qos->grp_mem);
-
-	if (qos->grp_nodes == INFINITE)
-		printf("  GrpNodes       = NONE\n");
-	else if (qos->grp_nodes != NO_VAL)
-		printf("  GrpNodes       = %u\n", qos->grp_nodes);
-
 	if (qos->grp_submit_jobs == INFINITE)
 		printf("  GrpSubmitJobs  = NONE\n");
 	else if (qos->grp_submit_jobs != NO_VAL)
 		printf("  GrpSubmitJobs  = %u\n",
 		       qos->grp_submit_jobs);
 
+	if (qos->grp_tres) {
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			qos->grp_tres, g_tres_list);
+		printf("  GrpTRES       = %s\n", tmp_char);
+		xfree(tmp_char);
+	}
+	if (qos->grp_tres_mins) {
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			qos->grp_tres_mins, g_tres_list);
+		printf("  GrpTRESMins   = %s\n", tmp_char);
+		xfree(tmp_char);
+	}
+	if (qos->grp_tres_run_mins) {
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			qos->grp_tres_run_mins, g_tres_list);
+		printf("  GrpTRESRunMins= %s\n", tmp_char);
+		xfree(tmp_char);
+	}
+
 	if (qos->grp_wall == INFINITE)
 		printf("  GrpWall        = NONE\n");
 	else if (qos->grp_wall != NO_VAL) {
@@ -1650,43 +1767,43 @@ extern void sacctmgr_print_qos_limits(slurmdb_qos_rec_t *qos)
 		printf("  GrpWall        = %s\n", time_buf);
 	}
 
-	if (qos->max_cpu_mins_pj == (uint64_t)INFINITE)
-		printf("  MaxCPUMins     = NONE\n");
-	else if (qos->max_cpu_mins_pj != (uint64_t)NO_VAL)
-		printf("  MaxCPUMins     = %"PRIu64"\n",
-		       qos->max_cpu_mins_pj);
-
-	if (qos->max_cpus_pj == INFINITE)
-		printf("  MaxCPUs        = NONE\n");
-	else if (qos->max_cpus_pj != NO_VAL)
-		printf("  MaxCPUs        = %u\n", qos->max_cpus_pj);
-
-	if (qos->max_cpus_pu == INFINITE)
-		printf("  MaxCPUsPerUser        = NONE\n");
-	else if (qos->max_cpus_pu != NO_VAL)
-		printf("  MaxCPUsPerUser        = %u\n", qos->max_cpus_pu);
-
-	if (qos->max_jobs_pu == INFINITE)
-		printf("  MaxJobs        = NONE\n");
-	else if (qos->max_jobs_pu != NO_VAL)
-		printf("  MaxJobs        = %u\n", qos->max_jobs_pu);
-
-	if (qos->max_nodes_pj == INFINITE)
-		printf("  MaxNodes       = NONE\n");
-	else if (qos->max_nodes_pj != NO_VAL)
-		printf("  MaxNodes       = %u\n", qos->max_nodes_pj);
-
-	if (qos->max_nodes_pu == INFINITE)
-		printf("  MaxNodesPerUser       = NONE\n");
-	else if (qos->max_nodes_pu != NO_VAL)
-		printf("  MaxNodesPerUser       = %u\n", qos->max_nodes_pu);
-
 	if (qos->max_submit_jobs_pu == INFINITE)
 		printf("  MaxSubmitJobs  = NONE\n");
 	else if (qos->max_submit_jobs_pu != NO_VAL)
 		printf("  MaxSubmitJobs  = %u\n",
 		       qos->max_submit_jobs_pu);
 
+	if (qos->max_tres_pj) {
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			qos->max_tres_pj, g_tres_list);
+		printf("  MaxTRESPerJob = %s\n", tmp_char);
+		xfree(tmp_char);
+	}
+	if (qos->max_tres_pn) {
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			qos->max_tres_pn, g_tres_list);
+		printf("  MaxTRESPerNode= %s\n", tmp_char);
+		xfree(tmp_char);
+	}
+	if (qos->max_tres_pu) {
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			qos->max_tres_pu, g_tres_list);
+		printf("  MaxTRESPerUser= %s\n", tmp_char);
+		xfree(tmp_char);
+	}
+	if (qos->max_tres_mins_pj) {
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			qos->max_tres_mins_pj, g_tres_list);
+		printf("  MaxTRESMins   = %s\n", tmp_char);
+		xfree(tmp_char);
+	}
+	if (qos->max_tres_run_mins_pu) {
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			qos->max_tres_run_mins_pu, g_tres_list);
+		printf("  MaxTRESRUNMins= %s\n", tmp_char);
+		xfree(tmp_char);
+	}
+
 	if (qos->max_wall_pj == INFINITE)
 		printf("  MaxWall        = NONE\n");
 	else if (qos->max_wall_pj != NO_VAL) {
@@ -1696,11 +1813,6 @@ extern void sacctmgr_print_qos_limits(slurmdb_qos_rec_t *qos)
 		printf("  MaxWall        = %s\n", time_buf);
 	}
 
-	if (qos->min_cpus_pj == INFINITE)
-		printf("  MinCPUs        = NONE\n");
-	else if (qos->min_cpus_pj != NO_VAL)
-		printf("  MinCPUs        = %u\n", qos->min_cpus_pj);
-
 	if (qos->preempt_list) {
 		char *temp_char = get_qos_complete_str(g_qos_list,
 						       qos->preempt_list);
@@ -1797,7 +1909,7 @@ extern int sacctmgr_validate_cluster_list(List cluster_list)
 	}
 	list_iterator_destroy(itr);
 	list_iterator_destroy(itr_c);
-	list_destroy(temp_list);
+	FREE_NULL_LIST(temp_list);
 
 	if (!list_count(cluster_list))
 		rc = SLURM_ERROR;
diff --git a/src/sacctmgr/config_functions.c b/src/sacctmgr/config_functions.c
index 10e44c292..17aa00fb9 100644
--- a/src/sacctmgr/config_functions.c
+++ b/src/sacctmgr/config_functions.c
@@ -86,8 +86,7 @@ static void _free_dbd_config(void)
 	if (!dbd_config_list)
 		return;
 
-	list_destroy(dbd_config_list);
-	dbd_config_list = NULL;
+	FREE_NULL_LIST(dbd_config_list);
 }
 
 static void _load_slurm_config(void)
diff --git a/src/sacctmgr/event_functions.c b/src/sacctmgr/event_functions.c
index 112558f3e..ada0863b1 100644
--- a/src/sacctmgr/event_functions.c
+++ b/src/sacctmgr/event_functions.c
@@ -37,11 +37,12 @@
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
+#include <grp.h>
 
-#include "src/sacctmgr/sacctmgr.h"
+#include "src/common/slurm_time.h"
 #include "src/common/slurmdbd_defs.h"
 #include "src/common/uid.h"
-#include <grp.h>
+#include "src/sacctmgr/sacctmgr.h"
 
 static uint32_t _decode_node_state(char *val)
 {
@@ -341,7 +342,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 				}
 			}
 			list_iterator_destroy(itr);
-			list_destroy(tmp_list);
+			FREE_NULL_LIST(tmp_list);
 		} else if (!strncasecmp (argv[i], "Clusters",
 					 MAX(command_len, 1))) {
 			if (!event_cond->cluster_list)
@@ -425,7 +426,8 @@ static int _set_cond(int *start, int argc, char *argv[],
 		if (!event_cond->state_list) {
 			struct tm start_tm;
 
-			if (!localtime_r(&event_cond->period_start, &start_tm)) {
+			if (!slurm_localtime_r(&event_cond->period_start,
+					       &start_tm)) {
 				fprintf(stderr,
 					" Couldn't get localtime from %ld",
 					(long)event_cond->period_start);
@@ -437,7 +439,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 			start_tm.tm_hour = 0;
 			start_tm.tm_mday--;
 			start_tm.tm_isdst = -1;
-			event_cond->period_start = mktime(&start_tm);
+			event_cond->period_start = slurm_mktime(&start_tm);
 		}
 	}
 
@@ -455,7 +457,6 @@ extern int sacctmgr_list_event(int argc, char *argv[])
 	int i=0;
 	ListIterator itr = NULL;
 	ListIterator itr2 = NULL;
-	char *object = NULL;
 	int field_count = 0;
 
 	print_field_t *field = NULL;
@@ -470,11 +471,12 @@ extern int sacctmgr_list_event(int argc, char *argv[])
                 struct tm start_tm;
 		event_cond->period_start = time(NULL);
 
-                if (!localtime_r(&event_cond->period_start, &start_tm)) {
+                if (!slurm_localtime_r(&event_cond->period_start,
+				       &start_tm)) {
                         fprintf(stderr,
                                 " Couldn't get localtime from %ld",
                                 (long)event_cond->period_start);
-                        exit_code=1;
+                        exit_code = 1;
                         return 0;
                 }
                 start_tm.tm_sec = 0;
@@ -482,7 +484,7 @@ extern int sacctmgr_list_event(int argc, char *argv[])
                 start_tm.tm_hour = 0;
                 start_tm.tm_mday--;
                 start_tm.tm_isdst = -1;
-                event_cond->period_start = mktime(&start_tm);
+                event_cond->period_start = slurm_mktime(&start_tm);
         }
 
 	for (i=0; i<argc; i++) {
@@ -495,7 +497,7 @@ extern int sacctmgr_list_event(int argc, char *argv[])
 
 	if (exit_code) {
 		slurmdb_destroy_event_cond(event_cond);
-		list_destroy(format_list);
+		FREE_NULL_LIST(format_list);
 		return SLURM_ERROR;
 	}
 
@@ -512,111 +514,11 @@ extern int sacctmgr_list_event(int argc, char *argv[])
 					      "End,State,Reason,User");
 	}
 
-	itr = list_iterator_create(format_list);
-	while((object = list_next(itr))) {
-		char *tmp_char = NULL;
-		int command_len = 0;
-		int newlen = 0;
-
-		if ((tmp_char = strstr(object, "\%"))) {
-			newlen = atoi(tmp_char+1);
-			tmp_char[0] = '\0';
-		}
-
-		command_len = strlen(object);
-
-		field = xmalloc(sizeof(print_field_t));
-		if (!strncasecmp("ClusterNodes", object,
-				       MAX(command_len, 8))) {
-			field->type = PRINT_CLUSTER_NODES;
-			field->name = xstrdup("Cluster Nodes");
-			field->len = 20;
-			field->print_routine = print_fields_str;
-		} else if (!strncasecmp("Cluster", object,
-				       MAX(command_len, 1))) {
-			field->type = PRINT_CLUSTER;
-			field->name = xstrdup("Cluster");
-			field->len = 10;
-			field->print_routine = print_fields_str;
-		} else if (!strncasecmp("CPUs", object,
-				MAX(command_len, 2))) {
-			field->type = PRINT_CPUS;
-			field->name = xstrdup("CPUs");
-			field->len = 7;
-			field->print_routine = print_fields_str;
-		} else if (!strncasecmp("Duration", object,
-				       MAX(command_len, 2))) {
-			field->type = PRINT_DURATION;
-			field->name = xstrdup("Duration");
-			field->len = 13;
-			field->print_routine = print_fields_time_from_secs;
-		} else if (!strncasecmp("End", object, MAX(command_len, 2))) {
-			field->type = PRINT_END;
-			field->name = xstrdup("End");
-			field->len = 19;
-			field->print_routine = print_fields_date;
-		} else if (!strncasecmp("EventRaw", object,
-				MAX(command_len, 6))) {
-			field->type = PRINT_EVENTRAW;
-			field->name = xstrdup("EventRaw");
-			field->len = 8;
-			field->print_routine = print_fields_uint;
-		} else if (!strncasecmp("Event", object,
-				MAX(command_len, 2))) {
-			field->type = PRINT_EVENT;
-			field->name = xstrdup("Event");
-			field->len = 7;
-			field->print_routine = print_fields_str;
-		} else if (!strncasecmp("NodeName", object,
-				       MAX(command_len, 1))) {
-			field->type = PRINT_NODENAME;
-			field->name = xstrdup("Node Name");
-			field->len = -15;
-			field->print_routine = print_fields_str;
-		} else if (!strncasecmp("Reason", object, MAX(command_len, 1))) {
-			field->type = PRINT_REASON;
-			field->name = xstrdup("Reason");
-			field->len = 30;
-			field->print_routine = print_fields_str;
-		} else if (!strncasecmp("Start", object,
-				       MAX(command_len, 1))) {
-			field->type = PRINT_START;
-			field->name = xstrdup("Start");
-			field->len = 19;
-			field->print_routine = print_fields_date;
-		} else if (!strncasecmp("StateRaw", object,
-				       MAX(command_len, 6))) {
-			field->type = PRINT_STATERAW;
-			field->name = xstrdup("StateRaw");
-			field->len = 8;
-			field->print_routine = print_fields_uint;
-		} else if (!strncasecmp("State", object, MAX(command_len, 1))) {
-			field->type = PRINT_STATE;
-			field->name = xstrdup("State");
-			field->len = 6;
-			field->print_routine = print_fields_str;
-		} else if (!strncasecmp("User", object, MAX(command_len, 1))) {
-			field->type = PRINT_USER;
-			field->name = xstrdup("User");
-			field->len = 15;
-			field->print_routine = print_fields_str;
-		} else {
-			exit_code=1;
-			fprintf(stderr, " Unknown field '%s'\n", object);
-			xfree(field);
-			continue;
-		}
-
-		if (newlen)
-			field->len = newlen;
-
-		list_append(print_fields_list, field);
-	}
-	list_iterator_destroy(itr);
-	list_destroy(format_list);
+	print_fields_list = sacctmgr_process_format_list(format_list);
+	FREE_NULL_LIST(format_list);
 
 	if (exit_code) {
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 
@@ -627,7 +529,7 @@ extern int sacctmgr_list_event(int argc, char *argv[])
 		exit_code=1;
 		fprintf(stderr, " Error with request: %s\n",
 			slurm_strerror(errno));
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 	itr = list_iterator_create(event_list);
@@ -640,6 +542,7 @@ extern int sacctmgr_list_event(int argc, char *argv[])
 		int curr_inx = 1;
 		char tmp[20], *tmp_char;
 		time_t newend = event->period_end;
+
 		while((field = list_next(itr2))) {
 			switch(field->type) {
 			case PRINT_CLUSTER:
@@ -653,8 +556,12 @@ extern int sacctmgr_list_event(int argc, char *argv[])
 					(curr_inx == field_count));
 				break;
 			case PRINT_CPUS:
-				convert_num_unit((float)event->cpu_count,
-						 tmp, sizeof(tmp), UNIT_NONE);
+				convert_num_unit(
+					(float)slurmdb_find_tres_count_in_string(
+						event->tres_str, TRES_CPU),
+					tmp, sizeof(tmp),
+					UNIT_NONE, CONVERT_NUM_UNIT_EXACT);
+
 				field->print_routine(
 					field,
 					tmp,
@@ -669,7 +576,7 @@ extern int sacctmgr_list_event(int argc, char *argv[])
 						   - event->period_start),
 					(curr_inx == field_count));
 				break;
-			case PRINT_END:
+			case PRINT_TIMEEND:
 				field->print_routine(field,
 						     event->period_end,
 						     (curr_inx == field_count));
@@ -681,7 +588,8 @@ extern int sacctmgr_list_event(int argc, char *argv[])
 			case PRINT_EVENT:
 				if (event->event_type == SLURMDB_EVENT_CLUSTER)
 					tmp_char = "Cluster";
-				else if (event->event_type == SLURMDB_EVENT_NODE)
+				else if (event->event_type ==
+					 SLURMDB_EVENT_NODE)
 					tmp_char = "Node";
 				else
 					tmp_char = "Unknown";
@@ -694,7 +602,7 @@ extern int sacctmgr_list_event(int argc, char *argv[])
 						     event->node_name,
 						     (curr_inx == field_count));
 				break;
-			case PRINT_START:
+			case PRINT_TIMESTART:
 				field->print_routine(field,
 						     event->period_start,
 						     (curr_inx == field_count));
@@ -718,6 +626,25 @@ extern int sacctmgr_list_event(int argc, char *argv[])
 						     tmp_char,
 						     (curr_inx == field_count));
 				break;
+			case PRINT_TRES:
+				if (!g_tres_list) {
+					slurmdb_tres_cond_t tres_cond;
+					memset(&tres_cond, 0,
+					       sizeof(slurmdb_tres_cond_t));
+					tres_cond.with_deleted = 1;
+					g_tres_list = slurmdb_tres_get(
+						db_conn, &tres_cond);
+				}
+
+				tmp_char = slurmdb_make_tres_string_from_simple(
+					event->tres_str, g_tres_list);
+
+				field->print_routine(
+					field,
+					tmp_char,
+					(curr_inx == field_count));
+				xfree(tmp_char);
+				break;
 			case PRINT_USER:
 				if (event->reason_uid != NO_VAL) {
 					tmp_char = uid_to_string_cached(
@@ -742,7 +669,7 @@ extern int sacctmgr_list_event(int argc, char *argv[])
 
 	list_iterator_destroy(itr2);
 	list_iterator_destroy(itr);
-	list_destroy(event_list);
-	list_destroy(print_fields_list);
+	FREE_NULL_LIST(event_list);
+	FREE_NULL_LIST(print_fields_list);
 	return rc;
 }
diff --git a/src/sacctmgr/file_functions.c b/src/sacctmgr/file_functions.c
index 77f21bcfc..9a4fbf963 100644
--- a/src/sacctmgr/file_functions.c
+++ b/src/sacctmgr/file_functions.c
@@ -43,33 +43,15 @@
 
 typedef struct {
 	slurmdb_admin_level_t admin;
+	slurmdb_assoc_rec_t assoc_rec;
 	uint16_t classification;
 	List coord_list; /* char *list */
 	char *def_acct;
-	uint32_t def_qos_id;
 	char *def_wckey;
 	char *desc;
-	uint32_t fairshare;
-
-	uint64_t grp_cpu_mins;
-	uint32_t grp_cpus;
-	uint32_t grp_jobs;
-	uint32_t grp_mem;
-	uint32_t grp_nodes;
-	uint32_t grp_submit_jobs;
-	uint32_t grp_wall;
-
-	uint64_t max_cpu_mins_pj;
-	uint32_t max_cpus_pj;
-	uint32_t max_jobs;
-	uint32_t max_nodes_pj;
-	uint32_t max_submit_jobs;
-	uint32_t max_wall_pj;
 
 	char *name;
 	char *org;
-	char *part;
-	List qos_list;
 	List wckey_list;
 } sacctmgr_file_opts_t;
 
@@ -85,28 +67,9 @@ static int _init_sacctmgr_file_opts(sacctmgr_file_opts_t *file_opts)
 		return SLURM_ERROR;
 
 	memset(file_opts, 0, sizeof(sacctmgr_file_opts_t));
-
+	slurmdb_init_assoc_rec(&file_opts->assoc_rec, 0);
 	file_opts->admin = SLURMDB_ADMIN_NOTSET;
 
-	file_opts->fairshare = NO_VAL;
-
-	file_opts->def_qos_id = NO_VAL;
-
-	file_opts->grp_cpu_mins = (uint64_t)NO_VAL;
-	file_opts->grp_cpus = NO_VAL;
-	file_opts->grp_jobs = NO_VAL;
-	file_opts->grp_mem = NO_VAL;
-	file_opts->grp_nodes = NO_VAL;
-	file_opts->grp_submit_jobs = NO_VAL;
-	file_opts->grp_wall = NO_VAL;
-
-	file_opts->max_cpu_mins_pj = (uint64_t)NO_VAL;
-	file_opts->max_cpus_pj = NO_VAL;
-	file_opts->max_jobs = NO_VAL;
-	file_opts->max_nodes_pj = NO_VAL;
-	file_opts->max_submit_jobs = NO_VAL;
-	file_opts->max_wall_pj = NO_VAL;
-
 	return SLURM_SUCCESS;
 }
 
@@ -211,29 +174,21 @@ static void _destroy_sacctmgr_file_opts(void *object)
 	sacctmgr_file_opts_t *file_opts = (sacctmgr_file_opts_t *)object;
 
 	if (file_opts) {
-		if (file_opts->coord_list)
-			list_destroy(file_opts->coord_list);
+		slurmdb_free_assoc_rec_members(&file_opts->assoc_rec);
+		FREE_NULL_LIST(file_opts->coord_list);
 		xfree(file_opts->def_acct);
 		xfree(file_opts->def_wckey);
 		xfree(file_opts->desc);
 		xfree(file_opts->name);
 		xfree(file_opts->org);
-		xfree(file_opts->part);
-		if (file_opts->qos_list) {
-			list_destroy(file_opts->qos_list);
-			file_opts->qos_list = NULL;
-		}
-		if (file_opts->wckey_list) {
-			list_destroy(file_opts->wckey_list);
-			file_opts->wckey_list = NULL;
-		}
+		FREE_NULL_LIST(file_opts->wckey_list);
 		xfree(file_opts);
 	}
 }
 
 static sacctmgr_file_opts_t *_parse_options(char *options)
 {
-	int start=0, i=0, end=0, mins, quote = 0;
+	int start=0, i=0, end=0, quote = 0;
  	char *sub = NULL;
 	sacctmgr_file_opts_t *file_opts = xmalloc(sizeof(sacctmgr_file_opts_t));
 	char *option = NULL;
@@ -308,25 +263,6 @@ static sacctmgr_file_opts_t *_parse_options(char *options)
 		} else if (!strncasecmp (sub, "DefaultAccount",
 					 MAX(command_len, 8))) {
 			file_opts->def_acct = xstrdup(option);
-		} else if (!strncasecmp (sub, "DefaultQOS",
-					 MAX(command_len, 8))) {
-			if (!g_qos_list) {
-				g_qos_list = acct_storage_g_get_qos(
-					db_conn, my_uid, NULL);
-			}
-
-			file_opts->def_qos_id = str_2_slurmdb_qos(
-				g_qos_list, option);
-
-			if (file_opts->def_qos_id == NO_VAL) {
-				exit_code=1;
-				fprintf(stderr,
-					"You gave a bad qos '%s'.  "
-					"Use 'list qos' to get "
-					"complete list.\n",
-					option);
-				break;
-			}
 		} else if (!strncasecmp (sub, "DefaultWCKey",
 					 MAX(command_len, 8))) {
 			file_opts->def_wckey = xstrdup(option);
@@ -337,173 +273,18 @@ static sacctmgr_file_opts_t *_parse_options(char *options)
 		} else if (!strncasecmp (sub, "Description",
 					 MAX(command_len, 3))) {
 			file_opts->desc = xstrdup(option);
-		} else if (!strncasecmp (sub, "FairShare",
-					 MAX(command_len, 1))
-			   || !strncasecmp (sub, "Shares",
-					    MAX(command_len, 1))) {
-			if (get_uint(option, &file_opts->fairshare,
-				     "FairShare") != SLURM_SUCCESS) {
-				exit_code=1;
-				fprintf(stderr,
-					" Bad FairShare value: %s\n", option);
-				break;
-			}
-		} else if (!strncasecmp (sub, "GrpCPUMins",
-					 MAX(command_len, 7))) {
-			if (get_uint64(option, &file_opts->grp_cpu_mins,
-				       "GrpCPUMins") != SLURM_SUCCESS) {
-				exit_code=1;
-				fprintf(stderr,
-					" Bad GrpCPUMins value: %s\n", option);
-				break;
-			}
-		} else if (!strncasecmp (sub, "GrpCPUs", MAX(command_len, 7))) {
-			if (get_uint(option, &file_opts->grp_cpus,
-				     "GrpCPUs") != SLURM_SUCCESS) {
-				exit_code=1;
-				fprintf(stderr,
-					" Bad GrpCPUs value: %s\n", option);
-				break;
-			}
-		} else if (!strncasecmp (sub, "GrpJobs", MAX(command_len, 4))) {
-			if (get_uint(option, &file_opts->grp_jobs,
-				     "GrpJobs") != SLURM_SUCCESS) {
-				exit_code=1;
-				fprintf(stderr,
-					" Bad GrpJobs value: %s\n", option);
-				break;
-			}
-		} else if (!strncasecmp (sub, "GrpMemory",
-					 MAX(command_len, 4))) {
-			if (get_uint(option, &file_opts->grp_mem,
-				     "GrpMemory") != SLURM_SUCCESS) {
-				exit_code=1;
-				fprintf(stderr,
-					" Bad GrpMemory value: %s\n", option);
-				break;
-			}
-		} else if (!strncasecmp (sub, "GrpNodes",
-					 MAX(command_len, 4))) {
-			if (get_uint(option, &file_opts->grp_nodes,
-				     "GrpNodes") != SLURM_SUCCESS) {
-				exit_code=1;
-				fprintf(stderr,
-					" Bad GrpNodes value: %s\n", option);
-				break;
-			}
-		} else if (!strncasecmp (sub, "GrpSubmitJobs",
-					 MAX(command_len, 4))) {
-			if (get_uint(option, &file_opts->grp_submit_jobs,
-				     "GrpSubmitJobs") != SLURM_SUCCESS) {
-				exit_code=1;
-				fprintf(stderr,
-					" Bad GrpJobs value: %s\n", option);
-				break;
-			}
-		} else if (!strncasecmp (sub, "GrpWall", MAX(command_len, 4))) {
-			mins = time_str2mins(option);
-			if (mins >= 0) {
-				file_opts->grp_wall
-					= (uint32_t) mins;
-			} else if (strcmp(option, "-1")) {
-				file_opts->grp_wall = INFINITE;
-			} else {
-				exit_code=1;
-				fprintf(stderr,
-					" Bad GrpWall time format: %s\n",
-					option);
-				break;
-			}
-		} else if (!strncasecmp (sub, "MaxCPUMinsPerJob",
-					 MAX(command_len, 7))
-			   || !strncasecmp (sub, "MaxProcSecPerJob",
-					    MAX(command_len, 4))) {
-			if (get_uint64(option, &file_opts->max_cpu_mins_pj,
-				       "MaxCPUMins") != SLURM_SUCCESS) {
-				exit_code=1;
-				fprintf(stderr,
-					" Bad MaxCPUMins value: %s\n", option);
-				break;
-			}
-		} else if (!strncasecmp (sub, "MaxCPUsPerJob",
-					 MAX(command_len, 7))) {
-			if (get_uint(option, &file_opts->max_cpus_pj,
-				     "MaxCPUs") != SLURM_SUCCESS) {
-				exit_code=1;
-				fprintf(stderr,
-					" Bad MaxCPUs value: %s\n", option);
-				break;
-			}
-		} else if (!strncasecmp (sub, "MaxJobs", MAX(command_len, 4))) {
-			if (get_uint(option, &file_opts->max_jobs,
-				     "MaxJobs") != SLURM_SUCCESS) {
-				exit_code=1;
-				fprintf(stderr,
-					" Bad MaxJobs value: %s\n", option);
-				break;
-			}
-		} else if (!strncasecmp (sub, "MaxNodesPerJob",
-					 MAX(command_len, 4))) {
-			if (get_uint(option, &file_opts->max_nodes_pj,
-				     "MaxNodes") != SLURM_SUCCESS) {
-				exit_code=1;
-				fprintf(stderr,
-					" Bad MaxNodes value: %s\n", option);
-				break;
-			}
-		} else if (!strncasecmp (sub, "MaxSubmitJobs",
-					 MAX(command_len, 4))) {
-			if (get_uint(option, &file_opts->max_submit_jobs,
-				     "MaxSubmitJobs") != SLURM_SUCCESS) {
-				exit_code=1;
-				fprintf(stderr,
-					" Bad MaxJobs value: %s\n", option);
-				break;
-			}
-		} else if (!strncasecmp (sub, "MaxWallDurationPerJob",
-					 MAX(command_len, 4))) {
-			mins = time_str2mins(option);
-			if (mins >= 0) {
-				file_opts->max_wall_pj
-					= (uint32_t) mins;
-			} else if (strcmp(option, "-1")) {
-				file_opts->max_wall_pj = INFINITE;
-			} else {
-				exit_code=1;
-				fprintf(stderr,
-					" Bad MaxWall time format: %s\n",
-					option);
-				break;
-			}
 		} else if (!strncasecmp (sub, "Organization",
 					 MAX(command_len, 1))) {
 			file_opts->org = xstrdup(option);
-		} else if (!strncasecmp (sub, "Partition",
-					 MAX(command_len, 1))) {
-			file_opts->part = xstrdup(option);
-		} else if (!strncasecmp (sub, "QosLevel", MAX(command_len, 1))
-			   || !strncasecmp (sub, "Expedite",
-					    MAX(command_len, 1))) {
-			if (!file_opts->qos_list) {
-				file_opts->qos_list =
-					list_create(slurm_destroy_char);
-			}
-
-			if (!g_qos_list) {
-				g_qos_list = acct_storage_g_get_qos(
-					db_conn, my_uid, NULL);
-			}
-
-			slurmdb_addto_qos_char_list(file_opts->qos_list,
-						    g_qos_list,
-						    option, option2);
 		} else if (!strncasecmp (sub, "WCKeys",
 					 MAX(command_len, 2))) {
 			if (!file_opts->wckey_list)
 				file_opts->wckey_list =
 					list_create(slurm_destroy_char);
 			slurm_addto_char_list(file_opts->wckey_list, option);
-		} else {
+		} else if (!sacctmgr_set_assoc_rec(
+				   &file_opts->assoc_rec, sub, option,
+				   command_len, option2)) {
 			exit_code=1;
 			fprintf(stderr, " Unknown option: %s\n", sub);
 			break;
@@ -541,9 +322,8 @@ static int _print_out_assoc(List assoc_list, bool user, bool add)
 	List print_fields_list = NULL;
 	ListIterator itr, itr2;
 	print_field_t *field = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	int rc = SLURM_SUCCESS;
-	char *tmp_char = NULL;
 
 	if (!assoc_list || !list_count(assoc_list))
 		return rc;
@@ -556,12 +336,12 @@ static int _print_out_assoc(List assoc_list, bool user, bool add)
 		slurm_addto_char_list(format_list,
 				      "Account,ParentName");
 	slurm_addto_char_list(format_list,
-			      "Share,GrpCPUM,GrpCPUs,GrpJ,"
-			      "GrpMEM,GrpN,GrpS,GrpW,MaxCPUM,MaxCPUs,"
-			      "MaxJ,MaxS,MaxN,MaxW,QOS,DefaultQOS");
+			      "Share,GrpTRESM,GrpTRESR,GrpTRES,GrpJ,"
+			      "GrpMEM,GrpN,GrpS,GrpW,MaxTRESM,MaxTRES,"
+			      "MaxTRESPerN,MaxJ,MaxS,MaxN,MaxW,QOS,DefaultQOS");
 
 	print_fields_list = sacctmgr_process_format_list(format_list);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	print_fields_header(print_fields_list);
 
@@ -569,135 +349,30 @@ static int _print_out_assoc(List assoc_list, bool user, bool add)
 	itr2 = list_iterator_create(print_fields_list);
 	while ((assoc = list_next(itr))) {
 		while ((field = list_next(itr2))) {
-			switch(field->type) {
-			case PRINT_ACCT:
-				field->print_routine(field,
-						     assoc->acct);
-				break;
-			case PRINT_DQOS:
-				if (!g_qos_list)
-					g_qos_list = acct_storage_g_get_qos(
-						db_conn,
-						my_uid,
-						NULL);
-
-				tmp_char = slurmdb_qos_str(
-					g_qos_list,
-					assoc->def_qos_id);
-				field->print_routine(
-					field,
-					tmp_char);
-				break;
-			case PRINT_FAIRSHARE:
-				field->print_routine(field,
-						     assoc->shares_raw);
-				break;
-			case PRINT_GRPCM:
-				field->print_routine(
-					field,
-					assoc->grp_cpu_mins);
-				break;
-			case PRINT_GRPC:
-				field->print_routine(field,
-						     assoc->grp_cpus);
-				break;
-			case PRINT_GRPJ:
-				field->print_routine(field,
-						     assoc->grp_jobs);
-				break;
-			case PRINT_GRPMEM:
-				field->print_routine(field,
-						     assoc->grp_mem);
-				break;
-			case PRINT_GRPN:
-				field->print_routine(field,
-						     assoc->grp_nodes);
-				break;
-			case PRINT_GRPS:
-				field->print_routine(field,
-						     assoc->grp_submit_jobs);
-				break;
-			case PRINT_GRPW:
-				field->print_routine(
-					field,
-					assoc->grp_wall);
-				break;
-			case PRINT_MAXCM:
-				field->print_routine(
-					field,
-					assoc->max_cpu_mins_pj);
-				break;
-			case PRINT_MAXC:
-				field->print_routine(field,
-						     assoc->max_cpus_pj);
-				break;
-			case PRINT_MAXJ:
-				field->print_routine(field,
-						     assoc->max_jobs);
-				break;
-			case PRINT_MAXN:
-				field->print_routine(field,
-						     assoc->max_nodes_pj);
-				break;
-			case PRINT_MAXS:
-				field->print_routine(field,
-						     assoc->max_submit_jobs);
-				break;
-			case PRINT_MAXW:
-				field->print_routine(
-					field,
-					assoc->max_wall_pj);
-				break;
-			case PRINT_PNAME:
-				field->print_routine(field,
-						     assoc->parent_acct);
-				break;
-			case PRINT_PART:
-				field->print_routine(field,
-						     assoc->partition);
-				break;
-			case PRINT_QOS:
-				if (!g_qos_list)
-					g_qos_list = acct_storage_g_get_qos(
-						db_conn, my_uid, NULL);
-
-				field->print_routine(
-					field,
-					g_qos_list,
-					assoc->qos_list);
-				break;
-			case PRINT_USER:
-				field->print_routine(field,
-						     assoc->user);
-				break;
-			default:
-				field->print_routine(
-					field, NULL);
-				break;
-			}
+			sacctmgr_print_assoc_rec(assoc, field, NULL, 0);
 		}
 		list_iterator_reset(itr2);
 		printf("\n");
 	}
 	list_iterator_destroy(itr);
 	list_iterator_destroy(itr2);
-	list_destroy(print_fields_list);
+	FREE_NULL_LIST(print_fields_list);
 	if (add)
-		rc = acct_storage_g_add_associations(db_conn,
-						     my_uid, assoc_list);
+		rc = acct_storage_g_add_assocs(db_conn,
+					       my_uid, assoc_list);
 	printf("--------------------------------------------------------------\n\n");
 
 	return rc;
 }
 
 static int _mod_assoc(sacctmgr_file_opts_t *file_opts,
-		      slurmdb_association_rec_t *assoc,
+		      slurmdb_assoc_rec_t *assoc,
 		      sacctmgr_mod_type_t mod_type,
 		      char *parent)
 {
 	int changed = 0;
-	slurmdb_association_rec_t mod_assoc;
-	slurmdb_association_cond_t assoc_cond;
+	slurmdb_assoc_rec_t mod_assoc;
+	slurmdb_assoc_cond_t assoc_cond;
 	char *type = NULL;
 	char *name = NULL;
 	char *my_info = NULL;
@@ -719,178 +394,190 @@ static int _mod_assoc(sacctmgr_file_opts_t *file_opts,
 		return 0;
 		break;
 	}
-	slurmdb_init_association_rec(&mod_assoc, 0);
-	memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+	slurmdb_init_assoc_rec(&mod_assoc, 0);
+	memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 
-	if ((file_opts->fairshare != NO_VAL)
-	    && (assoc->shares_raw != file_opts->fairshare)) {
-		mod_assoc.shares_raw = file_opts->fairshare;
+	if ((file_opts->assoc_rec.shares_raw != NO_VAL)
+	    && (assoc->shares_raw != file_opts->assoc_rec.shares_raw)) {
+		mod_assoc.shares_raw = file_opts->assoc_rec.shares_raw;
 		changed = 1;
 		xstrfmtcat(my_info,
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
 			   " Changed fairshare",
 			   type, name,
 			   assoc->shares_raw,
-			   file_opts->fairshare);
+			   file_opts->assoc_rec.shares_raw);
 	}
 
-	if ((file_opts->grp_cpu_mins != NO_VAL)
-	    && (assoc->grp_cpu_mins != file_opts->grp_cpu_mins)) {
-		mod_assoc.grp_cpu_mins = file_opts->grp_cpu_mins;
+	if (file_opts->assoc_rec.grp_tres_mins
+	    && xstrcmp(assoc->grp_tres_mins,
+		       file_opts->assoc_rec.grp_tres_mins)) {
+		mod_assoc.grp_tres_mins = file_opts->assoc_rec.grp_tres_mins;
 		changed = 1;
 		xstrfmtcat(my_info,
 			   "%-30.30s for %-7.7s %-10.10s "
-			   "%8"PRIu64" -> %"PRIu64"\n",
-			   " Changed GrpCPUMins",
+			   "%8s -> %s\n",
+			   " Changed GrpTRESMins",
 			   type, name,
-			   assoc->grp_cpu_mins,
-			   file_opts->grp_cpu_mins);
+			   assoc->grp_tres_mins,
+			   file_opts->assoc_rec.grp_tres_mins);
 	}
 
-	if ((file_opts->grp_cpus != NO_VAL)
-	    && (assoc->grp_cpus != file_opts->grp_cpus)) {
-		mod_assoc.grp_cpus = file_opts->grp_cpus;
+	if (file_opts->assoc_rec.grp_tres_run_mins
+	    && xstrcmp(assoc->grp_tres_run_mins,
+		file_opts->assoc_rec.grp_tres_run_mins)) {
+		mod_assoc.grp_tres_run_mins =
+			file_opts->assoc_rec.grp_tres_run_mins;
 		changed = 1;
 		xstrfmtcat(my_info,
-			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed GrpCpus",
+			   "%-30.30s for %-7.7s %-10.10s "
+			   "%8s -> %s\n",
+			   " Changed GrpTRESRunMins",
 			   type, name,
-			   assoc->grp_cpus,
-			   file_opts->grp_cpus);
+			   assoc->grp_tres_run_mins,
+			   file_opts->assoc_rec.grp_tres_run_mins);
 	}
 
-	if ((file_opts->grp_jobs != NO_VAL)
-	    && (assoc->grp_jobs != file_opts->grp_jobs)) {
-		mod_assoc.grp_jobs = file_opts->grp_jobs;
+	if (file_opts->assoc_rec.grp_tres
+	    && xstrcmp(assoc->grp_tres, file_opts->assoc_rec.grp_tres)) {
+		mod_assoc.grp_tres = file_opts->assoc_rec.grp_tres;
 		changed = 1;
 		xstrfmtcat(my_info,
-			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed GrpJobs",
+			   "%-30.30s for %-7.7s %-10.10s %8s -> %s\n",
+			   " Changed GrpTRES",
 			   type, name,
-			   assoc->grp_jobs,
-			   file_opts->grp_jobs);
+			   assoc->grp_tres,
+			   file_opts->assoc_rec.grp_tres);
 	}
 
-	if ((file_opts->grp_mem != NO_VAL)
-	   && (assoc->grp_mem != file_opts->grp_mem)) {
-		mod_assoc.grp_mem = file_opts->grp_mem;
+	if ((file_opts->assoc_rec.grp_jobs != NO_VAL)
+	    && (assoc->grp_jobs != file_opts->assoc_rec.grp_jobs)) {
+		mod_assoc.grp_jobs = file_opts->assoc_rec.grp_jobs;
 		changed = 1;
 		xstrfmtcat(my_info,
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed GrpMemory",
+			   " Changed GrpJobs",
 			   type, name,
-			   assoc->grp_mem,
-			   file_opts->grp_mem);
+			   assoc->grp_jobs,
+			   file_opts->assoc_rec.grp_jobs);
 	}
 
-	if ((file_opts->grp_nodes != NO_VAL)
-	    && (assoc->grp_nodes != file_opts->grp_nodes)) {
-		mod_assoc.grp_nodes = file_opts->grp_nodes;
+	if ((file_opts->assoc_rec.grp_submit_jobs != NO_VAL)
+	    && (assoc->grp_submit_jobs !=
+		file_opts->assoc_rec.grp_submit_jobs)) {
+		mod_assoc.grp_submit_jobs =
+			file_opts->assoc_rec.grp_submit_jobs;
 		changed = 1;
 		xstrfmtcat(my_info,
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed GrpNodes",
+			   " Changed GrpSubmitJobs",
 			   type, name,
-			   assoc->grp_nodes,
-			   file_opts->grp_nodes);
+			   assoc->grp_submit_jobs,
+			   file_opts->assoc_rec.grp_submit_jobs);
 	}
 
-	if ((file_opts->grp_submit_jobs != NO_VAL)
-	    && (assoc->grp_submit_jobs != file_opts->grp_submit_jobs)) {
-		mod_assoc.grp_submit_jobs = file_opts->grp_submit_jobs;
+	if ((file_opts->assoc_rec.grp_wall != NO_VAL)
+	    && (assoc->grp_wall != file_opts->assoc_rec.grp_wall)) {
+		mod_assoc.grp_wall = file_opts->assoc_rec.grp_wall;
 		changed = 1;
 		xstrfmtcat(my_info,
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed GrpSubmitJobs",
+			   " Changed GrpWallDuration",
 			   type, name,
-			   assoc->grp_submit_jobs,
-			   file_opts->grp_submit_jobs);
+			   assoc->grp_wall,
+			   file_opts->assoc_rec.grp_wall);
 	}
 
-	if ((file_opts->grp_wall != NO_VAL)
-	    && (assoc->grp_wall != file_opts->grp_wall)) {
-		mod_assoc.grp_wall = file_opts->grp_wall;
+	if (file_opts->assoc_rec.max_tres_mins_pj
+	    && xstrcmp(assoc->max_tres_mins_pj,
+		       file_opts->assoc_rec.max_tres_mins_pj)) {
+		mod_assoc.max_tres_mins_pj =
+			file_opts->assoc_rec.max_tres_mins_pj;
 		changed = 1;
 		xstrfmtcat(my_info,
-			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed GrpWallDuration",
+			   "%-30.30s for %-7.7s %-10.10s "
+			   "%8s -> %s\n",
+			   " Changed MaxTRESMinsPerJob",
 			   type, name,
-			   assoc->grp_wall,
-			   file_opts->grp_wall);
+			   assoc->max_tres_mins_pj,
+			   file_opts->assoc_rec.max_tres_mins_pj);
 	}
 
-	if ((file_opts->max_cpu_mins_pj != (uint64_t)NO_VAL)
-	    && (assoc->max_cpu_mins_pj != file_opts->max_cpu_mins_pj)) {
-		mod_assoc.max_cpu_mins_pj =
-			file_opts->max_cpu_mins_pj;
+	if (file_opts->assoc_rec.max_tres_run_mins
+	    && xstrcmp(assoc->max_tres_run_mins,
+		       file_opts->assoc_rec.max_tres_run_mins)) {
+		mod_assoc.max_tres_run_mins =
+			file_opts->assoc_rec.max_tres_run_mins;
 		changed = 1;
 		xstrfmtcat(my_info,
 			   "%-30.30s for %-7.7s %-10.10s "
-			   "%8"PRIu64" -> %"PRIu64"\n",
-			   " Changed MaxCPUMinsPerJob",
+			   "%8s -> %s\n",
+			   " Changed MaxTRESRunMins",
 			   type, name,
-			   assoc->max_cpu_mins_pj,
-			   file_opts->max_cpu_mins_pj);
+			   assoc->max_tres_run_mins,
+			   file_opts->assoc_rec.max_tres_run_mins);
 	}
 
-	if ((file_opts->max_cpus_pj != NO_VAL)
-	    && (assoc->max_cpus_pj != file_opts->max_cpus_pj)) {
-		mod_assoc.max_cpus_pj = file_opts->max_cpus_pj;
+	if (file_opts->assoc_rec.max_tres_pj
+	    && xstrcmp(assoc->max_tres_pj, file_opts->assoc_rec.max_tres_pj)) {
+		mod_assoc.max_tres_pj = file_opts->assoc_rec.max_tres_pj;
 		changed = 1;
 		xstrfmtcat(my_info,
-			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed MaxCpusPerJob",
+			   "%-30.30s for %-7.7s %-10.10s %8s -> %s\n",
+			   " Changed MaxTRESPerJob",
 			   type, name,
-			   assoc->max_cpus_pj,
-			   file_opts->max_cpus_pj);
+			   assoc->max_tres_pj,
+			   file_opts->assoc_rec.max_tres_pj);
 	}
 
-	if ((file_opts->max_jobs != NO_VAL)
-	    && (assoc->max_jobs != file_opts->max_jobs)) {
-		mod_assoc.max_jobs = file_opts->max_jobs;
+	if (file_opts->assoc_rec.max_tres_pn
+	    && xstrcmp(assoc->max_tres_pn, file_opts->assoc_rec.max_tres_pn)) {
+		mod_assoc.max_tres_pn = file_opts->assoc_rec.max_tres_pn;
 		changed = 1;
 		xstrfmtcat(my_info,
-			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed MaxJobs",
+			   "%-30.30s for %-7.7s %-10.10s %8s -> %s\n",
+			   " Changed MaxTRESPerNode",
 			   type, name,
-			   assoc->max_jobs,
-			   file_opts->max_jobs);
+			   assoc->max_tres_pn,
+			   file_opts->assoc_rec.max_tres_pn);
 	}
 
-	if ((file_opts->max_nodes_pj != NO_VAL)
-	    && (assoc->max_nodes_pj != file_opts->max_nodes_pj)) {
-		mod_assoc.max_nodes_pj = file_opts->max_nodes_pj;
+	if ((file_opts->assoc_rec.max_jobs != NO_VAL)
+	    && (assoc->max_jobs != file_opts->assoc_rec.max_jobs)) {
+		mod_assoc.max_jobs = file_opts->assoc_rec.max_jobs;
 		changed = 1;
 		xstrfmtcat(my_info,
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
-			   " Changed MaxNodesPerJob",
+			   " Changed MaxJobs",
 			   type, name,
-			   assoc->max_nodes_pj,
-			   file_opts->max_nodes_pj);
+			   assoc->max_jobs,
+			   file_opts->assoc_rec.max_jobs);
 	}
 
-	if ((file_opts->max_submit_jobs != NO_VAL)
-	    && (assoc->max_submit_jobs != file_opts->max_submit_jobs)) {
-		mod_assoc.max_submit_jobs = file_opts->max_submit_jobs;
+	if ((file_opts->assoc_rec.max_submit_jobs != NO_VAL)
+	    && (assoc->max_submit_jobs !=
+		file_opts->assoc_rec.max_submit_jobs)) {
+		mod_assoc.max_submit_jobs =
+			file_opts->assoc_rec.max_submit_jobs;
 		changed = 1;
 		xstrfmtcat(my_info,
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
 			   " Changed MaxSubmitJobs",
 			   type, name,
 			   assoc->max_submit_jobs,
-			   file_opts->max_submit_jobs);
+			   file_opts->assoc_rec.max_submit_jobs);
 	}
 
-	if ((file_opts->max_wall_pj != NO_VAL)
-	    && (assoc->max_wall_pj != file_opts->max_wall_pj)) {
-		mod_assoc.max_wall_pj =	file_opts->max_wall_pj;
+	if ((file_opts->assoc_rec.max_wall_pj != NO_VAL)
+	    && (assoc->max_wall_pj != file_opts->assoc_rec.max_wall_pj)) {
+		mod_assoc.max_wall_pj =	file_opts->assoc_rec.max_wall_pj;
 		changed = 1;
 		xstrfmtcat(my_info,
 			   "%-30.30s for %-7.7s %-10.10s %8d -> %d\n",
 			   " Changed MaxWallDurationPerJob",
 			   type, name,
 			   assoc->max_wall_pj,
-			   file_opts->max_wall_pj);
+			   file_opts->assoc_rec.max_wall_pj);
 	}
 	if (assoc->parent_acct && parent
 	    && strcmp(assoc->parent_acct, parent)) {
@@ -904,11 +591,13 @@ static int _mod_assoc(sacctmgr_file_opts_t *file_opts,
 			   parent);
 	}
 
-	if (assoc->qos_list && list_count(assoc->qos_list)
-	    && file_opts->qos_list && list_count(file_opts->qos_list)) {
+	if (assoc->qos_list && list_count(assoc->qos_list) &&
+	    file_opts->assoc_rec.qos_list &&
+	    list_count(file_opts->assoc_rec.qos_list)) {
 		ListIterator now_qos_itr =
 			list_iterator_create(assoc->qos_list),
-			new_qos_itr = list_iterator_create(file_opts->qos_list);
+			new_qos_itr =
+			list_iterator_create(file_opts->assoc_rec.qos_list);
 		char *now_qos = NULL, *new_qos = NULL;
 
 		if (!mod_assoc.qos_list)
@@ -937,12 +626,12 @@ static int _mod_assoc(sacctmgr_file_opts_t *file_opts,
 			xfree(new_qos);
 			changed = 1;
 		} else {
-			list_destroy(mod_assoc.qos_list);
-			mod_assoc.qos_list = NULL;
+			FREE_NULL_LIST(mod_assoc.qos_list);
 		}
-	} else if (file_opts->qos_list && list_count(file_opts->qos_list)) {
-		char *new_qos = get_qos_complete_str(g_qos_list,
-						     file_opts->qos_list);
+	} else if (file_opts->assoc_rec.qos_list &&
+		   list_count(file_opts->assoc_rec.qos_list)) {
+		char *new_qos = get_qos_complete_str(
+			g_qos_list, file_opts->assoc_rec.qos_list);
 
 		if (new_qos) {
 			xstrfmtcat(my_info,
@@ -951,8 +640,8 @@ static int _mod_assoc(sacctmgr_file_opts_t *file_opts,
 				   type, name,
 				   new_qos);
 			xfree(new_qos);
-			mod_assoc.qos_list = file_opts->qos_list;
-			file_opts->qos_list = NULL;
+			mod_assoc.qos_list = file_opts->assoc_rec.qos_list;
+			file_opts->assoc_rec.qos_list = NULL;
 			changed = 1;
 		}
 	}
@@ -977,21 +666,18 @@ static int _mod_assoc(sacctmgr_file_opts_t *file_opts,
 		}
 
 		notice_thread_init();
-		ret_list = acct_storage_g_modify_associations(
+		ret_list = acct_storage_g_modify_assocs(
 			db_conn, my_uid,
 			&assoc_cond,
 			&mod_assoc);
 		notice_thread_fini();
 
-		if (mod_assoc.qos_list)
-			list_destroy(mod_assoc.qos_list);
+		FREE_NULL_LIST(mod_assoc.qos_list);
 
-		list_destroy(assoc_cond.cluster_list);
-		list_destroy(assoc_cond.acct_list);
-		if (assoc_cond.user_list)
-			list_destroy(assoc_cond.user_list);
-		if (assoc_cond.partition_list)
-			list_destroy(assoc_cond.partition_list);
+		FREE_NULL_LIST(assoc_cond.cluster_list);
+		FREE_NULL_LIST(assoc_cond.acct_list);
+		FREE_NULL_LIST(assoc_cond.user_list);
+		FREE_NULL_LIST(assoc_cond.partition_list);
 
 /* 		if (ret_list && list_count(ret_list)) { */
 /* 			char *object = NULL; */
@@ -1005,7 +691,7 @@ static int _mod_assoc(sacctmgr_file_opts_t *file_opts,
 
 		if (ret_list) {
 			printf("%s", my_info);
-			list_destroy(ret_list);
+			FREE_NULL_LIST(ret_list);
 		} else
 			changed = 0;
 		xfree(my_info);
@@ -1050,7 +736,7 @@ static int _mod_cluster(sacctmgr_file_opts_t *file_opts,
 							  &mod_cluster);
 		notice_thread_fini();
 
-		list_destroy(cluster_cond.cluster_list);
+		FREE_NULL_LIST(cluster_cond.cluster_list);
 
 /* 		if (ret_list && list_count(ret_list)) { */
 /* 			char *object = NULL; */
@@ -1064,7 +750,7 @@ static int _mod_cluster(sacctmgr_file_opts_t *file_opts,
 
 		if (ret_list) {
 			printf("%s", my_info);
-			list_destroy(ret_list);
+			FREE_NULL_LIST(ret_list);
 		} else
 			changed = 0;
 		xfree(my_info);
@@ -1089,11 +775,11 @@ static int _mod_acct(sacctmgr_file_opts_t *file_opts,
 	char *desc = NULL, *org = NULL, *my_info = NULL;
 	slurmdb_account_rec_t mod_acct;
 	slurmdb_account_cond_t acct_cond;
-	slurmdb_association_cond_t assoc_cond;
+	slurmdb_assoc_cond_t assoc_cond;
 
 	memset(&mod_acct, 0, sizeof(slurmdb_account_rec_t));
 	memset(&acct_cond, 0, sizeof(slurmdb_account_cond_t));
-	memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+	memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 
 	if (file_opts->desc)
 		desc = xstrdup(file_opts->desc);
@@ -1138,7 +824,7 @@ static int _mod_acct(sacctmgr_file_opts_t *file_opts,
 							  &mod_acct);
 		notice_thread_fini();
 
-		list_destroy(assoc_cond.acct_list);
+		FREE_NULL_LIST(assoc_cond.acct_list);
 
 /* 		if (ret_list && list_count(ret_list)) { */
 /* 			char *object = NULL; */
@@ -1152,7 +838,7 @@ static int _mod_acct(sacctmgr_file_opts_t *file_opts,
 
 		if (ret_list) {
 			printf("%s", my_info);
-			list_destroy(ret_list);
+			FREE_NULL_LIST(ret_list);
 		} else
 			changed = 0;
 		xfree(my_info);
@@ -1171,7 +857,7 @@ static int _mod_user(sacctmgr_file_opts_t *file_opts,
 	slurmdb_user_rec_t mod_user;
 	slurmdb_user_cond_t user_cond;
 	List ret_list = NULL;
-	slurmdb_association_cond_t assoc_cond;
+	slurmdb_assoc_cond_t assoc_cond;
 
 	if (!user || !user->name) {
 		fatal(" We need a user name in _mod_user");
@@ -1179,7 +865,7 @@ static int _mod_user(sacctmgr_file_opts_t *file_opts,
 
 	memset(&mod_user, 0, sizeof(slurmdb_user_rec_t));
 	memset(&user_cond, 0, sizeof(slurmdb_user_cond_t));
-	memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+	memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 
 	assoc_cond.user_list = list_create(NULL);
 	list_append(assoc_cond.user_list, user->name);
@@ -1250,7 +936,7 @@ static int _mod_user(sacctmgr_file_opts_t *file_opts,
 
 		if (ret_list) {
 			printf("%s", my_info);
-			list_destroy(ret_list);
+			FREE_NULL_LIST(ret_list);
 			set = 1;
 		}
 		xfree(my_info);
@@ -1331,7 +1017,7 @@ static int _mod_user(sacctmgr_file_opts_t *file_opts,
 			notice_thread_fini();
 			set = 1;
 		}
-		list_destroy(add_list);
+		FREE_NULL_LIST(add_list);
 	}
 
 	if ((!user->wckey_list || !list_count(user->wckey_list))
@@ -1408,10 +1094,10 @@ static int _mod_user(sacctmgr_file_opts_t *file_opts,
 			set = 1;
 		}
 		list_transfer(user->wckey_list, add_list);
-		list_destroy(add_list);
+		FREE_NULL_LIST(add_list);
 	}
 
-	list_destroy(assoc_cond.user_list);
+	FREE_NULL_LIST(assoc_cond.user_list);
 
 	return set;
 }
@@ -1438,13 +1124,13 @@ static slurmdb_user_rec_t *_set_user_up(sacctmgr_file_opts_t *file_opts,
 
 	if (file_opts->coord_list) {
 		slurmdb_user_cond_t user_cond;
-		slurmdb_association_cond_t assoc_cond;
+		slurmdb_assoc_cond_t assoc_cond;
 		ListIterator coord_itr = NULL;
 		char *temp_char = NULL;
 		slurmdb_coord_rec_t *coord = NULL;
 
 		memset(&user_cond, 0, sizeof(slurmdb_user_cond_t));
-		memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+		memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 		assoc_cond.user_list = list_create(NULL);
 		list_append(assoc_cond.user_list, user->name);
 		user_cond.assoc_cond = &assoc_cond;
@@ -1454,7 +1140,7 @@ static slurmdb_user_rec_t *_set_user_up(sacctmgr_file_opts_t *file_opts,
 					 file_opts->coord_list,
 					 &user_cond);
 		notice_thread_fini();
-		list_destroy(assoc_cond.user_list);
+		FREE_NULL_LIST(assoc_cond.user_list);
 		user->coord_accts = list_create(slurmdb_destroy_coord_rec);
 		coord_itr = list_iterator_create(file_opts->coord_list);
 		while ((temp_char = list_next(coord_itr))) {
@@ -1514,11 +1200,11 @@ static slurmdb_account_rec_t *_set_acct_up(sacctmgr_file_opts_t *file_opts,
 	return acct;
 }
 
-static slurmdb_association_rec_t *_set_assoc_up(sacctmgr_file_opts_t *file_opts,
+static slurmdb_assoc_rec_t *_set_assoc_up(sacctmgr_file_opts_t *file_opts,
 						sacctmgr_mod_type_t mod_type,
 						char *cluster, char *parent)
 {
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 
 	if (!cluster) {
 		error("No cluster name was given for _set_assoc_up");
@@ -1530,8 +1216,8 @@ static slurmdb_association_rec_t *_set_assoc_up(sacctmgr_file_opts_t *file_opts,
 		return NULL;
 	}
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	slurmdb_init_association_rec(assoc, 0);
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	slurmdb_init_assoc_rec(assoc, 0);
 
 	switch(mod_type) {
 	case MOD_CLUSTER:
@@ -1546,40 +1232,23 @@ static slurmdb_association_rec_t *_set_assoc_up(sacctmgr_file_opts_t *file_opts,
 	case MOD_USER:
 		assoc->acct = xstrdup(parent);
 		assoc->cluster = xstrdup(cluster);
-		assoc->partition = xstrdup(file_opts->part);
+		assoc->partition = xstrdup(file_opts->assoc_rec.partition);
 		assoc->user = xstrdup(file_opts->name);
 		if (!strcmp(assoc->acct, file_opts->def_acct))
 			assoc->is_def = 1;
 		break;
 	default:
 		error("Unknown mod type for _set_assoc_up %d", mod_type);
-		slurmdb_destroy_association_rec(assoc);
+		slurmdb_destroy_assoc_rec(assoc);
 		assoc = NULL;
 		break;
 	}
 
-	assoc->shares_raw = file_opts->fairshare;
-
-	assoc->def_qos_id = file_opts->def_qos_id;
+	assoc->shares_raw = file_opts->assoc_rec.shares_raw;
 
-	assoc->grp_cpu_mins = file_opts->grp_cpu_mins;
-	assoc->grp_cpus = file_opts->grp_cpus;
-	assoc->grp_jobs = file_opts->grp_jobs;
-	assoc->grp_mem = file_opts->grp_mem;
-	assoc->grp_nodes = file_opts->grp_nodes;
-	assoc->grp_submit_jobs = file_opts->grp_submit_jobs;
-	assoc->grp_wall = file_opts->grp_wall;
-
-	assoc->max_cpu_mins_pj = file_opts->max_cpu_mins_pj;
-	assoc->max_cpus_pj = file_opts->max_cpus_pj;
-	assoc->max_jobs = file_opts->max_jobs;
-	assoc->max_nodes_pj = file_opts->max_nodes_pj;
-	assoc->max_submit_jobs = file_opts->max_submit_jobs;
-	assoc->max_wall_pj = file_opts->max_wall_pj;
-
-	if (file_opts->qos_list && list_count(file_opts->qos_list))
-		assoc->qos_list = copy_char_list(file_opts->qos_list);
+	assoc->def_qos_id = file_opts->assoc_rec.def_qos_id;
 
+	slurmdb_copy_assoc_rec_limits(assoc, &file_opts->assoc_rec);
 
 	return assoc;
 }
@@ -1719,13 +1388,13 @@ static int _print_file_slurmdb_hierarchical_rec_children(
 }
 
 extern int print_file_add_limits_to_line(char **line,
-					 slurmdb_association_rec_t *assoc)
+					 slurmdb_assoc_rec_t *assoc)
 {
+	char *tmp_char;
 	if (!assoc)
 		return SLURM_ERROR;
 
 	if (assoc->def_qos_id && (assoc->def_qos_id != NO_VAL)) {
-		char *tmp_char;
 		if (!g_qos_list)
 			g_qos_list = acct_storage_g_get_qos(
 				db_conn, my_uid, NULL);
@@ -1735,40 +1404,109 @@ extern int print_file_add_limits_to_line(char **line,
 	if (assoc->shares_raw != INFINITE)
 		xstrfmtcat(*line, ":Fairshare=%u", assoc->shares_raw);
 
-	if (assoc->grp_cpu_mins != (uint64_t)INFINITE)
-		xstrfmtcat(*line, ":GrpCPUMins=%"PRIu64, assoc->grp_cpu_mins);
+	if (assoc->grp_tres_mins) {
+		if (!g_tres_list) {
+			slurmdb_tres_cond_t tres_cond;
+			memset(&tres_cond, 0, sizeof(slurmdb_tres_cond_t));
+			tres_cond.with_deleted = 1;
+			g_tres_list = slurmdb_tres_get(db_conn, &tres_cond);
+		}
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			assoc->grp_tres_mins, g_tres_list);
+		xstrfmtcat(*line, ":GrpTRESMins=%s", tmp_char);
+		xfree(tmp_char);
+	}
 
-	if (assoc->grp_cpus != INFINITE)
-		xstrfmtcat(*line, ":GrpCPUs=%u", assoc->grp_cpus);
+	if (assoc->grp_tres_run_mins) {
+		if (!g_tres_list) {
+			slurmdb_tres_cond_t tres_cond;
+			memset(&tres_cond, 0, sizeof(slurmdb_tres_cond_t));
+			tres_cond.with_deleted = 1;
+			g_tres_list = slurmdb_tres_get(db_conn, &tres_cond);
+		}
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			assoc->grp_tres_run_mins, g_tres_list);
+		xstrfmtcat(*line, ":GrpTRESRunMins=%s", tmp_char);
+		xfree(tmp_char);
+	}
+
+	if (assoc->grp_tres) {
+		if (!g_tres_list) {
+			slurmdb_tres_cond_t tres_cond;
+			memset(&tres_cond, 0, sizeof(slurmdb_tres_cond_t));
+			tres_cond.with_deleted = 1;
+			g_tres_list = slurmdb_tres_get(db_conn, &tres_cond);
+		}
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			assoc->grp_tres, g_tres_list);
+		xstrfmtcat(*line, ":GrpTRES=%s", tmp_char);
+		xfree(tmp_char);
+	}
 
 	if (assoc->grp_jobs != INFINITE)
 		xstrfmtcat(*line, ":GrpJobs=%u", assoc->grp_jobs);
 
-	if (assoc->grp_mem != INFINITE)
-		xstrfmtcat(*line, ":GrpMemory=%u", assoc->grp_mem);
-
-	if (assoc->grp_nodes != INFINITE)
-		xstrfmtcat(*line, ":GrpNodes=%u", assoc->grp_nodes);
-
 	if (assoc->grp_submit_jobs != INFINITE)
 		xstrfmtcat(*line, ":GrpSubmitJobs=%u", assoc->grp_submit_jobs);
 
 	if (assoc->grp_wall != INFINITE)
 		xstrfmtcat(*line, ":GrpWall=%u", assoc->grp_wall);
 
-	if (assoc->max_cpu_mins_pj != (uint64_t)INFINITE)
-		xstrfmtcat(*line, ":MaxCPUMinsPerJob=%"PRIu64,
-			   assoc->max_cpu_mins_pj);
+	if (assoc->max_tres_mins_pj) {
+		if (!g_tres_list) {
+			slurmdb_tres_cond_t tres_cond;
+			memset(&tres_cond, 0, sizeof(slurmdb_tres_cond_t));
+			tres_cond.with_deleted = 1;
+			g_tres_list = slurmdb_tres_get(db_conn, &tres_cond);
+		}
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			assoc->max_tres_mins_pj, g_tres_list);
+		xstrfmtcat(*line, ":MaxTRESMinsPerJob=%s", tmp_char);
+		xfree(tmp_char);
+	}
+
+	if (assoc->max_tres_run_mins) {
+		if (!g_tres_list) {
+			slurmdb_tres_cond_t tres_cond;
+			memset(&tres_cond, 0, sizeof(slurmdb_tres_cond_t));
+			tres_cond.with_deleted = 1;
+			g_tres_list = slurmdb_tres_get(db_conn, &tres_cond);
+		}
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			assoc->max_tres_run_mins, g_tres_list);
+		xstrfmtcat(*line, ":MaxTRESRunMins=%s", tmp_char);
+		xfree(tmp_char);
+	}
 
-	if (assoc->max_cpus_pj != INFINITE)
-		xstrfmtcat(*line, ":MaxCPUsPerJob=%u", assoc->max_cpus_pj);
+	if (assoc->max_tres_pj) {
+		if (!g_tres_list) {
+			slurmdb_tres_cond_t tres_cond;
+			memset(&tres_cond, 0, sizeof(slurmdb_tres_cond_t));
+			tres_cond.with_deleted = 1;
+			g_tres_list = slurmdb_tres_get(db_conn, &tres_cond);
+		}
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			assoc->max_tres_pj, g_tres_list);
+		xstrfmtcat(*line, ":MaxTRESPerJob=%s", tmp_char);
+		xfree(tmp_char);
+	}
+
+	if (assoc->max_tres_pn) {
+		if (!g_tres_list) {
+			slurmdb_tres_cond_t tres_cond;
+			memset(&tres_cond, 0, sizeof(slurmdb_tres_cond_t));
+			tres_cond.with_deleted = 1;
+			g_tres_list = slurmdb_tres_get(db_conn, &tres_cond);
+		}
+		tmp_char = slurmdb_make_tres_string_from_simple(
+			assoc->max_tres_pn, g_tres_list);
+		xstrfmtcat(*line, ":MaxTRESPerNode=%s", tmp_char);
+		xfree(tmp_char);
+	}
 
 	if (assoc->max_jobs != INFINITE)
 		xstrfmtcat(*line, ":MaxJobs=%u", assoc->max_jobs);
 
-	if (assoc->max_nodes_pj != INFINITE)
-		xstrfmtcat(*line, ":MaxNodesPerJob=%u", assoc->max_nodes_pj);
-
 	if (assoc->max_submit_jobs != INFINITE)
 		xstrfmtcat(*line, ":MaxSubmitJobs=%u", assoc->max_submit_jobs);
 
@@ -1844,7 +1582,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 	int rc = SLURM_SUCCESS;
 
 	sacctmgr_file_opts_t *file_opts = NULL;
-	slurmdb_association_rec_t *assoc = NULL, *assoc2 = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL, *assoc2 = NULL;
 	slurmdb_account_rec_t *acct = NULL, *acct2 = NULL;
 	slurmdb_cluster_rec_t *cluster = NULL;
 	slurmdb_user_rec_t *user = NULL, *user2 = NULL;
@@ -1945,13 +1683,13 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 
 	/* These are new info so they need to be freed here */
 	acct_list = list_create(slurmdb_destroy_account_rec);
-	slurmdb_assoc_list = list_create(slurmdb_destroy_association_rec);
+	slurmdb_assoc_list = list_create(slurmdb_destroy_assoc_rec);
 	user_list = list_create(slurmdb_destroy_user_rec);
-	user_assoc_list = list_create(slurmdb_destroy_association_rec);
+	user_assoc_list = list_create(slurmdb_destroy_assoc_rec);
 
 	mod_acct_list = list_create(slurmdb_destroy_account_rec);
 	mod_user_list = list_create(slurmdb_destroy_user_rec);
-	mod_assoc_list = list_create(slurmdb_destroy_association_rec);
+	mod_assoc_list = list_create(slurmdb_destroy_assoc_rec);
 
 	format_list = list_create(slurm_destroy_char);
 
@@ -1994,7 +1732,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 
 		if (!strcasecmp("Machine", object)
 		    || !strcasecmp("Cluster", object)) {
-			slurmdb_association_cond_t assoc_cond;
+			slurmdb_assoc_cond_t assoc_cond;
 
 			if (cluster_name && !cluster_name_set) {
 				exit_code=1;
@@ -2026,7 +1764,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 			user_cond.with_wckeys = 1;
 
 			memset(&assoc_cond, 0,
-			       sizeof(slurmdb_association_cond_t));
+			       sizeof(slurmdb_assoc_cond_t));
 			assoc_cond.cluster_list = list_create(NULL);
 			assoc_cond.with_raw_qos = 1;
 			assoc_cond.without_parent_limits = 1;
@@ -2046,8 +1784,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				fprintf(stderr, " Your uid (%u) is not in the "
 					"accounting system, can't load file.\n",
 					my_uid);
-				if (curr_user_list)
-					list_destroy(curr_user_list);
+				FREE_NULL_LIST(curr_user_list);
 				return;
 
 			} else {
@@ -2060,8 +1797,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 						" Your user does not have "
 						"sufficient "
 						"privileges to load files.\n");
-					if (curr_user_list)
-						list_destroy(curr_user_list);
+					FREE_NULL_LIST(curr_user_list);
 					return;
 				}
 			}
@@ -2089,7 +1825,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				ret_list = acct_storage_g_remove_clusters(
 					db_conn, my_uid, &cluster_cond);
 				notice_thread_fini();
-				list_destroy(cluster_cond.cluster_list);
+				FREE_NULL_LIST(cluster_cond.cluster_list);
 
 				if (!ret_list) {
 					exit_code=1;
@@ -2135,13 +1871,13 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 					    cluster->root_assoc);
 
 				(void) _print_out_assoc(temp_assoc_list, 0, 0);
-				list_destroy(temp_assoc_list);
+				FREE_NULL_LIST(temp_assoc_list);
 				notice_thread_init();
 
 				rc = acct_storage_g_add_clusters(
 					db_conn, my_uid, cluster_list);
 				notice_thread_fini();
-				list_destroy(cluster_list);
+				FREE_NULL_LIST(cluster_list);
 
 				if (rc != SLURM_SUCCESS) {
 					exit_code=1;
@@ -2164,13 +1900,13 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 			_destroy_sacctmgr_file_opts(file_opts);
 
 			/* assoc_cond if set up above */
-			curr_assoc_list = acct_storage_g_get_associations(
+			curr_assoc_list = acct_storage_g_get_assocs(
 				db_conn, my_uid, &assoc_cond);
-			list_destroy(assoc_cond.cluster_list);
+			FREE_NULL_LIST(assoc_cond.cluster_list);
 
 			if (!curr_assoc_list) {
 				exit_code=1;
-				fprintf(stderr, " Problem getting associations "
+				fprintf(stderr, " Problem getting assocs "
 					"for this cluster\n");
 				rc = SLURM_ERROR;
 				break;
@@ -2286,7 +2022,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 					debug2("already modified this account");
 				}
 
-				assoc2 = sacctmgr_find_association_from_list(
+				assoc2 = sacctmgr_find_assoc_from_list(
 					mod_assoc_list,
 					NULL, file_opts->name,
 					cluster_name,
@@ -2294,8 +2030,8 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 
 				if (!assoc2) {
 					assoc2 = xmalloc(
-						sizeof(slurmdb_association_rec_t));
-					slurmdb_init_association_rec(assoc2, 0);
+						sizeof(slurmdb_assoc_rec_t));
+					slurmdb_init_assoc_rec(assoc2, 0);
 					list_append(mod_assoc_list, assoc2);
 					assoc2->cluster = xstrdup(cluster_name);
 					assoc2->acct = xstrdup(file_opts->name);
@@ -2338,22 +2074,23 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				/* don't add anything to the
 				   curr_assoc_list */
 			} else if (!(assoc =
-				     sacctmgr_find_association_from_list(
+				     sacctmgr_find_assoc_from_list(
 					     curr_assoc_list,
 					     file_opts->name, parent,
-					     cluster_name, file_opts->part))
-				   && !sacctmgr_find_association_from_list(
+					     cluster_name,
+					     file_opts->assoc_rec.partition))
+				   && !sacctmgr_find_assoc_from_list(
 					   user_assoc_list,
 					   file_opts->name, parent,
 					   cluster_name,
-					   file_opts->part)) {
+					   file_opts->assoc_rec.partition)) {
 
 				/* This means the user was added
 				 * during this round but this is a new
 				 * association we are adding
 				 */
 				if (!user)
-					goto new_association;
+					goto new_assoc;
 
 				/* This means there could be a change
 				 * on the user.
@@ -2371,7 +2108,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 				} else {
 					debug2("already modified this user");
 				}
-			new_association:
+			new_assoc:
 				assoc = _set_assoc_up(file_opts, MOD_USER,
 						      cluster_name, parent);
 
@@ -2393,22 +2130,22 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 					debug2("already modified this user");
 				}
 
-				assoc2 = sacctmgr_find_association_from_list(
+				assoc2 = sacctmgr_find_assoc_from_list(
 					mod_assoc_list,
 					file_opts->name, parent,
 					cluster_name,
-					file_opts->part);
+					file_opts->assoc_rec.partition);
 
 				if (!assoc2) {
 					assoc2 = xmalloc(
-						sizeof(slurmdb_association_rec_t));
-					slurmdb_init_association_rec(assoc2, 0);
+						sizeof(slurmdb_assoc_rec_t));
+					slurmdb_init_assoc_rec(assoc2, 0);
 					list_append(mod_assoc_list, assoc2);
 					assoc2->cluster = xstrdup(cluster_name);
 					assoc2->acct = xstrdup(parent);
 					assoc2->user = xstrdup(file_opts->name);
-					assoc2->partition =
-						xstrdup(file_opts->part);
+					assoc2->partition = xstrdup(
+						file_opts->assoc_rec.partition);
 					if (_mod_assoc(file_opts,
 						       assoc, MOD_USER, parent))
 						set = 1;
@@ -2470,7 +2207,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 		}
 		list_iterator_destroy(itr);
 		list_iterator_destroy(itr2);
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		rc = acct_storage_g_add_accounts(db_conn, my_uid, acct_list);
 		printf("-----------------------------"
 		       "----------------------\n\n");
@@ -2537,7 +2274,7 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 		}
 		list_iterator_destroy(itr);
 		list_iterator_destroy(itr2);
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 
 		rc = acct_storage_g_add_users(db_conn, my_uid, user_list);
 		printf("---------------------------"
@@ -2572,20 +2309,16 @@ extern void load_sacctmgr_cfg_file (int argc, char *argv[])
 			slurm_strerror(rc));
 	}
 
-	list_destroy(format_list);
-	list_destroy(mod_acct_list);
-	list_destroy(acct_list);
-	list_destroy(slurmdb_assoc_list);
-	list_destroy(mod_user_list);
-	list_destroy(user_list);
-	list_destroy(user_assoc_list);
-	list_destroy(mod_assoc_list);
-	if (curr_acct_list)
-		list_destroy(curr_acct_list);
-	if (curr_assoc_list)
-		list_destroy(curr_assoc_list);
-	if (curr_cluster_list)
-		list_destroy(curr_cluster_list);
-	if (curr_user_list)
-		list_destroy(curr_user_list);
+	FREE_NULL_LIST(format_list);
+	FREE_NULL_LIST(mod_acct_list);
+	FREE_NULL_LIST(acct_list);
+	FREE_NULL_LIST(slurmdb_assoc_list);
+	FREE_NULL_LIST(mod_user_list);
+	FREE_NULL_LIST(user_list);
+	FREE_NULL_LIST(user_assoc_list);
+	FREE_NULL_LIST(mod_assoc_list);
+	FREE_NULL_LIST(curr_acct_list);
+	FREE_NULL_LIST(curr_assoc_list);
+	FREE_NULL_LIST(curr_cluster_list);
+	FREE_NULL_LIST(curr_user_list);
 }
diff --git a/src/sacctmgr/job_functions.c b/src/sacctmgr/job_functions.c
index c1aeb5aae..6c91a1234 100644
--- a/src/sacctmgr/job_functions.c
+++ b/src/sacctmgr/job_functions.c
@@ -228,8 +228,7 @@ extern int sacctmgr_modify_job(int argc, char *argv[])
 		rc = SLURM_ERROR;
 	}
 
-	if (ret_list)
-		list_destroy(ret_list);
+	FREE_NULL_LIST(ret_list);
 
 	notice_thread_fini();
 
diff --git a/src/sacctmgr/problem_functions.c b/src/sacctmgr/problem_functions.c
index 362c048ad..c875adb60 100644
--- a/src/sacctmgr/problem_functions.c
+++ b/src/sacctmgr/problem_functions.c
@@ -40,7 +40,7 @@
 #include "src/sacctmgr/sacctmgr.h"
 
 static int _set_cond(int *start, int argc, char *argv[],
-		     slurmdb_association_cond_t *assoc_cond,
+		     slurmdb_assoc_cond_t *assoc_cond,
 		     List format_list)
 {
 	int i, end = 0;
@@ -127,10 +127,10 @@ static int _set_cond(int *start, int argc, char *argv[],
 extern int sacctmgr_list_problem(int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
-	slurmdb_association_cond_t *assoc_cond =
-		xmalloc(sizeof(slurmdb_association_cond_t));
+	slurmdb_assoc_cond_t *assoc_cond =
+		xmalloc(sizeof(slurmdb_assoc_cond_t));
 	List assoc_list = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	int i=0;
 	ListIterator itr = NULL;
 	ListIterator itr2 = NULL;
@@ -152,29 +152,29 @@ extern int sacctmgr_list_problem(int argc, char *argv[])
 	}
 
 	if (exit_code) {
-		slurmdb_destroy_association_cond(assoc_cond);
-		list_destroy(format_list);
+		slurmdb_destroy_assoc_cond(assoc_cond);
+		FREE_NULL_LIST(format_list);
 		return SLURM_ERROR;
 	} else if (!list_count(format_list))
 		slurm_addto_char_list(format_list, "Cl,Acct,User,Problem");
 
 	print_fields_list = sacctmgr_process_format_list(format_list);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	if (exit_code) {
-		slurmdb_destroy_association_cond(assoc_cond);
-		list_destroy(print_fields_list);
+		slurmdb_destroy_assoc_cond(assoc_cond);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 
 	assoc_list = acct_storage_g_get_problems(db_conn, my_uid, assoc_cond);
-	slurmdb_destroy_association_cond(assoc_cond);
+	slurmdb_destroy_assoc_cond(assoc_cond);
 
 	if (!assoc_list) {
 		exit_code=1;
 		fprintf(stderr, " Error with request: %s\n",
 			slurm_strerror(errno));
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 
@@ -228,13 +228,12 @@ extern int sacctmgr_list_problem(int argc, char *argv[])
 		printf("\n");
 	}
 
-	if (tree_list)
-		list_destroy(tree_list);
+	FREE_NULL_LIST(tree_list);
 
 	list_iterator_destroy(itr2);
 	list_iterator_destroy(itr);
-	list_destroy(assoc_list);
-	list_destroy(print_fields_list);
+	FREE_NULL_LIST(assoc_list);
+	FREE_NULL_LIST(print_fields_list);
 	tree_display = 0;
 	return rc;
 }
diff --git a/src/sacctmgr/qos_functions.c b/src/sacctmgr/qos_functions.c
index 3e28e97ad..782600844 100644
--- a/src/sacctmgr/qos_functions.c
+++ b/src/sacctmgr/qos_functions.c
@@ -216,11 +216,6 @@ static int _set_cond(int *start, int argc, char *argv[],
 					" Bad Preempt Mode given: %s\n",
 					argv[i]);
 				exit_code = 1;
-			} else if (qos_cond->preempt_mode ==
-				   PREEMPT_MODE_SUSPEND) {
-				printf("PreemptType and PreemptMode "
-					"values incompatible\n");
-				exit_code = 1;
 			} else
 				set = 1;
 		} else {
@@ -244,6 +239,9 @@ static int _set_rec(int *start, int argc, char *argv[],
 	int end = 0;
 	int command_len = 0;
 	int option = 0;
+	uint64_t tmp64;
+	char *tmp_char = NULL;
+	uint32_t tres_flags = TRES_STR_FLAG_SORT_ID | TRES_STR_FLAG_REPLACE;
 
 	for (i=(*start); i<argc; i++) {
 		end = parse_option_end(argv[i]);
@@ -268,18 +266,16 @@ static int _set_rec(int *start, int argc, char *argv[],
 					   MAX(command_len, 1))) {
 			if (name_list)
 				slurm_addto_char_list(name_list, argv[i]+end);
-		} else if (!strncasecmp (argv[i], "Description",
+		} else if (!qos)
+			continue;
+		else if (!strncasecmp (argv[i], "Description",
 					 MAX(command_len, 1))) {
-			if (!qos)
-				continue;
 			if (!qos->description)
 				qos->description =
 					strip_quotes(argv[i]+end, NULL, 1);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Flags",
 					 MAX(command_len, 2))) {
-			if (!qos)
-				continue;
 			qos->flags = str_2_qos_flags(argv[i]+end, option);
 			if (qos->flags == QOS_FLAG_NOTSET) {
 				char *tmp_char = NULL;
@@ -297,66 +293,140 @@ static int _set_rec(int *start, int argc, char *argv[],
 				set = 1;
 		} else if (!strncasecmp (argv[i], "GraceTime",
 					 MAX(command_len, 3))) {
-			if (!qos)
-				continue;
 			if (get_uint(argv[i]+end, &qos->grace_time,
 			             "GraceTime") == SLURM_SUCCESS) {
 				set = 1;
 			}
 		} else if (!strncasecmp (argv[i], "GrpCPUMins",
 					 MAX(command_len, 7))) {
-			if (!qos)
-				continue;
 			if (get_uint64(argv[i]+end,
-				       &qos->grp_cpu_mins,
-				       "GrpCPUMins") == SLURM_SUCCESS)
+				       &tmp64,
+				       "GrpCPUMins") == SLURM_SUCCESS) {
 				set = 1;
+				tmp_char = xstrdup_printf(
+					"%d=%"PRIu64, TRES_CPU, tmp64);
+				slurmdb_combine_tres_strings(
+					&qos->grp_tres_mins, tmp_char,
+					tres_flags);
+				xfree(tmp_char);
+			}
 		} else if (!strncasecmp (argv[i], "GrpCPURunMins",
 					 MAX(command_len, 7))) {
-			if (!qos)
-				continue;
-			if (get_uint64(argv[i]+end, &qos->grp_cpu_run_mins,
-				       "GrpCPURunMins") == SLURM_SUCCESS)
+			if (get_uint64(argv[i]+end, &tmp64,
+				       "GrpCPURunMins") == SLURM_SUCCESS) {
 				set = 1;
+				tmp_char = xstrdup_printf(
+					"%d=%"PRIu64, TRES_CPU, tmp64);
+				slurmdb_combine_tres_strings(
+					&qos->grp_tres_run_mins, tmp_char,
+					tres_flags);
+				xfree(tmp_char);
+			}
 		} else if (!strncasecmp (argv[i], "GrpCPUs",
 					 MAX(command_len, 7))) {
-			if (!qos)
-				continue;
-			if (get_uint(argv[i]+end, &qos->grp_cpus,
-				     "GrpCPUs") == SLURM_SUCCESS)
+			if (get_uint64(argv[i]+end, &tmp64,
+				       "GrpCPUs") == SLURM_SUCCESS) {
 				set = 1;
+				tmp_char = xstrdup_printf(
+					"%d=%"PRIu64, TRES_CPU, tmp64);
+				slurmdb_combine_tres_strings(
+					&qos->grp_tres, tmp_char,
+					tres_flags);
+				xfree(tmp_char);
+			}
 		} else if (!strncasecmp (argv[i], "GrpJobs",
 					 MAX(command_len, 4))) {
-			if (!qos)
-				continue;
 			if (get_uint(argv[i]+end, &qos->grp_jobs,
 			    "GrpJobs") == SLURM_SUCCESS)
 				set = 1;
 		} else if (!strncasecmp (argv[i], "GrpMemory",
 					 MAX(command_len, 4))) {
-			if (!qos)
-				continue;
-			if (get_uint(argv[i]+end, &qos->grp_mem,
-				     "GrpMemory") == SLURM_SUCCESS)
+			if (get_uint64(argv[i]+end, &tmp64,
+				       "GrpMemory") == SLURM_SUCCESS) {
 				set = 1;
+				tmp_char = xstrdup_printf(
+					"%d=%"PRIu64, TRES_MEM, tmp64);
+				slurmdb_combine_tres_strings(
+					&qos->grp_tres, tmp_char,
+					tres_flags);
+				xfree(tmp_char);
+			}
 		} else if (!strncasecmp (argv[i], "GrpNodes",
 					 MAX(command_len, 4))) {
-			if (!qos)
-				continue;
-			if (get_uint(argv[i]+end, &qos->grp_nodes,
-			    "GrpNodes") == SLURM_SUCCESS)
+			if (get_uint64(argv[i]+end, &tmp64,
+				       "GrpNodes") == SLURM_SUCCESS) {
 				set = 1;
+				tmp_char = xstrdup_printf(
+					"%d=%"PRIu64, TRES_NODE, tmp64);
+				slurmdb_combine_tres_strings(
+					&qos->grp_tres, tmp_char,
+					tres_flags);
+				xfree(tmp_char);
+			}
 		} else if (!strncasecmp (argv[i], "GrpSubmitJobs",
 					 MAX(command_len, 4))) {
-			if (!qos)
-				continue;
 			if (get_uint(argv[i]+end, &qos->grp_submit_jobs,
 			    "GrpSubmitJobs") == SLURM_SUCCESS)
 				set = 1;
+		} else if (!strncasecmp(argv[i], "GrpTRES",
+					MAX(command_len, 7))) {
+			if (!g_tres_list) {
+				slurmdb_tres_cond_t tres_cond;
+				memset(&tres_cond, 0,
+				       sizeof(slurmdb_tres_cond_t));
+				tres_cond.with_deleted = 1;
+				g_tres_list = slurmdb_tres_get(
+					db_conn, &tres_cond);
+			}
+
+			if ((tmp_char = slurmdb_format_tres_str(
+				     argv[i]+end, g_tres_list, 1))) {
+				slurmdb_combine_tres_strings(
+					&qos->grp_tres, tmp_char,
+					tres_flags);
+				set = 1;
+				xfree(tmp_char);
+			}
+		} else if (!strncasecmp(argv[i], "GrpTRESMins",
+					MAX(command_len, 8))) {
+			if (!g_tres_list) {
+				slurmdb_tres_cond_t tres_cond;
+				memset(&tres_cond, 0,
+				       sizeof(slurmdb_tres_cond_t));
+				tres_cond.with_deleted = 1;
+				g_tres_list = slurmdb_tres_get(db_conn,
+							       &tres_cond);
+			}
+
+			if ((tmp_char = slurmdb_format_tres_str(
+				     argv[i]+end, g_tres_list, 1))) {
+				slurmdb_combine_tres_strings(
+					&qos->grp_tres_mins, tmp_char,
+					tres_flags);
+				set = 1;
+				xfree(tmp_char);
+			}
+		} else if (!strncasecmp(argv[i], "GrpTRESRunMins",
+					MAX(command_len, 8))) {
+			if (!g_tres_list) {
+				slurmdb_tres_cond_t tres_cond;
+				memset(&tres_cond, 0,
+				       sizeof(slurmdb_tres_cond_t));
+				tres_cond.with_deleted = 1;
+				g_tres_list = slurmdb_tres_get(
+					db_conn, &tres_cond);
+			}
+
+			if ((tmp_char = slurmdb_format_tres_str(
+				     argv[i]+end, g_tres_list, 1))) {
+				slurmdb_combine_tres_strings(
+					&qos->grp_tres_run_mins, tmp_char,
+					tres_flags);
+				set = 1;
+				xfree(tmp_char);
+			}
 		} else if (!strncasecmp (argv[i], "GrpWall",
 					 MAX(command_len, 4))) {
-			if (!qos)
-				continue;
 			mins = time_str2mins(argv[i]+end);
 			if (mins != NO_VAL) {
 				qos->grp_wall	= (uint32_t) mins;
@@ -369,60 +439,170 @@ static int _set_rec(int *start, int argc, char *argv[],
 			}
 		} else if (!strncasecmp (argv[i], "MaxCPUMinsPerJob",
 					 MAX(command_len, 7))) {
-			if (!qos)
-				continue;
 			if (get_uint64(argv[i]+end,
-				       &qos->max_cpu_mins_pj,
-				       "MaxCPUMins") == SLURM_SUCCESS)
+				       &tmp64,
+				       "MaxCPUMins") == SLURM_SUCCESS) {
 				set = 1;
+				tmp_char = xstrdup_printf(
+					"%d=%"PRIu64, TRES_CPU, tmp64);
+				slurmdb_combine_tres_strings(
+					&qos->max_tres_mins_pj, tmp_char,
+					tres_flags);
+				xfree(tmp_char);
+			}
 		} else if (!strncasecmp (argv[i], "MaxCPUsPerJob",
 					 MAX(command_len, 7))) {
-			if (!qos)
-				continue;
-			if (get_uint(argv[i]+end, &qos->max_cpus_pj,
-			    "MaxCPUs") == SLURM_SUCCESS)
+			if (get_uint64(argv[i]+end, &tmp64,
+				       "MaxCPUs") == SLURM_SUCCESS) {
 				set = 1;
+				tmp_char = xstrdup_printf(
+					"%d=%"PRIu64, TRES_CPU, tmp64);
+				slurmdb_combine_tres_strings(
+					&qos->max_tres_pj, tmp_char,
+					tres_flags);
+				xfree(tmp_char);
+			}
 		} else if (!strncasecmp (argv[i], "MaxCPUsPerUser",
 					 MAX(command_len, 11))) {
-			if (!qos)
-				continue;
-			if (get_uint(argv[i]+end, &qos->max_cpus_pu,
-			    "MaxCPUsPerUser") == SLURM_SUCCESS)
+			if (get_uint64(argv[i]+end, &tmp64,
+				       "MaxCPUsPerUser") == SLURM_SUCCESS) {
 				set = 1;
+				tmp_char = xstrdup_printf(
+					"%d=%"PRIu64, TRES_CPU, tmp64);
+				slurmdb_combine_tres_strings(
+					&qos->max_tres_pu, tmp_char,
+					tres_flags);
+				xfree(tmp_char);
+			}
 		} else if (!strncasecmp (argv[i], "MaxJobsPerUser",
 					 MAX(command_len, 4))) {
-			if (!qos)
-				continue;
 			if (get_uint(argv[i]+end, &qos->max_jobs_pu,
 			    "MaxJobs") == SLURM_SUCCESS)
 				set = 1;
 		} else if (!strncasecmp (argv[i], "MaxNodesPerJob",
 					 MAX(command_len, 4))) {
-			if (!qos)
-				continue;
-			if (get_uint(argv[i]+end,
-			    &qos->max_nodes_pj,
-			    "MaxNodes") == SLURM_SUCCESS)
+			if (get_uint64(argv[i]+end, &tmp64,
+				       "MaxNodesPerJob") == SLURM_SUCCESS) {
 				set = 1;
+				tmp_char = xstrdup_printf(
+					"%d=%"PRIu64, TRES_NODE, tmp64);
+				slurmdb_combine_tres_strings(
+					&qos->max_tres_pj, tmp_char,
+					tres_flags);
+				xfree(tmp_char);
+			}
 		} else if (!strncasecmp (argv[i], "MaxNodesPerUser",
 					 MAX(command_len, 8))) {
-			if (!qos)
-				continue;
-			if (get_uint(argv[i]+end,
-			    &qos->max_nodes_pu,
-			    "MaxNodesPerUser") == SLURM_SUCCESS)
+			if (get_uint64(argv[i]+end, &tmp64,
+				       "MaxNodesPerUser") == SLURM_SUCCESS) {
 				set = 1;
+				tmp_char = xstrdup_printf(
+					"%d=%"PRIu64, TRES_NODE, tmp64);
+				slurmdb_combine_tres_strings(
+					&qos->max_tres_pu, tmp_char,
+					tres_flags);
+				xfree(tmp_char);
+			}
 		} else if (!strncasecmp (argv[i], "MaxSubmitJobsPerUser",
 					 MAX(command_len, 4))) {
-			if (!qos)
-				continue;
 			if (get_uint(argv[i]+end, &qos->max_submit_jobs_pu,
 			    "MaxSubmitJobs") == SLURM_SUCCESS)
 				set = 1;
+		} else if (!strncasecmp(argv[i], "MaxTRESPerJob",
+					MAX(command_len, 7))) {
+			if (!g_tres_list) {
+				slurmdb_tres_cond_t tres_cond;
+				memset(&tres_cond, 0,
+				       sizeof(slurmdb_tres_cond_t));
+				tres_cond.with_deleted = 1;
+				g_tres_list = slurmdb_tres_get(
+					db_conn, &tres_cond);
+			}
+			if ((tmp_char = slurmdb_format_tres_str(
+				     argv[i]+end, g_tres_list, 1))) {
+				slurmdb_combine_tres_strings(
+					&qos->max_tres_pj, tmp_char,
+					tres_flags);
+				set = 1;
+				xfree(tmp_char);
+			}
+		} else if (!strncasecmp(argv[i], "MaxTRESPerNode",
+					MAX(command_len, 11))) {
+			if (!g_tres_list) {
+				slurmdb_tres_cond_t tres_cond;
+				memset(&tres_cond, 0,
+				       sizeof(slurmdb_tres_cond_t));
+				tres_cond.with_deleted = 1;
+				g_tres_list = slurmdb_tres_get(
+					db_conn, &tres_cond);
+			}
+			if ((tmp_char = slurmdb_format_tres_str(
+				     argv[i]+end, g_tres_list, 1))) {
+				slurmdb_combine_tres_strings(
+					&qos->max_tres_pn, tmp_char,
+					tres_flags);
+				set = 1;
+				xfree(tmp_char);
+			}
+		} else if (!strncasecmp(argv[i], "MaxTRESPerUser",
+					MAX(command_len, 11))) {
+			if (!g_tres_list) {
+				slurmdb_tres_cond_t tres_cond;
+				memset(&tres_cond, 0,
+				       sizeof(slurmdb_tres_cond_t));
+				tres_cond.with_deleted = 1;
+				g_tres_list = slurmdb_tres_get(
+					db_conn, &tres_cond);
+			}
+
+			if ((tmp_char = slurmdb_format_tres_str(
+				     argv[i]+end, g_tres_list, 1))) {
+				slurmdb_combine_tres_strings(
+					&qos->max_tres_pu, tmp_char,
+					tres_flags);
+				set = 1;
+				xfree(tmp_char);
+			}
+		} else if (!strncasecmp(argv[i], "MaxTRESMinsPerJob",
+					MAX(command_len, 8))) {
+			if (!g_tres_list) {
+				slurmdb_tres_cond_t tres_cond;
+				memset(&tres_cond, 0,
+				       sizeof(slurmdb_tres_cond_t));
+				tres_cond.with_deleted = 1;
+				g_tres_list = slurmdb_tres_get(
+					db_conn, &tres_cond);
+			}
+
+			if ((tmp_char = slurmdb_format_tres_str(
+				     argv[i]+end, g_tres_list, 1))) {
+				slurmdb_combine_tres_strings(
+					&qos->max_tres_mins_pj, tmp_char,
+					tres_flags);
+				set = 1;
+				xfree(tmp_char);
+			}
+		} else if (!strncasecmp(argv[i], "MaxTRESRunMins",
+					MAX(command_len, 8))) {
+			if (!g_tres_list) {
+				slurmdb_tres_cond_t tres_cond;
+				memset(&tres_cond, 0,
+				       sizeof(slurmdb_tres_cond_t));
+				tres_cond.with_deleted = 1;
+				g_tres_list = slurmdb_tres_get(
+					db_conn, &tres_cond);
+			}
+
+			if ((tmp_char = slurmdb_format_tres_str(
+				     argv[i]+end, g_tres_list, 1))) {
+				slurmdb_combine_tres_strings(
+					&qos->max_tres_run_mins_pu, tmp_char,
+					tres_flags);
+				set = 1;
+				xfree(tmp_char);
+			}
 		} else if (!strncasecmp (argv[i], "MaxWallDurationPerJob",
 					 MAX(command_len, 4))) {
-			if (!qos)
-				continue;
 			mins = time_str2mins(argv[i]+end);
 			if (mins != NO_VAL) {
 				qos->max_wall_pj = (uint32_t) mins;
@@ -435,33 +615,47 @@ static int _set_rec(int *start, int argc, char *argv[],
 			}
 		} else if (!strncasecmp (argv[i], "MinCPUsPerJob",
 					 MAX(command_len, 7))) {
-			if (!qos)
-				continue;
-			if (get_uint(argv[i]+end, &qos->min_cpus_pj,
-			    "MinCPUs") == SLURM_SUCCESS)
+			if (get_uint64(argv[i]+end, &tmp64,
+				       "MinCPUs") == SLURM_SUCCESS) {
 				set = 1;
+				tmp_char = xstrdup_printf(
+					"%d=%"PRIu64, TRES_CPU, tmp64);
+				slurmdb_combine_tres_strings(
+					&qos->min_tres_pj, tmp_char,
+					tres_flags);
+				xfree(tmp_char);
+			}
+		} else if (!strncasecmp(argv[i], "MinTRESPerJob",
+					MAX(command_len, 7))) {
+			if (!g_tres_list) {
+				slurmdb_tres_cond_t tres_cond;
+				memset(&tres_cond, 0,
+				       sizeof(slurmdb_tres_cond_t));
+				tres_cond.with_deleted = 1;
+				g_tres_list = slurmdb_tres_get(
+					db_conn, &tres_cond);
+			}
+			if ((tmp_char = slurmdb_format_tres_str(
+				     argv[i]+end, g_tres_list, 1))) {
+				slurmdb_combine_tres_strings(
+					&qos->min_tres_pj, tmp_char,
+					tres_flags);
+				set = 1;
+				xfree(tmp_char);
+			}
 		} else if (!strncasecmp (argv[i], "PreemptMode",
 					 MAX(command_len, 8))) {
-			if (!qos)
-				continue;
 			qos->preempt_mode = preempt_mode_num(argv[i]+end);
 			if (qos->preempt_mode == (uint16_t)NO_VAL) {
 				fprintf(stderr,
 					" Bad Preempt Mode given: %s\n",
 					argv[i]);
 				exit_code = 1;
-			} else if (qos->preempt_mode == PREEMPT_MODE_SUSPEND) {
-				printf("PreemptType and PreemptMode "
-					"values incompatible\n");
-				exit_code = 1;
 			} else
 				set = 1;
 		/* Preempt needs to follow PreemptMode */
 		} else if (!strncasecmp (argv[i], "Preempt",
 					 MAX(command_len, 7))) {
-			if (!qos)
-				continue;
-
 			if (!qos->preempt_list)
 				qos->preempt_list =
 					list_create(slurm_destroy_char);
@@ -478,18 +672,13 @@ static int _set_rec(int *start, int argc, char *argv[],
 				exit_code = 1;
 		} else if (!strncasecmp (argv[i], "Priority",
 					 MAX(command_len, 3))) {
-			if (!qos)
-				continue;
-
 			if (get_uint(argv[i]+end, &qos->priority,
 			    "Priority") == SLURM_SUCCESS)
 				set = 1;
 		} else if (!strncasecmp (argv[i], "RawUsage",
 					 MAX(command_len, 7))) {
 			uint32_t usage;
-			if (!qos)
-				continue;
-			qos->usage = xmalloc(sizeof(assoc_mgr_qos_usage_t));
+			qos->usage = xmalloc(sizeof(slurmdb_qos_usage_t));
 			if (get_uint(argv[i]+end, &usage,
 				     "RawUsage") == SLURM_SUCCESS) {
 				qos->usage->usage_raw = usage;
@@ -497,16 +686,11 @@ static int _set_rec(int *start, int argc, char *argv[],
 			}
 		} else if (!strncasecmp (argv[i], "UsageFactor",
 					 MAX(command_len, 6))) {
-			if (!qos)
-				continue;
-
 			if (get_double(argv[i]+end, &qos->usage_factor,
 			    "UsageFactor") == SLURM_SUCCESS)
 				set = 1;
 		} else if (!strncasecmp (argv[i], "UsageThreshold",
 					 MAX(command_len, 6))) {
-			if (!qos)
-				continue;
 			if (get_double(argv[i]+end, &qos->usage_thres,
 			    "UsageThreshold") == SLURM_SUCCESS)
 				set = 1;
@@ -526,8 +710,8 @@ static int _set_rec(int *start, int argc, char *argv[],
 static bool _isdefault(List qos_list)
 {
 	int rc = 0;
-	slurmdb_association_cond_t assoc_cond;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_cond_t assoc_cond;
+	slurmdb_assoc_rec_t *assoc = NULL;
 	ListIterator itr;
 	List ret_list = NULL;
 	char *name = NULL;
@@ -539,7 +723,7 @@ static bool _isdefault(List qos_list)
 	   can figure out things correctly */
 	xassert(g_qos_list);
 
-	memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+	memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 	assoc_cond.without_parent_info = 1;
 	assoc_cond.def_qos_id_list = list_create(slurm_destroy_char);
 
@@ -553,9 +737,9 @@ static bool _isdefault(List qos_list)
 	}
 	list_iterator_destroy(itr);
 
-	ret_list = acct_storage_g_get_associations(
+	ret_list = acct_storage_g_get_assocs(
 		db_conn, my_uid, &assoc_cond);
-	list_destroy(assoc_cond.def_qos_id_list);
+	FREE_NULL_LIST(assoc_cond.def_qos_id_list);
 
 	if (!ret_list || !list_count(ret_list))
 		goto end_it;
@@ -587,8 +771,7 @@ static bool _isdefault(List qos_list)
 	list_iterator_destroy(itr);
 	rc = 1;
 end_it:
-	if (ret_list)
-		list_destroy(ret_list);
+	FREE_NULL_LIST(ret_list);
 
 	return rc;
 }
@@ -607,7 +790,7 @@ extern int sacctmgr_add_qos(int argc, char *argv[])
 	List qos_list = NULL;
 	char *qos_str = NULL;
 
-	slurmdb_init_qos_rec(start_qos, 0);
+	slurmdb_init_qos_rec(start_qos, 0, NO_VAL);
 
 	for (i=0; i<argc; i++) {
 		int command_len = strlen(argv[i]);
@@ -619,11 +802,11 @@ extern int sacctmgr_add_qos(int argc, char *argv[])
 	}
 
 	if (exit_code) {
-		list_destroy(name_list);
+		FREE_NULL_LIST(name_list);
 		xfree(description);
 		return SLURM_ERROR;
 	} else if (!list_count(name_list)) {
-		list_destroy(name_list);
+		FREE_NULL_LIST(name_list);
 		slurmdb_destroy_qos_rec(start_qos);
 		exit_code=1;
 		fprintf(stderr, " Need name of qos to add.\n");
@@ -638,7 +821,7 @@ extern int sacctmgr_add_qos(int argc, char *argv[])
 			fprintf(stderr, " Problem getting qos's "
 				"from database.  "
 				"Contact your admin.\n");
-			list_destroy(name_list);
+			FREE_NULL_LIST(name_list);
 			xfree(description);
 			return SLURM_ERROR;
 		}
@@ -651,7 +834,7 @@ extern int sacctmgr_add_qos(int argc, char *argv[])
 		qos = NULL;
 		if (!sacctmgr_find_qos_from_list(g_qos_list, name)) {
 			qos = xmalloc(sizeof(slurmdb_qos_rec_t));
-			slurmdb_init_qos_rec(qos, 0);
+			slurmdb_init_qos_rec(qos, 0, NO_VAL);
 			qos->name = xstrdup(name);
 			if (start_qos->description)
 				qos->description =
@@ -659,50 +842,16 @@ extern int sacctmgr_add_qos(int argc, char *argv[])
 			else
 				qos->description = xstrdup(name);
 
-			qos->flags = start_qos->flags;
-			qos->grace_time = start_qos->grace_time;
-			qos->grp_cpu_mins = start_qos->grp_cpu_mins;
-			qos->grp_cpu_run_mins = start_qos->grp_cpu_run_mins;
-			qos->grp_cpus = start_qos->grp_cpus;
-			qos->grp_jobs = start_qos->grp_jobs;
-			qos->grp_mem = start_qos->grp_mem;
-			qos->grp_nodes = start_qos->grp_nodes;
-			qos->grp_submit_jobs = start_qos->grp_submit_jobs;
-			qos->grp_wall = start_qos->grp_wall;
-
-			qos->max_cpu_mins_pj = start_qos->max_cpu_mins_pj;
-			qos->max_cpu_run_mins_pu =
-				start_qos->max_cpu_run_mins_pu;
-			qos->max_cpus_pj = start_qos->max_cpus_pj;
-			qos->max_cpus_pu = start_qos->max_cpus_pu;
-			qos->max_jobs_pu = start_qos->max_jobs_pu;
-			qos->max_nodes_pj = start_qos->max_nodes_pj;
-			qos->max_nodes_pu = start_qos->max_nodes_pu;
-			qos->max_submit_jobs_pu = start_qos->max_submit_jobs_pu;
-			qos->max_wall_pj = start_qos->max_wall_pj;
-
-			qos->min_cpus_pj = start_qos->min_cpus_pj;
-
-			qos->preempt_list =
-				copy_char_list(start_qos->preempt_list);
-			qos->preempt_mode = start_qos->preempt_mode;
-
-			qos->priority = start_qos->priority;
-
-			qos->usage_factor = start_qos->usage_factor;
-			qos->usage_thres = start_qos->usage_thres;
+			slurmdb_copy_qos_rec_limits(qos, start_qos);
 
 			xstrfmtcat(qos_str, "  %s\n", name);
 			list_append(qos_list, qos);
 		}
 	}
 	list_iterator_destroy(itr);
-	list_destroy(name_list);
+	FREE_NULL_LIST(name_list);
 
-	if (g_qos_list) {
-		list_destroy(g_qos_list);
-		g_qos_list = NULL;
-	}
+	FREE_NULL_LIST(g_qos_list);
 
 	if (!list_count(qos_list)) {
 		printf(" Nothing new added.\n");
@@ -746,7 +895,7 @@ extern int sacctmgr_add_qos(int argc, char *argv[])
 	}
 
 end_it:
-	list_destroy(qos_list);
+	FREE_NULL_LIST(qos_list);
 	xfree(description);
 
 	return rc;
@@ -778,25 +927,25 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 
 	if (exit_code) {
 		slurmdb_destroy_qos_cond(qos_cond);
-		list_destroy(format_list);
+		FREE_NULL_LIST(format_list);
 		return SLURM_ERROR;
 	} else if (!list_count(format_list)) {
 		slurm_addto_char_list(format_list,
 				      "Name,Prio,GraceT,Preempt,PreemptM,"
 				      "Flags%40,UsageThres,UsageFactor,"
-				      "GrpCPUs,GrpCPUMins,GrpCPURunMins,"
-				      "GrpJ,GrpMEM,GrpN,GrpS,GrpW,"
-				      "MaxCPUs,MaxCPUMins,MaxN,MaxW,"
-				      "MaxCPUsPerUser,"
-				      "MaxJobsPerUser,MaxNodesPerUser,"
-				      "MaxSubmitJobsPerUser,MinCPUs");
+				      "GrpTRES,GrpTRESMins,GrpTRESRunMins,"
+				      "GrpJ,GrpS,GrpW,"
+				      "MaxTRES,MaxTRESPerN,MaxTRESMins,MaxW,"
+				      "MaxTRESPerUser,"
+				      "MaxJobsPerUser,"
+				      "MaxSubmitJobsPerUser,MinTRES");
 	}
 
 	print_fields_list = sacctmgr_process_format_list(format_list);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	if (exit_code) {
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 	qos_list = acct_storage_g_get_qos(db_conn, my_uid, qos_cond);
@@ -805,7 +954,7 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 	if (!qos_list) {
 		exit_code=1;
 		fprintf(stderr, " Problem with query.\n");
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 	itr = list_iterator_create(qos_list);
@@ -847,19 +996,39 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 			case PRINT_GRPCM:
 				field->print_routine(
 					field,
-					qos->grp_cpu_mins,
+					slurmdb_find_tres_count_in_string(
+						qos->grp_tres_mins, TRES_CPU),
 					(curr_inx == field_count));
 				break;
 			case PRINT_GRPCRM:
 				field->print_routine(
 					field,
-					qos->grp_cpu_run_mins,
+					slurmdb_find_tres_count_in_string(
+						qos->grp_tres_run_mins,
+						TRES_CPU),
 					(curr_inx == field_count));
 				break;
 			case PRINT_GRPC:
-				field->print_routine(field,
-						     qos->grp_cpus,
-						     (curr_inx == field_count));
+				field->print_routine(
+					field,
+					slurmdb_find_tres_count_in_string(
+						qos->grp_tres, TRES_CPU),
+					(curr_inx == field_count));
+				break;
+			case PRINT_GRPTM:
+				field->print_routine(
+					field, qos->grp_tres_mins,
+					(curr_inx == field_count));
+				break;
+			case PRINT_GRPTRM:
+				field->print_routine(
+					field, qos->grp_tres_run_mins,
+					(curr_inx == field_count));
+				break;
+			case PRINT_GRPT:
+				field->print_routine(
+					field, qos->grp_tres,
+					(curr_inx == field_count));
 				break;
 			case PRINT_GRPJ:
 				field->print_routine(field,
@@ -867,14 +1036,18 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 						     (curr_inx == field_count));
 				break;
 			case PRINT_GRPMEM:
-				field->print_routine(field,
-						     qos->grp_mem,
-						     (curr_inx == field_count));
+				field->print_routine(
+					field,
+					slurmdb_find_tres_count_in_string(
+						qos->grp_tres, TRES_MEM),
+					(curr_inx == field_count));
 				break;
 			case PRINT_GRPN:
-				field->print_routine(field,
-						     qos->grp_nodes,
-						     (curr_inx == field_count));
+				field->print_routine(
+					field,
+					slurmdb_find_tres_count_in_string(
+						qos->grp_tres, TRES_NODE),
+					(curr_inx == field_count));
 				break;
 			case PRINT_GRPS:
 				field->print_routine(field,
@@ -895,24 +1068,57 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 			case PRINT_MAXCM:
 				field->print_routine(
 					field,
-					qos->max_cpu_mins_pj,
+					slurmdb_find_tres_count_in_string(
+						qos->max_tres_mins_pj,
+						TRES_CPU),
 					(curr_inx == field_count));
 				break;
 			case PRINT_MAXCRM:
 				field->print_routine(
 					field,
-					qos->max_cpu_run_mins_pu,
+					slurmdb_find_tres_count_in_string(
+						qos->max_tres_run_mins_pu,
+						TRES_CPU),
 					(curr_inx == field_count));
 				break;
 			case PRINT_MAXC:
-				field->print_routine(field,
-						     qos->max_cpus_pj,
-						     (curr_inx == field_count));
+				field->print_routine(
+					field,
+					slurmdb_find_tres_count_in_string(
+						qos->max_tres_pj, TRES_CPU),
+					(curr_inx == field_count));
 				break;
 			case PRINT_MAXCU:
-				field->print_routine(field,
-						     qos->max_cpus_pu,
-						     (curr_inx == field_count));
+				field->print_routine(
+					field,
+					slurmdb_find_tres_count_in_string(
+						qos->max_tres_pu, TRES_CPU),
+					(curr_inx == field_count));
+				break;
+			case PRINT_MAXTM:
+				field->print_routine(
+					field, qos->max_tres_mins_pj,
+					(curr_inx == field_count));
+				break;
+			case PRINT_MAXTRM:
+				field->print_routine(
+					field, qos->max_tres_run_mins_pu,
+					(curr_inx == field_count));
+				break;
+			case PRINT_MAXT:
+				field->print_routine(
+					field, qos->max_tres_pj,
+					(curr_inx == field_count));
+				break;
+			case PRINT_MAXTN:
+				field->print_routine(
+					field, qos->max_tres_pn,
+					(curr_inx == field_count));
+				break;
+			case PRINT_MAXTU:
+				field->print_routine(
+					field, qos->max_tres_pu,
+					(curr_inx == field_count));
 				break;
 			case PRINT_MAXJ:
 				field->print_routine(field,
@@ -920,14 +1126,18 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 						     (curr_inx == field_count));
 				break;
 			case PRINT_MAXN:
-				field->print_routine(field,
-						     qos->max_nodes_pj,
-						     (curr_inx == field_count));
+				field->print_routine(
+					field,
+					slurmdb_find_tres_count_in_string(
+						qos->max_tres_pj, TRES_NODE),
+					(curr_inx == field_count));
 				break;
 			case PRINT_MAXNU:
-				field->print_routine(field,
-						     qos->max_nodes_pu,
-						     (curr_inx == field_count));
+				field->print_routine(
+					field,
+					slurmdb_find_tres_count_in_string(
+						qos->max_tres_pu, TRES_NODE),
+					(curr_inx == field_count));
 				break;
 			case PRINT_MAXS:
 				field->print_routine(field,
@@ -941,9 +1151,16 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 					(curr_inx == field_count));
 				break;
 			case PRINT_MINC:
-				field->print_routine(field,
-						     qos->min_cpus_pj,
-						     (curr_inx == field_count));
+				field->print_routine(
+					field,
+					slurmdb_find_tres_count_in_string(
+						qos->min_tres_pj, TRES_CPU),
+					(curr_inx == field_count));
+				break;
+			case PRINT_MINT:
+				field->print_routine(
+					field, qos->min_tres_pj,
+					(curr_inx == field_count));
 				break;
 			case PRINT_NAME:
 				field->print_routine(
@@ -995,8 +1212,8 @@ extern int sacctmgr_list_qos(int argc, char *argv[])
 	}
 	list_iterator_destroy(itr2);
 	list_iterator_destroy(itr);
-	list_destroy(qos_list);
-	list_destroy(print_fields_list);
+	FREE_NULL_LIST(qos_list);
+	FREE_NULL_LIST(print_fields_list);
 
 	return rc;
 }
@@ -1010,15 +1227,15 @@ extern int sacctmgr_modify_qos(int argc, char *argv[])
 	int cond_set = 0, rec_set = 0, set = 0;
 	List ret_list = NULL;
 
-	slurmdb_init_qos_rec(qos, 0);
+	slurmdb_init_qos_rec(qos, 0, NO_VAL);
 
 	for (i=0; i<argc; i++) {
 		int command_len = strlen(argv[i]);
-		if (!strncasecmp (argv[i], "Where", MAX(command_len, 5))) {
+		if (!strncasecmp(argv[i], "Where", MAX(command_len, 5))) {
 			i++;
 			cond_set += _set_cond(&i, argc, argv, qos_cond, NULL);
 
-		} else if (!strncasecmp (argv[i], "Set", MAX(command_len, 3))) {
+		} else if (!strncasecmp(argv[i], "Set", MAX(command_len, 3))) {
 			i++;
 			rec_set += _set_rec(&i, argc, argv, NULL, qos);
 		} else {
@@ -1081,8 +1298,7 @@ extern int sacctmgr_modify_qos(int argc, char *argv[])
 		rc = SLURM_ERROR;
 	}
 
-	if (ret_list)
-		list_destroy(ret_list);
+	FREE_NULL_LIST(ret_list);
 
 	notice_thread_fini();
 
@@ -1182,8 +1398,7 @@ extern int sacctmgr_delete_qos(int argc, char *argv[])
 	}
 
 end_it:
-	if (ret_list)
-		list_destroy(ret_list);
+	FREE_NULL_LIST(ret_list);
 
 	return rc;
 }
diff --git a/src/sacctmgr/reservation_functions.c b/src/sacctmgr/reservation_functions.c
new file mode 100644
index 000000000..f7a2f699f
--- /dev/null
+++ b/src/sacctmgr/reservation_functions.c
@@ -0,0 +1,310 @@
+/*****************************************************************************\
+ *  reservation_functions.c - functions dealing with RESERVATION in the
+ *                        accounting system.
+ *****************************************************************************
+ *  Copyright (C) 2015 SchedMD LLC.
+ *  Written by David Bigagli <david@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "src/common/slurm_time.h"
+#include "src/sacctmgr/sacctmgr.h"
+#include "src/common/assoc_mgr.h"
+
+static int _set_cond(int *start, int argc, char *argv[],
+		     slurmdb_reservation_cond_t *reservation_cond,
+		     List format_list)
+{
+	int i;
+	int set = 0;
+	int end = 0;
+	int command_len = 0;
+
+	if (!reservation_cond) {
+		exit_code=1;
+		fprintf(stderr, "No reservation_cond given");
+		return -1;
+	}
+
+	for (i=(*start); i<argc; i++) {
+		end = parse_option_end(argv[i]);
+		if (!end)
+			command_len=strlen(argv[i]);
+		else {
+			command_len=end-1;
+			if (argv[i][end] == '=') {
+				end++;
+			}
+		}
+
+		if (!strncasecmp(argv[i], "Set", MAX(command_len, 3))) {
+			i--;
+			break;
+		} else if (!end && !strncasecmp(argv[i], "where",
+					       MAX(command_len, 5))) {
+			continue;
+		} else if (!strncasecmp(argv[i], "Clusters",
+					 MAX(command_len, 1))) {
+			if (!reservation_cond->cluster_list) {
+				reservation_cond->cluster_list =
+					list_create(slurm_destroy_char);
+			}
+			if (slurm_addto_char_list(reservation_cond->name_list,
+						  argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp(argv[i], "Names",
+					 MAX(command_len, 2))) {
+			if (!reservation_cond->name_list) {
+				reservation_cond->name_list =
+					list_create(slurm_destroy_char);
+			}
+			if (slurm_addto_char_list(reservation_cond->name_list,
+						  argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp(argv[i], "Format",
+					 MAX(command_len, 1))) {
+			if (format_list)
+				slurm_addto_char_list(format_list, argv[i]+end);
+		} else if (!strncasecmp(argv[i], "Ids",
+					 MAX(command_len, 1))) {
+			if (!reservation_cond->id_list) {
+				reservation_cond->id_list =
+					list_create(slurm_destroy_char);
+			}
+			if (slurm_addto_char_list(reservation_cond->id_list,
+						 argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp(argv[i], "Nodes",
+					 MAX(command_len, 2))) {
+			xfree(reservation_cond->nodes);
+			reservation_cond->nodes = strip_quotes(
+				argv[i]+end, NULL, 1);
+			set = 1;
+		} else if (!strncasecmp(argv[i], "Start",
+					 MAX(command_len, 5))) {
+			reservation_cond->time_start =
+				parse_time(argv[i]+end, 1);
+			if (errno == ESLURM_INVALID_TIME_VALUE)
+				exit_code = 1;
+			else
+				set = 1;
+		} else if (!strncasecmp(argv[i], "End",
+					 MAX(command_len, 5))) {
+			reservation_cond->time_end =
+				parse_time(argv[i]+end, 1);
+			if (errno == ESLURM_INVALID_TIME_VALUE)
+				exit_code = 1;
+			else
+				set = 1;
+		} else {
+			exit_code=1;
+			fprintf(stderr, " Unknown condition: %s\n"
+				" Use keyword 'set' to modify value\n",
+				argv[i]);
+		}
+	}
+
+	(*start) = i;
+
+	if (set)
+		return 1;
+
+	return 0;
+}
+
+/* sacctmgr_list_reservation()
+ */
+int sacctmgr_list_reservation(int argc, char **argv)
+{
+        List reservation_list;
+        ListIterator itr;
+	ListIterator itr2;
+	List format_list = list_create(slurm_destroy_char);
+	List print_fields_list;
+        slurmdb_reservation_cond_t *reservation_cond =
+		xmalloc(sizeof(slurmdb_reservation_cond_t));
+        slurmdb_reservation_rec_t *reservation;
+	int field_count, i;
+	print_field_t *field;
+	char *tmp_char;
+
+ 	/* If we don't have any arguments make sure we set up the
+	   time correctly for just the past day.
+	*/
+	if (argc == 0) {
+                struct tm start_tm;
+		reservation_cond->time_start = time(NULL);
+
+                if (!slurm_localtime_r(&reservation_cond->time_start,
+				       &start_tm)) {
+                        fprintf(stderr,
+                                " Couldn't get localtime from %ld",
+                                (long)reservation_cond->time_start);
+                        exit_code = 1;
+                        return 0;
+                }
+                start_tm.tm_sec = 0;
+                start_tm.tm_min = 0;
+                start_tm.tm_hour = 0;
+                start_tm.tm_mday--;
+                start_tm.tm_isdst = -1;
+                reservation_cond->time_start = slurm_mktime(&start_tm);
+        }
+   	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp(argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp(argv[i], "Set", MAX(command_len, 3)))
+			i++;
+		_set_cond(&i, argc, argv, reservation_cond, format_list);
+	}
+
+	if (exit_code) {
+		slurmdb_destroy_reservation_cond(reservation_cond);
+		FREE_NULL_LIST(format_list);
+		return SLURM_ERROR;
+	}
+
+	if (!list_count(format_list)) {
+		/* Append to the format list the fields
+		 * we want to print, these are the data structure
+		 * members of the type returned by slurmdbd
+		 */
+		slurm_addto_char_list(format_list,
+				      "Cluster,Name%15,TRES%30,"
+				      "TimeStart,TimeEnd");
+	}
+
+	reservation_list = acct_storage_g_get_reservations(
+		db_conn, my_uid, reservation_cond);
+	slurmdb_destroy_reservation_cond(reservation_cond);
+
+	if (!reservation_list) {
+		exit_code=1;
+		fprintf(stderr, " Problem with query.\n");
+		FREE_NULL_LIST(format_list);
+		return SLURM_ERROR;
+	}
+
+
+	/* Process the format list creating a list of
+	 * print field_t structures
+	 */
+	print_fields_list = sacctmgr_process_format_list(format_list);
+	FREE_NULL_LIST(format_list);
+
+        itr = list_iterator_create(reservation_list);
+	itr2 = list_iterator_create(print_fields_list);
+	print_fields_header(print_fields_list);
+	field_count = list_count(print_fields_list);
+
+	/* For each reservation prints the data structure members
+	 */
+        while ((reservation = list_next(itr))) {
+		while ((field = list_next(itr2))) {
+			switch (field->type) {
+			case PRINT_ASSOC_NAME:
+				field->print_routine(
+					field,
+					reservation->assocs,
+					field_count);
+				break;
+			case PRINT_CLUSTER:
+				field->print_routine(
+					field,
+					reservation->cluster,
+					field_count);
+				break;
+			case PRINT_FLAGS:
+				tmp_char = reservation_flags_string(
+					reservation->flags);
+				field->print_routine(
+					field,
+					tmp_char,
+					field_count);
+				xfree(tmp_char);
+				break;
+			case PRINT_ID:
+				field->print_routine(field,
+						     reservation->id,
+						     field_count);
+				break;
+			case PRINT_NAME:
+				field->print_routine(field,
+						     reservation->name,
+						     field_count);
+				break;
+			case PRINT_NODENAME:
+				field->print_routine(
+					field,
+					reservation->nodes,
+					field_count);
+				break;
+			case PRINT_TIMEEND:
+				field->print_routine(
+					field,
+					reservation->time_end,
+					field_count);
+				break;
+			case PRINT_TIMESTART:
+				field->print_routine(
+					field,
+					reservation->time_start,
+					field_count);
+				break;
+			case PRINT_TRES:
+				if (!g_tres_list) {
+					slurmdb_tres_cond_t tres_cond;
+					memset(&tres_cond, 0,
+					       sizeof(slurmdb_tres_cond_t));
+					tres_cond.with_deleted = 1;
+					g_tres_list = slurmdb_tres_get(
+						db_conn, &tres_cond);
+				}
+
+				tmp_char = slurmdb_make_tres_string_from_simple(
+					reservation->tres_str, g_tres_list);
+				field->print_routine(field,
+						     tmp_char,
+						     field_count);
+				xfree(tmp_char);
+				break;
+			}
+		}
+		list_iterator_reset(itr2);
+		printf("\n");
+        }
+	list_iterator_destroy(itr);
+	list_iterator_destroy(itr2);
+	FREE_NULL_LIST(reservation_list);
+	FREE_NULL_LIST(print_fields_list);
+
+        return 0;
+}
diff --git a/src/sacctmgr/sacctmgr.c b/src/sacctmgr/sacctmgr.c
index e1a8c1aea..c3bb2d4b8 100644
--- a/src/sacctmgr/sacctmgr.c
+++ b/src/sacctmgr/sacctmgr.c
@@ -59,6 +59,7 @@ void *db_conn = NULL;
 uint32_t my_uid = 0;
 List g_qos_list = NULL;
 List g_res_list = NULL;
+List g_tres_list = NULL;
 bool tree_display = 0;
 
 static void	_add_it (int argc, char *argv[]);
@@ -242,10 +243,10 @@ main (int argc, char *argv[])
 		exit_code = local_exit_code;
 	acct_storage_g_close_connection(&db_conn);
 	slurm_acct_storage_fini();
-	if (g_qos_list)
-		list_destroy(g_qos_list);
-	if (g_res_list)
-		list_destroy(g_res_list);
+	FREE_NULL_LIST(g_qos_list);
+	FREE_NULL_LIST(g_res_list);
+	FREE_NULL_LIST(g_tres_list);
+
 	exit(exit_code);
 }
 
@@ -637,7 +638,7 @@ static void _show_it (int argc, char *argv[])
 		error_code = sacctmgr_list_account((argc - 1), &argv[1]);
 	} else if (strncasecmp(argv[0], "Associations",
 				MAX(command_len, 2)) == 0) {
-		error_code = sacctmgr_list_association((argc - 1), &argv[1]);
+		error_code = sacctmgr_list_assoc((argc - 1), &argv[1]);
 	} else if (strncasecmp(argv[0], "Clusters",
 				MAX(command_len, 2)) == 0) {
 		error_code = sacctmgr_list_cluster((argc - 1), &argv[1]);
@@ -652,8 +653,11 @@ static void _show_it (int argc, char *argv[])
 		error_code = sacctmgr_list_problem((argc - 1), &argv[1]);
 	} else if (strncasecmp(argv[0], "QOS", MAX(command_len, 1)) == 0) {
 		error_code = sacctmgr_list_qos((argc - 1), &argv[1]);
-	} else if (!strncasecmp(argv[0], "Resource", MAX(command_len, 1))) {
+	} else if (!strncasecmp(argv[0], "Resource", MAX(command_len, 4))) {
 		error_code = sacctmgr_list_res((argc - 1), &argv[1]);
+	} else if (!strncasecmp(argv[0], "Reservations", MAX(command_len, 4)) ||
+		   !strncasecmp(argv[0], "Resv", MAX(command_len, 4))) {
+		error_code = sacctmgr_list_reservation((argc - 1), &argv[1]);
 	} else if (!strncasecmp(argv[0], "Transactions", MAX(command_len, 1))
 		   || !strncasecmp(argv[0], "Txn", MAX(command_len, 1))) {
 		error_code = sacctmgr_list_txn((argc - 1), &argv[1]);
@@ -661,6 +665,8 @@ static void _show_it (int argc, char *argv[])
 		error_code = sacctmgr_list_user((argc - 1), &argv[1]);
 	} else if (strncasecmp(argv[0], "WCKeys", MAX(command_len, 1)) == 0) {
 		error_code = sacctmgr_list_wckey((argc - 1), &argv[1]);
+	} else if (strncasecmp(argv[0], "tres", MAX(command_len, 2)) == 0) {
+		error_code = sacctmgr_list_tres(argc - 1, &argv[1]);
 	} else {
 	helpme:
 		exit_code = 1;
@@ -668,8 +674,8 @@ static void _show_it (int argc, char *argv[])
 		fprintf(stderr, "Input line must include ");
 		fprintf(stderr, "\"Account\", \"Association\", "
 			"\"Cluster\", \"Configuration\",\n\"Event\", "
-			"\"Problem\", \"QOS\", \"Resource\", \"Transaction\", "
-			"\"User\", or \"WCKey\"\n");
+			"\"Problem\", \"QOS\", \"Resource\", \"Reservation\", "
+			"\"Transaction\", \"TRES\", \"User\", or \"WCKey\"\n");
 	}
 
 	if (error_code != SLURM_SUCCESS) {
@@ -846,7 +852,8 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
                                                                            \n\
   <ENTITY> may be \"account\", \"association\", \"cluster\",               \n\
                   \"configuration\", \"coordinator\", \"event\", \"job\",  \n\
-                  \"problem\", \"qos\", \"resource\", \"transaction\",     \n\
+                  \"problem\", \"qos\", \"resource\", \"reservation\",     \n\
+                  \"transaction\", \"tres\",                               \n\
                    \"user\" or \"wckey\"                                   \n\
                                                                            \n\
   <SPECS> are different for each command entity pair.                      \n\
@@ -855,15 +862,15 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
                             WithDeleted, WithCoordinators, WithRawQOS,     \n\
                             and WOPLimits                                  \n\
        add account        - Clusters=, DefaultQOS=, Description=, Fairshare=,\n\
-                            GrpCPUMins=, GrpCPUs=, GrpJobs=, GrpMemory=,   \n\
-                            GrpNodes=, GrpSubmitJob=, GrpWall=, MaxCPUMins=,\n\
-                            MaxCPUs=, MaxJobs=, MaxNodes=, MaxSubmitJobs=, \n\
+                            GrpTRESMins=, GrpTRES=, GrpJobs=, GrpMemory=,   \n\
+                            GrpNodes=, GrpSubmitJob=, GrpWall=, MaxTRESMins=,\n\
+                            MaxTRES=, MaxJobs=, MaxNodes=, MaxSubmitJobs=, \n\
                             MaxWall=, Names=, Organization=, Parent=,      \n\
                             and QosLevel=                                  \n\
        modify account     - (set options) DefaultQOS=, Description=,       \n\
-                            Fairshare=, GrpCPUMins=, GrpCPURunMins=,       \n\
-                            GrpCPUs=, GrpJobs=, GrpMemory=, GrpNodes=,     \n\
-                            GrpSubmitJob=, GrpWall=, MaxCPUMins=, MaxCPUs=,\n\
+                            Fairshare=, GrpTRESMins=, GrpTRESRunMins=,       \n\
+                            GrpTRES=, GrpJobs=, GrpMemory=, GrpNodes=,     \n\
+                            GrpSubmitJob=, GrpWall=, MaxTRESMins=, MaxTRES=,\n\
                             MaxJobs=, MaxNodes=, MaxSubmitJobs=, MaxWall=, \n\
                             Names=, Organization=, Parent=, and QosLevel=  \n\
                             RawUsage= (with admin privileges only)         \n\
@@ -880,14 +887,14 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
                                                                            \n\
        list cluster       - Classification=, DefaultQOS=, Flags=, Format=, \n\
                             Names=, RPC=, and WOLimits                     \n\
-       add cluster        - DefaultQOS=, Fairshare=, GrpCPUs=, GrpJobs=,   \n\
-                            GrpMemory=, GrpNodes=, GrpSubmitJob=, MaxCPUMins=,\n\
-                            MaxJobs=, MaxNodes=, MaxSubmitJobs=, MaxWall=, \n\
-                            Name=, and QosLevel=                           \n\
-       modify cluster     - (set options) DefaultQOS=, Fairshare=, GrpCPUs=,\n\
-                            GrpJobs=, GrpMemory=, GrpNodes=, GrpSubmitJob=, \n\
-                            MaxCPUMins=, MaxJobs=, MaxNodes=, MaxSubmitJobs=,\n\
-                            MaxWall=, and QosLevel=                        \n\
+       add cluster        - DefaultQOS=, Fairshare=, GrpTRES=, GrpJobs=,   \n\
+                            GrpMemory=, GrpNodes=, GrpSubmitJob=,          \n\
+                            MaxTRESMins=, MaxJobs=, MaxNodes=,             \n\
+                            MaxSubmitJobs=, MaxWall=, Name=, and QosLevel= \n\
+       modify cluster     - (set options) DefaultQOS=, Fairshare=, GrpTRES=,\n\
+                            GrpJobs=, GrpMemory=, GrpNodes=, GrpSubmitJob=,\n\
+                            MaxTRESMins=, MaxJobs=, MaxNodes=,             \n\
+                            MaxSubmitJobs=, MaxWall=, and QosLevel=        \n\
                             (where options) Classification=, Flags=,       \n\
                             and Names=                                     \n\
        delete cluster     - Classification=, DefaultQOS=, Flags=, and Names=\n\
@@ -935,21 +942,25 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
                             (where options) Clusters=, Names=, Servers=,   \n\
        delete resource    - Clusters=, Names=                              \n\
                                                                            \n\
+       list reservation   - Clusters=, End=, ID=, Names=, Nodes=, Start=   \n\
+                                                                           \n\
        list transactions  - Accounts=, Action=, Actor=, Clusters=, End=,   \n\
                             Format=, ID=, Start=, User=, and WithAssoc     \n\
                                                                            \n\
+       list tres          - ID=, Name=, Type=, WithDeleted                 \n\
+                                                                           \n\
        list user          - AdminLevel=, DefaultAccount=,                  \n\
                             DefaultWCKey=, Format=, Names=,                \n\
                             QosLevel=, WithAssoc, WithCoordinators,        \n\
                             WithDeleted, WithRawQOS, and WOPLimits         \n\
        add user           - Accounts=, AdminLevel=, Clusters=,             \n\
                             DefaultAccount=, DefaultQOS=, DefaultWCKey=,   \n\
-                            Fairshare=, MaxCPUMins=, MaxCPUs=,             \n\
+                            Fairshare=, MaxTRESMins=, MaxTRES=,            \n\
                             MaxJobs=, MaxNodes=, MaxSubmitJobs=, MaxWall=, \n\
                             Names=, Partitions=, and QosLevel=             \n\
        modify user        - (set options) AdminLevel=, DefaultAccount=,    \n\
                             DefaultQOS=, DefaultWCKey=, Fairshare=,        \n\
-                            MaxCPUMins=, MaxCPUs=, MaxJobs=, MaxNodes=,    \n\
+                            MaxTRESMins=, MaxTRES=, MaxJobs=, MaxNodes=,    \n\
                             MaxSubmitJobs=, MaxWall=, and QosLevel=,       \n\
                             RawUsage= (with admin privileges only)         \n\
                             (where options) Accounts=, AdminLevel=,        \n\
@@ -976,23 +987,23 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
        Account            - Account, Coordinators, Description, Organization\n\
                                                                            \n\
        Association        - Account, Cluster, DefaultQOS, Fairshare,       \n\
-                            GrpCPUMins, GrpCPURunMins, GrpCPUs, GrpJobs,   \n\
+                            GrpTRESMins, GrpTRESRunMins, GrpTRES, GrpJobs, \n\
                             GrpMemory, GrpNodes, GrpSubmitJob, GrpWall,    \n\
-                            ID, LFT, MaxCPUMins, MaxCPUs,                  \n\
+                            ID, LFT, MaxTRESMins, MaxTRES,                  \n\
                             MaxJobs, MaxNodes, MaxSubmitJobs, MaxWall, QOS,\n\
                             ParentID, ParentName, Partition, RawQOS, RGT,  \n\
                             User                                           \n\
                                                                            \n\
        Cluster            - Classification, Cluster, ClusterNodes,         \n\
-                            ControlHost, ControlPort, CpuCount, DefaultQOS,\n\
-                            Fairshare, Flags, GrpCPUMins, GrpCPUs, GrpJobs,\n\
-                            GrpMemory, GrpNodes, GrpSubmitJob, MaxCPUMins, \n\
-                            MaxCPUs, MaxJobs, MaxNodes, MaxSubmitJobs,     \n\
-                            MaxWall, NodeCount, PluginIDSelect, RPC        \n\
+                            ControlHost, ControlPort, DefaultQOS,          \n\
+                            Fairshare, Flags, GrpTRESMins, GrpTRES, GrpJobs,\n\
+                            GrpMemory, GrpNodes, GrpSubmitJob, MaxTRESMins, \n\
+                            MaxTRES, MaxJobs, MaxNodes, MaxSubmitJobs,     \n\
+                            MaxWall, NodeCount, PluginIDSelect, RPC, TRES  \n\
                                                                            \n\
-       Event              - Cluster, ClusterNodes, CPUs, Duration, End,    \n\
+       Event              - Cluster, ClusterNodes, Duration, End,          \n\
                             Event, EventRaw, NodeName, Reason, Start,      \n\
-                            State, StateRaw, User                          \n\
+                            State, StateRaw, TRES, User                    \n\
                                                                            \n\
        QOS                - Description, Flags, GraceTime, GrpCPUMins,     \n\
                             GrpCPURunMins, GrpCPUs, GrpJobs, GrpMemory,    \n\
@@ -1007,8 +1018,13 @@ sacctmgr [<OPTION>] [<COMMAND>]                                            \n\
                             Description, Flags, Manager, Name,             \n\
                             PercentAllowed, PercentUsed, Server, and Type  \n\
                                                                            \n\
+       Reservation        - Assoc, Cluster, End, Flags, ID, Name,          \n\
+                            NodeNames, Start, TRES                         \n\
+                                                                           \n\
        Transactions       - Action, Actor, Info, TimeStamp, Where          \n\
                                                                            \n\
+       TRES               - ID, Name, Type                                 \n\
+                                                                           \n\
        User               - AdminLevel, Coordinators, DefaultAccount,      \n\
                             DefaultWCKey, User                             \n\
                                                                            \n\
diff --git a/src/sacctmgr/sacctmgr.h b/src/sacctmgr/sacctmgr.h
index ab47ee6df..aca2e098a 100644
--- a/src/sacctmgr/sacctmgr.h
+++ b/src/sacctmgr/sacctmgr.h
@@ -93,6 +93,7 @@ typedef enum {
 	PRINT_ACCT,
 	PRINT_CLUSTER,
 	PRINT_COORDS,
+	PRINT_CPUS,
 	PRINT_DESC,
 	PRINT_FLAGS,
 	PRINT_NAME,
@@ -107,6 +108,9 @@ typedef enum {
 	PRINT_GRPCM,
 	PRINT_GRPCRM,
 	PRINT_GRPC,
+	PRINT_GRPTM,
+	PRINT_GRPTRM,
+	PRINT_GRPT,
 	PRINT_GRPJ,
 	PRINT_GRPMEM,
 	PRINT_GRPN,
@@ -116,12 +120,18 @@ typedef enum {
 	PRINT_MAXCRM,
 	PRINT_MAXC,
 	PRINT_MAXCU,
+	PRINT_MAXTM,
+	PRINT_MAXTRM,
+	PRINT_MAXT,
+	PRINT_MAXTN,
+	PRINT_MAXTU,
 	PRINT_MAXJ,
 	PRINT_MAXN,
 	PRINT_MAXNU,
 	PRINT_MAXS,
 	PRINT_MAXW,
 	PRINT_MINC,
+	PRINT_MINT,
 
 	/* ASSOCIATION */
 	PRINT_DQOS = 2000,
@@ -135,7 +145,7 @@ typedef enum {
 	PRINT_CHOST = 3000,
 	PRINT_CPORT,
 	PRINT_CLASS,
-	PRINT_CPUS,
+	PRINT_TRES,
 	PRINT_NODECNT,
 	PRINT_CLUSTER_NODES,
 	PRINT_RPC_VERSION,
@@ -170,12 +180,12 @@ typedef enum {
 
 	/* EVENT */
 	PRINT_DURATION,
-	PRINT_END,
+	PRINT_TIMEEND,
 	PRINT_EVENTRAW,
 	PRINT_EVENT,
 	PRINT_NODENAME,
 	PRINT_REASON,
-	PRINT_START,
+	PRINT_TIMESTART,
 	PRINT_STATERAW,
 	PRINT_STATE,
 
@@ -188,6 +198,10 @@ typedef enum {
 	PRINT_ALLOWED,
 	PRINT_ALLOCATED,
 	PRINT_USED,
+
+	/* RESERVATION */
+	PRINT_ASSOC_NAME = 10000,
+
 } sacctmgr_print_t;
 
 
@@ -204,23 +218,24 @@ extern void *db_conn;
 extern uint32_t my_uid;
 extern List g_qos_list;
 extern List g_res_list;
+extern List g_tres_list;
 
 extern bool tree_display;
 
 extern bool sacctmgr_check_default_qos(uint32_t qos_id,
-				       slurmdb_association_cond_t *assoc_cond);
+				       slurmdb_assoc_cond_t *assoc_cond);
 
-extern int sacctmgr_set_association_cond(slurmdb_association_cond_t *assoc_cond,
+extern int sacctmgr_set_assoc_cond(slurmdb_assoc_cond_t *assoc_cond,
 					 char *type, char *value,
 					 int command_len, int option);
-extern int sacctmgr_set_association_rec(slurmdb_association_rec_t *assoc_rec,
+extern int sacctmgr_set_assoc_rec(slurmdb_assoc_rec_t *assoc_rec,
 					char *type, char *value,
 					int command_len, int option);
-extern void sacctmgr_print_association_rec(slurmdb_association_rec_t *assoc,
+extern void sacctmgr_print_assoc_rec(slurmdb_assoc_rec_t *assoc,
 					   print_field_t *field, List tree_list,
 					   bool last);
 
-extern int sacctmgr_add_association(int argc, char *argv[]);
+extern int sacctmgr_add_assoc(int argc, char *argv[]);
 extern int sacctmgr_add_user(int argc, char *argv[]);
 extern int sacctmgr_add_account(int argc, char *argv[]);
 extern int sacctmgr_add_cluster(int argc, char *argv[]);
@@ -228,7 +243,7 @@ extern int sacctmgr_add_coord(int argc, char *argv[]);
 extern int sacctmgr_add_qos(int argc, char *argv[]);
 extern int sacctmgr_add_res(int argc, char *argv[]);
 
-extern int sacctmgr_list_association(int argc, char *argv[]);
+extern int sacctmgr_list_assoc(int argc, char *argv[]);
 extern int sacctmgr_list_user(int argc, char *argv[]);
 extern int sacctmgr_list_account(int argc, char *argv[]);
 extern int sacctmgr_list_cluster(int argc, char *argv[]);
@@ -238,8 +253,10 @@ extern int sacctmgr_list_problem(int argc, char *argv[]);
 extern int sacctmgr_list_qos(int argc, char *argv[]);
 extern int sacctmgr_list_res(int argc, char *argv[]);
 extern int sacctmgr_list_wckey(int argc, char *argv[]);
+extern int sacctmgr_list_tres(int, char **);
+extern int sacctmgr_list_reservation(int argc, char **argv);
 
-extern int sacctmgr_modify_association(int argc, char *argv[]);
+extern int sacctmgr_modify_assoc(int argc, char *argv[]);
 extern int sacctmgr_modify_user(int argc, char *argv[]);
 extern int sacctmgr_modify_account(int argc, char *argv[]);
 extern int sacctmgr_modify_cluster(int argc, char *argv[]);
@@ -247,7 +264,7 @@ extern int sacctmgr_modify_job(int argc, char *argv[]);
 extern int sacctmgr_modify_qos(int argc, char *argv[]);
 extern int sacctmgr_modify_res(int argc, char *argv[]);
 
-extern int sacctmgr_delete_association(int argc, char *argv[]);
+extern int sacctmgr_delete_assoc(int argc, char *argv[]);
 extern int sacctmgr_delete_user(int argc, char *argv[]);
 extern int sacctmgr_delete_account(int argc, char *argv[]);
 extern int sacctmgr_delete_cluster(int argc, char *argv[]);
@@ -273,7 +290,6 @@ extern int get_double(char *in_value, double *out_value, char *type);
 extern int addto_qos_char_list(List char_list, List qos_list, char *names,
 			       int option);
 extern int addto_action_char_list(List char_list, char *names);
-extern List copy_char_list(List qos_list);
 extern void sacctmgr_print_coord_list(
 	print_field_t *field, List value, int last);
 extern void sacctmgr_print_qos_list(print_field_t *field, List qos_list,
@@ -281,18 +297,20 @@ extern void sacctmgr_print_qos_list(print_field_t *field, List qos_list,
 extern void sacctmgr_print_qos_bitstr(print_field_t *field, List qos_list,
 				      bitstr_t *value, int last);
 
-extern void sacctmgr_print_assoc_limits(slurmdb_association_rec_t *assoc);
+extern void sacctmgr_print_tres(print_field_t *field, char *tres_simple_str,
+				int last);
+extern void sacctmgr_print_assoc_limits(slurmdb_assoc_rec_t *assoc);
 extern void sacctmgr_print_qos_limits(slurmdb_qos_rec_t *qos);
-extern int sacctmgr_remove_assoc_usage(slurmdb_association_cond_t *assoc_cond);
+extern int sacctmgr_remove_assoc_usage(slurmdb_assoc_cond_t *assoc_cond);
 extern int sacctmgr_remove_qos_usage(slurmdb_qos_cond_t *qos_cond);
 extern int sort_coord_list(void *, void *);
 extern List sacctmgr_process_format_list(List format_list);
 extern int sacctmgr_validate_cluster_list(List cluster_list);
 
 /* you need to free the objects returned from these functions */
-extern slurmdb_association_rec_t *sacctmgr_find_account_base_assoc(
+extern slurmdb_assoc_rec_t *sacctmgr_find_account_base_assoc(
 	char *account, char *cluster);
-extern slurmdb_association_rec_t *sacctmgr_find_root_assoc(char *cluster);
+extern slurmdb_assoc_rec_t *sacctmgr_find_root_assoc(char *cluster);
 extern slurmdb_user_rec_t *sacctmgr_find_user(char *name);
 extern slurmdb_account_rec_t *sacctmgr_find_account(char *name);
 extern slurmdb_cluster_rec_t *sacctmgr_find_cluster(char *name);
@@ -301,10 +319,10 @@ extern slurmdb_cluster_rec_t *sacctmgr_find_cluster(char *name);
  * they are pointing to an object in the list given
  */
 
-extern slurmdb_association_rec_t *sacctmgr_find_association_from_list(
+extern slurmdb_assoc_rec_t *sacctmgr_find_assoc_from_list(
 	List assoc_list, char *user, char *account,
 	char *cluster, char *partition);
-extern slurmdb_association_rec_t *sacctmgr_find_account_base_assoc_from_list(
+extern slurmdb_assoc_rec_t *sacctmgr_find_account_base_assoc_from_list(
 	List assoc_list, char *account, char *cluster);
 extern slurmdb_res_rec_t *sacctmgr_find_res_from_list(
 	List res_list, uint32_t id, char *name, char *server);
@@ -322,7 +340,7 @@ extern slurmdb_wckey_rec_t *sacctmgr_find_wckey_from_list(
 
 /* file_functions.c */
 extern int print_file_add_limits_to_line(char **line,
-					 slurmdb_association_rec_t *assoc);
+					 slurmdb_assoc_rec_t *assoc);
 
 extern int print_file_slurmdb_hierarchical_rec_list(FILE *fd,
 					  List slurmdb_hierarchical_rec_list,
diff --git a/src/sacctmgr/tres_function.c b/src/sacctmgr/tres_function.c
new file mode 100644
index 000000000..05de8f9cb
--- /dev/null
+++ b/src/sacctmgr/tres_function.c
@@ -0,0 +1,215 @@
+/*****************************************************************************\
+ *  tres_function.c - functions dealing with TRES in the
+ *                        accounting system.
+ *****************************************************************************
+ *  Copyright (C) 2015 SchedMD LLC.
+ *  Written by David Bigagli <david@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "src/sacctmgr/sacctmgr.h"
+#include "src/common/assoc_mgr.h"
+
+static int _set_cond(int *start, int argc, char *argv[],
+		     slurmdb_tres_cond_t *tres_cond,
+		     List format_list)
+{
+	int i;
+	int set = 0;
+	int end = 0;
+	int command_len = 0;
+
+	if (!tres_cond) {
+		exit_code=1;
+		fprintf(stderr, "No tres_cond given");
+		return -1;
+	}
+
+	for (i=(*start); i<argc; i++) {
+		end = parse_option_end(argv[i]);
+		if (!end)
+			command_len=strlen(argv[i]);
+		else {
+			command_len=end-1;
+			if (argv[i][end] == '=') {
+				end++;
+			}
+		}
+
+		if (!strncasecmp(argv[i], "Set", MAX(command_len, 3))) {
+			i--;
+			break;
+		} else if (!end &&
+			   !strncasecmp(argv[i], "WithDeleted",
+					 MAX(command_len, 5))) {
+			tres_cond->with_deleted = 1;
+		} else if (!end && !strncasecmp(argv[i], "where",
+					       MAX(command_len, 5))) {
+			continue;
+		} else if (!end
+			  || !strncasecmp(argv[i], "Type",
+					   MAX(command_len, 2))) {
+			if (!tres_cond->type_list) {
+				tres_cond->type_list =
+					list_create(slurm_destroy_char);
+			}
+			if (slurm_addto_char_list(
+				   tres_cond->type_list,
+				   argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp(argv[i], "Names",
+					 MAX(command_len, 1))) {
+			if (!tres_cond->name_list) {
+				tres_cond->name_list =
+					list_create(slurm_destroy_char);
+			}
+			if (slurm_addto_char_list(tres_cond->name_list,
+						  argv[i]+end))
+				set = 1;
+		} else if (!strncasecmp(argv[i], "Format",
+					 MAX(command_len, 1))) {
+			if (format_list)
+				slurm_addto_char_list(format_list, argv[i]+end);
+		} else if (!strncasecmp(argv[i], "Ids",
+					 MAX(command_len, 1))) {
+			if (!tres_cond->id_list) {
+				tres_cond->id_list =
+					list_create(slurm_destroy_char);
+			}
+			if (slurm_addto_char_list(tres_cond->id_list,
+						 argv[i]+end))
+				set = 1;
+		} else {
+			exit_code=1;
+			fprintf(stderr, " Unknown condition: %s\n"
+				" Use keyword 'set' to modify value\n",
+				argv[i]);
+		}
+	}
+
+	(*start) = i;
+
+	if (set)
+		return 1;
+
+	return 0;
+}
+
+/* sacctmgr_list_tres()
+ */
+int sacctmgr_list_tres(int argc, char **argv)
+{
+	List tres_list;
+	ListIterator itr;
+	ListIterator itr2;
+	List format_list = list_create(slurm_destroy_char);
+	List print_fields_list;
+	slurmdb_tres_cond_t *tres_cond = xmalloc(sizeof(slurmdb_tres_cond_t));
+	slurmdb_tres_rec_t *tres;
+	int field_count, i;
+	print_field_t *field;
+
+	for (i=0; i<argc; i++) {
+		int command_len = strlen(argv[i]);
+		if (!strncasecmp(argv[i], "Where", MAX(command_len, 5))
+		    || !strncasecmp(argv[i], "Set", MAX(command_len, 3)))
+			i++;
+		_set_cond(&i, argc, argv, tres_cond, format_list);
+	}
+
+	if (exit_code) {
+		slurmdb_destroy_tres_cond(tres_cond);
+		FREE_NULL_LIST(format_list);
+		return SLURM_ERROR;
+	}
+
+	if (!list_count(format_list)) {
+		/* Append to the format list the fields
+		 * we want to print, these are the data structure
+		 * members of the type returned by slurmdbd
+		 */
+		slurm_addto_char_list(format_list, "Type,Name%15,ID");
+	}
+
+	tres_list = acct_storage_g_get_tres(db_conn, my_uid, tres_cond);
+	slurmdb_destroy_tres_cond(tres_cond);
+
+	if (!tres_list) {
+		exit_code=1;
+		fprintf(stderr, " Problem with query.\n");
+		FREE_NULL_LIST(format_list);
+		return SLURM_ERROR;
+	}
+
+
+	/* Process the format list creating a list of
+	 * print field_t structures
+	 */
+	print_fields_list = sacctmgr_process_format_list(format_list);
+	FREE_NULL_LIST(format_list);
+
+	itr = list_iterator_create(tres_list);
+	itr2 = list_iterator_create(print_fields_list);
+	print_fields_header(print_fields_list);
+	field_count = list_count(print_fields_list);
+
+	/* For each tres prints the data structure members
+	 */
+	while ((tres = list_next(itr))) {
+		while ((field = list_next(itr2))) {
+			switch (field->type) {
+				case PRINT_NAME:
+					field->print_routine(field,
+							     tres->name,
+							     field_count);
+					break;
+				case PRINT_ID:
+					field->print_routine(field,
+							     tres->id,
+							     field_count);
+					break;
+				case PRINT_TYPE:
+					field->print_routine(field,
+							     tres->type,
+							     field_count);
+					break;
+			}
+		}
+		list_iterator_reset(itr2);
+		printf("\n");
+	}
+	list_iterator_destroy(itr);
+	list_iterator_destroy(itr2);
+	FREE_NULL_LIST(tres_list);
+	FREE_NULL_LIST(print_fields_list);
+
+	return 0;
+}
diff --git a/src/sacctmgr/txn_functions.c b/src/sacctmgr/txn_functions.c
index aff4effcb..c11b24487 100644
--- a/src/sacctmgr/txn_functions.c
+++ b/src/sacctmgr/txn_functions.c
@@ -185,7 +185,7 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 
 	if (exit_code) {
 		slurmdb_destroy_txn_cond(txn_cond);
-		list_destroy(format_list);
+		FREE_NULL_LIST(format_list);
 		return SLURM_ERROR;
 	}
 
@@ -198,10 +198,10 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 	}
 
 	print_fields_list = sacctmgr_process_format_list(format_list);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	if (exit_code) {
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 
@@ -212,7 +212,7 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 		exit_code=1;
 		fprintf(stderr, " Error with request: %s\n",
 			slurm_strerror(errno));
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 	itr = list_iterator_create(txn_list);
@@ -288,7 +288,7 @@ extern int sacctmgr_list_txn(int argc, char *argv[])
 
 	list_iterator_destroy(itr2);
 	list_iterator_destroy(itr);
-	list_destroy(txn_list);
-	list_destroy(print_fields_list);
+	FREE_NULL_LIST(txn_list);
+	FREE_NULL_LIST(print_fields_list);
 	return rc;
 }
diff --git a/src/sacctmgr/user_functions.c b/src/sacctmgr/user_functions.c
index 7fbdcd45e..126ca1f84 100644
--- a/src/sacctmgr/user_functions.c
+++ b/src/sacctmgr/user_functions.c
@@ -55,7 +55,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int u_set = 0;
 	int a_set = 0;
 	int end = 0;
-	slurmdb_association_cond_t *assoc_cond = NULL;
+	slurmdb_assoc_cond_t *assoc_cond = NULL;
 	int command_len = 0;
 	int option = 0;
 
@@ -66,7 +66,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 
 	if (!user_cond->assoc_cond)
 		user_cond->assoc_cond =
-			xmalloc(sizeof(slurmdb_association_cond_t));
+			xmalloc(sizeof(slurmdb_assoc_cond_t));
 
 	assoc_cond = user_cond->assoc_cond;
 
@@ -175,7 +175,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 
 				slurm_addto_char_list(format_list, argv[i]+end);
 			}
-		} else if (!(a_set = sacctmgr_set_association_cond(
+		} else if (!(a_set = sacctmgr_set_assoc_cond(
 				    assoc_cond, argv[i], argv[i]+end,
 				    command_len, option))) {
 			exit_code=1;
@@ -199,7 +199,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 
 static int _set_rec(int *start, int argc, char *argv[],
 		    slurmdb_user_rec_t *user,
-		    slurmdb_association_rec_t *assoc)
+		    slurmdb_assoc_rec_t *assoc)
 {
 	int i;
 	int u_set = 0;
@@ -260,15 +260,14 @@ static int _set_rec(int *start, int argc, char *argv[],
 			uint32_t usage;
 			if (!assoc)
 				continue;
-			assoc->usage = xmalloc(sizeof(
-						assoc_mgr_association_usage_t));
+			assoc->usage = xmalloc(sizeof(slurmdb_assoc_usage_t));
 			if (get_uint(argv[i]+end, &usage,
 				     "RawUsage") == SLURM_SUCCESS) {
 				assoc->usage->usage_raw = usage;
 				a_set = 1;
 			}
 		} else if (!assoc ||
-			  (assoc && !(a_set = sacctmgr_set_association_rec(
+			  (assoc && !(a_set = sacctmgr_set_assoc_rec(
 					      assoc, argv[i], argv[i]+end,
 					      command_len, option)))) {
 			exit_code=1;
@@ -316,7 +315,7 @@ static int _check_and_set_cluster_list(List cluster_list)
 		cluster_rec->name = NULL;
 	}
 	list_iterator_destroy(itr_c);
-	list_destroy(tmp_list);
+	FREE_NULL_LIST(tmp_list);
 
 	if (!list_count(cluster_list)) {
 		exit_code=1;
@@ -329,7 +328,7 @@ static int _check_and_set_cluster_list(List cluster_list)
 	return rc;
 }
 
-static int _check_default_associations(char *def_acct,
+static int _check_default_assocs(char *def_acct,
 				       List user_list, List cluster_list)
 {
 	char *user = NULL, *cluster = NULL;
@@ -338,7 +337,7 @@ static int _check_default_associations(char *def_acct,
 	ListIterator itr = NULL;
 	ListIterator itr_c = NULL;
 	regret_t *regret = NULL;
-	slurmdb_association_cond_t assoc_cond;
+	slurmdb_assoc_cond_t assoc_cond;
 	int rc = SLURM_SUCCESS;
 
 	if (!def_acct)
@@ -350,14 +349,14 @@ static int _check_default_associations(char *def_acct,
 	if (!list_count(user_list) || !list_count(cluster_list))
 		return SLURM_ERROR;
 
-	memset(&assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+	memset(&assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 	assoc_cond.user_list = user_list;
 	assoc_cond.cluster_list = cluster_list;
 	assoc_cond.acct_list = list_create(NULL);
 	list_append(assoc_cond.acct_list, def_acct);
-	local_assoc_list = acct_storage_g_get_associations(
+	local_assoc_list = acct_storage_g_get_assocs(
 		db_conn, my_uid, &assoc_cond);
-	list_destroy(assoc_cond.acct_list);
+	FREE_NULL_LIST(assoc_cond.acct_list);
 
 	itr = list_iterator_create(user_list);
 	itr_c = list_iterator_create(cluster_list);
@@ -373,7 +372,7 @@ static int _check_default_associations(char *def_acct,
 	 */
 	while((user = list_next(itr))) {
 		while((cluster = list_next(itr_c))) {
-			if (!sacctmgr_find_association_from_list(
+			if (!sacctmgr_find_assoc_from_list(
 				local_assoc_list,
 				user, def_acct, cluster, "*")) {
 				regret = xmalloc(sizeof(regret_t));
@@ -394,7 +393,7 @@ static int _check_default_associations(char *def_acct,
 	}
 	list_iterator_destroy(itr);
 	list_iterator_destroy(itr_c);
-	list_destroy(local_assoc_list);
+	FREE_NULL_LIST(local_assoc_list);
 
 	if (regret_list) {
 		itr = list_iterator_create(regret_list);
@@ -409,7 +408,7 @@ static int _check_default_associations(char *def_acct,
 		list_iterator_destroy(itr);
 		exit_code=1;
 		rc = SLURM_ERROR;
-		list_destroy(regret_list);
+		FREE_NULL_LIST(regret_list);
 	}
 
 	return rc;
@@ -443,7 +442,7 @@ static int _check_default_wckeys(char *def_wckey,
 	list_append(wckey_cond.name_list, def_wckey);
 	local_wckey_list = acct_storage_g_get_wckeys(
 		db_conn, my_uid, &wckey_cond);
-	list_destroy(wckey_cond.name_list);
+	FREE_NULL_LIST(wckey_cond.name_list);
 
 	itr = list_iterator_create(user_list);
 	itr_c = list_iterator_create(cluster_list);
@@ -480,7 +479,7 @@ static int _check_default_wckeys(char *def_wckey,
 	}
 	list_iterator_destroy(itr);
 	list_iterator_destroy(itr_c);
-	list_destroy(local_wckey_list);
+	FREE_NULL_LIST(local_wckey_list);
 
 	if (regret_list) {
 		itr = list_iterator_create(regret_list);
@@ -495,7 +494,7 @@ static int _check_default_wckeys(char *def_wckey,
 		list_iterator_destroy(itr);
 		exit_code=1;
 		rc = SLURM_ERROR;
-		list_destroy(regret_list);
+		FREE_NULL_LIST(regret_list);
 	}
 
 	return rc;
@@ -578,8 +577,7 @@ static int _check_coord_request(slurmdb_user_cond_t *user_cond, bool check)
 		exit_code=1;
 		fprintf(stderr, " Problem getting users from database.  "
 			"Contact your admin.\n");
-		if (local_acct_list)
-			list_destroy(local_acct_list);
+		FREE_NULL_LIST(local_acct_list);
 		return SLURM_ERROR;
 	}
 
@@ -608,10 +606,8 @@ static int _check_coord_request(slurmdb_user_cond_t *user_cond, bool check)
 		list_iterator_destroy(itr2);
 	}
 
-	if (local_acct_list)
-		list_destroy(local_acct_list);
-	if (local_user_list)
-		list_destroy(local_user_list);
+	FREE_NULL_LIST(local_acct_list);
+	FREE_NULL_LIST(local_user_list);
 
 	return rc;
 }
@@ -619,7 +615,7 @@ static int _check_coord_request(slurmdb_user_cond_t *user_cond, bool check)
 static void _check_user_has_default_assoc(char *user_name, List assoc_list)
 {
 	ListIterator itr = list_iterator_create(assoc_list);
-	slurmdb_association_rec_t *assoc;
+	slurmdb_assoc_rec_t *assoc;
 	bool def_found = 0;
 	char *last_cluster = NULL;
 
@@ -658,11 +654,11 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 	ListIterator itr_p = NULL;
 	ListIterator itr_w = NULL;
 	slurmdb_user_rec_t *user = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
-	slurmdb_association_rec_t start_assoc;
+	slurmdb_assoc_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t start_assoc;
 	char *default_acct = NULL;
 	char *default_wckey = NULL;
-	slurmdb_association_cond_t *assoc_cond = NULL;
+	slurmdb_assoc_cond_t *assoc_cond = NULL;
 	slurmdb_wckey_rec_t *wckey = NULL;
 	slurmdb_wckey_cond_t *wckey_cond = NULL;
 	slurmdb_admin_level_t admin_level = SLURMDB_ADMIN_NOTSET;
@@ -690,9 +686,9 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 /* 		       " Please contact your administrator.\n"); */
 /* 		return SLURM_ERROR; */
 /* 	} */
-	slurmdb_init_association_rec(&start_assoc, 0);
+	slurmdb_init_assoc_rec(&start_assoc, 0);
 
-	assoc_cond = xmalloc(sizeof(slurmdb_association_cond_t));
+	assoc_cond = xmalloc(sizeof(slurmdb_assoc_cond_t));
 
 	assoc_cond->user_list = list_create(slurm_destroy_char);
 	assoc_cond->acct_list = list_create(slurm_destroy_char);
@@ -752,10 +748,10 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 					 MAX(command_len, 1))) {
 			slurm_addto_char_list(wckey_cond->name_list,
 					      argv[i]+end);
-		} else if (!(limit_set = sacctmgr_set_association_rec(
+		} else if (!(limit_set = sacctmgr_set_assoc_rec(
 				    &start_assoc, argv[i], argv[i]+end,
 				    command_len, option))
-			  && !(limit_set = sacctmgr_set_association_cond(
+			  && !(limit_set = sacctmgr_set_assoc_cond(
 				       assoc_cond, argv[i], argv[i]+end,
 				       command_len, option))) {
 			exit_code=1;
@@ -765,20 +761,20 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 
 	if (exit_code) {
 		slurmdb_destroy_wckey_cond(wckey_cond);
-		slurmdb_destroy_association_cond(assoc_cond);
+		slurmdb_destroy_assoc_cond(assoc_cond);
 		return SLURM_ERROR;
 	} else if (!list_count(assoc_cond->user_list)) {
 		slurmdb_destroy_wckey_cond(wckey_cond);
-		slurmdb_destroy_association_cond(assoc_cond);
+		slurmdb_destroy_assoc_cond(assoc_cond);
 		exit_code=1;
 		fprintf(stderr, " Need name of user to add.\n");
 		return SLURM_ERROR;
 	} else {
  		slurmdb_user_cond_t user_cond;
- 		slurmdb_association_cond_t temp_assoc_cond;
+		slurmdb_assoc_cond_t temp_assoc_cond;
 
 		memset(&user_cond, 0, sizeof(slurmdb_user_cond_t));
-		memset(&temp_assoc_cond, 0, sizeof(slurmdb_association_cond_t));
+		memset(&temp_assoc_cond, 0, sizeof(slurmdb_assoc_cond_t));
 		user_cond.with_wckeys = 1;
 		user_cond.with_assocs = 1;
 
@@ -795,7 +791,7 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 		fprintf(stderr, " Problem getting users from database.  "
 			"Contact your admin.\n");
 		slurmdb_destroy_wckey_cond(wckey_cond);
-		slurmdb_destroy_association_cond(assoc_cond);
+		slurmdb_destroy_assoc_cond(assoc_cond);
 		return SLURM_ERROR;
 	}
 
@@ -804,26 +800,24 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 		if (_check_and_set_cluster_list(assoc_cond->cluster_list)
 		    != SLURM_SUCCESS) {
 			slurmdb_destroy_wckey_cond(wckey_cond);
-			slurmdb_destroy_association_cond(assoc_cond);
-			list_destroy(local_user_list);
-			if (local_acct_list)
-				list_destroy(local_acct_list);
+			slurmdb_destroy_assoc_cond(assoc_cond);
+			FREE_NULL_LIST(local_user_list);
+			FREE_NULL_LIST(local_acct_list);
 			return SLURM_ERROR;
 		}
 	} else if (sacctmgr_validate_cluster_list(assoc_cond->cluster_list)
 		   != SLURM_SUCCESS) {
 		slurmdb_destroy_wckey_cond(wckey_cond);
-		slurmdb_destroy_association_cond(assoc_cond);
-		list_destroy(local_user_list);
-		if (local_acct_list)
-			list_destroy(local_acct_list);
+		slurmdb_destroy_assoc_cond(assoc_cond);
+		FREE_NULL_LIST(local_user_list);
+		FREE_NULL_LIST(local_acct_list);
 		return SLURM_ERROR;
 	}
 
 	if (!list_count(assoc_cond->acct_list)) {
 		if (!list_count(wckey_cond->name_list)) {
 			slurmdb_destroy_wckey_cond(wckey_cond);
-			slurmdb_destroy_association_cond(assoc_cond);
+			slurmdb_destroy_assoc_cond(assoc_cond);
 			exit_code=1;
 			fprintf(stderr, " Need name of account to "
 				"add user to.\n");
@@ -831,7 +825,7 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 		}
 	} else {
  		slurmdb_account_cond_t account_cond;
-		slurmdb_association_cond_t query_assoc_cond;
+		slurmdb_assoc_cond_t query_assoc_cond;
 
 		memset(&account_cond, 0, sizeof(slurmdb_account_cond_t));
 		account_cond.assoc_cond = assoc_cond;
@@ -843,27 +837,27 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 			exit_code=1;
 			fprintf(stderr, " Problem getting accounts "
 				"from database.  Contact your admin.\n");
-			list_destroy(local_user_list);
+			FREE_NULL_LIST(local_user_list);
 			slurmdb_destroy_wckey_cond(wckey_cond);
-			slurmdb_destroy_association_cond(assoc_cond);
+			slurmdb_destroy_assoc_cond(assoc_cond);
 			return SLURM_ERROR;
 		}
 
 		memset(&query_assoc_cond, 0,
-		       sizeof(slurmdb_association_cond_t));
+		       sizeof(slurmdb_assoc_cond_t));
 		query_assoc_cond.acct_list = assoc_cond->acct_list;
 		query_assoc_cond.cluster_list = assoc_cond->cluster_list;
-		local_assoc_list = acct_storage_g_get_associations(
+		local_assoc_list = acct_storage_g_get_assocs(
 			db_conn, my_uid, &query_assoc_cond);
 
 		if (!local_assoc_list) {
 			exit_code=1;
-			fprintf(stderr, " Problem getting associations "
+			fprintf(stderr, " Problem getting assocs "
 				"from database.  Contact your admin.\n");
-			list_destroy(local_user_list);
-			list_destroy(local_acct_list);
+			FREE_NULL_LIST(local_user_list);
+			FREE_NULL_LIST(local_acct_list);
 			slurmdb_destroy_wckey_cond(wckey_cond);
-			slurmdb_destroy_association_cond(assoc_cond);
+			slurmdb_destroy_assoc_cond(assoc_cond);
 			return SLURM_ERROR;
 		}
 	}
@@ -884,7 +878,7 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 	/* we are adding these lists to the global lists and will be
 	   freed when they are */
 	user_list = list_create(slurmdb_destroy_user_rec);
-	assoc_list = list_create(slurmdb_destroy_association_rec);
+	assoc_list = list_create(slurmdb_destroy_assoc_rec);
 	wckey_list = list_create(slurmdb_destroy_wckey_rec);
 
 	itr = list_iterator_create(assoc_cond->user_list);
@@ -964,7 +958,7 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 
 			user = xmalloc(sizeof(slurmdb_user_rec_t));
 			user->assoc_list =
-				list_create(slurmdb_destroy_association_rec);
+				list_create(slurmdb_destroy_assoc_rec);
 			user->wckey_list =
 				list_create(slurmdb_destroy_wckey_rec);
 			user->name = xstrdup(name);
@@ -1015,10 +1009,10 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 					}
 					continue;
 				} else if (!local_def_acct) {
-					slurmdb_association_rec_t *assoc_rec;
+					slurmdb_assoc_rec_t *assoc_rec;
 					if (user_rec
 					    && (assoc_rec =
-						sacctmgr_find_association_from_list(
+						sacctmgr_find_assoc_from_list(
 						     user_rec->assoc_list,
 						     name, NULL,
 						     cluster, "*")))
@@ -1037,14 +1031,14 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 					assoc_cond->partition_list);
 				while((partition = list_next(itr_p))) {
 					partition_set = 1;
-					if (sacctmgr_find_association_from_list(
+					if (sacctmgr_find_assoc_from_list(
 						   local_assoc_list,
 						   name, account,
 						   cluster, partition))
 						continue;
 					assoc = xmalloc(
-						sizeof(slurmdb_association_rec_t));
-					slurmdb_init_association_rec(assoc, 0);
+						sizeof(slurmdb_assoc_rec_t));
+					slurmdb_init_assoc_rec(assoc, 0);
 					assoc->user = xstrdup(name);
 					assoc->acct = xstrdup(account);
 					assoc->cluster = xstrdup(cluster);
@@ -1059,33 +1053,8 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 					assoc->shares_raw =
 						start_assoc.shares_raw;
 
-					assoc->grp_cpu_mins =
-						start_assoc.grp_cpu_mins;
-					assoc->grp_cpu_run_mins =
-						start_assoc.grp_cpu_run_mins;
-					assoc->grp_cpus = start_assoc.grp_cpus;
-					assoc->grp_jobs = start_assoc.grp_jobs;
-					assoc->grp_mem = start_assoc.grp_mem;
-					assoc->grp_nodes =
-						start_assoc.grp_nodes;
-					assoc->grp_submit_jobs =
-						start_assoc.grp_submit_jobs;
-					assoc->grp_wall = start_assoc.grp_wall;
-
-					assoc->max_cpu_mins_pj =
-						start_assoc.max_cpu_mins_pj;
-					assoc->max_cpus_pj =
-						start_assoc.max_cpus_pj;
-					assoc->max_jobs = start_assoc.max_jobs;
-					assoc->max_nodes_pj =
-						start_assoc.max_nodes_pj;
-					assoc->max_submit_jobs =
-						start_assoc.max_submit_jobs;
-					assoc->max_wall_pj =
-						start_assoc.max_wall_pj;
-
-					assoc->qos_list = copy_char_list(
-						start_assoc.qos_list);
+					slurmdb_copy_assoc_rec_limits(
+						assoc, &start_assoc);
 
 					if (user)
 						list_append(user->assoc_list,
@@ -1108,7 +1077,7 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 					continue;
 				}
 
-				if (sacctmgr_find_association_from_list(
+				if (sacctmgr_find_assoc_from_list(
 					   local_assoc_list,
 					   name, account, cluster, NULL)) {
 					if (!default_acct && local_def_acct)
@@ -1117,8 +1086,8 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 				}
 
 				assoc = xmalloc(
-					sizeof(slurmdb_association_rec_t));
-				slurmdb_init_association_rec(assoc, 0);
+					sizeof(slurmdb_assoc_rec_t));
+				slurmdb_init_assoc_rec(assoc, 0);
 				assoc->user = xstrdup(name);
 				if (local_def_acct
 				   && !strcmp(local_def_acct, account))
@@ -1130,29 +1099,8 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 
 				assoc->shares_raw = start_assoc.shares_raw;
 
-				assoc->grp_cpu_mins =
-					start_assoc.grp_cpu_mins;
-				assoc->grp_cpu_run_mins =
-					start_assoc.grp_cpu_run_mins;
-				assoc->grp_cpus = start_assoc.grp_cpus;
-				assoc->grp_jobs = start_assoc.grp_jobs;
-				assoc->grp_mem = start_assoc.grp_mem;
-				assoc->grp_nodes = start_assoc.grp_nodes;
-				assoc->grp_submit_jobs =
-					start_assoc.grp_submit_jobs;
-				assoc->grp_wall = start_assoc.grp_wall;
-
-				assoc->max_cpu_mins_pj =
-					start_assoc.max_cpu_mins_pj;
-				assoc->max_cpus_pj = start_assoc.max_cpus_pj;
-				assoc->max_jobs = start_assoc.max_jobs;
-				assoc->max_nodes_pj = start_assoc.max_nodes_pj;
-				assoc->max_submit_jobs =
-					start_assoc.max_submit_jobs;
-				assoc->max_wall_pj = start_assoc.max_wall_pj;
-
-				assoc->qos_list =
-					copy_char_list(start_assoc.qos_list);
+				slurmdb_copy_assoc_rec_limits(
+					assoc, &start_assoc);
 
 				if (user)
 					list_append(user->assoc_list, assoc);
@@ -1229,15 +1177,12 @@ extern int sacctmgr_add_user(int argc, char *argv[])
 	}
 no_default:
 	list_iterator_destroy(itr);
-	list_destroy(local_user_list);
-	if (local_acct_list)
-		list_destroy(local_acct_list);
-	if (local_assoc_list)
-		list_destroy(local_assoc_list);
-	if (local_wckey_list)
-		list_destroy(local_wckey_list);
+	FREE_NULL_LIST(local_user_list);
+	FREE_NULL_LIST(local_acct_list);
+	FREE_NULL_LIST(local_assoc_list);
+	FREE_NULL_LIST(local_wckey_list);
 	slurmdb_destroy_wckey_cond(wckey_cond);
-	slurmdb_destroy_association_cond(assoc_cond);
+	slurmdb_destroy_assoc_cond(assoc_cond);
 
 	if (!list_count(user_list) && !list_count(assoc_list)
 	   && !list_count(wckey_list)) {
@@ -1276,8 +1221,7 @@ no_default:
 	if (limit_set) {
 		printf(" Non Default Settings\n");
 		sacctmgr_print_assoc_limits(&start_assoc);
-		if (start_assoc.qos_list)
-			list_destroy(start_assoc.qos_list);
+		FREE_NULL_LIST(start_assoc.qos_list);
 	}
 
 	notice_thread_init();
@@ -1287,7 +1231,7 @@ no_default:
 
 	if (rc == SLURM_SUCCESS) {
 		if (list_count(assoc_list))
-			rc = acct_storage_g_add_associations(db_conn, my_uid,
+			rc = acct_storage_g_add_assocs(db_conn, my_uid,
 							     assoc_list);
 	}
 
@@ -1321,9 +1265,9 @@ no_default:
 	}
 
 end_it:
-	list_destroy(user_list);
-	list_destroy(assoc_list);
-	list_destroy(wckey_list);
+	FREE_NULL_LIST(user_list);
+	FREE_NULL_LIST(assoc_list);
+	FREE_NULL_LIST(wckey_list);
 	xfree(default_acct);
 	xfree(default_wckey);
 
@@ -1415,7 +1359,7 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 	ListIterator itr = NULL;
 	ListIterator itr2 = NULL;
 	slurmdb_user_rec_t *user = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
 
 	print_field_t *field = NULL;
 	int field_count = 0;
@@ -1424,7 +1368,7 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 	List print_fields_list; /* types are of print_field_t */
 
 	user_cond->with_assocs = with_assoc_flag;
-	user_cond->assoc_cond = xmalloc(sizeof(slurmdb_association_cond_t));
+	user_cond->assoc_cond = xmalloc(sizeof(slurmdb_assoc_cond_t));
 
 	for (i=0; i<argc; i++) {
 		int command_len = strlen(argv[i]);
@@ -1437,7 +1381,7 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 
 	if (exit_code) {
 		slurmdb_destroy_user_cond(user_cond);
-		list_destroy(format_list);
+		FREE_NULL_LIST(format_list);
 		return SLURM_ERROR;
 	}
 
@@ -1468,18 +1412,18 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 				 "when querying with the withassoc option.\n"
 				 "Are you sure you want to continue?")) {
 			printf("Aborted\n");
-			list_destroy(format_list);
+			FREE_NULL_LIST(format_list);
 			slurmdb_destroy_user_cond(user_cond);
 			return SLURM_SUCCESS;
 		}
 	}
 
 	print_fields_list = sacctmgr_process_format_list(format_list);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	if (exit_code) {
 		slurmdb_destroy_user_cond(user_cond);
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 
@@ -1489,7 +1433,7 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 	if (!user_list) {
 		exit_code=1;
 		fprintf(stderr, " Problem with query.\n");
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 
@@ -1516,7 +1460,7 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 				/* get the defaults */
 				if (!curr_cluster
 				    || strcmp(curr_cluster, assoc->cluster)) {
-					slurmdb_association_rec_t *assoc2;
+					slurmdb_assoc_rec_t *assoc2;
 					/* We shouldn't have to reset this
 					 * unless no default is on the
 					 * cluster. */
@@ -1606,7 +1550,7 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 							 field_count));
 						break;
 					default:
-						sacctmgr_print_association_rec(
+						sacctmgr_print_assoc_rec(
 							assoc, field, NULL,
 							(curr_inx ==
 							 field_count));
@@ -1674,8 +1618,8 @@ extern int sacctmgr_list_user(int argc, char *argv[])
 
 	list_iterator_destroy(itr2);
 	list_iterator_destroy(itr);
-	list_destroy(user_list);
-	list_destroy(print_fields_list);
+	FREE_NULL_LIST(user_list);
+	FREE_NULL_LIST(print_fields_list);
 
 	return rc;
 }
@@ -1685,15 +1629,15 @@ extern int sacctmgr_modify_user(int argc, char *argv[])
 	int rc = SLURM_SUCCESS;
 	slurmdb_user_cond_t *user_cond = xmalloc(sizeof(slurmdb_user_cond_t));
 	slurmdb_user_rec_t *user = xmalloc(sizeof(slurmdb_user_rec_t));
-	slurmdb_association_rec_t *assoc =
-		xmalloc(sizeof(slurmdb_association_rec_t));
+	slurmdb_assoc_rec_t *assoc =
+		xmalloc(sizeof(slurmdb_assoc_rec_t));
 	int i=0;
 	int cond_set = 0, prev_set = 0, rec_set = 0, set = 0;
 	List ret_list = NULL;
 
-	slurmdb_init_association_rec(assoc, 0);
+	slurmdb_init_assoc_rec(assoc, 0);
 
-	user_cond->assoc_cond = xmalloc(sizeof(slurmdb_association_cond_t));
+	user_cond->assoc_cond = xmalloc(sizeof(slurmdb_assoc_cond_t));
 	user_cond->assoc_cond->cluster_list = list_create(slurm_destroy_char);
 	/* We need this to make sure we only change users, not
 	 * accounts if this list didn't exist it would change
@@ -1722,14 +1666,14 @@ extern int sacctmgr_modify_user(int argc, char *argv[])
 	if (exit_code) {
 		slurmdb_destroy_user_cond(user_cond);
 		slurmdb_destroy_user_rec(user);
-		slurmdb_destroy_association_rec(assoc);
+		slurmdb_destroy_assoc_rec(assoc);
 		return SLURM_ERROR;
 	} else if (!rec_set) {
 		exit_code=1;
 		fprintf(stderr, " You didn't give me anything to set\n");
 		slurmdb_destroy_user_cond(user_cond);
 		slurmdb_destroy_user_rec(user);
-		slurmdb_destroy_association_rec(assoc);
+		slurmdb_destroy_assoc_rec(assoc);
 		return SLURM_ERROR;
 	} else if (!cond_set) {
 		if (!commit_check("You didn't set any conditions with 'WHERE'.\n"
@@ -1737,7 +1681,7 @@ extern int sacctmgr_modify_user(int argc, char *argv[])
 			printf("Aborted\n");
 			slurmdb_destroy_user_cond(user_cond);
 			slurmdb_destroy_user_rec(user);
-			slurmdb_destroy_association_rec(assoc);
+			slurmdb_destroy_assoc_rec(assoc);
 			return SLURM_SUCCESS;
 		}
 	}
@@ -1757,7 +1701,7 @@ extern int sacctmgr_modify_user(int argc, char *argv[])
 
 		slurmdb_destroy_user_cond(user_cond);
 		slurmdb_destroy_user_rec(user);
-		slurmdb_destroy_association_rec(assoc);
+		slurmdb_destroy_assoc_rec(assoc);
 		return rc;
 	}
 
@@ -1796,7 +1740,7 @@ extern int sacctmgr_modify_user(int argc, char *argv[])
 		if (ret_list && list_count(ret_list)) {
 			set = 1;
 			if (user->default_acct
-			    && _check_default_associations(
+			    && _check_default_assocs(
 				    user->default_acct, ret_list,
 				    user_cond->assoc_cond->cluster_list)
 			    != SLURM_SUCCESS) {
@@ -1835,12 +1779,11 @@ extern int sacctmgr_modify_user(int argc, char *argv[])
 			rc = SLURM_ERROR;
 		}
 
-		if (ret_list)
-			list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 	}
 
 assoc_start:
-	if (rec_set & 2) { // process the association changes
+	if (rec_set & 2) { // process the assoc changes
 		if (cond_set == 1
 		   && !list_count(user_cond->assoc_cond->user_list)) {
 			rc = SLURM_ERROR;
@@ -1851,7 +1794,7 @@ assoc_start:
 			goto assoc_end;
 		}
 
-		ret_list = acct_storage_g_modify_associations(
+		ret_list = acct_storage_g_modify_assocs(
 			db_conn, my_uid, user_cond->assoc_cond, assoc);
 
 		if (ret_list && list_count(ret_list)) {
@@ -1885,8 +1828,7 @@ assoc_start:
 			rc = SLURM_ERROR;
 		}
 
-		if (ret_list)
-			list_destroy(ret_list);
+		FREE_NULL_LIST(ret_list);
 	}
 assoc_end:
 
@@ -1902,7 +1844,7 @@ assoc_end:
 
 	slurmdb_destroy_user_cond(user_cond);
 	slurmdb_destroy_user_rec(user);
-	slurmdb_destroy_association_rec(assoc);
+	slurmdb_destroy_assoc_rec(assoc);
 
 	return rc;
 }
@@ -1949,7 +1891,7 @@ extern int sacctmgr_delete_user(int argc, char *argv[])
 		ret_list = acct_storage_g_remove_users(
 			db_conn, my_uid, user_cond);
 	} else if (cond_set & 2) {
-		ret_list = acct_storage_g_remove_associations(
+		ret_list = acct_storage_g_remove_assocs(
 			db_conn, my_uid, user_cond->assoc_cond);
 	}
 
@@ -1972,7 +1914,7 @@ extern int sacctmgr_delete_user(int argc, char *argv[])
 				fprintf(stderr,"  %s\n", object);
 			}
 			list_iterator_destroy(itr);
-			list_destroy(ret_list);
+			FREE_NULL_LIST(ret_list);
 			acct_storage_g_commit(db_conn, 0);
 			return rc;
 		}
@@ -1997,7 +1939,7 @@ extern int sacctmgr_delete_user(int argc, char *argv[])
 		if (cond_set & 2 && del_user_list) {
 			List user_list = NULL;
 			slurmdb_user_cond_t del_user_cond;
-			slurmdb_association_cond_t del_user_assoc_cond;
+			slurmdb_assoc_cond_t del_user_assoc_cond;
 			slurmdb_user_rec_t *user = NULL;
 
 			/* Use a fresh cond here so we check all
@@ -2006,7 +1948,7 @@ extern int sacctmgr_delete_user(int argc, char *argv[])
 			*/
 			memset(&del_user_cond, 0, sizeof(slurmdb_user_cond_t));
 			memset(&del_user_assoc_cond, 0,
-			       sizeof(slurmdb_association_cond_t));
+			       sizeof(slurmdb_assoc_cond_t));
 			del_user_cond.with_assocs = 1;
 			del_user_assoc_cond.user_list = del_user_list;
 			/* No need to get all the extra info about the
@@ -2017,7 +1959,7 @@ extern int sacctmgr_delete_user(int argc, char *argv[])
 			del_user_cond.assoc_cond = &del_user_assoc_cond;
 			user_list = acct_storage_g_get_users(
 				db_conn, my_uid, &del_user_cond);
-			list_destroy(del_user_list);
+			FREE_NULL_LIST(del_user_list);
 			del_user_list = NULL;
 
 			if (user_list) {
@@ -2041,7 +1983,7 @@ extern int sacctmgr_delete_user(int argc, char *argv[])
 							      user->name);
 				}
 				list_iterator_destroy(itr);
-				list_destroy(user_list);
+				FREE_NULL_LIST(user_list);
 			}
 
 			if (del_user_list) {
@@ -2050,16 +1992,15 @@ extern int sacctmgr_delete_user(int argc, char *argv[])
 				memset(&del_user_cond, 0,
 				       sizeof(slurmdb_user_cond_t));
 				memset(&del_user_assoc_cond, 0,
-				       sizeof(slurmdb_association_cond_t));
+				       sizeof(slurmdb_assoc_cond_t));
 
 				del_user_assoc_cond.user_list = del_user_list;
 				del_user_cond.assoc_cond = &del_user_assoc_cond;
 
 				del_user_ret_list = acct_storage_g_remove_users(
 					db_conn, my_uid, &del_user_cond);
-				if (del_user_ret_list)
-					list_destroy(del_user_ret_list);
-				list_destroy(del_user_list);
+				FREE_NULL_LIST(del_user_ret_list);
+				FREE_NULL_LIST(del_user_list);
 			}
 		}
 
@@ -2079,8 +2020,7 @@ extern int sacctmgr_delete_user(int argc, char *argv[])
 		rc = SLURM_ERROR;
 	}
 
-	if (ret_list)
-		list_destroy(ret_list);
+	FREE_NULL_LIST(ret_list);
 
 	return rc;
 }
@@ -2186,8 +2126,7 @@ extern int sacctmgr_delete_coord(int argc, char *argv[])
 		rc = SLURM_ERROR;
 	}
 
-	if (ret_list)
-		list_destroy(ret_list);
+	FREE_NULL_LIST(ret_list);
 	notice_thread_fini();
 	if (set) {
 		if (commit_check("Would you like to commit changes?"))
diff --git a/src/sacctmgr/wckey_functions.c b/src/sacctmgr/wckey_functions.c
index c2fc6fc85..d5f70321d 100644
--- a/src/sacctmgr/wckey_functions.c
+++ b/src/sacctmgr/wckey_functions.c
@@ -180,7 +180,7 @@ extern int sacctmgr_list_wckey(int argc, char *argv[])
 
 	if (exit_code) {
 		slurmdb_destroy_wckey_cond(wckey_cond);
-		list_destroy(format_list);
+		FREE_NULL_LIST(format_list);
 		return SLURM_ERROR;
 	}
 
@@ -240,11 +240,11 @@ extern int sacctmgr_list_wckey(int argc, char *argv[])
 		list_append(print_fields_list, field);
 	}
 	list_iterator_destroy(itr);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	if (exit_code) {
 		slurmdb_destroy_wckey_cond(wckey_cond);
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 
@@ -255,7 +255,7 @@ extern int sacctmgr_list_wckey(int argc, char *argv[])
 		exit_code=1;
 		fprintf(stderr, " Error with request: %s\n",
 			slurm_strerror(errno));
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 
@@ -309,8 +309,8 @@ extern int sacctmgr_list_wckey(int argc, char *argv[])
 
 	list_iterator_destroy(itr2);
 	list_iterator_destroy(itr);
-	list_destroy(wckey_list);
-	list_destroy(print_fields_list);
+	FREE_NULL_LIST(wckey_list);
+	FREE_NULL_LIST(print_fields_list);
 
 	return rc;
 }
diff --git a/src/salloc/Makefile.in b/src/salloc/Makefile.in
index 8277c9342..39c6321e7 100644
--- a/src/salloc/Makefile.in
+++ b/src/salloc/Makefile.in
@@ -102,6 +102,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -110,10 +111,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -126,7 +129,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -250,6 +253,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -299,8 +304,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -319,6 +328,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -362,6 +374,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -385,6 +398,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/salloc/opt.c b/src/salloc/opt.c
index a389bef09..5cce18f66 100644
--- a/src/salloc/opt.c
+++ b/src/salloc/opt.c
@@ -3,6 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2010-2015 SchedMD LLC <http://www.schedmd.com>
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>, et. al.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -74,6 +75,7 @@
 #include <sys/types.h>
 #include <sys/utsname.h>
 
+#include "src/common/cpu_frequency.h"
 #include "src/common/list.h"
 #include "src/common/log.h"
 #include "src/common/parse_time.h"
@@ -109,8 +111,10 @@
 #define OPT_OVERCOMMIT  0x0e
 #define OPT_ACCTG_FREQ  0x0f
 
+#define OPT_SICP        0x10
 #define OPT_MEM_BIND    0x11
 #define OPT_IMMEDIATE   0x12
+#define OPT_POWER       0x13
 #define OPT_WCKEY       0x14
 #define OPT_SIGNAL      0x15
 #define OPT_KILL_CMD    0x16
@@ -118,6 +122,8 @@
 #define OPT_PROFILE     0x18
 #define OPT_CORE_SPEC   0x19
 #define OPT_HINT	0x1a
+#define OPT_CPU_FREQ    0x1b
+#define OPT_THREAD_SPEC 0x1c
 
 /* generic getopt_long flags, integers and *not* valid characters */
 
@@ -151,6 +157,8 @@
 #define LONG_OPT_GET_USER_ENV    0x125
 #define LONG_OPT_NETWORK         0x126
 #define LONG_OPT_QOS             0x127
+#define LONG_OPT_BURST_BUFFER_SPEC  0x128
+#define LONG_OPT_BURST_BUFFER_FILE  0x129
 #define LONG_OPT_SOCKETSPERNODE  0x130
 #define LONG_OPT_CORESPERSOCKET  0x131
 #define LONG_OPT_THREADSPERCORE  0x132
@@ -168,7 +176,11 @@
 #define LONG_OPT_WAIT_ALL_NODES  0x142
 #define LONG_OPT_REQ_SWITCH      0x143
 #define LONG_OPT_PROFILE         0x144
+#define LONG_OPT_CPU_FREQ        0x145
 #define LONG_OPT_PRIORITY        0x160
+#define LONG_OPT_SICP            0x161
+#define LONG_OPT_POWER           0x162
+#define LONG_OPT_THREAD_SPEC     0x163
 
 
 /*---- global variables, defined in opt.h ----*/
@@ -180,9 +192,6 @@ int immediate_exit = 1;
 
 typedef struct env_vars env_vars_t;
 
-/* Get a decimal integer from arg */
-static int  _get_int(const char *arg, const char *what);
-
 static void  _help(void);
 
 /* fill in default options  */
@@ -198,9 +207,10 @@ static void  _opt_list(void);
 
 /* verify options sanity  */
 static bool _opt_verify(void);
-
+static char *_read_file(char *fname);
 static void  _proc_get_user_env(char *optarg);
 static void _process_env_var(env_vars_t *e, const char *val);
+
 static void  _usage(void);
 
 /*---[ end forward declarations of static functions ]---------------------*/
@@ -216,7 +226,7 @@ int initialize_and_process_args(int argc, char *argv[])
 	/* initialize options with argv */
 	_opt_args(argc, argv);
 
-	if (opt.verbose > 3)
+	if (opt.verbose)
 		_opt_list();
 
 	return 1;
@@ -318,6 +328,8 @@ static void _opt_default()
 	opt.account  = NULL;
 	opt.comment  = NULL;
 	opt.qos      = NULL;
+	opt.sicp_mode = 0;
+	opt.power_flags = 0;
 
 	opt.distribution = SLURM_DIST_UNKNOWN;
 	opt.plane_size   = NO_VAL;
@@ -361,6 +373,9 @@ static void _opt_default()
 
 	opt.bell            = BELL_AFTER_DELAY;
 	opt.acctg_freq      = NULL;
+	opt.cpu_freq_min    = NO_VAL;
+	opt.cpu_freq_max    = NO_VAL;
+	opt.cpu_freq_gov    = NO_VAL;
 	opt.no_shell	    = false;
 	opt.get_user_env_time = -1;
 	opt.get_user_env_mode = -1;
@@ -396,8 +411,10 @@ env_vars_t env_vars[] = {
   {"SALLOC_ACCOUNT",       OPT_STRING,     &opt.account,       NULL          },
   {"SALLOC_ACCTG_FREQ",    OPT_STRING,     &opt.acctg_freq,    NULL          },
   {"SALLOC_BELL",          OPT_BELL,       NULL,               NULL          },
+  {"SALLOC_BURST_BUFFER",  OPT_STRING,     &opt.burst_buffer,  NULL          },
   {"SALLOC_CONN_TYPE",     OPT_CONN_TYPE,  NULL,               NULL          },
   {"SALLOC_CORE_SPEC",     OPT_INT,        &opt.core_spec,     NULL          },
+  {"SALLOC_CPU_FREQ_REQ",  OPT_CPU_FREQ,   NULL,               NULL          },
   {"SALLOC_DEBUG",         OPT_DEBUG,      NULL,               NULL          },
   {"SALLOC_EXCLUSIVE",     OPT_EXCLUSIVE,  NULL,               NULL          },
   {"SALLOC_GEOMETRY",      OPT_GEOMETRY,   NULL,               NULL          },
@@ -412,10 +429,13 @@ env_vars_t env_vars[] = {
   {"SALLOC_NO_ROTATE",     OPT_NO_ROTATE,  NULL,               NULL          },
   {"SALLOC_OVERCOMMIT",    OPT_OVERCOMMIT, NULL,               NULL          },
   {"SALLOC_PARTITION",     OPT_STRING,     &opt.partition,     NULL          },
+  {"SALLOC_POWER",         OPT_POWER,      NULL,               NULL          },
   {"SALLOC_PROFILE",       OPT_PROFILE,    NULL,               NULL          },
   {"SALLOC_QOS",           OPT_STRING,     &opt.qos,           NULL          },
   {"SALLOC_RESERVATION",   OPT_STRING,     &opt.reservation,   NULL          },
+  {"SALLOC_SICP",          OPT_SICP,       NULL,               NULL          },
   {"SALLOC_SIGNAL",        OPT_SIGNAL,     NULL,               NULL          },
+  {"SALLOC_THREAD_SPEC",   OPT_THREAD_SPEC,NULL,               NULL          },
   {"SALLOC_TIMELIMIT",     OPT_STRING,     &opt.time_limit_str,NULL          },
   {"SALLOC_WAIT",          OPT_IMMEDIATE,  NULL,               NULL          },
   {"SALLOC_WAIT_ALL_NODES",OPT_INT,        &opt.wait_all_nodes,NULL          },
@@ -460,7 +480,7 @@ _process_env_var(env_vars_t *e, const char *val)
 		*((char **) e->arg) = xstrdup(val);
 		break;
 	case OPT_INT:
-		if (val != NULL) {
+		if (val[0] != '\0') {
 			*((int *) e->arg) = (int) strtol(val, &end, 10);
 			if (!(end && *end == '\0')) {
 				error("%s=%s invalid. ignoring...",
@@ -475,7 +495,7 @@ _process_env_var(env_vars_t *e, const char *val)
 		 *  - argument is "yes"
 		 *  - argument is a non-zero number
 		 */
-		if (val == NULL || strcmp(val, "") == 0) {
+		if (val[0] == '\0') {
 			*((bool *)e->arg) = true;
 		} else if (strcasecmp(val, "yes") == 0) {
 			*((bool *)e->arg) = true;
@@ -488,7 +508,7 @@ _process_env_var(env_vars_t *e, const char *val)
 		break;
 
 	case OPT_DEBUG:
-		if (val != NULL) {
+		if (val[0] != '\0') {
 			opt.verbose = (int) strtol(val, &end, 10);
 			if (!(end && *end == '\0'))
 				error("%s=%s invalid", e->var, val);
@@ -538,7 +558,14 @@ _process_env_var(env_vars_t *e, const char *val)
 		info("WARNING: You are attempting to initiate a second job");
 		break;
 	case OPT_EXCLUSIVE:
-		opt.shared = 0;
+		if (val[0] == '\0') {
+			opt.shared = 0;
+		} else if (!strcasecmp(val, "user")) {
+			opt.shared = 2;
+		} else {
+			error("\"%s=%s\" -- invalid value, ignoring...",
+			      e->var, val);
+		}
 		break;
 	case OPT_OVERCOMMIT:
 		opt.overcommit = true;
@@ -563,6 +590,15 @@ _process_env_var(env_vars_t *e, const char *val)
 		xfree(opt.wckey);
 		opt.wckey = xstrdup(val);
 		break;
+
+	case OPT_POWER:
+		opt.power_flags = power_flags_id((char *)val);
+		break;
+
+	case OPT_SICP:
+		opt.sicp_mode = 1;
+		break;
+
 	case OPT_SIGNAL:
 		if (get_signal_opts((char *)val, &opt.warn_signal,
 				    &opt.warn_time, &opt.warn_flags)) {
@@ -587,36 +623,21 @@ _process_env_var(env_vars_t *e, const char *val)
 	case OPT_PROFILE:
 		opt.profile = acct_gather_profile_from_string((char *)val);
 		break;
+	case OPT_CPU_FREQ:
+		if (cpu_freq_verify_cmdline(val, &opt.cpu_freq_min,
+				&opt.cpu_freq_max, &opt.cpu_freq_gov))
+			error("Invalid --cpu-freq argument: %s. Ignored", val);
+		break;
+	case OPT_THREAD_SPEC:
+		opt.core_spec = parse_int("thread_spec", val, true) |
+					 CORE_SPEC_THREAD;
+		break;
 	default:
 		/* do nothing */
 		break;
 	}
 }
 
-/*
- *  Get a decimal integer from arg.
- *
- *  Returns the integer on success, exits program on failure.
- *
- */
-static int
-_get_int(const char *arg, const char *what)
-{
-	char *p;
-	long int result = strtol(arg, &p, 10);
-
-	if ((*p != '\0') || (result < 0L)) {
-		error ("Invalid numeric value \"%s\" for %s.", arg, what);
-		exit(error_exit);
-	}
-
-	if (result > INT_MAX) {
-		error ("Numeric argument (%ld) to big for %s.", result, what);
-	}
-
-	return (int) result;
-}
-
 void set_options(const int argc, char **argv)
 {
 	int opt_char, option_index = 0, max_val = 0;
@@ -656,6 +677,8 @@ void set_options(const int argc, char **argv)
 		{"exclude",       required_argument, 0, 'x'},
 		{"acctg-freq",    required_argument, 0, LONG_OPT_ACCTG_FREQ},
 		{"begin",         required_argument, 0, LONG_OPT_BEGIN},
+		{"bb",            required_argument, 0, LONG_OPT_BURST_BUFFER_SPEC},
+		{"bbf",           required_argument, 0, LONG_OPT_BURST_BUFFER_FILE},
 		{"bell",          no_argument,       0, LONG_OPT_BELL},
 		{"blrts-image",   required_argument, 0, LONG_OPT_BLRTS_IMAGE},
 		{"cnload-image",  required_argument, 0, LONG_OPT_LINUX_IMAGE},
@@ -663,7 +686,8 @@ void set_options(const int argc, char **argv)
 		{"conn-type",     required_argument, 0, LONG_OPT_CONNTYPE},
 		{"contiguous",    no_argument,       0, LONG_OPT_CONT},
 		{"cores-per-socket", required_argument, 0, LONG_OPT_CORESPERSOCKET},
-		{"exclusive",     no_argument,       0, LONG_OPT_EXCLUSIVE},
+		{"cpu-freq",         required_argument, 0, LONG_OPT_CPU_FREQ},
+		{"exclusive",     optional_argument, 0, LONG_OPT_EXCLUSIVE},
 		{"get-user-env",  optional_argument, 0, LONG_OPT_GET_USER_ENV},
 		{"gid",           required_argument, 0, LONG_OPT_GID},
 		{"gres",          required_argument, 0, LONG_OPT_GRES},
@@ -689,21 +713,24 @@ void set_options(const int argc, char **argv)
 		{"ntasks-per-core",  required_argument, 0, LONG_OPT_NTASKSPERCORE},
 		{"ntasks-per-node",  required_argument, 0, LONG_OPT_NTASKSPERNODE},
 		{"ntasks-per-socket",required_argument, 0, LONG_OPT_NTASKSPERSOCKET},
-		{"qos",		  required_argument, 0, LONG_OPT_QOS},
+		{"power",         required_argument, 0, LONG_OPT_POWER},
 		{"profile",       required_argument, 0, LONG_OPT_PROFILE},
+		{"qos",		  required_argument, 0, LONG_OPT_QOS},
 		{"ramdisk-image", required_argument, 0, LONG_OPT_RAMDISK_IMAGE},
 		{"reboot",	  no_argument,       0, LONG_OPT_REBOOT},
 		{"reservation",   required_argument, 0, LONG_OPT_RESERVATION},
+		{"sicp",          optional_argument, 0, LONG_OPT_SICP},
 		{"signal",        required_argument, 0, LONG_OPT_SIGNAL},
 		{"sockets-per-node", required_argument, 0, LONG_OPT_SOCKETSPERNODE},
+		{"switches",      required_argument, 0, LONG_OPT_REQ_SWITCH},
 		{"tasks-per-node",  required_argument, 0, LONG_OPT_NTASKSPERNODE},
+		{"thread-spec",   required_argument, 0, LONG_OPT_THREAD_SPEC},
 		{"time-min",      required_argument, 0, LONG_OPT_TIME_MIN},
 		{"threads-per-core", required_argument, 0, LONG_OPT_THREADSPERCORE},
 		{"tmp",           required_argument, 0, LONG_OPT_TMP},
 		{"uid",           required_argument, 0, LONG_OPT_UID},
 		{"wait-all-nodes",required_argument, 0, LONG_OPT_WAIT_ALL_NODES},
 		{"wckey",         required_argument, 0, LONG_OPT_WCKEY},
-		{"switches",      required_argument, 0, LONG_OPT_REQ_SWITCH},
 		{NULL,            0,                 0, 0}
 	};
 	char *opt_string =
@@ -749,7 +776,8 @@ void set_options(const int argc, char **argv)
 			break;
 		case 'c':
 			opt.cpus_set = true;
-			opt.cpus_per_task = _get_int(optarg, "cpus-per-task");
+			opt.cpus_per_task = parse_int("cpus-per-task",
+						      optarg, true);
 			break;
 		case 'C':
 			xfree(opt.constraints);
@@ -790,7 +818,7 @@ void set_options(const int argc, char **argv)
 			break;
 		case 'I':
 			if (optarg)
-				opt.immediate = _get_int(optarg, "immediate");
+				opt.immediate = parse_int("immediate", optarg, true);
 			else
 				opt.immediate = DEFAULT_IMMEDIATE;
 			break;
@@ -827,7 +855,7 @@ void set_options(const int argc, char **argv)
 		case 'n':
 			opt.ntasks_set = true;
 			opt.ntasks =
-				_get_int(optarg, "number of tasks");
+				parse_int("number of tasks", optarg, true);
 			break;
 		case 'N':
 			opt.nodes_set =
@@ -860,7 +888,7 @@ void set_options(const int argc, char **argv)
 			opt.shared = 1;
 			break;
 		case 'S':
-			opt.core_spec = _get_int(optarg, "core_spec");
+			opt.core_spec = parse_int("core_spec", optarg, false);
 			break;
 		case 't':
 			xfree(opt.time_limit_str);
@@ -892,7 +920,7 @@ void set_options(const int argc, char **argv)
 		case 'W':
 			verbose("wait option has been deprecated, use "
 				"immediate option");
-			opt.immediate = _get_int(optarg, "wait");
+			opt.immediate = parse_int("wait", optarg, true);
 			break;
 		case 'x':
 			xfree(opt.exc_nodes);
@@ -904,10 +932,17 @@ void set_options(const int argc, char **argv)
 			opt.contiguous = true;
 			break;
                 case LONG_OPT_EXCLUSIVE:
-                        opt.shared = 0;
+			if (optarg == NULL) {
+				opt.shared = 0;
+			} else if (!strcasecmp(optarg, "user")) {
+				opt.shared = 2;
+			} else {
+				error("invalid exclusive option %s", optarg);
+				exit(error_exit);
+			}
                         break;
 		case LONG_OPT_MINCPU:
-			opt.mincpus = _get_int(optarg, "mincpus");
+			opt.mincpus = parse_int("mincpus", optarg, true);
 			if (opt.mincpus < 0) {
 				error("invalid mincpus constraint %s",
 				      optarg);
@@ -917,7 +952,7 @@ void set_options(const int argc, char **argv)
 		case LONG_OPT_MINCORES:
 			verbose("mincores option has been deprecated, use "
 				"cores-per-socket");
-			opt.cores_per_socket = _get_int(optarg, "mincores");
+			opt.cores_per_socket = parse_int("mincores", optarg, true);
 			if (opt.cores_per_socket < 0) {
 				error("invalid mincores constraint %s",
 				      optarg);
@@ -927,7 +962,7 @@ void set_options(const int argc, char **argv)
 		case LONG_OPT_MINSOCKETS:
 			verbose("minsockets option has been deprecated, use "
 				"sockets-per-node");
-			opt.sockets_per_node = _get_int(optarg, "minsockets");
+			opt.sockets_per_node = parse_int("minsockets", optarg, true);
 			if (opt.sockets_per_node < 0) {
 				error("invalid minsockets constraint %s",
 				      optarg);
@@ -937,7 +972,7 @@ void set_options(const int argc, char **argv)
 		case LONG_OPT_MINTHREADS:
 			verbose("minthreads option has been deprecated, use "
 				"threads-per-core");
-			opt.threads_per_core = _get_int(optarg, "minthreads");
+			opt.threads_per_core = parse_int("minthreads", optarg, true);
 			if (opt.threads_per_core < 0) {
 				error("invalid minthreads constraint %s",
 				      optarg);
@@ -1049,7 +1084,7 @@ void set_options(const int argc, char **argv)
 			opt.bell = BELL_NEVER;
 			break;
 		case LONG_OPT_JOBID:
-			opt.jobid = _get_int(optarg, "jobid");
+			opt.jobid = parse_int("jobid", optarg, true);
 			break;
 		case LONG_OPT_PROFILE:
 			opt.profile = acct_gather_profile_from_string(optarg);
@@ -1090,16 +1125,16 @@ void set_options(const int argc, char **argv)
 				opt.threads_per_core = NO_VAL;
 			break;
 		case LONG_OPT_NTASKSPERNODE:
-			opt.ntasks_per_node = _get_int(optarg,
-				"ntasks-per-node");
+			opt.ntasks_per_node = parse_int("ntasks-per-node",
+							optarg, true);
 			break;
 		case LONG_OPT_NTASKSPERSOCKET:
-			opt.ntasks_per_socket = _get_int(optarg,
-				"ntasks-per-socket");
+			opt.ntasks_per_socket = parse_int("ntasks-per-socket",
+							  optarg, true);
 			break;
 		case LONG_OPT_NTASKSPERCORE:
-			opt.ntasks_per_core = _get_int(optarg,
-				"ntasks-per-core");
+			opt.ntasks_per_core = parse_int("ntasks-per-core",
+							optarg, true);
 			break;
 		case LONG_OPT_HINT:
 			/* Keep after other options filled in */
@@ -1168,6 +1203,12 @@ void set_options(const int argc, char **argv)
 			xfree(opt.reservation);
 			opt.reservation = xstrdup(optarg);
 			break;
+		case LONG_OPT_POWER:
+			opt.power_flags = power_flags_id(optarg);
+			break;
+		case LONG_OPT_SICP:
+			opt.sicp_mode = 1;
+			break;
 		case LONG_OPT_SIGNAL:
 			if (get_signal_opts(optarg, &opt.warn_signal,
 					    &opt.warn_time, &opt.warn_flags)) {
@@ -1192,6 +1233,12 @@ void set_options(const int argc, char **argv)
 		case LONG_OPT_WAIT_ALL_NODES:
 			opt.wait_all_nodes = strtol(optarg, NULL, 10);
 			break;
+		case LONG_OPT_CPU_FREQ:
+		        if (cpu_freq_verify_cmdline(optarg, &opt.cpu_freq_min,
+					&opt.cpu_freq_max, &opt.cpu_freq_gov))
+				error("Invalid --cpu-freq argument: %s. "
+						"Ignored", optarg);
+			break;
 		case LONG_OPT_REQ_SWITCH:
 			pos_delimit = strstr(optarg,"@");
 			if (pos_delimit != NULL) {
@@ -1199,7 +1246,19 @@ void set_options(const int argc, char **argv)
 				pos_delimit++;
 				opt.wait4switch = time_str2secs(pos_delimit);
 			}
-			opt.req_switch = _get_int(optarg, "switches");
+			opt.req_switch = parse_int("switches", optarg, true);
+			break;
+		case LONG_OPT_BURST_BUFFER_SPEC:
+			xfree(opt.burst_buffer);
+			opt.burst_buffer = xstrdup(optarg);
+			break;
+		case LONG_OPT_BURST_BUFFER_FILE:
+			xfree(opt.burst_buffer);
+			opt.burst_buffer = _read_file(optarg);
+			break;
+		case LONG_OPT_THREAD_SPEC:
+			opt.core_spec = parse_int("thread_spec", optarg, true) |
+				CORE_SPEC_THREAD;
 			break;
 		default:
 			if (spank_process_option(opt_char, optarg) < 0) {
@@ -1440,7 +1499,8 @@ static bool _opt_verify(void)
 	 * The limitations of the plane distribution in the cons_res
 	 * environment are more extensive and are documented in the
 	 * SLURM reference guide.  */
-	if (opt.distribution == SLURM_DIST_PLANE && opt.plane_size) {
+	if ((opt.distribution & SLURM_DIST_STATE_BASE) == SLURM_DIST_PLANE &&
+	    opt.plane_size) {
 		if ((opt.ntasks/opt.plane_size) < opt.min_nodes) {
 			if (((opt.min_nodes-1)*opt.plane_size) >= opt.ntasks) {
 #if (0)
@@ -1508,7 +1568,8 @@ static bool _opt_verify(void)
 				xfree(opt.nodelist);
 				opt.nodelist = add_slash;
 			}
-			opt.distribution = SLURM_DIST_ARBITRARY;
+			opt.distribution &= SLURM_DIST_STATE_FLAGS;
+			opt.distribution |= SLURM_DIST_ARBITRARY;
 			if (!_valid_node_list(&opt.nodelist)) {
 				error("Failure getting NodeNames from "
 				      "hostfile");
@@ -1525,8 +1586,8 @@ static bool _opt_verify(void)
 
 	/* set up the proc and node counts based on the arbitrary list
 	   of nodes */
-	if ((opt.distribution == SLURM_DIST_ARBITRARY)
-	   && (!opt.nodes_set || !opt.ntasks_set)) {
+	if (((opt.distribution & SLURM_DIST_STATE_BASE) == SLURM_DIST_ARBITRARY)
+	    && (!opt.nodes_set || !opt.ntasks_set)) {
 		hostlist_t hl = hostlist_create(opt.nodelist);
 		if (!opt.ntasks_set) {
 			opt.ntasks_set = 1;
@@ -1591,6 +1652,9 @@ static bool _opt_verify(void)
 		setenvfs("SLURM_PROFILE=%s",
 			 acct_gather_profile_to_string(opt.profile));
 
+	cpu_freq_set_env("SLURM_CPU_FREQ_REQ",
+			opt.cpu_freq_min, opt.cpu_freq_max, opt.cpu_freq_gov);
+
 	return verified;
 }
 
@@ -1686,6 +1750,40 @@ extern int   spank_unset_job_env(const char *name)
 	return 0;	/* not found */
 }
 
+/* Read specified file's contents into a buffer.
+ * Caller must xfree the buffer's contents */
+static char *_read_file(char *fname)
+{
+	int fd, i, offset = 0;
+	struct stat stat_buf;
+	char *file_buf;
+
+	fd = open(fname, O_RDONLY);
+	if (fd < 0) {
+		fatal("Could not open burst buffer specification file %s: %m",
+		      fname);
+	}
+	if (fstat(fd, &stat_buf) < 0) {
+		fatal("Could not stat burst buffer specification file %s: %m",
+		      fname);
+	}
+	file_buf = xmalloc(stat_buf.st_size);
+	while (stat_buf.st_size > offset) {
+		i = read(fd, file_buf + offset, stat_buf.st_size - offset);
+		if (i < 0) {
+			if (errno == EAGAIN)
+				continue;
+			fatal("Could not read burst buffer specification "
+			      "file %s: %m", fname);
+		}
+		if (i == 0)
+			break;	/* EOF */
+		offset += i;
+	}
+	close(fd);
+	return file_buf;
+}
+
 /* helper function for printing options
  *
  * warning: returns pointer to memory allocated on the stack.
@@ -1773,6 +1871,8 @@ static void _opt_list(void)
 	if (opt.gres != NULL)
 		info("gres           : %s", opt.gres);
 	info("network        : %s", opt.network);
+	info("power          : %s", power_flags_str(opt.power_flags));
+	info("sicp           : %u", opt.sicp_mode);
 	info("profile        : `%s'",
 	     acct_gather_profile_to_string(opt.profile));
 	info("qos            : %s", opt.qos);
@@ -1826,9 +1926,19 @@ static void _opt_list(void)
 	     opt.mem_bind == NULL ? "default" : opt.mem_bind);
 	str = print_commandline(command_argc, command_argv);
 	info("user command   : `%s'", str);
+	info("cpu_freq_min   : %u", opt.cpu_freq_min);
+	info("cpu_freq_max   : %u", opt.cpu_freq_max);
+	info("cpu_freq_gov   : %u", opt.cpu_freq_gov);
 	info("switches          : %d", opt.req_switch);
 	info("wait-for-switches : %d", opt.wait4switch);
-	info("core-spec         : %d", opt.core_spec);
+	if (opt.core_spec == (uint16_t) NO_VAL)
+		info("core-spec         : NA");
+	else if (opt.core_spec & CORE_SPEC_THREAD) {
+		info("thread-spec       : %d",
+		     opt.core_spec & (~CORE_SPEC_THREAD));
+	} else
+		info("core-spec         : %d", opt.core_spec);
+	info("burst_buffer      : `%s'", opt.burst_buffer);
 	xfree(str);
 
 }
@@ -1864,8 +1974,10 @@ static void _usage(void)
 "              [--network=type] [--mem-per-cpu=MB] [--qos=qos]\n"
 "              [--mem_bind=...] [--reservation=name]\n"
 "              [--time-min=minutes] [--gres=list] [--profile=...]\n"
+"              [--cpu-freq=min[-max[:gov]] [--sicp] [--power=flags]\n"
 "              [--switches=max-switches[@max-time-to-wait]]\n"
-"              [--core-spec=cores]  [--reboot]\n"
+"              [--core-spec=cores] [--thread-spec=threads] [--reboot]\n"
+"              [--bb=burst_buffer_spec] [--bbf=burst_buffer_file]\n"
 "              [executable [args...]]\n");
 }
 
@@ -1880,8 +1992,11 @@ static void _help(void)
 "  -A, --account=name          charge job to specified account\n"
 "      --begin=time            defer job until HH:MM MM/DD/YY\n"
 "      --bell                  ring the terminal bell when the job is allocated\n"
+"      --bb=<spec>             burst buffer specifications\n"
+"      --bbf=<file_name>       burst buffer specification file\n"
 "  -c, --cpus-per-task=ncpus   number of cpus required per task\n"
 "      --comment=name          arbitrary comment\n"
+"      --cpu-freq=min[-max[:gov]] requested cpu frequency (and governor)\n"
 "  -d, --dependency=type:jobid defer job until condition on jobid is satisfied\n"
 "  -D, --chdir=path            change working directory\n"
 "      --get-user-env          used by Moab.  See srun man page.\n"
@@ -1905,6 +2020,7 @@ static void _help(void)
 "      --ntasks-per-node=n     number of tasks to invoke on each node\n"
 "  -N, --nodes=N               number of nodes on which to run (N = min[-max])\n"
 "  -O, --overcommit            overcommit resources\n"
+"      --power=flags           power management options\n"
 "      --priority=value        set the priority of the job to value\n"
 "      --profile=value         enable acct_gather_profile for detailed data\n"
 "                              value is all or none or any combination of\n"
@@ -1914,10 +2030,13 @@ static void _help(void)
 "  -Q, --quiet                 quiet mode (suppress informational messages)\n"
 "      --reboot                reboot compute nodes before starting job\n"
 "  -s, --share                 share nodes with other jobs\n"
+"      --sicp                  If specified, signifies job is to receive\n"
+"                              job id from the incluster reserve range.\n"
 "      --signal=[B:]num[@time] send signal when time limit within time seconds\n"
 "      --switches=max-switches{@max-time-to-wait}\n"
 "                              Optimum switches and max time to wait for optimum\n"
 "  -S, --core-spec=cores       count of reserved cores\n"
+"      --thread-spec=threads   count of reserved threads\n"
 "  -t, --time=minutes          time limit\n"
 "      --time-min=minutes      minimum time limit (if distinct)\n"
 "      --uid=user_id           user ID to run job as (user root only)\n"
@@ -1937,7 +2056,7 @@ static void _help(void)
 "  -x, --exclude=hosts...      exclude a specific list of hosts\n"
 "\n"
 "Consumable resources related options:\n"
-"      --exclusive             allocate nodes in exclusive mode when\n"
+"      --exclusive[=user]      allocate nodes in exclusive mode when\n"
 "                              cpu consumable resource is enabled\n"
 "      --mem-per-cpu=MB        maximum amount of real memory per allocated\n"
 "                              cpu required by the job.\n"
diff --git a/src/salloc/opt.h b/src/salloc/opt.h
index 230c5afb9..32378c84a 100644
--- a/src/salloc/opt.h
+++ b/src/salloc/opt.h
@@ -163,6 +163,12 @@ typedef struct salloc_options {
 				 * Prolog and Epilog		*/
 	int spank_job_env_size;	/* size of spank_job_env	*/
 	int core_spec;		/* --core-spec=n,      -S n	*/
+	char *burst_buffer;	/* -bb				*/
+	uint32_t cpu_freq_min;  /* Minimum cpu frequency  */
+	uint32_t cpu_freq_max;  /* Maximum cpu frequency  */
+	uint32_t cpu_freq_gov;  /* cpu frequency governor */
+	uint8_t power_flags;	/* Power management options	*/
+	uint8_t sicp_mode;	/* Inter-cluster job ID		*/
 } opt_t;
 
 extern opt_t opt;
diff --git a/src/salloc/salloc.c b/src/salloc/salloc.c
index 90af8153c..20443c399 100644
--- a/src/salloc/salloc.c
+++ b/src/salloc/salloc.c
@@ -58,14 +58,16 @@
 
 #include "slurm/slurm.h"
 
+#include "src/common/cpu_frequency.h"
 #include "src/common/env.h"
+#include "src/common/plugstack.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_rlimits_info.h"
+#include "src/common/slurm_time.h"
 #include "src/common/uid.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xsignal.h"
 #include "src/common/xstring.h"
-#include "src/common/plugstack.h"
 
 #include "src/salloc/salloc.h"
 #include "src/salloc/opt.h"
@@ -420,7 +422,20 @@ int main(int argc, char *argv[])
 				     cluster_name);
 		xfree(cluster_name);
 	}
-
+	if (alloc->env_size) {	/* Used to set Burst Buffer environment */
+		char *key, *value, *tmp;
+		for (i = 0; i < alloc->env_size; i++) {
+			tmp = xstrdup(alloc->environment[i]);
+			key = tmp;
+			value = strchr(tmp, '=');
+			if (value) {
+				value[0] = '\0';
+				value++;
+				env_array_append(&env, key, value);
+			}
+			xfree(tmp);
+		}
+	}
 
 	env_array_set_environment(env);
 	env_array_free(env);
@@ -493,10 +508,10 @@ relinquish:
 	if (allocation_state != REVOKED) {
 		pthread_mutex_unlock(&allocation_state_lock);
 
-		info("Relinquishing job allocation %d", alloc->job_id);
+		info("Relinquishing job allocation %u", alloc->job_id);
 		if ((slurm_complete_job(alloc->job_id, status) != 0) &&
 		    (slurm_get_errno() != ESLURM_ALREADY_DONE))
-			error("Unable to clean up job allocation %d: %m",
+			error("Unable to clean up job allocation %u: %m",
 			      alloc->job_id);
 		pthread_mutex_lock(&allocation_state_lock);
 		allocation_state = REVOKED;
@@ -626,6 +641,11 @@ static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 	desc->reservation = xstrdup(opt.reservation);
 	desc->profile  = opt.profile;
 	desc->wckey  = xstrdup(opt.wckey);
+
+	desc->cpu_freq_min = opt.cpu_freq_min;
+	desc->cpu_freq_max = opt.cpu_freq_max;
+	desc->cpu_freq_gov = opt.cpu_freq_gov;
+
 	if (opt.req_switch >= 0)
 		desc->req_switch = opt.req_switch;
 	if (opt.wait4switch >= 0)
@@ -664,6 +684,8 @@ static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 		desc->mail_user = xstrdup(opt.mail_user);
 	if (opt.begin)
 		desc->begin_time = opt.begin;
+	if (opt.burst_buffer)
+		desc->burst_buffer = opt.burst_buffer;
 	if (opt.account)
 		desc->account = xstrdup(opt.account);
 	if (opt.acctg_freq)
@@ -757,6 +779,11 @@ static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 		desc->spank_job_env_size = opt.spank_job_env_size;
 	}
 
+	if (opt.power_flags)
+		desc->power_flags = opt.power_flags;
+	if (opt.sicp_mode)
+		desc->sicp_mode = opt.sicp_mode;
+
 	return 0;
 }
 
@@ -923,7 +950,7 @@ static void _timeout_handler(srun_timeout_msg_t *msg)
 	if (msg->timeout != last_timeout) {
 		last_timeout = msg->timeout;
 		verbose("Job allocation time limit to be reached at %s",
-			slurm_ctime(&msg->timeout));
+			slurm_ctime2(&msg->timeout));
 	}
 }
 
diff --git a/src/sattach/Makefile.in b/src/sattach/Makefile.in
index 40eb74627..622043a2f 100644
--- a/src/sattach/Makefile.in
+++ b/src/sattach/Makefile.in
@@ -101,6 +101,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -109,10 +110,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -125,7 +128,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -250,6 +253,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -299,8 +304,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -319,6 +328,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -362,6 +374,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -385,6 +398,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/sattach/sattach.c b/src/sattach/sattach.c
index 263e51c82..1570599d9 100644
--- a/src/sattach/sattach.c
+++ b/src/sattach/sattach.c
@@ -420,7 +420,7 @@ static int _attach_to_tasks(uint32_t jobid,
 	}
 
 	_handle_response_msg_list(nodes_resp, tasks_started);
-	list_destroy(nodes_resp);
+	FREE_NULL_LIST(nodes_resp);
 
 	return SLURM_SUCCESS;
 }
@@ -460,7 +460,7 @@ static message_thread_state_t *_msg_thr_create(int num_nodes, int num_tasks)
 	pthread_cond_init(&mts->cond, NULL);
 	mts->tasks_started = bit_alloc(num_tasks);
 	mts->tasks_exited = bit_alloc(num_tasks);
-	mts->msg_handle = eio_handle_create();
+	mts->msg_handle = eio_handle_create(0);
 	mts->num_resp_port = _estimate_nports(num_nodes, 48);
 	mts->resp_port = xmalloc(sizeof(uint16_t) * mts->num_resp_port);
 	for (i = 0; i < mts->num_resp_port; i++) {
@@ -577,7 +577,8 @@ static void
 _handle_msg(void *arg, slurm_msg_t *msg)
 {
 	message_thread_state_t *mts = (message_thread_state_t *)arg;
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 	static uid_t slurm_uid;
 	static bool slurm_uid_set = false;
 	uid_t uid = getuid();
diff --git a/src/sbatch/Makefile.am b/src/sbatch/Makefile.am
index e43fd7f8a..e641af415 100644
--- a/src/sbatch/Makefile.am
+++ b/src/sbatch/Makefile.am
@@ -7,7 +7,7 @@ AM_CPPFLAGS = -I$(top_srcdir)
 
 bin_PROGRAMS = sbatch
 
-sbatch_SOURCES = sbatch.c mult_cluster.c mult_cluster.h opt.c opt.h
+sbatch_SOURCES = sbatch.c opt.c opt.h
 
 convenience_libs = $(top_builddir)/src/api/libslurm.o $(DL_LIBS)
 
diff --git a/src/sbatch/Makefile.in b/src/sbatch/Makefile.in
index 23604d110..9ccd2f1e6 100644
--- a/src/sbatch/Makefile.in
+++ b/src/sbatch/Makefile.in
@@ -101,6 +101,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -109,10 +110,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -125,7 +128,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -134,8 +137,7 @@ CONFIG_CLEAN_FILES =
 CONFIG_CLEAN_VPATH_FILES =
 am__installdirs = "$(DESTDIR)$(bindir)"
 PROGRAMS = $(bin_PROGRAMS)
-am_sbatch_OBJECTS = sbatch.$(OBJEXT) mult_cluster.$(OBJEXT) \
-	opt.$(OBJEXT)
+am_sbatch_OBJECTS = sbatch.$(OBJEXT) opt.$(OBJEXT)
 sbatch_OBJECTS = $(am_sbatch_OBJECTS)
 am__DEPENDENCIES_1 =
 am__DEPENDENCIES_2 = $(top_builddir)/src/api/libslurm.o \
@@ -250,6 +252,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -299,8 +303,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -319,6 +327,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -362,6 +373,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -385,6 +397,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -449,7 +462,7 @@ top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 CLEANFILES = core.*
 AM_CPPFLAGS = -I$(top_srcdir)
-sbatch_SOURCES = sbatch.c mult_cluster.c mult_cluster.h opt.c opt.h
+sbatch_SOURCES = sbatch.c opt.c opt.h
 convenience_libs = $(top_builddir)/src/api/libslurm.o $(DL_LIBS)
 sbatch_LDADD = $(convenience_libs)
 sbatch_LDFLAGS = -export-dynamic $(CMD_LDFLAGS)
@@ -547,7 +560,6 @@ mostlyclean-compile:
 distclean-compile:
 	-rm -f *.tab.c
 
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/mult_cluster.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/opt.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sbatch.Po@am__quote@
 
diff --git a/src/sbatch/mult_cluster.c b/src/sbatch/mult_cluster.c
deleted file mode 100644
index 11da8e5ad..000000000
--- a/src/sbatch/mult_cluster.c
+++ /dev/null
@@ -1,211 +0,0 @@
-/*****************************************************************************\
- *  mult_cluster.c - definitions for sbatch to submit job to multiple clusters
- *****************************************************************************
- *  Copyright (C) 2010 Lawrence Livermore National Security.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Danny Auble <da@llnl.gov>,
- *  CODE-OCEC-09-009. All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://slurm.schedmd.com/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission
- *  to link the code of portions of this program with the OpenSSL library under
- *  certain conditions as described in each individual source file, and
- *  distribute linked combinations including the two. You must obey the GNU
- *  General Public License in all respects for all of the code used other than
- *  OpenSSL. If you modify file(s) with this exception, you may extend this
- *  exception to your version of the file(s), but you are not obligated to do
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#include "mult_cluster.h"
-#include "src/common/xmalloc.h"
-#include "src/common/xstring.h"
-#include "src/common/parse_time.h"
-#include "src/common/read_config.h"
-
-typedef struct {
-	slurmdb_cluster_rec_t *cluster_rec;
-	int preempt_cnt;
-	time_t start_time;
-} local_cluster_rec_t;
-
-static char *local_cluster_name; /* name of local_cluster      */
-
-void _destroy_local_cluster_rec(void *object)
-{
-	xfree(object);
-}
-
-static int _sort_local_cluster(void *v1, void *v2)
-{
-	local_cluster_rec_t* rec_a = *(local_cluster_rec_t**)v1;
-	local_cluster_rec_t* rec_b = *(local_cluster_rec_t**)v2;
-
-	if (rec_a->start_time < rec_b->start_time)
-		return -1;
-	else if (rec_a->start_time > rec_b->start_time)
-		return 1;
-
-	if (rec_a->preempt_cnt < rec_b->preempt_cnt)
-		return -1;
-	else if (rec_a->preempt_cnt > rec_b->preempt_cnt)
-		return 1;
-
-	if (!strcmp(local_cluster_name, rec_a->cluster_rec->name))
-		return -1;
-	else if (!strcmp(local_cluster_name, rec_b->cluster_rec->name))
-		return 1;
-
-	return 0;
-}
-
-/*
- * We don't use the api here because it does things we aren't needing
- * like printing out information and not returning times.
- */
-local_cluster_rec_t *_job_will_run (job_desc_msg_t *req)
-{
-	slurm_msg_t req_msg, resp_msg;
-	will_run_response_msg_t *will_run_resp;
-	int rc;
-	char buf[64];
-	char *type = "processors";
-	local_cluster_rec_t *local_cluster = NULL;
-	/* req.immediate = true;    implicit */
-
-	slurm_msg_t_init(&req_msg);
-	req_msg.msg_type = REQUEST_JOB_WILL_RUN;
-	req_msg.data     = req;
-
-	rc = slurm_send_recv_controller_msg(&req_msg, &resp_msg);
-
-	if (rc < 0) {
-		slurm_seterrno(SLURM_SOCKET_ERROR);
-		return NULL;
-	}
-	switch (resp_msg.msg_type) {
-	case RESPONSE_SLURM_RC:
-		rc = ((return_code_msg_t *) resp_msg.data)->return_code;
-		slurm_free_return_code_msg(resp_msg.data);
-		if (rc)
-			slurm_seterrno(rc);
-		break;
-	case RESPONSE_JOB_WILL_RUN:
-		if (working_cluster_rec->flags & CLUSTER_FLAG_BG)
-			type = "cnodes";
-		will_run_resp = (will_run_response_msg_t *) resp_msg.data;
-		slurm_make_time_str(&will_run_resp->start_time,
-				    buf, sizeof(buf));
-		debug("Job %u to start at %s on cluster %s using %u %s on %s",
-		      will_run_resp->job_id, buf, working_cluster_rec->name,
-		      will_run_resp->proc_cnt, type,
-		      will_run_resp->node_list);
-		local_cluster = xmalloc(sizeof(local_cluster_rec_t));
-		local_cluster->cluster_rec = working_cluster_rec;
-		local_cluster->start_time = will_run_resp->start_time;
-		if (will_run_resp->preemptee_job_id) {
-			local_cluster->preempt_cnt =
-				list_count(will_run_resp->preemptee_job_id);
-			if (opt.verbose >= LOG_LEVEL_DEBUG) {
-				ListIterator itr;
-				uint32_t *job_id_ptr;
-				char *job_list = NULL, *sep = "";
-				itr = list_iterator_create(will_run_resp->
-							   preemptee_job_id);
-				while ((job_id_ptr = list_next(itr))) {
-					if (job_list)
-						sep = ",";
-					xstrfmtcat(job_list, "%s%u",
-						   sep, *job_id_ptr);
-				}
-				debug("  Preempts: %s", job_list);
-				xfree(job_list);
-			}
-		}
-
-		slurm_free_will_run_response_msg(will_run_resp);
-		break;
-	default:
-		slurm_seterrno(SLURM_UNEXPECTED_MSG_ERROR);
-		return NULL;
-		break;
-	}
-	return local_cluster;
-}
-
-extern int sbatch_set_first_avail_cluster(job_desc_msg_t *req)
-{
-	int rc = SLURM_SUCCESS;
-	ListIterator itr;
-	local_cluster_rec_t *local_cluster = NULL;
-	char buf[64];
-	bool host_set = false;
-	List ret_list = NULL;
-
-	/* return if we only have 1 or less clusters here */
-	if (!opt.clusters || !list_count(opt.clusters)) {
-		return rc;
-	} else if (list_count(opt.clusters) == 1) {
-		working_cluster_rec = list_peek(opt.clusters);
-		return rc;
-	}
-
-	if ((req->alloc_node == NULL) &&
-	    (gethostname_short(buf, sizeof(buf)) == 0)) {
-		req->alloc_node = buf;
-		host_set = true;
-	}
-
-	ret_list = list_create(_destroy_local_cluster_rec);
-	itr = list_iterator_create(opt.clusters);
-	while ((working_cluster_rec = list_next(itr))) {
-		if ((local_cluster = _job_will_run(req)))
-			list_append(ret_list, local_cluster);
-		else
-			error("Problem with submit to cluster %s: %m",
-			      working_cluster_rec->name);
-	}
-	list_iterator_destroy(itr);
-
-	if (host_set)
-		req->alloc_node = NULL;
-
-	if (!list_count(ret_list)) {
-		error("Can't run on any of the clusters given");
-		rc = SLURM_ERROR;
-		goto end_it;
-	}
-
-	/* sort the list so the first spot is on top */
-	local_cluster_name = slurm_get_cluster_name();
-	list_sort(ret_list, (ListCmpF)_sort_local_cluster);
-	xfree(local_cluster_name);
-	local_cluster = list_peek(ret_list);
-
-	/* set up the working cluster and be done */
-	working_cluster_rec = local_cluster->cluster_rec;
-end_it:
-	list_destroy(ret_list);
-
-	return rc;
-}
-
diff --git a/src/sbatch/opt.c b/src/sbatch/opt.c
index 21a15b338..f658dc8b8 100644
--- a/src/sbatch/opt.c
+++ b/src/sbatch/opt.c
@@ -3,6 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2010-2015 SchedMD LLC <http://www.schedmd.com>
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>, et. al.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -74,6 +75,7 @@
 #include <sys/types.h>
 #include <sys/utsname.h>
 
+#include "src/common/cpu_frequency.h"
 #include "src/common/list.h"
 #include "src/common/log.h"
 #include "src/common/parse_time.h"
@@ -110,21 +112,27 @@
 #define OPT_ACCTG_FREQ  0x0f
 #define OPT_NO_REQUEUE  0x10
 #define OPT_REQUEUE     0x11
+#define OPT_THREAD_SPEC 0x12
 #define OPT_MEM_BIND    0x13
 #define OPT_WCKEY       0x14
 #define OPT_SIGNAL      0x15
 #define OPT_GET_USER_ENV  0x16
 #define OPT_EXPORT        0x17
-#define OPT_CLUSTERS      0x18
+/* #define OPT_CLUSTERS      0x18 */
 #define OPT_TIME_VAL      0x19
 #define OPT_CORE_SPEC     0x1a
+#define OPT_CPU_FREQ      0x1b
+#define OPT_SICP          0x1c
+#define OPT_POWER         0x1d
 #define OPT_ARRAY_INX     0x20
 #define OPT_PROFILE       0x21
 #define OPT_HINT	  0x22
 
 /* generic getopt_long flags, integers and *not* valid characters */
 #define LONG_OPT_PROPAGATE   0x100
+#define LONG_OPT_SICP        0x101
 #define LONG_OPT_MEM_BIND    0x102
+#define LONG_OPT_POWER       0x103
 #define LONG_OPT_JOBID       0x105
 #define LONG_OPT_TMP         0x106
 #define LONG_OPT_MEM         0x107
@@ -147,6 +155,7 @@
 #define LONG_OPT_WRAP        0x118
 #define LONG_OPT_REQUEUE     0x119
 #define LONG_OPT_NETWORK     0x120
+#define LONG_OPT_BURST_BUFFER    0x126
 #define LONG_OPT_QOS             0x127
 #define LONG_OPT_SOCKETSPERNODE  0x130
 #define LONG_OPT_CORESPERSOCKET  0x131
@@ -179,7 +188,10 @@
 #define LONG_OPT_IGNORE_PBS      0x155
 #define LONG_OPT_TEST_ONLY       0x156
 #define LONG_OPT_PARSABLE        0x157
+#define LONG_OPT_CPU_FREQ        0x158
+#define LONG_OPT_THREAD_SPEC     0x159
 #define LONG_OPT_PRIORITY        0x160
+#define LONG_OPT_KILL_INV_DEP    0x161
 
 /*---- global variables, defined in opt.h ----*/
 opt_t opt;
@@ -190,10 +202,6 @@ int ignore_pbs = 0;
 
 typedef struct env_vars env_vars_t;
 
-
-/* Get a decimal integer from arg */
-static int  _get_int(const char *arg, const char *what);
-
 static void  _help(void);
 
 /* fill in default options  */
@@ -306,6 +314,9 @@ static void _opt_default()
 
 	opt.ntasks = 1;
 	opt.ntasks_set = false;
+	opt.cpu_freq_min = NO_VAL;
+	opt.cpu_freq_max = NO_VAL;
+	opt.cpu_freq_gov = NO_VAL;
 	opt.cpus_per_task = 0;
 	opt.cpus_set = false;
 	opt.min_nodes = 1;
@@ -323,6 +334,8 @@ static void _opt_default()
 	opt.time_limit = NO_VAL;
 	opt.time_min = NO_VAL;
 	opt.partition = NULL;
+	opt.sicp_mode = 0;
+	opt.power_flags = 0;
 
 	opt.job_name = NULL;
 	opt.jobid    = NO_VAL;
@@ -363,7 +376,7 @@ static void _opt_default()
 	opt.nodelist	    = NULL;
 	opt.exc_nodes	    = NULL;
 
-	for (i=0; i<HIGHEST_DIMENSIONS; i++) {
+	for (i = 0; i < HIGHEST_DIMENSIONS; i++) {
 		opt.conn_type[i]    = (uint16_t) NO_VAL;
 		opt.geometry[i]	    = 0;
 	}
@@ -399,6 +412,7 @@ static void _opt_default()
 	opt.priority = 0;
 
 	opt.test_only   = false;
+	opt.kill_invalid_dep = 0;
 }
 
 /*---[ env var processing ]-----------------------------------------------*/
@@ -425,13 +439,15 @@ env_vars_t env_vars[] = {
   {"SBATCH_ARRAY_INX",     OPT_STRING,     &opt.array_inx,     NULL          },
   {"SBATCH_ACCTG_FREQ",    OPT_STRING,     &opt.acctg_freq,    NULL          },
   {"SBATCH_BLRTS_IMAGE",   OPT_STRING,     &opt.blrtsimage,    NULL          },
+  {"SBATCH_BURST_BUFFER",  OPT_STRING,     &opt.burst_buffer,  NULL          },
   {"SBATCH_CHECKPOINT",    OPT_STRING,     &opt.ckpt_interval_str, NULL      },
   {"SBATCH_CHECKPOINT_DIR",OPT_STRING,     &opt.ckpt_dir,      NULL          },
-  {"SBATCH_CLUSTERS",      OPT_CLUSTERS,   &opt.clusters,      NULL          },
-  {"SLURM_CLUSTERS",       OPT_CLUSTERS,   &opt.clusters,      NULL          },
+  {"SBATCH_CLUSTERS",      OPT_STRING,     &opt.clusters,      NULL          },
+  {"SLURM_CLUSTERS",       OPT_STRING,     &opt.clusters,      NULL          },
   {"SBATCH_CNLOAD_IMAGE",  OPT_STRING,     &opt.linuximage,    NULL          },
   {"SBATCH_CONN_TYPE",     OPT_CONN_TYPE,  NULL,               NULL          },
   {"SBATCH_CORE_SPEC",     OPT_INT,        &opt.core_spec,     NULL          },
+  {"SBATCH_CPU_FREQ_REQ",  OPT_CPU_FREQ,   NULL,               NULL          },
   {"SBATCH_DEBUG",         OPT_DEBUG,      NULL,               NULL          },
   {"SBATCH_DISTRIBUTION",  OPT_DISTRIB ,   NULL,               NULL          },
   {"SBATCH_EXCLUSIVE",     OPT_EXCLUSIVE,  NULL,               NULL          },
@@ -451,12 +467,15 @@ env_vars_t env_vars[] = {
   {"SBATCH_OPEN_MODE",     OPT_OPEN_MODE,  NULL,               NULL          },
   {"SBATCH_OVERCOMMIT",    OPT_OVERCOMMIT, NULL,               NULL          },
   {"SBATCH_PARTITION",     OPT_STRING,     &opt.partition,     NULL          },
+  {"SBATCH_POWER",         OPT_POWER,      NULL,               NULL          },
   {"SBATCH_PROFILE",       OPT_PROFILE,    NULL,               NULL          },
   {"SBATCH_QOS",           OPT_STRING,     &opt.qos,           NULL          },
   {"SBATCH_RAMDISK_IMAGE", OPT_STRING,     &opt.ramdiskimage,  NULL          },
   {"SBATCH_REQUEUE",       OPT_REQUEUE,    NULL,               NULL          },
   {"SBATCH_RESERVATION",   OPT_STRING,     &opt.reservation,   NULL          },
+  {"SBATCH_SICP",          OPT_SICP,       NULL,               NULL          },
   {"SBATCH_SIGNAL",        OPT_SIGNAL,     NULL,               NULL          },
+  {"SBATCH_THREAD_SPEC",   OPT_THREAD_SPEC,NULL,               NULL          },
   {"SBATCH_TIMELIMIT",     OPT_STRING,     &opt.time_limit_str,NULL          },
   {"SBATCH_WAIT_ALL_NODES",OPT_INT,        &opt.wait_all_nodes,NULL          },
   {"SBATCH_WCKEY",         OPT_STRING,     &opt.wckey,         NULL          },
@@ -474,7 +493,7 @@ env_vars_t env_vars[] = {
  *            environment variables. See comments above for how to
  *            extend srun to process different vars
  */
-static void _opt_env()
+static void _opt_env(void)
 {
 	char       *val = NULL;
 	env_vars_t *e   = env_vars;
@@ -502,7 +521,7 @@ _process_env_var(env_vars_t *e, const char *val)
 		*((char **) e->arg) = xstrdup(val);
 		break;
 	case OPT_INT:
-		if (val != NULL) {
+		if (val[0] != '\0') {
 			*((int *) e->arg) = (int) strtol(val, &end, 10);
 			if (!(end && *end == '\0')) {
 				error("%s=%s invalid. ignoring...",
@@ -517,7 +536,7 @@ _process_env_var(env_vars_t *e, const char *val)
 		 *  - argument is "yes"
 		 *  - argument is a non-zero number
 		 */
-		if (val == NULL || strcmp(val, "") == 0) {
+		if (val[0] == '\0') {
 			*((bool *)e->arg) = true;
 		} else if (strcasecmp(val, "yes") == 0) {
 			*((bool *)e->arg) = true;
@@ -534,7 +553,7 @@ _process_env_var(env_vars_t *e, const char *val)
 		opt.array_inx = xstrdup(val);
 
 	case OPT_DEBUG:
-		if (val != NULL) {
+		if (val[0] != '\0') {
 			opt.verbose = (int) strtol(val, &end, 10);
 			if (!(end && *end == '\0'))
 				error("%s=%s invalid", e->var, val);
@@ -592,7 +611,14 @@ _process_env_var(env_vars_t *e, const char *val)
 		break;
 
 	case OPT_EXCLUSIVE:
-		opt.shared = 0;
+		if (val[0] == '\0') {
+			opt.shared = 0;
+		} else if (!strcasecmp(val, "user")) {
+			opt.shared = 2;
+		} else {
+			error("\"%s=%s\" -- invalid value, ignoring...",
+			      e->var, val);
+		}
 		break;
 
 	case OPT_OVERCOMMIT:
@@ -632,18 +658,27 @@ _process_env_var(env_vars_t *e, const char *val)
 		else
 			opt.get_user_env_time = 0;
 		break;
-	case OPT_CLUSTERS:
-		if (!(opt.clusters = slurmdb_get_info_cluster((char *)val))) {
-			print_db_notok(val, 1);
-			exit(1);
-		}
-		break;
 	case OPT_TIME_VAL:
 		opt.wait4switch = time_str2secs(val);
 		break;
 	case OPT_PROFILE:
 		opt.profile = acct_gather_profile_from_string((char *)val);
 		break;
+	case OPT_CPU_FREQ:
+		if (cpu_freq_verify_cmdline(val, &opt.cpu_freq_min,
+				&opt.cpu_freq_max, &opt.cpu_freq_gov))
+			error("Invalid --cpu-freq argument: %s. Ignored", val);
+		break;
+	case OPT_POWER:
+		opt.power_flags = power_flags_id((char *)val);
+		break;
+	case OPT_SICP:
+		opt.sicp_mode = 1;
+		break;
+	case OPT_THREAD_SPEC:
+		opt.core_spec = parse_int("thread_spec", val, false) |
+					 CORE_SPEC_THREAD;
+		break;
 	default:
 		/* do nothing */
 		break;
@@ -673,6 +708,7 @@ static struct option long_options[] = {
 	{"input",         required_argument, 0, 'i'},
 	{"immediate",     no_argument,       0, 'I'},
 	{"job-name",      required_argument, 0, 'J'},
+	{"kill-on-invalid-dep", required_argument, 0, LONG_OPT_KILL_INV_DEP},
 	{"no-kill",       no_argument,       0, 'k'},
 	{"licenses",      required_argument, 0, 'L'},
 	{"distribution",  required_argument, 0, 'm'},
@@ -695,6 +731,7 @@ static struct option long_options[] = {
 	{"nodelist",      required_argument, 0, 'w'},
 	{"exclude",       required_argument, 0, 'x'},
 	{"acctg-freq",    required_argument, 0, LONG_OPT_ACCTG_FREQ},
+	{"bb",            required_argument, 0, LONG_OPT_BURST_BUFFER},
 	{"begin",         required_argument, 0, LONG_OPT_BEGIN},
 	{"blrts-image",   required_argument, 0, LONG_OPT_BLRTS_IMAGE},
 	{"checkpoint",    required_argument, 0, LONG_OPT_CHECKPOINT},
@@ -704,13 +741,15 @@ static struct option long_options[] = {
 	{"conn-type",     required_argument, 0, LONG_OPT_CONNTYPE},
 	{"contiguous",    no_argument,       0, LONG_OPT_CONT},
 	{"cores-per-socket", required_argument, 0, LONG_OPT_CORESPERSOCKET},
-	{"exclusive",     no_argument,       0, LONG_OPT_EXCLUSIVE},
+	{"cpu-freq",         required_argument, 0, LONG_OPT_CPU_FREQ},
+	{"exclusive",     optional_argument, 0, LONG_OPT_EXCLUSIVE},
 	{"export",        required_argument, 0, LONG_OPT_EXPORT},
 	{"export-file",   required_argument, 0, LONG_OPT_EXPORT_FILE},
 	{"get-user-env",  optional_argument, 0, LONG_OPT_GET_USER_ENV},
 	{"gres",          required_argument, 0, LONG_OPT_GRES},
 	{"gid",           required_argument, 0, LONG_OPT_GID},
 	{"hint",          required_argument, 0, LONG_OPT_HINT},
+	{"ignore-pbs",    no_argument,       0, LONG_OPT_IGNORE_PBS},
 	{"ioload-image",  required_argument, 0, LONG_OPT_RAMDISK_IMAGE},
 	{"jobid",         required_argument, 0, LONG_OPT_JOBID},
 	{"linux-image",   required_argument, 0, LONG_OPT_LINUX_IMAGE},
@@ -732,6 +771,7 @@ static struct option long_options[] = {
 	{"ntasks-per-socket",required_argument, 0, LONG_OPT_NTASKSPERSOCKET},
 	{"open-mode",     required_argument, 0, LONG_OPT_OPEN_MODE},
 	{"parsable",      optional_argument, 0, LONG_OPT_PARSABLE},
+	{"power",         required_argument, 0, LONG_OPT_POWER},
 	{"propagate",     optional_argument, 0, LONG_OPT_PROPAGATE},
 	{"profile",       required_argument, 0, LONG_OPT_PROFILE},
 	{"priority",      required_argument, 0, LONG_OPT_PRIORITY},
@@ -740,9 +780,13 @@ static struct option long_options[] = {
 	{"reboot",        no_argument,       0, LONG_OPT_REBOOT},
 	{"requeue",       no_argument,       0, LONG_OPT_REQUEUE},
 	{"reservation",   required_argument, 0, LONG_OPT_RESERVATION},
+	{"sicp",          optional_argument, 0, LONG_OPT_SICP},
 	{"signal",        required_argument, 0, LONG_OPT_SIGNAL},
 	{"sockets-per-node", required_argument, 0, LONG_OPT_SOCKETSPERNODE},
+	{"switches",      required_argument, 0, LONG_OPT_REQ_SWITCH},
 	{"tasks-per-node",required_argument, 0, LONG_OPT_NTASKSPERNODE},
+	{"test-only",     no_argument,       0, LONG_OPT_TEST_ONLY},
+	{"thread-spec",   required_argument, 0, LONG_OPT_THREAD_SPEC},
 	{"time-min",      required_argument, 0, LONG_OPT_TIME_MIN},
 	{"threads-per-core", required_argument, 0, LONG_OPT_THREADSPERCORE},
 	{"tmp",           required_argument, 0, LONG_OPT_TMP},
@@ -750,9 +794,6 @@ static struct option long_options[] = {
 	{"wait-all-nodes",required_argument, 0, LONG_OPT_WAIT_ALL_NODES},
 	{"wckey",         required_argument, 0, LONG_OPT_WCKEY},
 	{"wrap",          required_argument, 0, LONG_OPT_WRAP},
-	{"switches",      required_argument, 0, LONG_OPT_REQ_SWITCH},
-	{"ignore-pbs",    no_argument,       0, LONG_OPT_IGNORE_PBS},
-	{"test-only",     no_argument,       0, LONG_OPT_TEST_ONLY},
 	{NULL,            0,                 0, 0}
 };
 
@@ -853,7 +894,7 @@ char *process_options_first_pass(int argc, char **argv)
 		char *cmd       = opt.script_argv[0];
 		int  mode       = R_OK;
 
-		if ((fullpath = search_path(opt.cwd, cmd, true, mode))) {
+		if ((fullpath = search_path(opt.cwd, cmd, true, mode, false))) {
 			xfree(opt.script_argv[0]);
 			opt.script_argv[0] = fullpath;
 		}
@@ -888,7 +929,7 @@ int process_options_second_pass(int argc, char *argv[], const char *file,
 	if (!_opt_verify())
 		exit(error_exit);
 
-	if (opt.verbose > 3)
+	if (opt.verbose)
 		_opt_list();
 
 	return 1;
@@ -1160,8 +1201,7 @@ static void _set_options(int argc, char **argv)
 				       optz, &option_index)) != -1) {
 		switch (opt_char) {
 		case '?':
-			error("Try \"sbatch --help\" for more information");
-			exit(error_exit);
+			/* handled in process_options_first_pass() */
 			break;
 		case 'a':
 			xfree(opt.array_inx);
@@ -1192,7 +1232,8 @@ static void _set_options(int argc, char **argv)
 			break;
 		case 'c':
 			opt.cpus_set = true;
-			opt.cpus_per_task = _get_int(optarg, "cpus-per-task");
+			opt.cpus_per_task = parse_int("cpus-per-task",
+						      optarg, true);
 			break;
 		case 'C':
 			xfree(opt.constraints);
@@ -1233,8 +1274,8 @@ static void _set_options(int argc, char **argv)
 				exit(error_exit);
 			break;
 		case 'h':
-			_help();
-			exit(0);
+			/* handled in process_options_first_pass() */
+			break;
 		case 'H':
 			opt.hold = true;
 			break;
@@ -1269,18 +1310,13 @@ static void _set_options(int argc, char **argv)
 			}
 			break;
 		case 'M':
-			if (opt.clusters)
-				list_destroy(opt.clusters);
-			if (!(opt.clusters =
-			      slurmdb_get_info_cluster(optarg))) {
-				print_db_notok(optarg, 0);
-				exit(1);
-			}
+			xfree(opt.clusters);
+			opt.clusters = xstrdup(optarg);
 			break;
 		case 'n':
 			opt.ntasks_set = true;
 			opt.ntasks =
-				_get_int(optarg, "number of tasks");
+				parse_int("number of tasks", optarg, true);
 			break;
 		case 'N':
 			opt.nodes_set =
@@ -1313,7 +1349,7 @@ static void _set_options(int argc, char **argv)
 			opt.dependency = xstrdup(optarg);
 			break;
 		case 'Q':
-			opt.quiet++;
+			/* handled in process_options_first_pass() */
 			break;
 		case 'R':
 			opt.no_rotate = true;
@@ -1322,21 +1358,16 @@ static void _set_options(int argc, char **argv)
 			opt.shared = 1;
 			break;
 		case 'S':
-			opt.core_spec = _get_int(optarg, "core_spec");
+			opt.core_spec = parse_int("core_spec", optarg, false);
 			break;
 		case 't':
 			xfree(opt.time_limit_str);
 			opt.time_limit_str = xstrdup(optarg);
 			break;
 		case 'u':
-			_usage();
-			exit(0);
 		case 'v':
-			opt.verbose++;
-			break;
 		case 'V':
-			print_slurm_version();
-			exit(0);
+			/* handled in process_options_first_pass() */
 			break;
 		case 'w':
 			xfree(opt.nodelist);
@@ -1352,7 +1383,14 @@ static void _set_options(int argc, char **argv)
 			opt.contiguous = true;
 			break;
 		case LONG_OPT_EXCLUSIVE:
-			opt.shared = 0;
+			if (optarg == NULL) {
+				opt.shared = 0;
+			} else if (!strcasecmp(optarg, "user")) {
+				opt.shared = 2;
+			} else {
+				error("invalid exclusive option %s", optarg);
+				exit(error_exit);
+			}
 			break;
 		case LONG_OPT_MEM_BIND:
 			if (slurm_verify_mem_bind(optarg, &opt.mem_bind,
@@ -1360,17 +1398,17 @@ static void _set_options(int argc, char **argv)
 				exit(error_exit);
 			break;
 		case LONG_OPT_MINCPU:
-			opt.mincpus = _get_int(optarg, "mincpus");
+			opt.mincpus = parse_int("mincpus", optarg, true);
 			if (opt.mincpus < 0) {
-				error("invalid mincpus constraint %s",
-				      optarg);
+				error("invalid mincpus constraint %s", optarg);
 				exit(error_exit);
 			}
 			break;
 		case LONG_OPT_MINCORES:
 			verbose("mincores option has been deprecated, use "
 				"cores-per-socket");
-			opt.cores_per_socket = _get_int(optarg, "mincores");
+			opt.cores_per_socket = parse_int("mincores",
+							 optarg, true);
 			if (opt.cores_per_socket < 0) {
 				error("invalid mincores constraint %s",
 				      optarg);
@@ -1380,7 +1418,8 @@ static void _set_options(int argc, char **argv)
 		case LONG_OPT_MINSOCKETS:
 			verbose("minsockets option has been deprecated, use "
 				"sockets-per-node");
-			opt.sockets_per_node = _get_int(optarg, "minsockets");
+			opt.sockets_per_node = parse_int("minsockets",
+							 optarg, true);
 			if (opt.sockets_per_node < 0) {
 				error("invalid minsockets constraint %s",
 				      optarg);
@@ -1390,7 +1429,8 @@ static void _set_options(int argc, char **argv)
 		case LONG_OPT_MINTHREADS:
 			verbose("minthreads option has been deprecated, use "
 				"threads-per-core");
-			opt.threads_per_core = _get_int(optarg, "minthreads");
+			opt.threads_per_core = parse_int("minthreads",
+							 optarg, true);
 			if (opt.threads_per_core < 0) {
 				error("invalid minthreads constraint %s",
 				      optarg);
@@ -1421,7 +1461,7 @@ static void _set_options(int argc, char **argv)
 			}
 			break;
 		case LONG_OPT_JOBID:
-			opt.jobid = _get_int(optarg, "jobid");
+			opt.jobid = parse_int("jobid", optarg, true);
 			opt.jobid_set = true;
 			break;
 		case LONG_OPT_UID:
@@ -1465,6 +1505,10 @@ static void _set_options(int argc, char **argv)
 			xfree(opt.mail_user);
 			opt.mail_user = xstrdup(optarg);
 			break;
+		case LONG_OPT_BURST_BUFFER:
+			xfree(opt.burst_buffer);
+			opt.burst_buffer = xstrdup(optarg);
+			break;
 		case LONG_OPT_NICE:
 			if (optarg)
 				opt.nice = strtol(optarg, NULL, 10);
@@ -1543,21 +1587,21 @@ static void _set_options(int argc, char **argv)
 				opt.threads_per_core = NO_VAL;
 			break;
 		case LONG_OPT_NTASKSPERNODE:
-			opt.ntasks_per_node = _get_int(optarg,
-						       "ntasks-per-node");
+			opt.ntasks_per_node = parse_int("ntasks-per-node",
+							optarg, true);
 			if (opt.ntasks_per_node > 0)
 				setenvf(NULL, "SLURM_NTASKS_PER_NODE", "%d",
 					opt.ntasks_per_node);
 			break;
 		case LONG_OPT_NTASKSPERSOCKET:
-			opt.ntasks_per_socket = _get_int(optarg,
-				"ntasks-per-socket");
+			opt.ntasks_per_socket = parse_int("ntasks-per-socket",
+							  optarg, true);
 			setenvf(NULL, "SLURM_NTASKS_PER_SOCKET", "%d",
 				opt.ntasks_per_socket);
 			break;
 		case LONG_OPT_NTASKSPERCORE:
-			opt.ntasks_per_core = _get_int(optarg,
-				"ntasks-per-core");
+			opt.ntasks_per_core = parse_int("ntasks-per-core",
+							optarg, true);
 			setenvf(NULL, "SLURM_NTASKS_PER_CORE", "%d",
 				opt.ntasks_per_core);
 			break;
@@ -1684,6 +1728,12 @@ static void _set_options(int argc, char **argv)
 			xfree(opt.export_file);
 			opt.export_file = xstrdup(optarg);
 			break;
+		case LONG_OPT_CPU_FREQ:
+		        if (cpu_freq_verify_cmdline(optarg, &opt.cpu_freq_min,
+					&opt.cpu_freq_max, &opt.cpu_freq_gov))
+				error("Invalid --cpu-freq argument: %s. "
+						"Ignored", optarg);
+			break;
 		case LONG_OPT_REQ_SWITCH:
 			pos_delimit = strstr(optarg,"@");
 			if (pos_delimit != NULL) {
@@ -1691,7 +1741,7 @@ static void _set_options(int argc, char **argv)
 				pos_delimit++;
 				opt.wait4switch = time_str2secs(pos_delimit);
 			}
-			opt.req_switch = _get_int(optarg, "switches");
+			opt.req_switch = parse_int("switches", optarg, true);
 			break;
 		case LONG_OPT_IGNORE_PBS:
 			ignore_pbs = 1;
@@ -1702,6 +1752,23 @@ static void _set_options(int argc, char **argv)
 		case LONG_OPT_PARSABLE:
 			opt.parsable = true;
 			break;
+		case LONG_OPT_POWER:
+			opt.power_flags = power_flags_id(optarg);
+			break;
+		case LONG_OPT_SICP:
+			opt.sicp_mode = 1;
+			break;
+		case LONG_OPT_THREAD_SPEC:
+			opt.core_spec = parse_int("thread_spec",
+						  optarg, false) |
+					CORE_SPEC_THREAD;
+			break;
+		case LONG_OPT_KILL_INV_DEP:
+			if (strcasecmp(optarg, "yes") == 0)
+				opt.kill_invalid_dep |= KILL_INV_DEP;
+			if (strcasecmp(optarg, "no") == 0)
+				opt.kill_invalid_dep |= NO_KILL_INV_DEP;
+			break;
 		default:
 			if (spank_process_option (opt_char, optarg) < 0) {
 				error("Unrecognized command line parameter %c",
@@ -1908,9 +1975,9 @@ static char *_get_pbs_node_name(char *node_options, int *i)
 	int start = (*i);
 	char *value = NULL;
 
-	while(node_options[*i]
-	      && node_options[*i] != '+'
-	      && node_options[*i] != ':')
+	while (node_options[*i] &&
+	       (node_options[*i] != '+') &&
+	       (node_options[*i] != ':'))
 		(*i)++;
 
 	value = xmalloc((*i)-start+1);
@@ -1924,9 +1991,9 @@ static char *_get_pbs_node_name(char *node_options, int *i)
 
 static void _get_next_pbs_node_part(char *node_options, int *i)
 {
-	while(node_options[*i]
-	      && node_options[*i] != '+'
-	      && node_options[*i] != ':')
+	while (node_options[*i] &&
+	       (node_options[*i] != '+') &&
+	       (node_options[*i] != ':'))
 		(*i)++;
 	if (node_options[*i])
 		(*i)++;
@@ -1940,7 +2007,7 @@ static void _parse_pbs_nodes_opts(char *node_opts)
 	int node_cnt = 0;
 	hostlist_t hl = hostlist_create(NULL);
 
-	while(node_opts[i]) {
+	while (node_opts[i]) {
 		if (!strncmp(node_opts+i, "ppn=", 4)) {
 			i+=4;
 			ppn += strtol(node_opts+i, NULL, 10);
@@ -1986,7 +2053,7 @@ static void _parse_pbs_nodes_opts(char *node_opts)
 
 static void _get_next_pbs_option(char *pbs_options, int *i)
 {
-	while(pbs_options[*i] && pbs_options[*i] != ',')
+	while (pbs_options[*i] && pbs_options[*i] != ',')
 		(*i)++;
 	if (pbs_options[*i])
 		(*i)++;
@@ -2089,7 +2156,8 @@ static void _parse_pbs_resource_list(char *rl)
 			temp = _get_pbs_option_value(rl, &i, ':');
 			if (temp) {
 				pbs_pro_flag |= 4;
-				opt.ntasks_per_node = _get_int(temp, "mpiprocs");
+				opt.ntasks_per_node = parse_int("mpiprocs",
+								temp, true);
 				xfree(temp);
 			}
 #if defined(HAVE_ALPS_CRAY) || defined(HAVE_NATIVE_CRAY)
@@ -2102,7 +2170,8 @@ static void _parse_pbs_resource_list(char *rl)
 			i += 9;
 			temp = _get_pbs_option_value(rl, &i, ',');
 			if (temp) {
-				opt.cpus_per_task = _get_int(temp, "mppdepth");
+				opt.cpus_per_task = parse_int("mppdepth",
+							      temp, false);
 				opt.cpus_set	  = true;
 			}
 			xfree(temp);
@@ -2121,14 +2190,15 @@ static void _parse_pbs_resource_list(char *rl)
 			i += 8;
 			temp = _get_pbs_option_value(rl, &i, ',');
 			if (temp)
-				opt.ntasks_per_node = _get_int(temp, "mppnppn");
+				opt.ntasks_per_node = parse_int("mppnppn",
+								temp, true);
 			xfree(temp);
 		} else if (!strncmp(rl + i, "mppwidth=", 9)) {
 			/* Cray: task width (number of processing elements) */
 			i += 9;
 			temp = _get_pbs_option_value(rl, &i, ',');
 			if (temp) {
-				opt.ntasks     = _get_int(temp, "mppwidth");
+				opt.ntasks = parse_int("mppwidth", temp, true);
 				opt.ntasks_set = true;
 			}
 			xfree(temp);
@@ -2137,7 +2207,7 @@ static void _parse_pbs_resource_list(char *rl)
 			i += 14;
 			temp = _get_pbs_option_value(rl, &i, ',');
 			if (temp) {
-				gpus = _get_int(temp, "naccelerators");
+				gpus = parse_int("naccelerators", temp, true);
 				xfree(temp);
 			}
 		} else if (!strncasecmp(rl+i, "ncpus=", 6)) {
@@ -2145,7 +2215,7 @@ static void _parse_pbs_resource_list(char *rl)
 			temp = _get_pbs_option_value(rl, &i, ':');
 			if (temp) {
 				pbs_pro_flag |= 2;
-				opt.mincpus = _get_int(temp, "ncpus");
+				opt.mincpus = parse_int("ncpus", temp, true);
 				xfree(temp);
 			}
 		} else if (!strncmp(rl+i, "nice=", 5)) {
@@ -2214,7 +2284,7 @@ static void _parse_pbs_resource_list(char *rl)
 			temp = _get_pbs_option_value(rl, &i, ':');
 			if (temp) {
 				pbs_pro_flag |= 1;
-				opt.min_nodes = _get_int(temp, "select");
+				opt.min_nodes = parse_int("select", temp, true);
 				opt.max_nodes = opt.min_nodes;
 				opt.nodes_set = true;
 				xfree(temp);
@@ -2360,7 +2430,8 @@ static bool _opt_verify(void)
 	 * The limitations of the plane distribution in the cons_res
 	 * environment are more extensive and are documented in the
 	 * SLURM reference guide.  */
-	if (opt.distribution == SLURM_DIST_PLANE && opt.plane_size) {
+	if ((opt.distribution & SLURM_DIST_STATE_BASE) == SLURM_DIST_PLANE &&
+	    opt.plane_size) {
 		if ((opt.min_nodes <= 0) ||
 		    ((opt.ntasks/opt.plane_size) < opt.min_nodes)) {
 			if (((opt.min_nodes-1)*opt.plane_size) >= opt.ntasks) {
@@ -2388,7 +2459,7 @@ static bool _opt_verify(void)
 		error("Can't set SLURM_DISTRIBUTION env variable");
 	}
 
-	if ((opt.distribution == SLURM_DIST_PLANE) &&
+	if (((opt.distribution & SLURM_DIST_STATE_BASE) == SLURM_DIST_PLANE) &&
 	    setenvf(NULL, "SLURM_DIST_PLANESIZE", "%d", opt.plane_size)) {
 		error("Can't set SLURM_DIST_PLANESIZE env variable");
 	}
@@ -2448,7 +2519,8 @@ static bool _opt_verify(void)
 				xfree(opt.nodelist);
 				opt.nodelist = add_slash;
 			}
-			opt.distribution = SLURM_DIST_ARBITRARY;
+			opt.distribution &= SLURM_DIST_STATE_FLAGS;
+			opt.distribution |= SLURM_DIST_ARBITRARY;
 			if (!_valid_node_list(&opt.nodelist)) {
 				error("Failure getting NodeNames from "
 				      "hostfile");
@@ -2465,7 +2537,7 @@ static bool _opt_verify(void)
 
 	/* set up the proc and node counts based on the arbitrary list
 	   of nodes */
-	if ((opt.distribution == SLURM_DIST_ARBITRARY)
+	if (((opt.distribution & SLURM_DIST_STATE_BASE) == SLURM_DIST_ARBITRARY)
 	   && (!opt.nodes_set || !opt.ntasks_set)) {
 		hostlist_t hl = hostlist_create(opt.nodelist);
 		if (!opt.ntasks_set) {
@@ -2579,6 +2651,9 @@ static bool _opt_verify(void)
 #endif
 	}
 
+	cpu_freq_set_env("SLURM_CPU_FREQ_REQ",
+			opt.cpu_freq_min, opt.cpu_freq_max, opt.cpu_freq_gov);
+
 	return verified;
 }
 
@@ -2732,31 +2807,6 @@ static char *print_constraints()
 	return buf;
 }
 
-/*
- *  Get a decimal integer from arg.
- *
- *  Returns the integer on success, exits program on failure.
- *
- */
-static int
-_get_int(const char *arg, const char *what)
-{
-	char *p;
-	long int result = strtol(arg, &p, 10);
-
-	if ((*p != '\0') || (result < 0L)) {
-		error ("Invalid numeric value \"%s\" for %s.", arg, what);
-		exit(error_exit);
-	}
-
-	if (result > INT_MAX) {
-		error ("Numeric argument (%ld) to big for %s.", result, what);
-	}
-
-	return (int) result;
-}
-
-
 /*
  * Return an absolute path for the "filename".  If "filename" is already
  * an absolute path, it returns a copy.  Free the returned with xfree().
@@ -2810,7 +2860,7 @@ static void _opt_list(void)
 	info("wckey             : `%s'", opt.wckey);
 	info("distribution      : %s",
 	     format_task_dist_states(opt.distribution));
-	if (opt.distribution == SLURM_DIST_PLANE)
+	if ((opt.distribution  & SLURM_DIST_STATE_BASE) == SLURM_DIST_PLANE)
 		info("plane size        : %u", opt.plane_size);
 	info("verbose           : %d", opt.verbose);
 	info("immediate         : %s", tf_(opt.immediate));
@@ -2871,6 +2921,9 @@ static void _opt_list(void)
 	}
 	info("array             : %s",
 	     opt.array_inx == NULL ? "N/A" : opt.array_inx);
+	info("cpu_freq_min      : %u", opt.cpu_freq_min);
+	info("cpu_freq_max      : %u", opt.cpu_freq_max);
+	info("cpu_freq_gov      : %u", opt.cpu_freq_gov);
 	info("mail_type         : %s", print_mail_type(opt.mail_type));
 	info("mail_user         : %s", opt.mail_user);
 	info("sockets-per-node  : %d", opt.sockets_per_node);
@@ -2887,8 +2940,17 @@ static void _opt_list(void)
 	info("switches          : %d", opt.req_switch);
 	info("wait-for-switches : %d", opt.wait4switch);
 	str = print_commandline(opt.script_argc, opt.script_argv);
-	info("core-spec         : %d", opt.core_spec);
+	if (opt.core_spec == (uint16_t) NO_VAL)
+		info("core-spec         : NA");
+	else if (opt.core_spec & CORE_SPEC_THREAD) {
+		info("thread-spec       : %d",
+		     opt.core_spec & (~CORE_SPEC_THREAD));
+	} else
+		info("core-spec         : %d", opt.core_spec);
+	info("burst_buffer      : `%s'", opt.burst_buffer);
 	info("remote command    : `%s'", str);
+	info("power             : %s", power_flags_str(opt.power_flags));
+	info("sicp              : %u", opt.sicp_mode);
 	xfree(str);
 
 }
@@ -2925,8 +2987,9 @@ static void _usage(void)
 "              [--nodefile=file] [--nodelist=hosts] [--exclude=hosts]\n"
 "              [--network=type] [--mem-per-cpu=MB] [--qos=qos] [--gres=list]\n"
 "              [--mem_bind=...] [--reservation=name]\n"
-"              [--switches=max-switches{@max-time-to-wait}]\n"
-"              [--core-spec=cores] [--reboot]\n"
+"              [--cpu-freq=min[-max[:gov]] [--sicp] [--power=flags]\n"
+"              [--switches=max-switches{@max-time-to-wait}] [--reboot]\n"
+"              [--core-spec=cores] [--thread-spec=threads] [--bb=burst_buffer_spec]\n"
 "              [--array=index_values] [--profile=...] [--ignore-pbs]\n"
 "              [--export[=names]] [--export-file=file|fd] executable [args...]\n");
 }
@@ -2941,9 +3004,15 @@ static void _help(void)
 "Parallel run options:\n"
 "  -a, --array=indexes         job array index values\n"
 "  -A, --account=name          charge job to specified account\n"
+"      --bb=<spec>             burst buffer specifications\n"
 "      --begin=time            defer job until HH:MM MM/DD/YY\n"
-"  -c, --cpus-per-task=ncpus   number of cpus required per task\n"
+"  -M, --clusters=names        Comma separated list of clusters to issue\n"
+"                              commands to.  Default is current cluster.\n"
+"                              Name of 'all' will submit to run on all clusters.\n"
 "      --comment=name          arbitrary comment\n"
+"      --cpu-freq=min[-max[:gov]] requested cpu frequency (and governor)\n"
+"  -c, --cpus-per-task=ncpus   number of cpus required per task\n"
+
 "  -d, --dependency=type:jobid defer job until condition on jobid is satisfied\n"
 "  -D, --workdir=directory     set working directory for batch script\n"
 "  -e, --error=err             file for batch script's standard error\n"
@@ -2954,6 +3023,7 @@ static void _help(void)
 "      --gid=group_id          group ID to run job as (user root only)\n"
 "      --gres=list             required generic resources\n"
 "  -H, --hold                  submit job in held state\n"
+"      --ignore-pbs            Ignore #PBS options in the batch script\n"
 "  -i, --input=in              file for batch script's standard input\n"
 "  -I, --immediate             exit if resources are not immediately available\n"
 "      --jobid=id              run under already allocated job\n"
@@ -2962,9 +3032,7 @@ static void _help(void)
 "  -L, --licenses=names        required license, comma separated\n"
 "  -m, --distribution=type     distribution method for processes to nodes\n"
 "                              (type = block|cyclic|arbitrary)\n"
-"  -M, --clusters=names        Comma separated list of clusters to issue\n"
-"                              commands to.  Default is current cluster.\n"
-"                              Name of 'all' will submit to run on all clusters.\n"
+
 "      --mail-type=type        notify on state change: BEGIN, END, FAIL or ALL\n"
 "      --mail-user=user        who to send email notification for job state\n"
 "                              changes\n"
@@ -2978,6 +3046,7 @@ static void _help(void)
 "  -p, --partition=partition   partition requested\n"
 "      --parsable              outputs only the jobid and cluster name (if present),\n"
 "                              separated by semicolon, only on successful submission.\n"
+"      --power=flags           power management options\n"
 "      --priority=value        set the priority of the job to value\n"
 "      --profile=value         enable acct_gather_profile for detailed data\n"
 "                              value is all or none or any combination of\n"
@@ -2986,19 +3055,21 @@ static void _help(void)
 "      --qos=qos               quality of service\n"
 "  -Q, --quiet                 quiet mode (suppress informational messages)\n"
 "      --reboot                reboot compute nodes before starting job\n"
-"      --signal=[B:]num[@time] send signal when time limit within time seconds\n"
 "      --requeue               if set, permit the job to be requeued\n"
-"  -t, --time=minutes          time limit\n"
-"      --time-min=minutes      minimum time limit (if distinct)\n"
 "  -s, --share                 share nodes with other jobs\n"
 "  -S, --core-spec=cores       count of reserved cores\n"
+"      --sicp                  If specified, signifies job is to receive\n"
+"      --signal=[B:]num[@time] send signal when time limit within time seconds\n"
+"      --switches=max-switches{@max-time-to-wait}\n"
+"                              Optimum switches and max time to wait for optimum\n"
+"      --thread-spec=threads   count of reserved threads\n"
+"  -t, --time=minutes          time limit\n"
+"      --time-min=minutes      minimum time limit (if distinct)\n"
 "      --uid=user_id           user ID to run job as (user root only)\n"
 "  -v, --verbose               verbose mode (multiple -v's increase verbosity)\n"
 "      --wckey=wckey           wckey to run job under\n"
 "      --wrap[=command string] wrap commmand string in a sh script and submit\n"
-"      --switches=max-switches{@max-time-to-wait}\n"
-"                              Optimum switches and max time to wait for optimum\n"
-"      --ignore-pbs            Ignore #PBS options in the batch script\n"
+
 "\n"
 "Constraint options:\n"
 "      --contiguous            demand a contiguous range of nodes\n"
@@ -3013,7 +3084,7 @@ static void _help(void)
 "  -x, --exclude=hosts...      exclude a specific list of hosts\n"
 "\n"
 "Consumable resources related options:\n"
-"      --exclusive             allocate nodes in exclusive mode when\n"
+"      --exclusive[=user]      allocate nodes in exclusive mode when\n"
 "                              cpu consumable resource is enabled\n"
 "      --mem-per-cpu=MB        maximum amount of real memory per allocated\n"
 "                              cpu required by the job.\n"
diff --git a/src/sbatch/opt.h b/src/sbatch/opt.h
index f1b2ccc8e..5f4d40d26 100644
--- a/src/sbatch/opt.h
+++ b/src/sbatch/opt.h
@@ -3,6 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2010-2015 SchedMD LLC <http://www.schedmd.com>
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>,
  *    Christopher J. Morrone <morrone2@llnl.gov>, et. al.
@@ -59,20 +60,18 @@
 
 
 typedef struct sbatch_options {
-	List clusters; /* cluster to run this on. */
+	char *clusters;		/* cluster to run this on. */
 	char *progname;		/* argv[0] of this program or   */
 
 	/* batch script argv and argc, if provided on the command line */
 	int script_argc;
 	char **script_argv;
-
 	char *user;		/* local username		*/
 	uid_t uid;		/* local uid			*/
 	gid_t gid;		/* local gid			*/
 	uid_t euid;		/* effective user --uid=user	*/
 	gid_t egid;		/* effective group --gid=group	*/
-	char *cwd;		/* current working directory	*/
-
+ 	char *cwd;		/* current working directory	*/
 	int  ntasks;		/* --ntasks=n,      -n n	*/
 	bool ntasks_set;	/* true if ntasks explicitly set */
 	int  cpus_per_task;	/* --cpus-per-task=n, -c n	*/
@@ -82,6 +81,7 @@ typedef struct sbatch_options {
 	bool nodes_set;		/* true if nodes explicitly set */
 	int sockets_per_node;	/* --sockets-per-node=n		*/
 	int cores_per_socket;	/* --cores-per-socket=n		*/
+	uint32_t kill_invalid_dep;  /* --kill_invalid_dep           */
 	int threads_per_core;	/* --threads-per-core=n		*/
 	int ntasks_per_node;	/* --ntasks-per-node=n		*/
 	int ntasks_per_socket;	/* --ntasks-per-socket=n	*/
@@ -180,7 +180,13 @@ typedef struct sbatch_options {
 	int spank_job_env_size;	/* size of spank_job_env	*/
 	int umask;		/* job umask for PBS		*/
 	int core_spec;		/* --core-spec=n,      -S n	*/
+	uint32_t cpu_freq_min;  /* Minimum cpu frequency  */
+	uint32_t cpu_freq_max;  /* Maximum cpu frequency  */
+	uint32_t cpu_freq_gov;  /* cpu frequency governor */
 	bool test_only;		/* --test-only			*/
+	char *burst_buffer;	/* -bb				*/
+	uint8_t power_flags;	/* Power management options	*/
+	uint8_t sicp_mode;	/* Inter-cluster job ID		*/
 } opt_t;
 
 extern opt_t opt;
diff --git a/src/sbatch/sbatch.c b/src/sbatch/sbatch.c
index 5b4384b48..0940c239a 100644
--- a/src/sbatch/sbatch.c
+++ b/src/sbatch/sbatch.c
@@ -53,15 +53,16 @@
 
 #include "slurm/slurm.h"
 
+#include "src/common/cpu_frequency.h"
 #include "src/common/env.h"
 #include "src/common/plugstack.h"
+#include "src/common/proc_args.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_rlimits_info.h"
 #include "src/common/xstring.h"
 #include "src/common/xmalloc.h"
 
 #include "src/sbatch/opt.h"
-#include "src/sbatch/mult_cluster.h"
 
 #define MAX_RETRIES 15
 
@@ -159,8 +160,13 @@ int main(int argc, char *argv[])
 
 	/* If can run on multiple clusters find the earliest run time
 	 * and run it there */
-	if (sbatch_set_first_avail_cluster(&desc) != SLURM_SUCCESS)
+	if (opt.clusters &&
+	    slurmdb_get_first_avail_cluster(&desc, opt.clusters,
+			&working_cluster_rec) != SLURM_SUCCESS) {
+		print_db_notok(opt.clusters, 0);
 		exit(error_exit);
+	}
+
 
 	if (_check_cluster_specific_settings(&desc) != SLURM_SUCCESS)
 		exit(error_exit);
@@ -322,17 +328,20 @@ static int _check_cluster_specific_settings(job_desc_msg_t *req)
 		/*
 		 * Fix options and inform user, but do not abort submission.
 		 */
-		if (req->shared && req->shared != (uint16_t)NO_VAL) {
-			info("--share is not (yet) supported on Cray.");
-			req->shared = false;
+		if (req->shared && (req->shared != (uint16_t)NO_VAL)) {
+			info("--share is not supported on Cray/ALPS systems.");
+			req->shared = (uint16_t)NO_VAL;
 		}
-		if (req->overcommit && req->overcommit != (uint8_t)NO_VAL) {
-			info("--overcommit is not supported on Cray.");
+		if (req->overcommit && (req->overcommit != (uint8_t)NO_VAL)) {
+			info("--overcommit is not supported on Cray/ALPS "
+			     "systems.");
 			req->overcommit = false;
 		}
-		if (req->wait_all_nodes && req->wait_all_nodes != (uint16_t)NO_VAL) {
-			info("--wait-all-nodes is handled automatically on Cray.");
-			req->wait_all_nodes = false;
+		if (req->wait_all_nodes &&
+		    (req->wait_all_nodes != (uint16_t)NO_VAL)) {
+			info("--wait-all-nodes is handled automatically on "
+			     "Cray/ALPS systems.");
+			req->wait_all_nodes = (uint16_t)NO_VAL;
 		}
 	}
 	return rc;
@@ -397,6 +406,8 @@ static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 	desc->mail_type = opt.mail_type;
 	if (opt.mail_user)
 		desc->mail_user = xstrdup(opt.mail_user);
+	if (opt.burst_buffer)
+		desc->burst_buffer = opt.burst_buffer;
 	if (opt.begin)
 		desc->begin_time = opt.begin;
 	if (opt.account)
@@ -508,7 +519,7 @@ static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 				    "SLURM_GET_USER_ENV", "1");
 	}
 
-	if (opt.distribution == SLURM_DIST_ARBITRARY) {
+	if ((opt.distribution & SLURM_DIST_STATE_BASE) == SLURM_DIST_ARBITRARY) {
 		env_array_overwrite_fmt(&desc->environment,
 					"SLURM_ARBITRARY_NODELIST",
 					"%s", desc->req_nodes);
@@ -535,11 +546,22 @@ static int _fill_job_desc_from_opts(job_desc_msg_t *desc)
 		desc->spank_job_env      = opt.spank_job_env;
 		desc->spank_job_env_size = opt.spank_job_env_size;
 	}
+
+	desc->cpu_freq_min = opt.cpu_freq_min;
+	desc->cpu_freq_max = opt.cpu_freq_max;
+	desc->cpu_freq_gov = opt.cpu_freq_gov;
+
 	if (opt.req_switch >= 0)
 		desc->req_switch = opt.req_switch;
 	if (opt.wait4switch >= 0)
 		desc->wait4switch = opt.wait4switch;
 
+	if (opt.power_flags)
+		desc->power_flags = opt.power_flags;
+	if (opt.sicp_mode)
+		desc->sicp_mode = opt.sicp_mode;
+	if (opt.kill_invalid_dep)
+		desc->bitflags = opt.kill_invalid_dep;
 
 	return 0;
 }
diff --git a/src/sbcast/Makefile.in b/src/sbcast/Makefile.in
index 6c26cc42f..0681e79d2 100644
--- a/src/sbcast/Makefile.in
+++ b/src/sbcast/Makefile.in
@@ -103,6 +103,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -111,10 +112,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -127,7 +130,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/sbcast/agent.c b/src/sbcast/agent.c
index 13d5fbd0d..07d48287a 100644
--- a/src/sbcast/agent.c
+++ b/src/sbcast/agent.c
@@ -108,8 +108,7 @@ static void *_agent_thread(void *args)
 
 	thread_ptr->rc = rc;
 	list_iterator_destroy(itr);
-	if (ret_list)
-		list_destroy(ret_list);
+	FREE_NULL_LIST(ret_list);
 	slurm_mutex_lock(&agent_cnt_mutex);
 	agent_cnt--;
 	pthread_cond_broadcast(&agent_cnt_cond);
diff --git a/src/sbcast/opts.c b/src/sbcast/opts.c
index 23a3022ac..d6354e5a4 100644
--- a/src/sbcast/opts.c
+++ b/src/sbcast/opts.c
@@ -76,7 +76,7 @@ static void     _usage( void );
  */
 extern void parse_command_line(int argc, char *argv[])
 {
-	char *env_val = NULL;
+	char *end_ptr = NULL, *env_val = NULL;
 	int opt_char;
 	int option_index;
 	static struct option long_options[] = {
@@ -101,7 +101,8 @@ extern void parse_command_line(int argc, char *argv[])
 	if (getenv("SBCAST_FORCE"))
 		params.force = true;
 
-	params.jobid = NO_VAL;
+	params.job_id  = NO_VAL;
+	params.step_id = NO_VAL;
 
 	if (getenv("SBCAST_PRESERVE"))
 		params.preserve = true;
@@ -129,7 +130,9 @@ extern void parse_command_line(int argc, char *argv[])
 			params.fanout = atoi(optarg);
 			break;
 		case (int)'j':
-			params.jobid = atol(optarg);
+			params.job_id = strtol(optarg, &end_ptr, 10);
+			if (end_ptr[0] == '.')
+				params.step_id = strtol(end_ptr+1, NULL, 10);
 		case (int)'p':
 			params.preserve = true;
 			break;
@@ -161,14 +164,16 @@ extern void parse_command_line(int argc, char *argv[])
 		exit(1);
 	}
 
-	if (params.jobid == NO_VAL) {
+	if (params.job_id == NO_VAL) {
 		if (!(env_val = getenv("SLURM_JOB_ID"))) {
 			error("Need a job id to run this command.  "
 			      "Run from within a Slurm job or use the "
 			      "--jobid option.");
 			exit(1);
 		}
-		params.jobid = (uint32_t) atol(env_val);
+		params.job_id = strtol(env_val, &end_ptr, 10);
+		if (end_ptr[0] == '.')
+			params.step_id = strtol(end_ptr+1, NULL, 10);
 	}
 
 	params.src_fname = xstrdup(argv[optind]);
@@ -215,7 +220,10 @@ static void _print_options( void )
 	info("compress   = %s", params.compress ? "true" : "false");
 	info("force      = %s", params.force ? "true" : "false");
 	info("fanout     = %d", params.fanout);
-	info("jobid      = %u", params.jobid);
+	if (params.step_id == NO_VAL)
+		info("jobid      = %u", params.job_id);
+	else
+		info("jobid      = %u.%u", params.job_id, params.step_id);
 	info("preserve   = %s", params.preserve ? "true" : "false");
 	info("timeout    = %d", params.timeout);
 	info("verbose    = %d", params.verbose);
@@ -237,7 +245,8 @@ Usage: sbcast [OPTIONS] SOURCE DEST\n\
   -C, --compress      compress the file being transmitted\n\
   -f, --force         replace destination file as required\n\
   -F, --fanout=num    specify message fanout\n\
-  -j, --jobid=num     specify jobid, unneeded if ran inside allocation\n\
+  -j, --jobid=#[.#]   specify job ID and optional step ID, unneeded if run\n\
+                      inside allocation\n\
   -p, --preserve      preserve modes and times of source file\n\
   -s, --size=num      block size in bytes (rounded off)\n\
   -t, --timeout=secs  specify message timeout (seconds)\n\
diff --git a/src/sbcast/sbcast.c b/src/sbcast/sbcast.c
index 3035303a4..49997e650 100644
--- a/src/sbcast/sbcast.c
+++ b/src/sbcast/sbcast.c
@@ -55,13 +55,14 @@
 #include "src/common/hostlist.h"
 #include "src/common/log.h"
 #include "src/common/read_config.h"
+#include "src/sbcast/sbcast.h"
 #include "src/common/slurm_cred.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_protocol_interface.h"
+#include "src/common/slurm_time.h"
 #include "src/common/uid.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
-#include "src/sbcast/sbcast.h"
 
 /* global variables */
 int fd;					/* source file descriptor */
@@ -107,9 +108,9 @@ int main(int argc, char *argv[])
 	verbose("modes    = %o", (unsigned int) f_stat.st_mode);
 	verbose("uid      = %d", (int) f_stat.st_uid);
 	verbose("gid      = %d", (int) f_stat.st_gid);
-	verbose("atime    = %s", slurm_ctime(&f_stat.st_atime));
-	verbose("mtime    = %s", slurm_ctime(&f_stat.st_mtime));
-	verbose("ctime    = %s", slurm_ctime(&f_stat.st_ctime));
+	verbose("atime    = %s", slurm_ctime2(&f_stat.st_atime));
+	verbose("mtime    = %s", slurm_ctime2(&f_stat.st_mtime));
+	verbose("ctime    = %s", slurm_ctime2(&f_stat.st_ctime));
 	verbose("size     = %ld", (long) f_stat.st_size);
 	verbose("-----------------------------");
 
@@ -126,16 +127,25 @@ int main(int argc, char *argv[])
 /* get details about this slurm job: jobid and allocated node */
 static void _get_job_info(void)
 {
-	xassert(params.jobid != NO_VAL);
-
-	verbose("jobid      = %u", params.jobid);
-
-	if (slurm_sbcast_lookup(params.jobid, &sbcast_cred) != SLURM_SUCCESS) {
-		error("Slurm jobid %u lookup error: %s",
-		      params.jobid, slurm_strerror(slurm_get_errno()));
+	xassert(params.job_id != NO_VAL);
+
+	if (slurm_sbcast_lookup(params.job_id, params.step_id, &sbcast_cred)
+	    != SLURM_SUCCESS) {
+		if (params.step_id == NO_VAL) {
+			error("Slurm job ID %u lookup error: %s",
+			      params.job_id, slurm_strerror(slurm_get_errno()));
+		} else {
+			error("Slurm step ID %u.%u lookup error: %s",
+			      params.job_id, params.step_id,
+			      slurm_strerror(slurm_get_errno()));
+		}
 		exit(1);
 	}
 
+	if (params.step_id == NO_VAL)
+		verbose("jobid      = %u", params.job_id);
+	else
+		verbose("jobid      = %u.%u", params.job_id, params.step_id);
 	verbose("node_cnt   = %u", sbcast_cred->node_cnt);
 	verbose("node_list  = %s", sbcast_cred->node_list);
 	/* also see sbcast_cred->node_addr (array) */
diff --git a/src/sbcast/sbcast.h b/src/sbcast/sbcast.h
index fcff04c89..216fa6631 100644
--- a/src/sbcast/sbcast.h
+++ b/src/sbcast/sbcast.h
@@ -53,7 +53,8 @@ struct sbcast_parameters {
 	bool compress;
 	int  fanout;
 	bool force;
-	uint32_t jobid;
+	uint32_t job_id;
+	uint32_t step_id;
 	bool preserve;
 	int  timeout;
 	int  verbose;
diff --git a/src/scancel/Makefile.in b/src/scancel/Makefile.in
index 06b26a2b5..1f6d438bc 100644
--- a/src/scancel/Makefile.in
+++ b/src/scancel/Makefile.in
@@ -102,6 +102,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -110,10 +111,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -126,7 +129,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -250,6 +253,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -299,8 +304,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -319,6 +328,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -362,6 +374,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -385,6 +398,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/scancel/opt.c b/src/scancel/opt.c
index 45474bb59..32082c5e2 100644
--- a/src/scancel/opt.c
+++ b/src/scancel/opt.c
@@ -3,6 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
+ *  Copyright (C) 2010-2015 SchedMD LLC.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>, et. al.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -121,7 +122,7 @@ static bool _opt_verify(void);
 static void _xlate_job_step_ids(char **rest);
 
 /* translate job state name to number */
-static uint16_t _xlate_state_name(const char *state_name, bool env_var);
+static uint32_t _xlate_state_name(const char *state_name, bool env_var);
 
 /* translate name name to number */
 static uint16_t _xlate_signal_name(const char *signal_name);
@@ -151,14 +152,11 @@ int initialize_and_process_args(int argc, char *argv[])
 
 }
 
-/* has_default_opt()
- *
- * No getopt() options were specified, only the
+/*
+ * No job filtering options were specified (e.g. by user or state), only the
  * job ids is on the command line.
- *
  */
-bool
-has_default_opt(void)
+extern bool has_default_opt(void)
 {
 	if (opt.account == NULL
 	    && opt.batch == false
@@ -167,7 +165,7 @@ has_default_opt(void)
 	    && opt.partition == NULL
 	    && opt.qos == NULL
 	    && opt.reservation == NULL
-	    && opt.signal == (uint16_t) - 1
+	    && opt.signal == (uint16_t) NO_VAL
 	    && opt.state == JOB_END
 	    && opt.user_id == 0
 	    && opt.user_name == NULL
@@ -190,13 +188,13 @@ extern bool has_job_steps(void)
 	return false;
 }
 
-static uint16_t
+static uint32_t
 _xlate_state_name(const char *state_name, bool env_var)
 {
-	int i = job_state_num(state_name);
+	uint32_t i = job_state_num(state_name);
 
 	if (i >= 0)
-		return (uint16_t) i;
+		return i;
 
 	if (env_var) {
 		fprintf(stderr, "Unrecognized SCANCEL_STATE value: %s\n",
@@ -268,7 +266,7 @@ static void _opt_default(void)
 	opt.partition	= NULL;
 	opt.qos		= NULL;
 	opt.reservation	= NULL;
-	opt.signal	= (uint16_t)-1; /* no signal specified */
+	opt.signal	= (uint16_t) NO_VAL;
 	opt.state	= JOB_END;
 	opt.user_id	= 0;
 	opt.user_name	= NULL;
@@ -390,8 +388,8 @@ static void _opt_args(int argc, char **argv)
 		{NULL,          0,                 0, 0}
 	};
 
-	while((opt_char = getopt_long(argc, argv, "A:biM:n:p:Qq:R:s:t:u:vVw:",
-				      long_options, &option_index)) != -1) {
+	while ((opt_char = getopt_long(argc, argv, "A:biM:n:p:Qq:R:s:t:u:vVw:",
+				       long_options, &option_index)) != -1) {
 		switch (opt_char) {
 		case (int)'?':
 			fprintf(stderr,
@@ -413,8 +411,7 @@ static void _opt_args(int argc, char **argv)
 			break;
 		case (int)'M':
 			opt.ctld = true;
-			if (opt.clusters)
-				list_destroy(opt.clusters);
+			FREE_NULL_LIST(opt.clusters);
 			opt.clusters = slurmdb_get_info_cluster(optarg);
 			if (!opt.clusters) {
 				print_db_notok(optarg, 0);
@@ -531,7 +528,10 @@ _xlate_job_step_ids(char **rest)
 			hostlist_destroy(hl);
 			end_char[1] = save_char;
 			/* No step ID support for job array range */
-			break;
+			continue;
+		} else if ((next_str[0] == '_') && (next_str[1] == '*')) {
+			opt.array_id[buf_offset] = INFINITE;
+			next_str += 2;
 		} else if (next_str[0] == '_') {
 			tmp_l = strtol(&next_str[1], &next_str, 10);
 			if (tmp_l < 0) {
@@ -612,15 +612,39 @@ static void _opt_list(void)
 	info("partition      : %s", opt.partition);
 	info("qos            : %s", opt.qos);
 	info("reservation    : %s", opt.reservation);
-	info("signal         : %u", opt.signal);
+	if (opt.signal != (uint16_t) NO_VAL)
+		info("signal         : %u", opt.signal);
 	info("state          : %s", job_state_string(opt.state));
 	info("user_id        : %u", opt.user_id);
 	info("user_name      : %s", opt.user_name);
 	info("verbose        : %d", opt.verbose);
 	info("wckey          : %s", opt.wckey);
 
-	for (i=0; i<opt.job_cnt; i++) {
-		info("job_steps      : %u.%u ", opt.job_id[i], opt.step_id[i]);
+	for (i = 0; i < opt.job_cnt; i++) {
+		if (opt.step_id[i] == SLURM_BATCH_SCRIPT) {
+			if (opt.array_id[i] == NO_VAL) {
+				info("job_id[%d]      : %u",
+				     i, opt.job_id[i]);
+			} else if (opt.array_id[i] == INFINITE) {
+				info("job_id[%d]      : %u_*",
+				     i, opt.job_id[i]);
+			} else {
+				info("job_id[%d]      : %u_%u",
+				     i, opt.job_id[i], opt.array_id[i]);
+			}
+		} else {
+			if (opt.array_id[i] == NO_VAL) {
+				info("job_step_id[%d] : %u.%u",
+				     i, opt.job_id[i], opt.step_id[i]);
+			} else if (opt.array_id[i] == INFINITE) {
+				info("job_step_id[%d] : %u_*.%u",
+				     i, opt.job_id[i], opt.step_id[i]);
+			} else {
+				info("job_step_id[%d] : %u_%u.%u",
+				     i, opt.job_id[i], opt.array_id[i],
+				     opt.step_id[i]);
+			}
+		}
 	}
 }
 
diff --git a/src/scancel/scancel.c b/src/scancel/scancel.c
index a0a3a8515..a4d7dc226 100644
--- a/src/scancel/scancel.c
+++ b/src/scancel/scancel.c
@@ -3,7 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
- *  Copyright (C) 2010-2013 SchedMD LLC.
+ *  Copyright (C) 2010-2015 SchedMD LLC.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
@@ -73,27 +73,30 @@
 #define MAX_THREADS 20
 
 
-static void  _cancel_jobs (int filter_cnt);
+static int   _cancel_jobs (int filter_cnt);
 static void *_cancel_job_id (void *cancel_info);
 static void *_cancel_step_id (void *cancel_info);
 
-static int  _confirmation (int i, uint32_t step_id);
+static int  _confirmation(job_info_t *job_ptr, uint32_t step_id);
 static int  _filter_job_records (void);
 static void _load_job_records (void);
 static int  _multi_cluster(List clusters);
 static int  _proc_cluster(void);
-static int  _verify_job_ids (void);
 static int  _signal_job_by_str(void);
+static int  _verify_job_ids(void);
 
 static job_info_msg_t * job_buffer_ptr = NULL;
 
 typedef struct job_cancel_info {
-	uint32_t job_id;
 	uint32_t array_job_id;
 	uint32_t array_task_id;
 	bool     array_flag;
+/* Note: Either set job_id_str OR job_id */
+	char *   job_id_str;
+	uint32_t job_id;
 	uint32_t step_id;
 	uint16_t sig;
+	int    * rc;
 	int             *num_active_threads;
 	pthread_mutex_t *num_active_threads_lock;
 	pthread_cond_t  *num_active_threads_cond;
@@ -158,7 +161,6 @@ _proc_cluster(void)
 	_load_job_records();
 	rc = _verify_job_ids();
 	if ((opt.account) ||
-	    (opt.interactive) ||
 	    (opt.job_name) ||
 	    (opt.nodelist) ||
 	    (opt.partition) ||
@@ -169,7 +171,7 @@ _proc_cluster(void)
 	    (opt.wckey)) {
 		filter_cnt = _filter_job_records();
 	}
-	_cancel_jobs(filter_cnt);
+	rc = MAX(_cancel_jobs(filter_cnt), rc);
 	slurm_free_job_info_msg(job_buffer_ptr);
 
 	return rc;
@@ -183,6 +185,9 @@ _load_job_records (void)
 {
 	int error_code;
 
+	/* We need the fill job array string representation for identifying
+	 * and killing job arrays */
+	setenv("SLURM_BITSTR_LEN", "0", 1);
 	error_code = slurm_load_jobs ((time_t) NULL, &job_buffer_ptr, 1);
 
 	if (error_code) {
@@ -191,156 +196,176 @@ _load_job_records (void)
 	}
 }
 
-static bool
-_match_job(int opt_inx, int job_inx)
+static bool _is_task_in_job(job_info_t *job_ptr, int array_id)
 {
-	job_info_t *job_ptr = job_buffer_ptr->job_array;
+	int len;
 
-	job_ptr += job_inx;
-	if (opt.array_id[opt_inx] == NO_VAL) {
-		if ((opt.step_id[opt_inx] != SLURM_BATCH_SCRIPT) &&
-		    (!IS_JOB_RUNNING(job_ptr)))
-			return false;
+	if (job_ptr->array_task_id == array_id)
+		return true;
 
-		if ((opt.job_id[opt_inx] == job_ptr->job_id) ||
-		    (opt.job_id[opt_inx] == job_ptr->array_job_id))
-			return true;
-	} else {
-		if ((opt.array_id[opt_inx] == job_ptr->array_task_id) &&
-		    (opt.job_id[opt_inx]   == job_ptr->array_job_id))
-			return true;
-	}
-	return false;
+	if (!job_ptr->array_bitmap)
+		return false;
+	len = bit_size((bitstr_t *)job_ptr->array_bitmap);
+	if (len <= array_id)
+		return false;
+	return (bit_test((bitstr_t *)job_ptr->array_bitmap, array_id));
 }
 
-static int
-_verify_job_ids (void)
+static int _verify_job_ids(void)
 {
-	/* If a list of jobs was given, make sure each job is actually in
-         * our list of job records. */
-	int i, j;
-	job_info_t *job_ptr = job_buffer_ptr->job_array;
-	int rc = 0;
+	job_info_t *job_ptr;
+	int i, j, rc = 0;
 
-	for (j = 0; j < opt.job_cnt; j++ ) {
-		job_info_t *jp;
+	if (opt.job_cnt == 0)
+		return rc;
 
-		for (i = 0; i < job_buffer_ptr->record_count; i++) {
-			if (_match_job(j, i))
-				break;
-		}
-		jp = &job_ptr[i];
-		if ((i >= job_buffer_ptr->record_count) ||
-		    IS_JOB_FINISHED(jp)) {
-			if (opt.verbose < 0) {
-				;
-			} else if ((opt.array_id[j] == NO_VAL) &&
-				   (opt.step_id[j] == SLURM_BATCH_SCRIPT)) {
-				error("Kill job error on job id %u: %s",
-				      opt.job_id[j],
-				      slurm_strerror(ESLURM_INVALID_JOB_ID));
-			} else if (opt.array_id[j] == NO_VAL) {
-				error("Kill job error on job step id %u.%u: %s",
-				      opt.job_id[j], opt.step_id[j],
-				      slurm_strerror(ESLURM_INVALID_JOB_ID));
-			} else if (opt.step_id[j] == SLURM_BATCH_SCRIPT) {
-				error("Kill job error on job id %u_%u: %s",
-				      opt.job_id[j], opt.array_id[j],
-				      slurm_strerror(ESLURM_INVALID_JOB_ID));
-			} else {
-				error("Kill job error on job step id %u_%u.%u: %s",
-				      opt.job_id[j], opt.array_id[j],
-				      opt.step_id[j],
-				      slurm_strerror(ESLURM_INVALID_JOB_ID));
+	opt.job_found = xmalloc(sizeof(bool) * opt.job_cnt);
+	opt.job_pend  = xmalloc(sizeof(bool) * opt.job_cnt);
+	job_ptr = job_buffer_ptr->job_array;
+	for (i = 0; i < job_buffer_ptr->record_count; i++, job_ptr++) {
+		/* NOTE: We re-use the job's "assoc_id" value as a flag to
+		 * record if the job is referenced in the job list supplied
+		 * by the user. */
+		job_ptr->assoc_id = 0;
+		if (IS_JOB_FINISHED(job_ptr))
+			job_ptr->job_id = 0;
+		if (job_ptr->job_id == 0)
+			continue;
+
+		for (j = 0; j < opt.job_cnt; j++) {
+			if (opt.array_id[j] == NO_VAL) {
+				if ((opt.job_id[j] == job_ptr->job_id) ||
+				    ((opt.job_id[j] == job_ptr->array_job_id) &&
+				     (opt.step_id[j] == SLURM_BATCH_SCRIPT))) {
+					opt.job_found[j] = true;
+				}
+			} else if (opt.array_id[j] == INFINITE) {
+				if (opt.job_id[j] == job_ptr->array_job_id) {
+					opt.job_found[j] = true;
+				}
+			} else if (opt.job_id[j] != job_ptr->array_job_id) {
+				continue;
+			} else if (_is_task_in_job(job_ptr, opt.array_id[j])) {
+				opt.job_found[j] = true;
+			}
+			if (opt.job_found[j]) {
+				if (IS_JOB_PENDING(job_ptr))
+					opt.job_pend[j] = true;
+				job_ptr->assoc_id = 1;
 			}
+		}
+		if (job_ptr->assoc_id == 0)
+			job_ptr->job_id = 0;
+	}
+
+	for (j = 0; j < opt.job_cnt; j++) {
+		char *job_id_str = NULL;
+		if (!opt.job_found[j])
 			rc = 1;
+		else
+			continue;
+
+		if (opt.verbose < 0) {
+			;
+		} else if (opt.array_id[j] == NO_VAL) {
+			xstrfmtcat(job_id_str, "%u", opt.job_id[j]);
+		} else if (opt.array_id[j] == INFINITE) {
+			xstrfmtcat(job_id_str, "%u_*", opt.job_id[j]);
+		} else {
+			xstrfmtcat(job_id_str, "%u_%u", opt.job_id[j],
+				   opt.array_id[j]);
 		}
+
+		if (opt.verbose < 0) {
+			;
+		} else if (opt.step_id[j] == SLURM_BATCH_SCRIPT) {
+			error("Kill job error on job id %s: %s",
+			      job_id_str,
+			      slurm_strerror(ESLURM_INVALID_JOB_ID));
+		} else {
+			error("Kill job error on job step id %s.%u: %s",
+			      job_id_str, opt.step_id[j],
+			      slurm_strerror(ESLURM_INVALID_JOB_ID));
+		}
+		xfree(job_id_str);
+
+		/* Avoid this job in the cancel_job logic */
+		opt.job_id[j] = 0;
 	}
 
 	return rc;
 }
 
-/* variant of strcmp() that handles NULL input */
-static int _strcmp(char *s1, char *s2)
-{
-	if (s1 && s2)
-		return strcmp(s1, s2);
-	if (s1)
-		return 1;
-	if (s2)
-		return -1;
-	return 0;	/* both NULL */
-}
-
 /* _filter_job_records - filtering job information per user specification
  * RET Count of job's filtered out OTHER than for job ID value */
-static int
-_filter_job_records (void)
+static int _filter_job_records (void)
 {
 	int filter_cnt = 0;
-	int i, j;
+	int i;
 	job_info_t *job_ptr = NULL;
-	uint16_t job_base_state;
+	uint32_t job_base_state;
 
-	job_ptr = job_buffer_ptr->job_array ;
-	for (i = 0; i < job_buffer_ptr->record_count; i++) {
-		if (job_ptr[i].job_id == 0)
+	job_ptr = job_buffer_ptr->job_array;
+	for (i = 0; i < job_buffer_ptr->record_count; i++, job_ptr++) {
+		if (IS_JOB_FINISHED(job_ptr))
+			job_ptr->job_id = 0;
+		if (job_ptr->job_id == 0)
 			continue;
 
-		job_base_state = job_ptr[i].job_state & JOB_STATE_BASE;
+		job_base_state = job_ptr->job_state & JOB_STATE_BASE;
 		if ((job_base_state != JOB_PENDING) &&
 		    (job_base_state != JOB_RUNNING) &&
 		    (job_base_state != JOB_SUSPENDED)) {
-			job_ptr[i].job_id = 0;
+			job_ptr->job_id = 0;
 			filter_cnt++;
 			continue;
 		}
 
 		if (opt.account != NULL &&
-		    _strcmp(job_ptr[i].account, opt.account)) {
-			job_ptr[i].job_id = 0;
+		    xstrcmp(job_ptr->account, opt.account)) {
+			job_ptr->job_id = 0;
 			filter_cnt++;
 			continue;
 		}
 
 		if (opt.job_name != NULL &&
-		    _strcmp(job_ptr[i].name, opt.job_name)) {
-			job_ptr[i].job_id = 0;
+		    xstrcmp(job_ptr->name, opt.job_name)) {
+			job_ptr->job_id = 0;
 			filter_cnt++;
 			continue;
 		}
 
 		if ((opt.partition != NULL) &&
-		    _strcmp(job_ptr[i].partition,opt.partition)) {
-			job_ptr[i].job_id = 0;
+		    xstrcmp(job_ptr->partition, opt.partition)) {
+			job_ptr->job_id = 0;
 			filter_cnt++;
 			continue;
 		}
 
 		if ((opt.qos != NULL) &&
-		    _strcmp(job_ptr[i].qos, opt.qos)) {
-			job_ptr[i].job_id = 0;
+		    xstrcmp(job_ptr->qos, opt.qos)) {
+			job_ptr->job_id = 0;
 			filter_cnt++;
 			continue;
 		}
 
 		if ((opt.reservation != NULL) &&
-		    _strcmp(job_ptr[i].resv_name, opt.reservation)) {
-			job_ptr[i].job_id = 0;
+		    xstrcmp(job_ptr->resv_name, opt.reservation)) {
+			job_ptr->job_id = 0;
 			filter_cnt++;
 			continue;
 		}
 
 		if ((opt.state != JOB_END) &&
-		    (job_ptr[i].job_state != opt.state)) {
-			job_ptr[i].job_id = 0;
+		    (job_ptr->job_state != opt.state)) {
+			job_ptr->job_id = 0;
 			filter_cnt++;
 			continue;
 		}
 
 		if ((opt.user_name != NULL) &&
-		    (job_ptr[i].user_id != opt.user_id)) {
-			job_ptr[i].job_id = 0;
+		    (job_ptr->user_id != opt.user_id)) {
+			job_ptr->job_id = 0;
 			filter_cnt++;
 			continue;
 		}
@@ -357,9 +382,9 @@ _filter_job_records (void)
 				}
 			}
 
-			hostset_t hs = hostset_create(job_ptr[i].nodes);
+			hostset_t hs = hostset_create(job_ptr->nodes);
 			if (!hostset_intersects(hs, opt.nodelist)) {
-				job_ptr[i].job_id = 0;
+				job_ptr->job_id = 0;
 				filter_cnt++;
 				hostset_destroy(hs);
 				continue;
@@ -369,7 +394,7 @@ _filter_job_records (void)
 		}
 
 		if (opt.wckey != NULL) {
-			char *job_key = job_ptr[i].wckey;
+			char *job_key = job_ptr->wckey;
 
 			/*
 			 * A wckey that begins with '*' indicates that the wckey
@@ -380,155 +405,213 @@ _filter_job_records (void)
 			if ((opt.wckey[0] != '*') && (job_key[0] == '*'))
 				job_key++;
 
-			if (strcmp(job_key, opt.wckey) != 0) {
-				job_ptr[i].job_id = 0;
+			if (xstrcmp(job_key, opt.wckey) != 0) {
+				job_ptr->job_id = 0;
 				filter_cnt++;
 				continue;
 			}
 		}
+	}
 
-		if (opt.job_cnt == 0)
-			continue;
+	return filter_cnt;
+}
 
-		for (j = 0; j < opt.job_cnt; j++) {
-			if (_match_job(j, i))
-				break;
-		}
-		if (j >= opt.job_cnt) { /* not found */
-			job_ptr[i].job_id = 0;
-			continue;
-		}
+static char *_build_jobid_str(job_info_t *job_ptr)
+{
+	char *result = NULL;
+
+	if (job_ptr->array_task_str) {
+		xstrfmtcat(result, "%u_[%s]",
+			   job_ptr->array_job_id, job_ptr->array_task_str);
+	} else if (job_ptr->array_task_id != NO_VAL) {
+		xstrfmtcat(result, "%u_%u",
+			   job_ptr->array_job_id, job_ptr->array_task_id);
+	} else {
+		xstrfmtcat(result, "%u", job_ptr->job_id);
 	}
 
-	return filter_cnt;
+	return result;
 }
 
-static void
-_cancel_jobs_by_state(uint16_t job_state, int filter_cnt)
+static void _cancel_jobid_by_state(uint32_t job_state, int filter_cnt, int *rc)
 {
-	int i, j, err;
 	job_cancel_info_t *cancel_info;
-	job_info_t *job_ptr = job_buffer_ptr->job_array;
-	pthread_t  dummy;
+	job_info_t *job_ptr;
+	pthread_t dummy;
+	int err, i, j;
 
-	/* Spawn a thread to cancel each job or job step marked for
-	 * cancellation */
-	for (i = 0; i < job_buffer_ptr->record_count; i++) {
-		if (job_ptr[i].job_id == 0)
-			continue;
+	if (opt.job_cnt == 0)
+		return;
 
-		if ((job_state < JOB_END) &&
-		    (job_ptr[i].job_state != job_state))
+	for (j = 0; j < opt.job_cnt; j++) {
+		if (opt.job_id[j] == 0)
+			continue;
+		if ((job_state == JOB_PENDING) && !opt.job_pend[j])
 			continue;
 
-		/* If cancelling a list of jobs, see if the current job
-		 * included a step id */
-		if (opt.job_cnt) {
-			for (j = 0; j < opt.job_cnt; j++ ) {
-				if (!_match_job(j, i))
-					continue;
-
-				if (opt.interactive &&
-				    (_confirmation(i, opt.step_id[j]) == 0))
-					continue;
-
-				cancel_info =
-					(job_cancel_info_t *)
-					xmalloc(sizeof(job_cancel_info_t));
-				cancel_info->sig     = opt.signal;
-				cancel_info->num_active_threads =
-					&num_active_threads;
-				cancel_info->num_active_threads_lock =
-					&num_active_threads_lock;
-				cancel_info->num_active_threads_cond =
-					&num_active_threads_cond;
-
-				if ((!opt.interactive) && (filter_cnt == 0) &&
-				    (opt.array_id[j] == NO_VAL) &&
-				    (opt.job_id[j] == job_ptr[i].array_job_id)&&
-				    (opt.step_id[j] == SLURM_BATCH_SCRIPT)) {
-					opt.job_id[j] = NO_VAL; /* !match_job */
-					cancel_info->array_flag = true;
-					cancel_info->job_id =
-						job_ptr[i].array_job_id;
-				} else {
-					cancel_info->array_flag = false;
-					cancel_info->job_id  =
-						job_ptr[i].job_id;
-					cancel_info->array_job_id  =
-						job_ptr[i].array_job_id;
-					cancel_info->array_task_id =
-						job_ptr[i].array_task_id;
-				}
+		job_ptr = job_buffer_ptr->job_array;
+		for (i = 0; i < job_buffer_ptr->record_count; i++, job_ptr++) {
+			if (IS_JOB_FINISHED(job_ptr))
+				job_ptr->job_id = 0;
+			if (job_ptr->job_id == 0)
+				continue;
+			if ((opt.step_id[j] != SLURM_BATCH_SCRIPT) &&
+			    IS_JOB_PENDING(job_ptr)) {
+				/* User specified #.# for step, but the job ID
+				 * may be job array leader with part of job
+				 * array running with other tasks pending */
+				continue;
+			}
 
-				pthread_mutex_lock(&num_active_threads_lock);
-				num_active_threads++;
-				while (num_active_threads > MAX_THREADS) {
-					pthread_cond_wait(
-						&num_active_threads_cond,
-						&num_active_threads_lock);
+			opt.job_found[j] = false;
+			if (opt.array_id[j] == NO_VAL) {
+				if ((opt.job_id[j] == job_ptr->job_id) ||
+				    ((opt.job_id[j] == job_ptr->array_job_id) &&
+				     (opt.step_id[j] == SLURM_BATCH_SCRIPT))) {
+					opt.job_found[j] = true;
 				}
-				pthread_mutex_unlock(&num_active_threads_lock);
-
-				if (opt.step_id[j] == SLURM_BATCH_SCRIPT) {
-					err = pthread_create(&dummy, &attr,
-							     _cancel_job_id,
-							     cancel_info);
-					if (err)
-						_cancel_job_id(cancel_info);
-					break;
-				} else {
-					cancel_info->step_id = opt.step_id[j];
-					err = pthread_create(&dummy, &attr,
-							     _cancel_step_id,
-							     cancel_info);
-					if (err)
-						_cancel_step_id(cancel_info);
-					/* Don't break here.  Keep looping in
-					 * case other steps from the same job
-					 * are cancelled. */
+			} else if (opt.array_id[j] == INFINITE) {
+				if (opt.job_id[j] == job_ptr->array_job_id) {
+					opt.job_found[j] = true;
 				}
+			} else if (opt.job_id[j] != job_ptr->array_job_id) {
+				continue;
+			} else if (_is_task_in_job(job_ptr, opt.array_id[j])) {
+				opt.job_found[j] = true;
 			}
-		} else {
+
+			if (!opt.job_found[j])
+				continue;
+
 			if (opt.interactive &&
-			    (_confirmation(i, SLURM_BATCH_SCRIPT) == 0))
+			    (_confirmation(job_ptr, opt.step_id[j]) == 0)) {
+				job_ptr->job_id = 0;	/* Don't check again */
 				continue;
+			}
+
+			pthread_mutex_lock(&num_active_threads_lock);
+			num_active_threads++;
+			while (num_active_threads > MAX_THREADS) {
+				pthread_cond_wait(&num_active_threads_cond,
+						  &num_active_threads_lock);
+			}
+			pthread_mutex_unlock(&num_active_threads_lock);
 
 			cancel_info = (job_cancel_info_t *)
-				xmalloc(sizeof(job_cancel_info_t));
-			cancel_info->job_id  = job_ptr[i].job_id;
+				      xmalloc(sizeof(job_cancel_info_t));
+			cancel_info->rc      = rc;
 			cancel_info->sig     = opt.signal;
 			cancel_info->num_active_threads = &num_active_threads;
 			cancel_info->num_active_threads_lock =
-				&num_active_threads_lock;
+					&num_active_threads_lock;
 			cancel_info->num_active_threads_cond =
-				&num_active_threads_cond;
+					&num_active_threads_cond;
+			if (opt.step_id[j] == SLURM_BATCH_SCRIPT) {
+				cancel_info->job_id_str =
+					_build_jobid_str(job_ptr);
+				err = pthread_create(&dummy, &attr,
+						     _cancel_job_id,
+						     cancel_info);
+				if (err)  /* Run in-line as needed */
+					_cancel_job_id(cancel_info);
+			} else {
+				cancel_info->job_id = job_ptr->job_id;
+				cancel_info->step_id = opt.step_id[j];
+				err = pthread_create(&dummy, &attr,
+						     _cancel_step_id,
+						     cancel_info);
+				if (err)  /* Run in-line as needed */
+					_cancel_step_id(cancel_info);
+			}
+			job_ptr->job_id = 0;
 
-			cancel_info->array_job_id  = 0;
-			cancel_info->array_task_id = NO_VAL;
-			cancel_info->array_flag    = false;
+			if (opt.interactive) {
+				/* Print any error message for first job before
+				 * starting confirmation of next job */
+				pthread_mutex_lock(&num_active_threads_lock);
+				while (num_active_threads > 0) {
+					pthread_cond_wait(&num_active_threads_cond,
+							  &num_active_threads_lock);
+				}
+				pthread_mutex_unlock(&num_active_threads_lock);
+			}
+		}
+	}
+}
 
-			pthread_mutex_lock( &num_active_threads_lock );
-			num_active_threads++;
-			while (num_active_threads > MAX_THREADS) {
+static void
+_cancel_jobs_by_state(uint32_t job_state, int filter_cnt, int *rc)
+{
+	int i, err;
+	job_cancel_info_t *cancel_info;
+	job_info_t *job_ptr = job_buffer_ptr->job_array;
+	pthread_t dummy;
+
+	/* Spawn a thread to cancel each job or job step marked for
+	 * cancellation */
+	if (opt.job_cnt) {
+		_cancel_jobid_by_state(job_state, filter_cnt, rc);
+		return;
+	}
+
+	for (i = 0; i < job_buffer_ptr->record_count; i++, job_ptr++) {
+		if (IS_JOB_FINISHED(job_ptr))
+			job_ptr->job_id = 0;
+		if (job_ptr->job_id == 0)
+			continue;
+
+		if ((job_state < JOB_END) &&
+		    (job_ptr->job_state != job_state))
+			continue;
+
+		if (opt.interactive &&
+		    (_confirmation(job_ptr, SLURM_BATCH_SCRIPT) == 0)) {
+			job_ptr->job_id = 0;
+			continue;
+		}
+
+		cancel_info = (job_cancel_info_t *)
+			xmalloc(sizeof(job_cancel_info_t));
+		cancel_info->job_id_str = _build_jobid_str(job_ptr);
+		cancel_info->rc      = rc;
+		cancel_info->sig     = opt.signal;
+		cancel_info->num_active_threads = &num_active_threads;
+		cancel_info->num_active_threads_lock =
+			&num_active_threads_lock;
+		cancel_info->num_active_threads_cond =
+			&num_active_threads_cond;
+
+		pthread_mutex_lock(&num_active_threads_lock);
+		num_active_threads++;
+		while (num_active_threads > MAX_THREADS) {
+			pthread_cond_wait(&num_active_threads_cond,
+					  &num_active_threads_lock);
+		}
+		pthread_mutex_unlock(&num_active_threads_lock);
+
+		err = pthread_create(&dummy, &attr, _cancel_job_id,cancel_info);
+		if (err)   /* Run in-line if thread create fails */
+			_cancel_job_id(cancel_info);
+		job_ptr->job_id = 0;
+
+		if (opt.interactive) {
+			/* Print any error message for first job before
+			 * starting confirmation of next job */
+			pthread_mutex_lock(&num_active_threads_lock);
+			while (num_active_threads > 0) {
 				pthread_cond_wait(&num_active_threads_cond,
 						  &num_active_threads_lock);
 			}
 			pthread_mutex_unlock(&num_active_threads_lock);
-
-			err = pthread_create(&dummy, &attr, _cancel_job_id,
-					     cancel_info);
-			if (err)
-				_cancel_job_id(cancel_info);
 		}
-		job_ptr[i].job_id = 0;
 	}
 }
 
 /* _cancel_jobs - filter then cancel jobs or job steps per request */
-static void
-_cancel_jobs (int filter_cnt)
+static int _cancel_jobs(int filter_cnt)
 {
+	int rc = 0;
+
 	slurm_attr_init(&attr);
 	if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
 		error("pthread_attr_setdetachstate error %m");
@@ -538,7 +621,7 @@ _cancel_jobs (int filter_cnt)
 	if (pthread_cond_init(&num_active_threads_cond, NULL))
 		error("pthread_cond_init error %m");
 
-	_cancel_jobs_by_state(JOB_PENDING, filter_cnt);
+	_cancel_jobs_by_state(JOB_PENDING, filter_cnt, &rc);
 	/* Wait for any cancel of pending jobs to complete before starting
 	 * cancellation of running jobs so that we don't have a race condition
 	 * with pending jobs getting scheduled while running jobs are also
@@ -550,7 +633,7 @@ _cancel_jobs (int filter_cnt)
 	}
 	pthread_mutex_unlock(&num_active_threads_lock);
 
-	_cancel_jobs_by_state(JOB_END, filter_cnt);
+	_cancel_jobs_by_state(JOB_END, filter_cnt, &rc);
 	/* Wait for any spawned threads that have not finished */
 	pthread_mutex_lock( &num_active_threads_lock );
 	while (num_active_threads > 0) {
@@ -563,85 +646,88 @@ _cancel_jobs (int filter_cnt)
 	slurm_mutex_destroy(&num_active_threads_lock);
 	if (pthread_cond_destroy(&num_active_threads_cond))
 		error("pthread_cond_destroy error %m");
+
+	return rc;
 }
 
 static void *
 _cancel_job_id (void *ci)
 {
 	int error_code = SLURM_SUCCESS, i;
-	bool sig_set = true;
-	bool msg_to_ctld = opt.ctld;
 	job_cancel_info_t *cancel_info = (job_cancel_info_t *)ci;
-	uint32_t job_id = cancel_info->job_id;
-	uint32_t array_job_id  = cancel_info->array_job_id;
-	uint32_t array_task_id = cancel_info->array_task_id;
-	uint32_t sig    = cancel_info->sig;
+	bool sig_set = true;
+	uint16_t flags = 0;
+	char *job_type = "";
 
-	if (sig == (uint16_t)-1) {
-		sig = SIGKILL;
+	if (cancel_info->sig == (uint16_t) NO_VAL) {
+		cancel_info->sig = SIGKILL;
 		sig_set = false;
 	}
-
-	for (i=0; i<MAX_CANCEL_RETRY; i++) {
-		if (!sig_set) {
-			if (array_job_id) {
-				verbose("Terminating job %u_%u",
-					array_job_id, array_task_id);
-			} else
-				verbose("Terminating job %u", job_id);
+	if (opt.batch) {
+		flags |= KILL_JOB_BATCH;
+		job_type = "batch ";
+	}
+	if (cancel_info->array_flag)
+		flags |= KILL_JOB_ARRAY;
+
+	if (!cancel_info->job_id_str) {
+		if (cancel_info->array_job_id &&
+		    (cancel_info->array_task_id == INFINITE)) {
+			xstrfmtcat(cancel_info->job_id_str, "%u_*",
+				   cancel_info->array_job_id);
+		} else if (cancel_info->array_job_id) {
+			xstrfmtcat(cancel_info->job_id_str, "%u_%u",
+				   cancel_info->array_job_id,
+				   cancel_info->array_task_id);
 		} else {
-			if (array_job_id) {
-				verbose("Signal %u to job %u_%u",
-					sig, array_job_id, array_task_id);
-			} else
-				verbose("Signal %u to job %u", sig, job_id);
+			xstrfmtcat(cancel_info->job_id_str, "%u",
+				   cancel_info->job_id);
 		}
+	}
 
-		if ((sig == SIGKILL) || (!sig_set) ||
-		    msg_to_ctld || opt.clusters) {
-			uint16_t flags = 0;
-			if (opt.batch)
-				flags |= KILL_JOB_BATCH;
-			if (cancel_info->array_flag)
-				flags |= KILL_JOB_ARRAY;
-			error_code = slurm_kill_job (job_id, sig, flags);
-		} else {
-			if (opt.batch) {
-				sig = sig | (KILL_JOB_BATCH << 24);
-				error_code = slurm_signal_job_step(job_id,
-						SLURM_BATCH_SCRIPT, sig);
-			} else {
-				error_code = slurm_signal_job (job_id, sig);
-			}
-			if (error_code && (errno == ESLURM_JOB_PENDING)) {
-				/* Send request to directly to slurmctld */
-				msg_to_ctld  = true;
-				continue;
-			}
-		}
+	if (!sig_set) {
+		verbose("Terminating %sjob %s", job_type,
+			cancel_info->job_id_str);
+	} else {
+		verbose("Signal %u to %sjob %s", cancel_info->sig, job_type,
+			cancel_info->job_id_str);
+	}
+
+	for (i = 0; i < MAX_CANCEL_RETRY; i++) {
+		error_code = slurm_kill_job2(cancel_info->job_id_str,
+					     cancel_info->sig, flags);
 		if ((error_code == 0) ||
 		    (errno != ESLURM_TRANSITION_STATE_NO_UPDATE))
 			break;
 		verbose("Job is in transistional state, retrying");
-		sleep ( 5 + i );
+		sleep(5 + i);
 	}
 	if (error_code) {
 		error_code = slurm_get_errno();
 		if ((opt.verbose > 0) ||
 		    ((error_code != ESLURM_ALREADY_DONE) &&
-		     (error_code != ESLURM_INVALID_JOB_ID)))
-			error("Kill job error on job id %u: %s",
-				job_id, slurm_strerror(slurm_get_errno()));
+		     (error_code != ESLURM_INVALID_JOB_ID))) {
+			error("Kill job error on job id %s: %s",
+			      cancel_info->job_id_str,
+			      slurm_strerror(slurm_get_errno()));
+		}
+		if (((error_code == ESLURM_ALREADY_DONE) ||
+		     (error_code == ESLURM_INVALID_JOB_ID)) &&
+		    (cancel_info->sig == SIGKILL)) {
+			error_code = 0;	/* Ignore error if job done */
+		}
 	}
 
 	/* Purposely free the struct passed in here, so the caller doesn't have
 	 * to keep track of it, but don't destroy the mutex and condition
 	 * variables contained. */
-	pthread_mutex_lock(   cancel_info->num_active_threads_lock );
+	pthread_mutex_lock(cancel_info->num_active_threads_lock);
+	*(cancel_info->rc) = MAX(*(cancel_info->rc), error_code);
 	(*(cancel_info->num_active_threads))--;
-	pthread_cond_signal(  cancel_info->num_active_threads_cond );
-	pthread_mutex_unlock( cancel_info->num_active_threads_lock );
+	pthread_cond_signal(cancel_info->num_active_threads_cond);
+	pthread_mutex_unlock(cancel_info->num_active_threads_lock);
 
+	xfree(cancel_info->job_id_str);
 	xfree(cancel_info);
 	return NULL;
 }
@@ -653,97 +739,98 @@ _cancel_step_id (void *ci)
 	job_cancel_info_t *cancel_info = (job_cancel_info_t *)ci;
 	uint32_t job_id  = cancel_info->job_id;
 	uint32_t step_id = cancel_info->step_id;
-	uint32_t array_job_id  = cancel_info->array_job_id;
-	uint32_t array_task_id = cancel_info->array_task_id;
-	uint16_t sig     = cancel_info->sig;
 	bool sig_set = true;
 
-	if (sig == (uint16_t)-1) {
-		sig = SIGKILL;
+	if (cancel_info->sig == (uint16_t) NO_VAL) {
+		cancel_info->sig = SIGKILL;
 		sig_set = false;
 	}
 
-	for (i=0; i<MAX_CANCEL_RETRY; i++) {
-		if (sig == SIGKILL) {
-			if (array_job_id) {
-				verbose("Terminating step %u_%u.%u",
-					array_job_id, array_task_id, step_id);
-			} else {
-				verbose("Terminating step %u.%u",
-					job_id, step_id);
-			}
+	if (!cancel_info->job_id_str) {
+		if (cancel_info->array_job_id &&
+		    (cancel_info->array_task_id == INFINITE)) {
+			xstrfmtcat(cancel_info->job_id_str, "%u_*",
+				   cancel_info->array_job_id);
+		} else if (cancel_info->array_job_id) {
+			xstrfmtcat(cancel_info->job_id_str, "%u_%u",
+				   cancel_info->array_job_id,
+				   cancel_info->array_task_id);
 		} else {
-			if (array_job_id) {
-				verbose("Signal %u to step %u_%u.%u",
-					sig, array_job_id, array_task_id,
-					step_id);
-			} else {
-				verbose("Signal %u to step %u.%u",
-					sig, job_id, step_id);
-			}
+			xstrfmtcat(cancel_info->job_id_str, "%u",
+				   cancel_info->job_id);
+		}
+	}
+
+	for (i = 0; i < MAX_CANCEL_RETRY; i++) {
+		if (cancel_info->sig == SIGKILL) {
+			verbose("Terminating step %s.%u",
+				cancel_info->job_id_str, step_id);
+		} else {
+			verbose("Signal %u to step %s.%u",
+				cancel_info->sig,
+				cancel_info->job_id_str, step_id);
 		}
 
 		if ((!sig_set) || opt.ctld)
-			error_code = slurm_kill_job_step(job_id, step_id, sig);
-		else if (sig == SIGKILL)
+			error_code = slurm_kill_job_step(job_id, step_id,
+							 cancel_info->sig);
+		else if (cancel_info->sig == SIGKILL)
 			error_code = slurm_terminate_job_step(job_id, step_id);
 		else
 			error_code = slurm_signal_job_step(job_id, step_id,
-							   sig);
-		if (error_code == 0
-		    || (errno != ESLURM_TRANSITION_STATE_NO_UPDATE
-			&& errno != ESLURM_JOB_PENDING))
+							   cancel_info->sig);
+		if ((error_code == 0) ||
+		    ((errno != ESLURM_TRANSITION_STATE_NO_UPDATE) &&
+		     (errno != ESLURM_JOB_PENDING)))
 			break;
 		verbose("Job is in transistional state, retrying");
-		sleep ( 5 + i );
+		sleep(5 + i);
 	}
 	if (error_code) {
 		error_code = slurm_get_errno();
-		if ((opt.verbose > 0) || (error_code != ESLURM_ALREADY_DONE ))
-			error("Kill job error on job step id %u.%u: %s",
-		 		job_id, step_id,
-				slurm_strerror(slurm_get_errno()));
+		if ((opt.verbose > 0) || (error_code != ESLURM_ALREADY_DONE))
+			error("Kill job error on job step id %s: %s",
+		 	      cancel_info->job_id_str,
+			      slurm_strerror(slurm_get_errno()));
+
+		if ((error_code == ESLURM_ALREADY_DONE) &&
+		    (cancel_info->sig == SIGKILL)) {
+			error_code = 0;	/* Ignore error if job done */
+		}
 	}
 
 	/* Purposely free the struct passed in here, so the caller doesn't have
 	 * to keep track of it, but don't destroy the mutex and condition
 	 * variables contained. */
-	pthread_mutex_lock(   cancel_info->num_active_threads_lock );
+	pthread_mutex_lock(cancel_info->num_active_threads_lock);
+	*(cancel_info->rc) = MAX(*(cancel_info->rc), error_code);
 	(*(cancel_info->num_active_threads))--;
-	pthread_cond_signal(  cancel_info->num_active_threads_cond );
-	pthread_mutex_unlock( cancel_info->num_active_threads_lock );
+	pthread_cond_signal(cancel_info->num_active_threads_cond);
+	pthread_mutex_unlock(cancel_info->num_active_threads_lock);
 
+	xfree(cancel_info->job_id_str);
 	xfree(cancel_info);
 	return NULL;
 }
 
 /* _confirmation - Confirm job cancel request interactively */
 static int
-_confirmation (int i, uint32_t step_id)
+_confirmation(job_info_t *job_ptr, uint32_t step_id)
 {
-	char job_id_str[64], in_line[128];
-	job_info_t *job_ptr = NULL;
+	char *job_id_str, in_line[128];
 
-	job_ptr = job_buffer_ptr->job_array ;
 	while (1) {
-		if (job_ptr[i].array_task_id == NO_VAL) {
-			snprintf(job_id_str, sizeof(job_id_str), "%u",
-				 job_ptr[i].job_id);
-		} else {
-			snprintf(job_id_str, sizeof(job_id_str), "%u_%u",
-				 job_ptr[i].array_job_id,
-				 job_ptr[i].array_task_id);
-		}
-
+		job_id_str = _build_jobid_str(job_ptr);
 		if (step_id == SLURM_BATCH_SCRIPT) {
-			printf ("Cancel job_id=%s name=%s partition=%s [y/n]? ",
-			        job_id_str, job_ptr[i].name,
-				job_ptr[i].partition);
+			printf("Cancel job_id=%s name=%s partition=%s [y/n]? ",
+			       job_id_str, job_ptr->name,
+			       job_ptr->partition);
 		} else {
-			printf ("Cancel step_id=%s.%u name=%s partition=%s [y/n]? ",
-			        job_id_str, step_id, job_ptr[i].name,
-				job_ptr[i].partition);
+			printf("Cancel step_id=%s.%u name=%s partition=%s [y/n]? ",
+			       job_id_str, step_id, job_ptr->name,
+			       job_ptr->partition);
 		}
+		xfree(job_id_str);
 		if (fgets(in_line, sizeof(in_line), stdin) == NULL)
 			continue;
 		if ((in_line[0] == 'y') || (in_line[0] == 'Y'))
@@ -754,24 +841,51 @@ _confirmation (int i, uint32_t step_id)
 
 }
 
-/* _signal_job_by_str()
- */
-static int
-_signal_job_by_str(void)
+static int _signal_job_by_str(void)
 {
-	int cc, i;
-	int rc = 0;
+	job_cancel_info_t *cancel_info;
+	int err, i, rc = 0;
+	pthread_t dummy;
 
-	if (opt.signal == (uint16_t) - 1)
-		opt.signal = SIGKILL;
+	slurm_attr_init(&attr);
+	if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
+		error("pthread_attr_setdetachstate error %m");
+	slurm_mutex_init(&num_active_threads_lock);
 
 	for (i = 0; opt.job_list[i]; i++) {
-		verbose("Signalling job %s", opt.job_list[i]);
-		cc = slurm_kill_job2(opt.job_list[i], opt.signal, 0);
-		if ((cc != SLURM_SUCCESS) && (opt.verbose != -1)) {
-			error("slurm_kill_job2() failed %s", slurm_strerror(errno));
-			rc = -1;
+		cancel_info = (job_cancel_info_t *)
+			xmalloc(sizeof(job_cancel_info_t));
+		cancel_info->job_id_str = xstrdup(opt.job_list[i]);
+		cancel_info->rc      = &rc;
+		cancel_info->sig     = opt.signal;
+		cancel_info->num_active_threads = &num_active_threads;
+		cancel_info->num_active_threads_lock =
+			&num_active_threads_lock;
+		cancel_info->num_active_threads_cond =
+			&num_active_threads_cond;
+
+		pthread_mutex_lock(&num_active_threads_lock);
+		num_active_threads++;
+		while (num_active_threads > MAX_THREADS) {
+			pthread_cond_wait(&num_active_threads_cond,
+					  &num_active_threads_lock);
 		}
+		pthread_mutex_unlock(&num_active_threads_lock);
+
+		err = pthread_create(&dummy, &attr, _cancel_job_id,cancel_info);
+		if (err)	/* Run in-line if thread create fails */
+			_cancel_job_id(cancel_info);
 	}
+
+	/* Wait for all spawned threads to finish */
+	pthread_mutex_lock( &num_active_threads_lock );
+	while (num_active_threads > 0) {
+		pthread_cond_wait(&num_active_threads_cond,
+				  &num_active_threads_lock);
+	}
+	pthread_mutex_unlock(&num_active_threads_lock);
+
+	slurm_attr_destroy(&attr);
+
 	return rc;
 }
diff --git a/src/scancel/scancel.h b/src/scancel/scancel.h
index bc210a312..0f62b760a 100644
--- a/src/scancel/scancel.h
+++ b/src/scancel/scancel.h
@@ -1,7 +1,9 @@
 /*****************************************************************************\
  *  scancel.h - definitions for scancel data structures and functions
  *****************************************************************************
- *  Copyright (C) 2002 The Regents of the University of California.
+ *  Copyright (C) 2002-2007 The Regents of the University of California.
+ *  Copyright (C) 2008-2009 Lawrence Livermore National Security.
+ *  Copyright (C) 2010-2015 SchedMD LLC.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette<jette1@llnl.gov>, et. al.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -56,18 +58,22 @@ typedef struct scancel_options {
 	char *qos;		/* --qos=n, -qn			*/
 	char *reservation;	/* --reservation=n, -Rn		*/
 	uint16_t signal;	/* --signal=n, -sn		*/
-	uint16_t state;		/* --state=n, -tn		*/
+	uint32_t state;		/* --state=n, -tn		*/
 	uid_t user_id;		/* derived from user_name	*/
 	char *user_name;	/* --user=n, -un		*/
 	int verbose;		/* --verbose, -v		*/
-
-	uint16_t job_cnt;	/* count of job_id's specified	*/
-	uint32_t *job_id;	/* list of job_id's		*/
-	uint32_t *array_id;	/* list of job array IDs	*/
-	uint32_t *step_id;	/* list of job step id's	*/
 	char *wckey;		/* --wckey			*/
 	char *nodelist;		/* --nodelist, -w		*/
-	char **job_list;        /* list of job ids as char *    */
+
+	char **job_list;        /* job ID input, NULL terminated
+				 * Expanded into arrays below	*/
+
+	uint16_t job_cnt;	/* count of job_id's specified	*/
+	uint32_t *job_id;	/* list of job ID's		*/
+	uint32_t *array_id;	/* list of job array task IDs	*/
+	uint32_t *step_id;	/* list of job step ID's	*/
+	bool *job_found;	/* Set if the job record is found */
+	bool *job_pend;		/* Set if job is pending	*/
 } opt_t;
 
 opt_t opt;
@@ -80,6 +86,10 @@ opt_t opt;
  */
 extern int initialize_and_process_args(int argc, char *argv[]);
 
+/*
+ * No job filtering options were specified (e.g. by user or state), only the
+ * job IDs are on the command line.
+ */
 extern bool has_default_opt(void);
 extern bool has_job_steps(void);
 #endif	/* _HAVE_SCANCEL_H */
diff --git a/src/scontrol/Makefile.am b/src/scontrol/Makefile.am
index f8622ade2..ec64fd39a 100644
--- a/src/scontrol/Makefile.am
+++ b/src/scontrol/Makefile.am
@@ -9,17 +9,22 @@ bin_PROGRAMS = scontrol
 scontrol_SOURCES =	\
 	create_res.c	\
 	info_block.c	\
+	info_burst_buffer.c \
+	info_assoc_mgr.c \
 	info_job.c	\
+	info_layout.c	\
+	info_lics.c	\
 	info_node.c	\
 	info_part.c	\
 	info_res.c	\
 	scontrol.c	\
 	scontrol.h	\
 	update_job.c	\
+	update_layout.c \
 	update_node.c	\
 	update_part.c	\
 	update_step.c   \
-	info_lics.c
+	update_powercap.c
 
 convenience_libs = $(top_builddir)/src/api/libslurm.o $(DL_LIBS) -lm
 
diff --git a/src/scontrol/Makefile.in b/src/scontrol/Makefile.in
index e707b1a22..bb543a885 100644
--- a/src/scontrol/Makefile.in
+++ b/src/scontrol/Makefile.in
@@ -101,6 +101,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -109,10 +110,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -125,7 +128,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -135,10 +138,13 @@ CONFIG_CLEAN_VPATH_FILES =
 am__installdirs = "$(DESTDIR)$(bindir)"
 PROGRAMS = $(bin_PROGRAMS)
 am_scontrol_OBJECTS = create_res.$(OBJEXT) info_block.$(OBJEXT) \
-	info_job.$(OBJEXT) info_node.$(OBJEXT) info_part.$(OBJEXT) \
-	info_res.$(OBJEXT) scontrol.$(OBJEXT) update_job.$(OBJEXT) \
-	update_node.$(OBJEXT) update_part.$(OBJEXT) \
-	update_step.$(OBJEXT) info_lics.$(OBJEXT)
+	info_burst_buffer.$(OBJEXT) info_assoc_mgr.$(OBJEXT) \
+	info_job.$(OBJEXT) info_layout.$(OBJEXT) info_lics.$(OBJEXT) \
+	info_node.$(OBJEXT) info_part.$(OBJEXT) info_res.$(OBJEXT) \
+	scontrol.$(OBJEXT) update_job.$(OBJEXT) \
+	update_layout.$(OBJEXT) update_node.$(OBJEXT) \
+	update_part.$(OBJEXT) update_step.$(OBJEXT) \
+	update_powercap.$(OBJEXT)
 scontrol_OBJECTS = $(am_scontrol_OBJECTS)
 am__DEPENDENCIES_1 =
 am__DEPENDENCIES_2 = $(top_builddir)/src/api/libslurm.o \
@@ -253,6 +259,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -302,8 +310,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -322,6 +334,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -365,6 +380,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -388,6 +404,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -454,17 +471,22 @@ AM_CPPFLAGS = -I$(top_srcdir) $(BG_INCLUDES)
 scontrol_SOURCES = \
 	create_res.c	\
 	info_block.c	\
+	info_burst_buffer.c \
+	info_assoc_mgr.c \
 	info_job.c	\
+	info_layout.c	\
+	info_lics.c	\
 	info_node.c	\
 	info_part.c	\
 	info_res.c	\
 	scontrol.c	\
 	scontrol.h	\
 	update_job.c	\
+	update_layout.c \
 	update_node.c	\
 	update_part.c	\
 	update_step.c   \
-	info_lics.c
+	update_powercap.c
 
 convenience_libs = $(top_builddir)/src/api/libslurm.o $(DL_LIBS) -lm
 scontrol_LDADD = \
@@ -567,16 +589,21 @@ distclean-compile:
 	-rm -f *.tab.c
 
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/create_res.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info_assoc_mgr.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info_block.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info_burst_buffer.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info_job.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info_layout.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info_lics.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info_node.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info_part.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/info_res.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/scontrol.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/update_job.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/update_layout.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/update_node.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/update_part.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/update_powercap.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/update_step.Po@am__quote@
 
 .c.o:
diff --git a/src/scontrol/create_res.c b/src/scontrol/create_res.c
index bf24e784a..6fc23db39 100644
--- a/src/scontrol/create_res.c
+++ b/src/scontrol/create_res.c
@@ -121,6 +121,17 @@ scontrol_parse_res_options(int argc, char *argv[], const char *msg,
 		if (strncasecmp(tag, "ReservationName", MAX(taglen, 1)) == 0) {
 			resv_msg_ptr->name = val;
 
+		} else if (strncasecmp(tag, "Accounts", MAX(taglen, 1)) == 0) {
+			if (plus_minus) {
+				resv_msg_ptr->accounts =
+					_process_plus_minus(plus_minus, val);
+				*free_acct_str = 1;
+			} else {
+				resv_msg_ptr->accounts = val;
+			}
+		} else if (strncasecmp(tag, "BurstBuffer", MAX(taglen, 2))
+			   == 0) {
+			resv_msg_ptr->burst_buffer = val;
 		} else if (strncasecmp(tag, "StartTime", MAX(taglen, 1)) == 0){
 			time_t  t = parse_time(val, 0);
 			if (errno == ESLURM_INVALID_TIME_VALUE) {
@@ -274,13 +285,10 @@ scontrol_parse_res_options(int argc, char *argv[], const char *msg,
 			} else {
 				resv_msg_ptr->users = val;
 			}
-		} else if (strncasecmp(tag, "Accounts", MAX(taglen, 1)) == 0) {
-			if (plus_minus) {
-				resv_msg_ptr->accounts =
-					_process_plus_minus(plus_minus, val);
-				*free_acct_str = 1;
-			} else {
-				resv_msg_ptr->accounts = val;
+		} else if (strncasecmp(tag, "Watts", MAX(taglen, 1)) == 0) {
+			if (parse_uint32(val, &(resv_msg_ptr->resv_watts))) {
+				error("Invalid Watts value: %s", val);
+				return -1;
 			}
 		} else if (strncasecmp(tag, "res", 3) == 0) {
 			continue;
@@ -423,13 +431,17 @@ scontrol_create_res(int argc, char *argv[])
 	 * make the reservation for the whole partition.
 	 */
 	if ((resv_msg.core_cnt == 0) &&
+	    (resv_msg.burst_buffer == NULL ||
+	     resv_msg.burst_buffer[0] == '\0') &&
 	    (resv_msg.node_cnt  == NULL || resv_msg.node_cnt[0]  == 0)    &&
 	    (resv_msg.node_list == NULL || resv_msg.node_list[0] == '\0') &&
-	    (resv_msg.licenses  == NULL || resv_msg.licenses[0]  == '\0')) {
+	    (resv_msg.licenses  == NULL || resv_msg.licenses[0]  == '\0') &&
+	    (resv_msg.resv_watts == NO_VAL)) {
 		if (resv_msg.partition == NULL) {
 			exit_code = 1;
-			error("CoreCnt, Nodes, NodeCnt or Licenses must be "
-			      "specified. No reservation created.");
+			error("CoreCnt, Nodes, NodeCnt, BurstBuffer, Licenses "
+			      "or Watts must be specified. No reservation "
+			      "created.");
 			goto SCONTROL_CREATE_RES_CLEANUP;
 		}
 		if (resv_msg.flags == (uint16_t) NO_VAL)
@@ -445,7 +457,17 @@ scontrol_create_res(int argc, char *argv[])
 		      "No reservation created.");
 		goto SCONTROL_CREATE_RES_CLEANUP;
 	}
-
+	if (resv_msg.resv_watts != NO_VAL &&
+	    (!(resv_msg.flags & RESERVE_FLAG_ANY_NODES) ||
+	     (resv_msg.core_cnt != 0) ||
+	     (resv_msg.node_cnt  != NULL && resv_msg.node_cnt[0]  != 0) ||
+	     (resv_msg.node_list != NULL && resv_msg.node_list[0] != '\0') ||
+	     (resv_msg.licenses  != NULL && resv_msg.licenses[0]  != '\0'))) {
+		exit_code = 1;
+		error("A power reservation must be empty and set the "
+		      "LICENSE_ONLY flag. No reservation created.");
+		goto SCONTROL_CREATE_RES_CLEANUP;
+	}
 	new_res_name = slurm_create_reservation(&resv_msg);
 	if (!new_res_name) {
 		exit_code = 1;
diff --git a/src/scontrol/info_assoc_mgr.c b/src/scontrol/info_assoc_mgr.c
new file mode 100644
index 000000000..d1305d577
--- /dev/null
+++ b/src/scontrol/info_assoc_mgr.c
@@ -0,0 +1,408 @@
+/*****************************************************************************\
+ *  info_assoc_mgr.c - Association Manager information from the
+ *                     slurmctld functions for scontrol.
+ *****************************************************************************
+ *  Copyright (C) 2004 CSCS
+ *  Copyright (C) 2015 SchedMD LLC
+ *  Written by Stephen Trofinoff and Danny Auble
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "scontrol.h"
+
+static uint32_t tres_cnt = 0;
+static char **tres_names = NULL;
+
+static void _print_tres_line(const char *name, uint64_t *limits, uint64_t *used,
+			     uint64_t divider, bool last)
+{
+	int i;
+	char *next_line = last ? "\n" : "\n    ";
+	char *new_line_char = one_liner && !last ? " " : next_line;
+	bool comma = 0;
+
+	xassert(tres_cnt);
+	xassert(tres_names);
+
+	printf("%s=", name);
+	if (!limits)
+		goto endit;
+
+	for (i=0; i<tres_cnt; i++) {
+		/* only print things that have limits or usage */
+		if (!used && (limits[i] == INFINITE64))
+			continue;
+
+		printf("%s%s=", comma ? "," : "", tres_names[i]);
+		if (limits[i] == INFINITE64)
+			printf("N");
+		else
+			printf("%"PRIu64, limits[i]);
+
+		if (used) {
+			uint64_t total_used = used[i];
+
+			if (divider)
+				total_used /= divider;
+
+			printf("(%"PRIu64")", total_used);
+		}
+
+		comma = 1;
+	}
+endit:
+	printf("%s", new_line_char);
+}
+
+static void _print_assoc_mgr_info(const char *name, assoc_mgr_info_msg_t *msg)
+{
+	ListIterator itr;
+	slurmdb_user_rec_t *user_rec;
+	slurmdb_assoc_rec_t *assoc_rec;
+	slurmdb_qos_rec_t *qos_rec;
+	uint64_t tmp64_array[msg->tres_cnt];
+	char *new_line_char = one_liner ? " " : "\n    ";
+	int i;
+
+	printf("Current Association Manager state\n");
+
+	tres_cnt = msg->tres_cnt;
+	tres_names = msg->tres_names;
+
+	if (!msg->user_list || !list_count(msg->user_list)) {
+		printf("\nNo users currently cached in Slurm.\n\n");
+	} else {
+		printf("\nUser Records\n\n");
+
+		itr = list_iterator_create(msg->user_list);
+		while ((user_rec = list_next(itr))) {
+			printf("UserName=%s(%u) DefAccount=%s "
+			       "DefWckey=%s AdminLevel=%s\n",
+			       user_rec->name,
+			       user_rec->uid,
+			       user_rec->default_acct,
+			       user_rec->default_wckey,
+			       slurmdb_admin_level_str(user_rec->admin_level));
+		}
+		list_iterator_destroy(itr);
+	}
+
+	if (!msg->assoc_list || !list_count(msg->assoc_list)) {
+		printf("\nNo associations currently cached in Slurm.\n\n");
+	} else {
+		printf("\nAssociation Records\n\n");
+
+		itr = list_iterator_create(msg->assoc_list);
+		while ((assoc_rec = list_next(itr))) {
+			if (!assoc_rec->usage)
+				continue;
+
+			printf("ClusterName=%s Account=%s ",
+			       assoc_rec->cluster,
+			       assoc_rec->acct);
+
+			if (assoc_rec->user)
+				printf("UserName=%s(%u) ",
+				       assoc_rec->user,
+				       assoc_rec->uid);
+			else
+				printf("UserName= ");
+
+			printf("Partition=%s ID=%u%s",
+			       assoc_rec->partition ? assoc_rec->partition : "",
+			       assoc_rec->id,
+			       new_line_char);
+
+			printf("SharesRaw/Norm/Level/Factor="
+			       "%u/%.2f/%u/%.2f%s",
+			       assoc_rec->shares_raw,
+			       assoc_rec->usage->shares_norm,
+			       assoc_rec->usage->level_shares,
+			       assoc_rec->usage->fs_factor,
+			       new_line_char);
+
+			printf("UsageRaw/Norm/Efctv=%.2Lf/%.2Lf/%.2Lf%s",
+			       assoc_rec->usage->usage_raw,
+			       assoc_rec->usage->usage_norm,
+			       assoc_rec->usage->usage_efctv,
+			       new_line_char);
+
+			if (assoc_rec->parent_acct)
+				printf("ParentAccount=%s(%u) ",
+				       assoc_rec->parent_acct,
+				       assoc_rec->parent_id);
+			else
+				printf("ParentAccount= ");
+
+			printf("Lft-Rgt=%u-%u DefAssoc=%s%s",
+			       assoc_rec->lft,
+			       assoc_rec->rgt,
+			       assoc_rec->is_def ? "Yes" : "No",
+			       new_line_char);
+
+
+			if (assoc_rec->grp_jobs != INFINITE)
+				printf("GrpJobs=%u(%u)",
+				       assoc_rec->grp_jobs,
+				       assoc_rec->usage->used_jobs);
+			else
+				printf("GrpJobs=");
+			/* NEW LINE */
+			printf("%s", new_line_char);
+
+			if (assoc_rec->grp_submit_jobs != INFINITE)
+				printf("GrpSubmitJobs=%u(%u) ",
+				       assoc_rec->grp_submit_jobs,
+				       assoc_rec->usage->used_submit_jobs);
+			else
+				printf("GrpSubmitJobs= ");
+			if (assoc_rec->grp_wall != INFINITE)
+				printf("GrpWall=%u(%.2f)",
+				       assoc_rec->grp_wall,
+				       assoc_rec->usage->grp_used_wall);
+			else
+				printf("GrpWall=");
+			/* NEW LINE */
+			printf("%s", new_line_char);
+
+			_print_tres_line("GrpTRES",
+					 assoc_rec->grp_tres_ctld,
+					 assoc_rec->usage->grp_used_tres, 0, 0);
+			if (assoc_rec->usage->usage_tres_raw)
+				for (i=0; i<tres_cnt; i++)
+					tmp64_array[i] = (uint64_t)
+						assoc_rec->usage->
+						usage_tres_raw[i];
+			else
+				memset(tmp64_array, 0, sizeof(tmp64_array));
+			_print_tres_line("GrpTRESMins",
+					 assoc_rec->grp_tres_mins_ctld,
+					 tmp64_array, 60, 0);
+			_print_tres_line("GrpTRESRunMins",
+					 assoc_rec->grp_tres_run_mins_ctld,
+					 assoc_rec->usage->
+					 grp_used_tres_run_secs, 60, 0);
+
+			if (assoc_rec->max_jobs != INFINITE)
+				printf("MaxJobs=%u(%u) ",
+				       assoc_rec->max_jobs,
+				       assoc_rec->usage->used_jobs);
+			else
+				printf("MaxJobs= ");
+
+			if (assoc_rec->max_submit_jobs != INFINITE)
+				printf("MaxSubmitJobs=%u(%u) ",
+				       assoc_rec->max_submit_jobs,
+				       assoc_rec->usage->used_submit_jobs);
+			else
+				printf("MaxSubmitJobs= ");
+
+			if (assoc_rec->max_wall_pj != INFINITE)
+				printf("MaxWallPJ=%u",
+				       assoc_rec->max_wall_pj);
+			else
+				printf("MaxWallPJ=");
+
+			/* NEW LINE */
+			printf("%s", new_line_char);
+
+			_print_tres_line("MaxTRESPJ",
+					 assoc_rec->max_tres_ctld,
+					 NULL, 0, 0);
+
+			_print_tres_line("MaxTRESPN",
+					 assoc_rec->max_tres_pn_ctld,
+					 NULL, 0, 0);
+
+			_print_tres_line("MaxTRESMinsPJ",
+					 assoc_rec->max_tres_mins_ctld,
+					 NULL, 0, 1);
+
+			/* Doesn't do anything yet */
+			/* _print_tres_line("MaxTRESRunMins", */
+			/* 		 assoc_rec->max_tres_mins_ctld, */
+			/* 		 NULL, 0, 1); */
+		}
+	}
+
+	if (!msg->qos_list || !list_count(msg->qos_list)) {
+		printf("\nNo QOS currently cached in Slurm.\n\n");
+	} else {
+
+		printf("\nQOS Records\n\n");
+
+		itr = list_iterator_create(msg->qos_list);
+		while ((qos_rec = list_next(itr))) {
+			if (!qos_rec->usage)
+				continue;
+
+			printf("QOS=%s(%u)%s", qos_rec->name, qos_rec->id,
+				new_line_char);
+
+			printf("UsageRaw=%Lf%s",
+			       qos_rec->usage->usage_raw,
+			       new_line_char);
+
+			if (qos_rec->grp_jobs != INFINITE)
+				printf("GrpJobs=%u(%u) ",
+				       qos_rec->grp_jobs,
+				       qos_rec->usage->grp_used_jobs);
+			else
+				printf("GrpJobs= ");
+			if (qos_rec->grp_submit_jobs != INFINITE)
+				printf("GrpSubmitJobs=%u(%u) ",
+				       qos_rec->grp_submit_jobs,
+				       qos_rec->usage->grp_used_submit_jobs);
+			else
+				printf("GrpSubmitJobs= ");
+			if (qos_rec->grp_wall != INFINITE)
+				printf("GrpWall=%u(%.2f)",
+				       qos_rec->grp_wall,
+				       qos_rec->usage->grp_used_wall);
+			else
+				printf("GrpWall=");
+			/* NEW LINE */
+			printf("%s", new_line_char);
+
+			_print_tres_line("GrpTRES",
+					 qos_rec->grp_tres_ctld,
+					 qos_rec->usage->grp_used_tres, 0, 0);
+			if (qos_rec->usage->usage_tres_raw)
+				for (i=0; i<tres_cnt; i++)
+					tmp64_array[i] = (uint64_t)
+						qos_rec->usage->
+						usage_tres_raw[i];
+			else
+				memset(tmp64_array, 0, sizeof(tmp64_array));
+			_print_tres_line("GrpTRESMins",
+					 qos_rec->grp_tres_mins_ctld,
+					 tmp64_array, 60, 0);
+			_print_tres_line("GrpTRESRunMins",
+					 qos_rec->grp_tres_run_mins_ctld,
+					 qos_rec->usage->
+					 grp_used_tres_run_secs, 60, 0);
+
+			if (qos_rec->max_jobs_pu != INFINITE)
+				printf("MaxJobsPU=%u(%u) ",
+				       qos_rec->max_jobs_pu,
+				       qos_rec->usage->grp_used_jobs);
+			else
+				printf("MaxJobs= ");
+
+			if (qos_rec->max_submit_jobs_pu != INFINITE)
+				printf("MaxSubmitJobs=%u(%u) ",
+				       qos_rec->max_submit_jobs_pu,
+				       qos_rec->usage->grp_used_submit_jobs);
+			else
+				printf("MaxSubmitJobs= ");
+
+			if (qos_rec->max_wall_pj != INFINITE)
+				printf("MaxWallPJ=%u",
+				       qos_rec->max_wall_pj);
+			else
+				printf("MaxWallPJ=");
+
+			/* NEW LINE */
+			printf("%s", new_line_char);
+
+			_print_tres_line("MaxTRESPJ",
+					 qos_rec->max_tres_pj_ctld,
+					 NULL, 0, 0);
+
+			_print_tres_line("MaxTRESPN",
+					 qos_rec->max_tres_pn_ctld,
+					 NULL, 0, 0);
+
+			_print_tres_line("MaxTRESPU",
+					 qos_rec->max_tres_pu_ctld,
+					 NULL, 0, 0);
+
+			_print_tres_line("MaxTRESMinsPJ",
+					 qos_rec->max_tres_mins_pj_ctld,
+					 NULL, 0, 0);
+
+			/* Doesn't do anything yet */
+			/* _print_tres_line("MaxTRESRunMinsPU", */
+			/* 		 qos_rec->max_tres_mins_pu_ctld, */
+			/* 		 NULL, 0); */
+
+			_print_tres_line("MinTRESPJ",
+					 qos_rec->min_tres_pj_ctld,
+					 NULL, 0, 1);
+		}
+	}
+}
+
+/* scontrol_print_assoc_mgr_info()
+ *
+ * Retrieve and display the association manager information
+ * from the controller
+ *
+ */
+
+extern void scontrol_print_assoc_mgr_info(const char *name)
+{
+	int cc;
+	assoc_mgr_info_request_msg_t req;
+	assoc_mgr_info_msg_t *msg = NULL;
+
+	/* FIXME: add more filtering in the future */
+	memset(&req, 0, sizeof(assoc_mgr_info_request_msg_t));
+	req.flags = ASSOC_MGR_INFO_FLAG_ASSOC | ASSOC_MGR_INFO_FLAG_USERS |
+		ASSOC_MGR_INFO_FLAG_QOS;
+	if (name) {
+		req.user_list = list_create(NULL);
+		list_append(req.user_list, (char *)name);
+	}
+	/* call the controller to get the meat */
+	cc = slurm_load_assoc_mgr_info(&req, &msg);
+
+	FREE_NULL_LIST(req.user_list);
+
+	if (cc != SLURM_PROTOCOL_SUCCESS) {
+		/* Hosed, crap out. */
+		exit_code = 1;
+		if (quiet_flag != 1)
+			slurm_perror("slurm_load_assoc_mgr_info error");
+		return;
+	}
+
+	/* print the info
+	 */
+	_print_assoc_mgr_info(name, msg);
+
+	/* free at last
+	 */
+	slurm_free_assoc_mgr_info_msg(msg);
+
+	return;
+}
+
diff --git a/src/plugins/slurmctld/dynalloc/job_ports_list.h b/src/scontrol/info_burst_buffer.c
similarity index 63%
rename from src/plugins/slurmctld/dynalloc/job_ports_list.h
rename to src/scontrol/info_burst_buffer.c
index e64cc9a4a..702aee061 100644
--- a/src/plugins/slurmctld/dynalloc/job_ports_list.h
+++ b/src/scontrol/info_burst_buffer.c
@@ -1,9 +1,8 @@
 /*****************************************************************************\
- *  job_ports_list.h - keep the pair of (slurm_jobid, resv_ports) for future release
+ *  info_burst_buffer.c - Burst buffer information functions for scontrol.
  *****************************************************************************
- *  Copyright (C) 2012-2013 Los Alamos National Security, LLC.
- *  Written by Jimmy Cao <Jimmy.Cao@emc.com>, Ralph Castain <rhc@open-mpi.org>
- *  All rights reserved.
+ *  Copyright (C) 2014 SchedMD LLC <http://www.schedmd.com/>.
+ *  Written by Morris Jette <jette@schedmd.com>
  *
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://slurm.schedmd.com/>.
@@ -35,45 +34,32 @@
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
 
-#ifndef JOB_PORTS_LIST_H_
-#define JOB_PORTS_LIST_H_
+#include "scontrol.h"
 
+/*
+ * scontrol_print_burst_buffer - print all burst_buffer information to stdout
+ */
+extern void scontrol_print_burst_buffer(void)
+{
+	int error_code, i, verbosity = 0;
+	burst_buffer_info_msg_t *burst_buffer_info_ptr = NULL;
+	burst_buffer_info_t *burst_buffer_ptr = NULL;
 
-#if HAVE_CONFIG_H
-#  include "config.h"
-#  if HAVE_INTTYPES_H
-#    include <inttypes.h>
-#  else
-#    if HAVE_STDINT_H
-#      include <stdint.h>
-#    endif
-#  endif  /* HAVE_INTTYPES_H */
-#else   /* !HAVE_CONFIG_H */
-#  include <inttypes.h>
-#endif  /*  HAVE_CONFIG_H */
+	error_code = slurm_load_burst_buffer_info(&burst_buffer_info_ptr);
+	if (error_code) {
+		exit_code = 1;
+		if (quiet_flag != 1)
+			slurm_perror ("slurm_load_burst_buffer error");
+		return;
+	}
 
-#include <string.h>
+	if (quiet_flag == -1)
+		verbosity = 1;
+	burst_buffer_ptr = burst_buffer_info_ptr->burst_buffer_array;
+	for (i = 0; i < burst_buffer_info_ptr->record_count; i++) {
+		slurm_print_burst_buffer_record(stdout, &burst_buffer_ptr[i],
+						one_liner, verbosity);
+	}
 
-#include "src/common/list.h"
-#include "src/common/xmalloc.h"
-
-
-typedef struct {
-	uint32_t slurm_jobid;
-	uint16_t port_cnt;
-	char *resv_ports;
-	int *port_array;
-} job_ports_t;
-
-extern List job_ports_list;
-
-extern void append_job_ports_item(uint32_t slurm_jobid, uint16_t port_cnt,
-							char *resv_ports, int *port_array);
-
-extern void free_job_ports_item_func(void *voiditem);
-
-extern int find_job_ports_item_func(void *voiditem, void *key);
-
-extern void print_list();
-
-#endif /* JOB_PORTS_LIST_H_ */
+	slurm_free_burst_buffer_info_msg(burst_buffer_info_ptr);
+}
diff --git a/src/scontrol/info_job.c b/src/scontrol/info_job.c
index e8a63aa44..c882f5d6a 100644
--- a/src/scontrol/info_job.c
+++ b/src/scontrol/info_job.c
@@ -36,12 +36,14 @@
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
-#include <sys/types.h>
-#include <sys/stat.h>
+#include <arpa/inet.h>
 #include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
 
 #include "scontrol.h"
 #include "src/common/bitstring.h"
+#include "src/common/slurm_time.h"
 #include "src/common/stepd_api.h"
 #include "src/plugins/select/bluegene/bg_enums.h"
 
@@ -155,7 +157,7 @@ scontrol_pid_info(pid_t job_pid)
 			slurm_perror ("slurm_get_end_time error");
 		return;
 	}
-	printf("Slurm job id %u ends at %s\n", job_id, slurm_ctime(&end_time));
+	printf("Slurm job id %u ends at %s\n", job_id, slurm_ctime2(&end_time));
 
 	rem_time = slurm_get_rem_time(job_id);
 	printf("slurm_get_rem_time is %ld\n", rem_time);
@@ -614,8 +616,7 @@ _list_pids_all_steps(const char *node_name, uint32_t jobid)
 	steps = stepd_available(NULL, node_name);
 	if (!steps || list_count(steps) == 0) {
 		fprintf(stderr, "Job %u does not exist on this node.\n", jobid);
-		if (steps)
-			list_destroy(steps);
+		FREE_NULL_LIST(steps);
 		exit_code = 1;
 		return;
 	}
@@ -629,7 +630,7 @@ _list_pids_all_steps(const char *node_name, uint32_t jobid)
 		}
 	}
 	list_iterator_destroy(itr);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 
 	if (count == 0) {
 		fprintf(stderr, "Job %u does not exist on this node.\n",
@@ -648,8 +649,7 @@ _list_pids_all_jobs(const char *node_name)
 	steps = stepd_available(NULL, node_name);
 	if (!steps || list_count(steps) == 0) {
 		fprintf(stderr, "No job steps exist on this node.\n");
-		if (steps)
-			list_destroy(steps);
+		FREE_NULL_LIST(steps);
 		exit_code = 1;
 		return;
 	}
@@ -660,7 +660,7 @@ _list_pids_all_jobs(const char *node_name)
 				    stepd->stepid);
 	}
 	list_iterator_destroy(itr);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 }
 
 /*
@@ -966,3 +966,96 @@ extern int scontrol_job_ready(char *job_id_str)
 
 	return rc;
 }
+
+extern int scontrol_callerid(int argc, char **argv)
+{
+	int af, ver = 4;
+	unsigned char ip_src[sizeof(struct in6_addr)],
+		      ip_dst[sizeof(struct in6_addr)];
+	uint32_t port_src, port_dst, job_id;
+	network_callerid_msg_t req;
+	char node_name[MAXHOSTNAMELEN], *ptr;
+
+	if (argc == 5) {
+		ver = strtoul(argv[4], &ptr, 0);
+		if (ptr && ptr[0]) {
+			error("Address family not an integer");
+			return SLURM_ERROR;
+		}
+	}
+
+	if (ver != 4 && ver != 6) {
+		error("Invalid address family: %d", ver);
+		return SLURM_ERROR;
+	}
+
+	af = ver == 4 ? AF_INET : AF_INET6;
+	if (!inet_pton(af, argv[0], ip_src)) {
+		error("inet_pton failed for '%s'", argv[0]);
+		return SLURM_ERROR;
+	}
+
+	port_src = strtoul(argv[1], &ptr, 0);
+	if (ptr && ptr[0]) {
+		error("Source port not an integer");
+		return SLURM_ERROR;
+	}
+
+	if (!inet_pton(af, argv[2], ip_dst)) {
+		error("scontrol_callerid: inet_pton failed for '%s'", argv[2]);
+		return SLURM_ERROR;
+	}
+
+	port_dst = strtoul(argv[3], &ptr, 0);
+	if (ptr && ptr[0]) {
+		error("Destination port not an integer");
+		return SLURM_ERROR;
+	}
+
+	memcpy(req.ip_src, ip_src, 16);
+	memcpy(req.ip_dst, ip_dst, 16);
+	req.port_src = port_src;
+	req.port_dst = port_dst;
+	req.af = af;
+
+	if (slurm_network_callerid(req, &job_id, node_name, MAXHOSTNAMELEN)
+			!= SLURM_SUCCESS) {
+		fprintf(stderr,
+			"slurm_network_callerid: unable to retrieve callerid data from remote slurmd\n");
+		return SLURM_FAILURE;
+	} else if (job_id == (uint32_t)NO_VAL) {
+		fprintf(stderr,
+			"slurm_network_callerid: remote job id indeterminate\n");
+		return SLURM_FAILURE;
+	} else {
+		printf("%u %s\n", job_id, node_name);
+		return SLURM_SUCCESS;
+	}
+}
+
+/*
+ * scontrol_print_sicp - print the inter-cluster job information
+ */
+extern void
+scontrol_print_sicp (void)
+{
+	int error_code = SLURM_SUCCESS, i;
+	sicp_info_msg_t * sicp_buffer_ptr = NULL;
+	sicp_info_t *sicp_ptr = NULL;
+
+	error_code = slurm_load_sicp(&sicp_buffer_ptr);
+	if (error_code) {
+		exit_code = 1;
+		if (quiet_flag != 1)
+			slurm_perror ("slurm_load_sicp error");
+		return;
+	}
+
+	for (i = 0, sicp_ptr = sicp_buffer_ptr->sicp_array;
+	     i < sicp_buffer_ptr->record_count; i++, sicp_ptr++) {
+		info("JobID=%u State=%s", sicp_ptr->job_id,
+		     job_state_string(sicp_ptr->job_state));
+	}
+
+	slurm_free_sicp_msg(sicp_buffer_ptr);
+}
diff --git a/src/scontrol/info_layout.c b/src/scontrol/info_layout.c
new file mode 100644
index 000000000..37510adf5
--- /dev/null
+++ b/src/scontrol/info_layout.c
@@ -0,0 +1,93 @@
+/*****************************************************************************\
+ *  info_layout.c - layout information functions for scontrol.
+ *****************************************************************************
+ *  Copyright (C) 2015
+ *  Written by Bull - Thomas Cadeau
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "scontrol.h"
+#include "src/common/pack.h"
+
+/*
+ * scontrol_print_layout - print information about the supplied layout
+ * IN argc / argv - option tags and values (layouts, entity, type, nolayout)
+ */
+extern void
+scontrol_print_layout (int argc, char *argv[])
+{
+	int i = 0, tag_len = 0;
+	char *tag = NULL, *val = NULL;
+	char *layout_type = NULL, *entities = NULL, *type = NULL;
+	uint32_t no_relation = 0;
+	layout_info_msg_t *layout_info_ptr = NULL;
+
+	while (i < argc) {
+		tag = argv[i];
+		tag_len = strlen(tag);
+		val = strchr(argv[i], '=');
+
+		if (val) {
+			tag_len = val - argv[i];
+			val++;
+		} else if (argc > i+1) {
+			val = argv[i+1];
+			i++;
+		} else {
+			val = NULL;
+		}
+		if (strncasecmp(tag, "layouts", MAX(tag_len, 3)) == 0) {
+			layout_type = val;
+		} else if (strncasecmp (tag, "entity", MAX(tag_len, 3)) == 0) {
+			entities = val;
+		} else if (strncasecmp (tag, "type", MAX(tag_len, 3)) == 0) {
+			type = val;
+		} else if (strncasecmp (tag, "nolayout", MAX(tag_len, 4)) == 0){
+			no_relation = 1;
+		} else {
+			exit_code = 1;
+			if (quiet_flag != 1)
+				fprintf (stderr,
+					 "invalid option for layouts: %s\n",
+					 tag);
+		}
+		i++;
+	}
+	if (slurm_load_layout (layout_type,
+			       entities, type,
+			       no_relation, &layout_info_ptr)
+					== SLURM_PROTOCOL_SUCCESS) {
+		slurm_print_layout_info ( stdout, layout_info_ptr, one_liner );
+		slurm_free_layout_info_msg (layout_info_ptr);
+	}
+
+	return;
+}
diff --git a/src/scontrol/info_node.c b/src/scontrol/info_node.c
index c503b5835..3f4f025b3 100644
--- a/src/scontrol/info_node.c
+++ b/src/scontrol/info_node.c
@@ -242,6 +242,29 @@ extern void	scontrol_print_topo (char *node_list)
 	}
 }
 
+/*
+ * scontrol_print_powercap - print the powercapping related information
+ * above the specified node(s)
+ * IN node_list - NULL to print the overall powercapping details
+ */
+extern void	scontrol_print_powercap (char *node_list)
+{
+	static powercap_info_msg_t *powercap_info_msg = NULL;
+
+	if ((powercap_info_msg == NULL) &&
+	    slurm_load_powercap(&powercap_info_msg)) {
+		slurm_perror ("slurm_load_powercap error");
+		return;
+	}
+
+	/* TODO: the case of a particular node list is not yet treated here */
+	if ((node_list == NULL) || (node_list[0] == '\0')) {
+		slurm_print_powercap_info_msg(stdout, powercap_info_msg,
+					      one_liner);
+		return;
+	}
+}
+
 /*
  * Load current front_end table information into *node_buffer_pptr
  */
diff --git a/src/scontrol/scontrol.c b/src/scontrol/scontrol.c
index bd9516ed2..1438536c3 100644
--- a/src/scontrol/scontrol.c
+++ b/src/scontrol/scontrol.c
@@ -158,8 +158,7 @@ main (int argc, char *argv[])
 			break;
 		case (int)'M':
 			if (clusters) {
-				list_destroy(clusters);
-				clusters = NULL;
+				FREE_NULL_LIST(clusters);
 				working_cluster_rec = NULL;
 			}
 			if (!(clusters = slurmdb_get_info_cluster(optarg))) {
@@ -226,8 +225,7 @@ main (int argc, char *argv[])
 			break;
 		}
 	}
-	if (clusters)
-		list_destroy(clusters);
+	FREE_NULL_LIST(clusters);
 	exit(exit_code);
 }
 
@@ -716,8 +714,7 @@ _process_command (int argc, char *argv[])
 	}
 	else if (strncasecmp (tag, "cluster", MAX(tag_len, 2)) == 0) {
 		if (clusters) {
-			list_destroy(clusters);
-			clusters = NULL;
+			FREE_NULL_LIST(clusters);
 			working_cluster_rec = NULL;
 		}
 		if (argc >= 2) {
@@ -1320,7 +1317,23 @@ _process_command (int argc, char *argv[])
 			exit_code = 1;
 			slurm_perror("job notify failure");
 		}
-	}	else {
+	}
+	else if (strncasecmp (tag, "callerid", MAX(tag_len, 2)) == 0) {
+		if (argc < 5) {
+			exit_code = 1;
+			fprintf (stderr,
+				 "too few arguments for keyword:%s\n",
+				 tag);
+		} else if (argc > 6) {
+			exit_code = 1;
+			fprintf (stderr,
+				 "too many arguments for keyword:%s\n",
+				 tag);
+		} else if (scontrol_callerid(argc-1, &argv[1])) {
+			exit_code = 1;
+			slurm_perror("callerid failure");
+		}
+	} else {
 		exit_code = 1;
 		fprintf (stderr, "invalid keyword: %s\n", tag);
 	}
@@ -1450,20 +1463,25 @@ _show_it (int argc, char *argv[])
 {
 	char *tag = NULL, *val = NULL;
 	int tag_len = 0;
+	bool allow_opt = false;
 
-	if (argc > 3) {
+	if (argc < 2) {
 		exit_code = 1;
 		if (quiet_flag != 1)
 			fprintf(stderr,
-				"too many arguments for keyword:%s\n",
-				argv[0]);
+				"too few arguments for keyword:%s\n", argv[0]);
 		return;
 	}
-	else if (argc < 2) {
+
+	if (strncasecmp (argv[1], "layouts", MAX(tag_len, 2)) != 0)
+		allow_opt = true;
+
+	if (argc > 3 && allow_opt) {
 		exit_code = 1;
 		if (quiet_flag != 1)
 			fprintf(stderr,
-				"too few arguments for keyword:%s\n", argv[0]);
+				"too many arguments for keyword:%s\n",
+				argv[0]);
 		return;
 	}
 
@@ -1484,8 +1502,13 @@ _show_it (int argc, char *argv[])
 			_print_aliases (val);
 		else
 			_print_aliases (NULL);
-	} else if (strncasecmp (tag, "blocks", MAX(tag_len, 1)) == 0) {
+	} else if (strncasecmp (tag, "blocks", MAX(tag_len, 2)) == 0) {
 		scontrol_print_block (val);
+	} else if (strncasecmp (tag, "burstbuffer", MAX(tag_len, 2)) == 0) {
+		scontrol_print_burst_buffer ();
+	} else if (!strncasecmp(tag, "assoc_mgr", MAX(tag_len, 2)) ||
+		   !strncasecmp(tag, "cache", MAX(tag_len, 2))) {
+		scontrol_print_assoc_mgr_info(val);
 	} else if (strncasecmp (tag, "config", MAX(tag_len, 1)) == 0) {
 		_print_config (val);
 	} else if (strncasecmp (tag, "daemons", MAX(tag_len, 1)) == 0) {
@@ -1521,22 +1544,28 @@ _show_it (int argc, char *argv[])
 	} else if (strncasecmp (tag, "jobs", MAX(tag_len, 1)) == 0 ||
 		   strncasecmp (tag, "jobid", MAX(tag_len, 1)) == 0 ) {
 		scontrol_print_job (val);
+	} else if (strncasecmp (tag, "layouts", MAX(tag_len, 2)) == 0) {
+		scontrol_print_layout(argc-1, argv + 1);
+	} else if (strncasecmp(tag, "licenses", MAX(tag_len, 2)) == 0) {
+		scontrol_print_licenses(val);
 	} else if (strncasecmp (tag, "nodes", MAX(tag_len, 1)) == 0) {
 		scontrol_print_node_list (val);
-	} else if (strncasecmp (tag, "partitions", MAX(tag_len, 1)) == 0 ||
-		   strncasecmp (tag, "partitionname", MAX(tag_len, 1)) == 0) {
+	} else if (strncasecmp (tag, "partitions", MAX(tag_len, 2)) == 0 ||
+		   strncasecmp (tag, "partitionname", MAX(tag_len, 2)) == 0) {
 		scontrol_print_part (val);
+	} else if (strncasecmp (tag, "powercapping", MAX(tag_len, 2)) == 0) {
+		scontrol_print_powercap (val);
 	} else if (strncasecmp (tag, "reservations", MAX(tag_len, 1)) == 0 ||
 		   strncasecmp (tag, "reservationname", MAX(tag_len, 1)) == 0) {
 		scontrol_print_res (val);
+	} else if (strncasecmp (tag, "sicp", MAX(tag_len, 2)) == 0) {
+		scontrol_print_sicp ();     /* UNDOCUMENTED TESTING OPTION */
 	} else if (strncasecmp (tag, "slurmd", MAX(tag_len, 2)) == 0) {
 		_print_slurmd (val);
 	} else if (strncasecmp (tag, "steps", MAX(tag_len, 2)) == 0) {
 		scontrol_print_step (val);
 	} else if (strncasecmp (tag, "topology", MAX(tag_len, 1)) == 0) {
 		scontrol_print_topo (val);
-	} else if (strncasecmp(tag, "licenses", MAX(tag_len, 2)) == 0) {
-		scontrol_print_licenses(val);
 	} else {
 		exit_code = 1;
 		if (quiet_flag != 1)
@@ -1562,6 +1591,8 @@ _update_it (int argc, char *argv[])
 	int node_tag = 0, part_tag = 0, job_tag = 0;
 	int block_tag = 0, sub_tag = 0, res_tag = 0;
 	int debug_tag = 0, step_tag = 0, front_end_tag = 0;
+	int layout_tag = 0;
+	int powercap_tag = 0;
 	int jerror_code = SLURM_SUCCESS;
 
 	/* First identify the entity to update */
@@ -1569,11 +1600,14 @@ _update_it (int argc, char *argv[])
 		char *tag = argv[i];
 		int tag_len = 0;
 		val = strchr(argv[i], '=');
-		if (!val)
-			continue;
-		tag_len = val - argv[i];
-		val++;
-
+		if (!val){
+			tag = argv[i];
+			tag_len = strlen(tag);
+			++i;
+		} else {
+			tag_len = val - argv[i];
+			val++;
+		}
 		if (!strncasecmp(tag, "NodeName", MAX(tag_len, 3))) {
 			node_tag = 1;
 		} else if (!strncasecmp(tag, "PartitionName",
@@ -1598,9 +1632,12 @@ _update_it (int argc, char *argv[])
 		} else if (!strncasecmp(tag, "SlurmctldDebug",
 					MAX(tag_len, 2))) {
 			debug_tag = 1;
+		} else if (!strncasecmp(tag, "Layouts",	MAX(tag_len, 5))) {
+			layout_tag = 1;
+		} else if (!strncasecmp(tag, "PowerCap", MAX(tag_len, 3))) {
+			powercap_tag = 1;
 		}
 	}
-
 	/* The order of tests matters here.  An update job request can include
 	 * partition and reservation tags, possibly before the jobid tag, but
 	 * none of the other updates have a jobid tag, so check jobtag first.
@@ -1626,6 +1663,10 @@ _update_it (int argc, char *argv[])
 		error_code = _update_bluegene_submp (argc, argv);
 	else if (debug_tag)
 		error_code = _update_slurmctld_debug(val);
+	else if (layout_tag)
+		error_code = scontrol_update_layout(argc, argv);
+	else if (powercap_tag)
+		error_code = scontrol_update_powercap (argc, argv);
 	else {
 		exit_code = 1;
 		fprintf(stderr, "No valid entity in update command\n");
@@ -1635,7 +1676,8 @@ _update_it (int argc, char *argv[])
 				"(i.e. bgl000[0-3]),");
 		}
 		fprintf(stderr, "\"PartitionName\", \"Reservation\", "
-			"\"JobId\", or \"SlurmctldDebug\" \n");
+			"\"JobId\", \"SlurmctldDebug\" , \"PowerCap\"" 
+			"or \"Layouts\"\n");
 	}
 
 	if (error_code) {
@@ -1919,7 +1961,7 @@ scontrol [<OPTION>] [<COMMAND>]                                            \n\
 			      (the primary controller will be stopped)     \n\
      suspend <job_list>       susend specified job (see resume)            \n\
      takeover                 ask slurm backup controller to take over     \n\
-     uhold <jobid_list>       place user hold on specified job (see hold)\n\
+     uhold <jobid_list>       place user hold on specified job (see hold)  \n\
      update <SPECIFICATIONS>  update job, node, partition, reservation,    \n\
 			      step or bluegene block/submp configuration   \n\
      verbose                  enable detailed logging.                     \n\
@@ -1928,9 +1970,11 @@ scontrol [<OPTION>] [<COMMAND>]                                            \n\
 			      are booted and usable                        \n\
      !!                       Repeat the last command entered.             \n\
 									   \n\
-  <ENTITY> may be \"aliases\", \"config\", \"daemons\", \"frontend\",      \n\
-       \"hostlist\", \"hostlistsorted\", \"hostnames\", \"job\", \"node\", \n\
-       \"partition\", \"reservation\", \"slurmd\", \"step\", or \"topology\"\n\
+  <ENTITY> may be \"aliases\", \"assoc_mgr\" \"burstBuffer\",              \n\
+       \"config\", \"daemons\", \"frontend\",                              \n\
+       \"hostlist\", \"hostlistsorted\", \"hostnames\",                    \n\
+       \"job\", \"layouts\", \"node\", \"partition\", \"reservation\",     \n\
+       \"slurmd\", \"step\", or \"topology\"                               \n\
        (also for BlueGene only: \"block\" or \"submp\").                   \n\
 									   \n\
   <ID> may be a configuration parameter name, job id, node name, partition \n\
diff --git a/src/scontrol/scontrol.h b/src/scontrol/scontrol.h
index 341c0202a..f17cc0f35 100644
--- a/src/scontrol/scontrol.h
+++ b/src/scontrol/scontrol.h
@@ -113,6 +113,7 @@ extern partition_info_msg_t *old_part_info_ptr;
 extern reserve_info_msg_t *old_res_info_ptr;
 extern slurm_ctl_conf_info_msg_t *old_slurm_ctl_conf_ptr;
 
+extern int	scontrol_callerid(int argc, char **argv);
 extern int	scontrol_checkpoint(char *op, char *job_step_id_str, int argc,
 				    char **argv);
 extern int	scontrol_create_part(int argc, char *argv[]);
@@ -135,6 +136,8 @@ extern int 	scontrol_load_partitions (partition_info_msg_t **
 					  part_info_pptr);
 extern int 	scontrol_load_block (block_info_msg_t **block_info_pptr);
 extern void	scontrol_pid_info(pid_t job_pid);
+extern void	scontrol_print_assoc_mgr_info(const char *name);
+extern void	scontrol_print_burst_buffer(void);
 extern void	scontrol_print_completing (void);
 extern void	scontrol_print_completing_job(job_info_t *job_ptr,
 					      node_info_msg_t *node_info_msg);
@@ -144,22 +147,28 @@ extern void	scontrol_print_front_end(char *node_name,
 					 front_end_buffer_ptr);
 extern void	scontrol_print_job (char * job_id_str);
 extern void	scontrol_print_hosts (char * node_list);
+extern void	scontrol_print_licenses(const char *feature);
 extern void	scontrol_print_node (char *node_name,
 				     node_info_msg_t *node_info_ptr);
 extern void	scontrol_print_node_list (char *node_list);
 extern void	scontrol_print_part (char *partition_name);
+extern void	scontrol_print_sicp (void);
 extern void	scontrol_print_block (char *block_name);
 extern void	scontrol_print_res (char *reservation_name);
 extern void	scontrol_print_step (char *job_step_id_str);
 extern void	scontrol_print_topo (char *node_list);
+extern void	scontrol_print_layout (int argc, char *argv[]);
+extern void	scontrol_print_powercap (char *node_list);
 extern void	scontrol_requeue(int argc, char **argv);
 extern void	scontrol_requeue_hold(int argc, char **argv);
 extern void	scontrol_suspend(char *op, char *job_id_str);
 extern int	scontrol_update_front_end (int argc, char *argv[]);
 extern int	scontrol_update_job (int argc, char *argv[]);
+extern int	scontrol_update_layout (int argc, char *argv[]);
 extern int	scontrol_update_node (int argc, char *argv[]);
 extern int	scontrol_update_part (int argc, char *argv[]);
 extern int	scontrol_update_res (int argc, char *argv[]);
 extern int	scontrol_update_step (int argc, char *argv[]);
-extern void	scontrol_print_licenses(const char *feature);
+extern int	scontrol_update_powercap (int argc, char *argv[]);
+
 #endif
diff --git a/src/scontrol/update_job.c b/src/scontrol/update_job.c
index 2e838bdbc..0d4d6fca7 100644
--- a/src/scontrol/update_job.c
+++ b/src/scontrol/update_job.c
@@ -736,6 +736,11 @@ scontrol_update_job (int argc, char *argv[])
 			job_msg.nice = NICE_OFFSET + 100;
 			update_cnt++;
 			continue;
+		} else if (!val && argv[i + 1]) {
+			tag = argv[i];
+			taglen = strlen(tag);
+			val = argv[++i];
+			vallen = strlen(val);
 		} else {
 			exit_code = 1;
 			fprintf (stderr, "Invalid input: %s\n", argv[i]);
@@ -1027,6 +1032,17 @@ scontrol_update_job (int argc, char *argv[])
 			}
 			update_cnt++;
 		}
+		else if (strncasecmp(tag, "ThreadSpec", MAX(taglen, 4)) == 0) {
+			if (!strcmp(val, "-1") || !strcmp(val, "*"))
+				job_msg.core_spec = (uint16_t) INFINITE;
+			else if (parse_uint16(val, &job_msg.core_spec)) {
+				error ("Invalid ThreadSpec value: %s", val);
+				exit_code = 1;
+				return 0;
+			} else
+				job_msg.core_spec |= CORE_SPEC_THREAD;
+			update_cnt++;
+		}
 		else if (strncasecmp(tag, "ExcNodeList", MAX(taglen, 3)) == 0){
 			job_msg.exc_nodes = val;
 			update_cnt++;
@@ -1054,6 +1070,10 @@ scontrol_update_job (int argc, char *argv[])
 			job_msg.account = val;
 			update_cnt++;
 		}
+		else if (strncasecmp(tag, "BurstBuffer", MAX(taglen, 1)) == 0) {
+			job_msg.burst_buffer = val;
+			update_cnt++;
+		}
 		else if (strncasecmp(tag, "Dependency", MAX(taglen, 1)) == 0) {
 			job_msg.dependency = val;
 			update_cnt++;
@@ -1437,7 +1457,8 @@ static uint32_t _get_job_time(const char *job_id_str)
 				       resp->job_array[i].array_bitmap;
 			if ((task_id == NO_VAL) ||
 			    (resp->job_array[i].array_task_id == task_id) ||
-			    ((task_id < bit_size(array_bitmap)) &&
+			    (array_bitmap &&
+			     (task_id < bit_size(array_bitmap)) &&
 			     bit_test(array_bitmap, task_id))) {
 				/* Array job with task_id match */
 				time_limit = resp->job_array[i].time_limit;
diff --git a/src/scontrol/update_layout.c b/src/scontrol/update_layout.c
new file mode 100644
index 000000000..9e05fbc7f
--- /dev/null
+++ b/src/scontrol/update_layout.c
@@ -0,0 +1,112 @@
+/*****************************************************************************\
+ *  update_layout.c - layout update functions for scontrol.
+ *****************************************************************************
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "scontrol.h"
+#include "src/common/pack.h"
+#include "src/common/slurm_protocol_defs.h"
+
+/*
+ * scontrol_print_layout - print information about the supplied layout
+ * IN argc - count of arguments
+ * IN argv - list of arguments
+ * RET 0 if no slurm error, errno otherwise. parsing error prints
+ *			error message and returns 0
+ */
+extern int
+scontrol_update_layout (int argc, char *argv[])
+{
+	int rc = 0;
+	int i = 0, tag_len = 0;
+	char *tag = NULL, *val = NULL;
+	update_layout_msg_t msg;
+	char *opt = NULL;
+
+	opt = xstrdup_printf(" ");
+	memset(&msg, 0, sizeof(update_layout_msg_t));
+	while (i < argc) {
+		tag = argv[i];
+		val = strchr(argv[i], '=');
+		if (val) {
+			tag_len = val - argv[i];
+			val++;
+		} else {
+			exit_code = 1;
+			fprintf (stderr,
+				 "invalid option:%s for layouts "
+				 "(\"=\" mandatory)\n",
+				 tag);
+			goto done;
+		}
+		if (strncasecmp(tag, "layouts", MAX(tag_len, 2)) == 0) {
+			msg.layout = val;
+		} else if (strncasecmp(tag, "entity", MAX(tag_len, 2)) == 0) {
+			msg.arg = xstrdup_printf("Entity=%s", val);
+		} else {
+			xstrcat(opt, tag);
+			xstrcat(opt, " ");
+		}
+		i++;
+	}
+
+	if (msg.layout == NULL) {
+		exit_code = 1;
+		fprintf (stderr,
+			 "No valid layout name in update command\n");
+		goto done;
+	}
+	if (msg.arg == NULL) {
+		exit_code = 1;
+		fprintf (stderr,
+			 "No valid layout enity in update command\n");
+		goto done;
+	}
+	if ( strlen(opt) <= 1 ) {
+		exit_code = 1;
+		fprintf (stderr,
+			 "No valid updates arguments in update command\n");
+		goto done;
+	}
+
+	xstrcat(msg.arg, opt);
+
+	rc = slurm_update_layout(&msg);
+
+done:	xfree(msg.arg);
+	xfree(opt);
+	if (rc) {	
+		exit_code = 1;
+		return slurm_get_errno ();
+	} else
+		return 0;
+}
diff --git a/src/scontrol/update_node.c b/src/scontrol/update_node.c
index 5253cf33f..af0a615e1 100644
--- a/src/scontrol/update_node.c
+++ b/src/scontrol/update_node.c
@@ -221,11 +221,12 @@ scontrol_update_node (int argc, char *argv[])
 		}
 	}
 
-	if (((node_msg.node_state == NODE_STATE_DRAIN) ||
+	if (((node_msg.node_state == NODE_STATE_DOWN)  ||
+	     (node_msg.node_state == NODE_STATE_DRAIN) ||
 	     (node_msg.node_state == NODE_STATE_FAIL)) &&
 	    ((node_msg.reason == NULL) || (strlen(node_msg.reason) == 0))) {
-		fprintf (stderr, "You must specify a reason when DRAINING a "
-			"node\nRequest aborted\n");
+		fprintf(stderr, "You must specify a reason when DOWNING or "
+			"DRAINING a node. Request denied\n");
 		goto done;
 	}
 
diff --git a/src/scontrol/update_part.c b/src/scontrol/update_part.c
index 0409c9225..79424ad16 100644
--- a/src/scontrol/update_part.c
+++ b/src/scontrol/update_part.c
@@ -157,6 +157,20 @@ scontrol_parse_part_options (int argc, char *argv[], int *update_cnt_ptr,
 			}
 			(*update_cnt_ptr)++;
 		}
+		else if (!strncasecmp(tag, "ExclusiveUser", MAX(taglen, 1))) {
+			if (strncasecmp(val, "NO", MAX(vallen, 1)) == 0)
+				part_msg_ptr->flags |= PART_FLAG_EXC_USER_CLR;
+			else if (strncasecmp(val, "YES", MAX(vallen, 1)) == 0)
+				part_msg_ptr->flags |= PART_FLAG_EXCLUSIVE_USER;
+			else {
+				exit_code = 1;
+				error("Invalid input: %s", argv[i]);
+				error("Acceptable ExclusiveUser values "
+					"are YES and NO");
+				return -1;
+			}
+			(*update_cnt_ptr)++;
+		}
 		else if (strncasecmp(tag, "Hidden", MAX(taglen, 1)) == 0) {
 			if (strncasecmp(val, "NO", MAX(vallen, 1)) == 0)
 				part_msg_ptr->flags |= PART_FLAG_HIDDEN_CLR;
@@ -359,6 +373,10 @@ scontrol_parse_part_options (int argc, char *argv[], int *update_cnt_ptr,
 			}
 			(*update_cnt_ptr)++;
 		}
+		else if (!strncasecmp(tag, "QoS", MAX(taglen, 3))) {
+			part_msg_ptr->qos_char = val;
+			(*update_cnt_ptr)++;
+		}
 		else {
 			exit_code = 1;
 			error("Update of this parameter is not "
diff --git a/src/scontrol/update_powercap.c b/src/scontrol/update_powercap.c
new file mode 100644
index 000000000..7b04a47e9
--- /dev/null
+++ b/src/scontrol/update_powercap.c
@@ -0,0 +1,111 @@
+/*****************************************************************************\
+ *  update_powercap.c - powercapping update function for scontrol.
+ *****************************************************************************
+ *  Copyright (C) 2013 CEA/DAM/DIF
+ *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "src/common/proc_args.h"
+#include "src/scontrol/scontrol.h"
+
+static uint32_t _parse_watts(char * watts_str)
+{
+	uint32_t watts_num = 0;
+	char *end_ptr = NULL;
+
+	if (!strcasecmp(watts_str, "n/a") || !strcasecmp(watts_str, "none"))
+		return watts_num;
+	if (!strcasecmp(watts_str, "INFINITE"))
+		return INFINITE;
+	watts_num = strtol(watts_str, &end_ptr, 10);
+	if ((end_ptr[0] == 'k') || (end_ptr[0] == 'K'))
+		watts_num *= 1000;
+	else if ((end_ptr[0] == 'm') || (end_ptr[0] == 'M'))
+		watts_num *= 1000000;
+	else if (end_ptr[0] != '\0')
+		watts_num = NO_VAL;
+	return watts_num;
+}
+
+/*
+ * scontrol_update_powercap - update the slurm powercapping configuration per the
+ *	supplied arguments
+ * IN argc - count of arguments
+ * IN argv - list of arguments
+ * RET 0 if no slurm error, errno otherwise. parsing error prints
+ *			error message and returns 0
+ */
+extern int
+scontrol_update_powercap (int argc, char *argv[])
+{
+	update_powercap_msg_t powercap_msg;
+	int i;
+	char *tag, *val;
+	int taglen;
+
+	memset(&powercap_msg, 0, sizeof(update_powercap_msg_t));
+	powercap_msg.power_cap = NO_VAL;
+	powercap_msg.min_watts = NO_VAL;
+	powercap_msg.cur_max_watts = NO_VAL;
+	powercap_msg.adj_max_watts = NO_VAL;
+	powercap_msg.max_watts = NO_VAL;
+
+	for (i = 0; i < argc; i++) {
+		tag = argv[i];
+		val = strchr(argv[i], '=');
+		if (val) {
+			taglen = val - argv[i];
+			val++;
+		} else {
+			exit_code = 1;
+			error("Invalid input: %s  Request aborted", argv[i]);
+			return -1;
+		}
+
+		if (strncasecmp(tag, "PowerCap", MAX(taglen, 8)) == 0) {
+			powercap_msg.power_cap = _parse_watts(val);
+			break;
+		}
+	}
+
+	if (powercap_msg.power_cap == NO_VAL) {
+		exit_code = 1;
+		error("Invalid PowerCap value.");
+		return 0;
+	}
+
+	if (slurm_update_powercap(&powercap_msg)) {
+		exit_code = 1;
+		return slurm_get_errno ();
+	} else
+		return 0;
+}
diff --git a/src/sdiag/Makefile.in b/src/sdiag/Makefile.in
index 8b510ac5c..446aad89d 100644
--- a/src/sdiag/Makefile.in
+++ b/src/sdiag/Makefile.in
@@ -102,6 +102,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -110,10 +111,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -126,7 +129,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -249,6 +252,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -298,8 +303,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -318,6 +327,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -361,6 +373,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -384,6 +397,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/sdiag/sdiag.c b/src/sdiag/sdiag.c
index bf546759a..ecb2aed8a 100644
--- a/src/sdiag/sdiag.c
+++ b/src/sdiag/sdiag.c
@@ -46,6 +46,7 @@
 #include "src/common/macros.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_protocol_defs.h"
+#include "src/common/slurm_time.h"
 #include "src/common/uid.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
@@ -112,8 +113,8 @@ static int _print_stats(void)
 	}
 
 	printf("*******************************************************\n");
-	printf("sdiag output at %s", ctime(&buf->req_time));
-	printf("Data since      %s", ctime(&buf->req_time_start));
+	printf("sdiag output at %s", slurm_ctime(&buf->req_time));
+	printf("Data since      %s", slurm_ctime(&buf->req_time_start));
 	printf("*******************************************************\n");
 
 	printf("Server thread count: %d\n", buf->server_thread_count);
@@ -151,11 +152,11 @@ static int _print_stats(void)
 	printf("\tTotal backfilled jobs (since last stats cycle start): %u\n",
 	       buf->bf_last_backfilled_jobs);
 	printf("\tTotal cycles: %u\n", buf->bf_cycle_counter);
-	printf("\tLast cycle when: %s", ctime(&buf->bf_when_last_cycle));
+	printf("\tLast cycle when: %s", slurm_ctime(&buf->bf_when_last_cycle));
 	printf("\tLast cycle: %u\n", buf->bf_cycle_last);
 	printf("\tMax cycle:  %u\n", buf->bf_cycle_max);
 	if (buf->bf_cycle_counter > 0) {
-		printf("\tMean cycle: %u\n",
+		printf("\tMean cycle: %"PRIu64"\n",
 		       buf->bf_cycle_sum / buf->bf_cycle_counter);
 	}
 	printf("\tLast depth cycle: %u\n", buf->bf_last_depth);
diff --git a/src/sinfo/Makefile.in b/src/sinfo/Makefile.in
index 1bed62ebe..df003d933 100644
--- a/src/sinfo/Makefile.in
+++ b/src/sinfo/Makefile.in
@@ -103,6 +103,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -111,10 +112,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -127,7 +130,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -252,6 +255,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -301,8 +306,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -321,6 +330,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -364,6 +376,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -387,6 +400,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/sinfo/opts.c b/src/sinfo/opts.c
index 39c6da284..63758c1c2 100644
--- a/src/sinfo/opts.c
+++ b/src/sinfo/opts.c
@@ -65,6 +65,7 @@
 #define OPT_LONG_HELP   0x100
 #define OPT_LONG_USAGE  0x101
 #define OPT_LONG_HIDE	0x102
+#define OPT_LONG_NOCONVERT 0x104
 
 /* FUNCTIONS */
 static List  _build_state_list( char* str );
@@ -72,14 +73,17 @@ static List  _build_all_states_list( void );
 static List  _build_part_list( char *parts );
 static char *_get_prefix(char *token);
 static void  _help( void );
-static int   _parse_format( char* );
-static bool  _node_state_equal (int state_id, const char *state_str);
-static int   _node_state_id (char *str);
-static const char * _node_state_list (void);
-static void  _parse_token( char *token, char *field, int *field_size,
-			   bool *right_justify, char **suffix);
-static void  _print_options( void );
-static void  _usage( void );
+static int   _parse_format(char *);
+static int   _parse_long_format(char *);
+static bool  _node_state_equal(int state_id, const char *state_str);
+static int   _node_state_id(char *str);
+static const char * _node_state_list(void);
+static void  _parse_token(char *token, char *field, int *field_size,
+			  bool *right_justify, char **suffix);
+static void  _parse_long_token(char *token, char *sep, int *field_size,
+			       bool *right_justify, char **suffix);
+static void  _print_options(void);
+static void  _usage(void);
 
 /*
  * parse_command_line, fill in params data structure with data
@@ -90,6 +94,7 @@ extern void parse_command_line(int argc, char *argv[])
 	int opt_char;
 	int option_index;
 	hostlist_t host_list;
+	bool long_form = false;
 	bool opt_a_set = false, opt_p_set = false;
 	bool env_a_set = false, env_p_set = false;
 	static struct option long_options[] = {
@@ -103,8 +108,10 @@ extern void parse_command_line(int argc, char *argv[])
 		{"cluster",   required_argument, 0, 'M'},
 		{"clusters",  required_argument, 0, 'M'},
 		{"nodes",     required_argument, 0, 'n'},
+                {"noconvert", no_argument,       0, OPT_LONG_NOCONVERT},
 		{"Node",      no_argument,       0, 'N'},
 		{"format",    required_argument, 0, 'o'},
+		{"Format",    required_argument, 0, 'O'},
 		{"partition", required_argument, 0, 'p'},
 		{"responding",no_argument,       0, 'r'},
 		{"list-reasons", no_argument,    0, 'R'},
@@ -120,6 +127,8 @@ extern void parse_command_line(int argc, char *argv[])
 		{NULL,        0,                 0, 0}
 	};
 
+	params.convert_flags = CONVERT_NUM_UNIT_EXACT;
+
 	if (getenv("SINFO_ALL")) {
 		env_a_set = true;
 		params.all_flag = true;
@@ -145,8 +154,9 @@ extern void parse_command_line(int argc, char *argv[])
 		working_cluster_rec = list_peek(params.clusters);
 	}
 
-	while((opt_char = getopt_long(argc, argv, "abdehi:lM:n:No:p:rRsS:t:TvV",
-			long_options, &option_index)) != -1) {
+	while ((opt_char = getopt_long(argc, argv,
+				       "abdehi:lM:n:No:O:p:rRsS:t:TvV",
+				       long_options, &option_index)) != -1) {
 		switch (opt_char) {
 		case (int)'?':
 			fprintf(stderr,
@@ -156,8 +166,7 @@ extern void parse_command_line(int argc, char *argv[])
 		case (int)'a':
 			opt_a_set = true;
 			xfree(params.partition);
-			if (params.partition)
-				list_destroy(params.part_list);
+			FREE_NULL_LIST(params.part_list);
 			params.all_flag = true;
 			break;
 		case (int)'b':
@@ -193,8 +202,7 @@ extern void parse_command_line(int argc, char *argv[])
 			params.long_output = true;
 			break;
 		case (int) 'M':
-			if (params.clusters)
-				list_destroy(params.clusters);
+			FREE_NULL_LIST(params.clusters);
 			if (!(params.clusters =
 			      slurmdb_get_info_cluster(optarg))) {
 				print_db_notok(optarg, 0);
@@ -202,6 +210,9 @@ extern void parse_command_line(int argc, char *argv[])
 			}
 			working_cluster_rec = list_peek(params.clusters);
 			break;
+		case OPT_LONG_NOCONVERT:
+			params.convert_flags |= CONVERT_NUM_UNIT_NO;
+			break;
 		case (int) 'n':
 			xfree(params.nodes);
 			params.nodes = xstrdup(optarg);
@@ -229,11 +240,15 @@ extern void parse_command_line(int argc, char *argv[])
 			xfree(params.format);
 			params.format = xstrdup(optarg);
 			break;
+		case (int) 'O':
+			long_form = true;
+			xfree(params.format);
+			params.format = xstrdup(optarg);
+			break;
 		case (int) 'p':
 			opt_p_set = true;
 			xfree(params.partition);
-			if (params.partition)
-				list_destroy(params.part_list);
+			FREE_NULL_LIST(params.part_list);
 			params.partition = xstrdup(optarg);
 			params.part_list = _build_part_list(optarg);
 			params.all_flag = true;
@@ -317,7 +332,11 @@ extern void parse_command_line(int argc, char *argv[])
 			  "%9P %.5a %.10l %.6D %.6t %N";
 		}
 	}
-	_parse_format( params.format );
+
+	if (long_form)
+		_parse_long_format(params.format);
+	else
+		_parse_format(params.format);
 
 	if (params.list_reasons && (params.state_list == NULL)) {
 		params.states = xstrdup ("down,drain,error");
@@ -703,6 +722,12 @@ _parse_format( char* format )
 					field_size,
 					right_justify,
 					suffix );
+		} else if (field[0] == 'e') {
+			params.match_flags.free_mem_flag = true;
+			format_add_free_mem( params.format_list,
+					field_size,
+					right_justify,
+					suffix );
 		} else if (field[0] == 'p') {
 			params.match_flags.priority_flag = true;
 			format_add_priority( params.format_list,
@@ -819,6 +844,280 @@ _parse_format( char* format )
 	return SLURM_SUCCESS;
 }
 
+static int _parse_long_format (char* format_long)
+{
+	int field_size;
+	bool right_justify, format_all = false;
+	char *tmp_format = NULL, *token = NULL, *str_tmp = NULL;
+	char *sep = NULL;
+	char* suffix = NULL;
+
+	if (format_long == NULL) {
+		error("Format long option lacks specification");
+		exit( 1 );
+	}
+
+	params.format_list = list_create(NULL);
+	tmp_format = xstrdup(format_long);
+	token = strtok_r(tmp_format, ",",&str_tmp);
+
+	while (token) {
+		_parse_long_token( token, sep, &field_size, &right_justify,
+				   &suffix);
+
+		if (!strcasecmp(token, "all")) {
+			_parse_format ("%all");
+		} else if (!strcasecmp(token, "allocmem")) {
+			params.match_flags.alloc_mem_flag = true;
+			format_add_alloc_mem( params.format_list,
+						field_size,
+						right_justify,
+						suffix );
+		} else if (!strcasecmp(token, "allocnodes")) {
+			format_add_alloc_nodes( params.format_list,
+						field_size,
+						right_justify,
+						suffix );
+		} else if (!strcasecmp(token, "available")) {
+			params.match_flags.avail_flag = true;
+			format_add_avail( params.format_list,
+					  field_size,
+					  right_justify,
+					  suffix );
+		} else if (!strcasecmp(token, "cpus")) {
+			params.match_flags.cpus_flag = true;
+			format_add_cpus( params.format_list,
+					 field_size,
+					 right_justify,
+					 suffix );
+		} else if (!strcasecmp(token, "cpusload")) {
+			params.match_flags.cpu_load_flag = true;
+			format_add_cpu_load( params.format_list,
+					     field_size,
+					     right_justify,
+					     suffix );
+		} else if (!strcasecmp(token, "freemem")) {
+			params.match_flags.free_mem_flag = true;
+			format_add_free_mem( params.format_list,
+					     field_size,
+					     right_justify,
+					     suffix );
+		} else if (!strcasecmp(token, "cpusstate")) {
+			params.match_flags.cpus_flag = true;
+			format_add_cpus_aiot( params.format_list,
+					      field_size,
+					      right_justify,
+					      suffix );
+		} else if (!strcasecmp(token, "cores")) {
+			params.match_flags.cores_flag = true;
+			format_add_cores( params.format_list,
+					  field_size,
+					  right_justify,
+					  suffix );
+		} else if (!strcasecmp(token, "defaulttime")) {
+			params.match_flags.default_time_flag = true;
+			format_add_default_time( params.format_list,
+						 field_size,
+						 right_justify,
+						 suffix );
+		} else if (!strcasecmp(token, "disk")) {
+			params.match_flags.disk_flag = true;
+			format_add_disk( params.format_list,
+					 field_size,
+					 right_justify,
+					 suffix );
+		} else if (!strcasecmp(token, "features")) {
+			params.match_flags.features_flag = true;
+			format_add_features( params.format_list,
+					     field_size,
+					     right_justify,
+					     suffix );
+		} else if (!strcasecmp(token, "groups")) {
+			params.match_flags.groups_flag = true;
+			format_add_groups( params.format_list,
+					   field_size,
+					   right_justify,
+					   suffix );
+		} else if (!strcasecmp(token, "gres")) {
+			params.match_flags.gres_flag = true;
+			format_add_gres( params.format_list,
+					 field_size,
+					 right_justify,
+					 suffix );
+		} else if (!strcasecmp(token, "maxcpuspernode")) {
+			params.match_flags.max_cpus_per_node_flag = true;
+			format_add_max_cpus_per_node( params.format_list,
+						      field_size,
+						      right_justify,
+						      suffix );
+		} else if (!strcasecmp(token, "memory")) {
+			params.match_flags.memory_flag = true;
+			format_add_memory( params.format_list,
+					   field_size,
+					   right_justify,
+					   suffix );
+		} else if (!strcasecmp(token, "nodes")) {
+			format_add_nodes( params.format_list,
+					  field_size,
+					  right_justify,
+					  suffix );
+		} else if (!strcasecmp(token, "nodeaddr")) {
+			params.match_flags.node_addr_flag = true;
+			format_add_node_address( params.format_list,
+						 field_size,
+						 right_justify,
+						 suffix );
+		} else if (!strcasecmp(token, "nodeai")) {
+			format_add_nodes_ai( params.format_list,
+					     field_size,
+					     right_justify,
+					     suffix );
+		} else if (!strcasecmp(token, "nodeaiot")) {
+			format_add_nodes_aiot( params.format_list,
+					       field_size,
+					       right_justify,
+					       suffix );
+		} else if (!strcasecmp(token, "nodehost")) {
+			params.match_flags.hostnames_flag = true;
+			format_add_node_hostnames( params.format_list,
+						   field_size,
+						   right_justify,
+						   suffix );
+		} else if (!strcasecmp(token, "nodelist")) {
+			format_add_node_list( params.format_list,
+					      field_size,
+					      right_justify,
+					      suffix );
+		} else if (!strcasecmp(token, "partition")) {
+			params.match_flags.partition_flag = true;
+			format_add_partition( params.format_list,
+					      field_size,
+					      right_justify,
+					      suffix );
+		} else if (!strcasecmp(token, "partitionname")) {
+			params.match_flags.partition_flag = true;
+			format_add_partition_name( params.format_list,
+						   field_size,
+						   right_justify,
+						   suffix );
+		} else if (!strcasecmp(token, "preemptmode")) {
+			params.match_flags.preempt_mode_flag = true;
+			format_add_preempt_mode( params.format_list,
+						 field_size,
+						 right_justify,
+						 suffix );
+		} else if (!strcasecmp(token, "priority")) {
+			params.match_flags.priority_flag = true;
+			format_add_priority( params.format_list,
+					     field_size,
+					     right_justify,
+					     suffix );
+		} else if (!strcasecmp(token, "reason")) {
+			params.match_flags.reason_flag = true;
+			format_add_reason( params.format_list,
+					   field_size,
+					   right_justify,
+					   suffix );
+		} else if (!strcasecmp(token, "root")) {
+			params.match_flags.root_flag = true;
+			format_add_root( params.format_list,
+					 field_size,
+					 right_justify,
+					 suffix );
+		} else if (!strcasecmp(token, "share")) {
+			params.match_flags.share_flag = true;
+			format_add_share( params.format_list,
+					  field_size,
+					  right_justify,
+					  suffix );
+		} else if (!strcasecmp(token, "size")) {
+			params.match_flags.job_size_flag = true;
+			format_add_size( params.format_list,
+					 field_size,
+					 right_justify,
+					 suffix );
+		} else if (!strcasecmp(token, "statecompact")) {
+			params.match_flags.state_flag = true;
+			format_add_state_compact( params.format_list,
+						  field_size,
+						  right_justify,
+						  suffix );
+		} else if (!strcasecmp(token, "statelong")) {
+			params.match_flags.state_flag = true;
+			format_add_state_long( params.format_list,
+					       field_size,
+					       right_justify,
+					       suffix );
+		} else if (!strcasecmp(token, "sockets")) {
+			params.match_flags.sockets_flag = true;
+			format_add_sockets( params.format_list,
+					    field_size,
+					    right_justify,
+					    suffix );
+		} else if (!strcasecmp(token, "socketcorethread")) {
+			params.match_flags.sct_flag = true;
+			format_add_sct( params.format_list,
+					field_size,
+					right_justify,
+					suffix );
+		} else if (!strcasecmp(token, "time")) {
+			params.match_flags.max_time_flag = true;
+			format_add_time( params.format_list,
+					 field_size,
+					 right_justify,
+					 suffix );
+		} else if (!strcasecmp(token, "timestamp")) {
+			params.match_flags.reason_timestamp_flag = true;
+			format_add_timestamp( params.format_list,
+					      field_size,
+					      right_justify,
+					      suffix );
+		} else if (!strcasecmp(token, "threads")) {
+			params.match_flags.threads_flag = true;
+			format_add_threads( params.format_list,
+					    field_size,
+					    right_justify,
+					    suffix );
+		} else if (!strcasecmp(token, "user")) {
+			params.match_flags.reason_user_flag = true;
+			format_add_user( params.format_list,
+					 field_size,
+					 right_justify,
+					 suffix );
+		} else if (!strcasecmp(token, "userlong")) {
+			params.match_flags.reason_user_flag = true;
+			format_add_user_long( params.format_list,
+					      field_size,
+					      right_justify,
+					      suffix );
+		} else if (!strcasecmp(token, "version")) {
+			params.match_flags.version_flag = true;
+			format_add_version( params.format_list,
+					    field_size,
+					    right_justify,
+					    suffix);
+		} else if (!strcasecmp(token, "weight")) {
+			params.match_flags.weight_flag = true;
+			format_add_weight( params.format_list,
+					   field_size,
+					   right_justify,
+					   suffix );
+		} else if (format_all) {
+			/* ignore */
+		} else {
+			format_add_invalid( params.format_list,
+					    field_size,
+					    right_justify,
+					    suffix );
+			error( "Invalid node format specification: %s",
+			       token );
+		}
+		token = strtok_r(NULL, ",", &str_tmp);
+	}
+	xfree(tmp_format);
+	return SLURM_SUCCESS;
+}
+
 /* Take a format specification and copy out it's prefix
  * IN/OUT token - input specification, everything before "%" is removed
  * RET - everything before "%" in the token
@@ -874,6 +1173,29 @@ _parse_token( char *token, char *field, int *field_size, bool *right_justify,
 	*suffix = xstrdup(&token[i]);
 }
 
+static void
+_parse_long_token( char *token, char *sep, int *field_size, bool *right_justify,
+		   char **suffix)
+{
+	char *ptr;
+
+	xassert(token);
+	ptr = strchr(token, ':');
+	if (ptr) {
+		ptr[0] = '\0';
+		if (ptr[1] == '.') {
+			*right_justify = true;
+			ptr++;
+		} else {
+			*right_justify = false;
+		}
+		*field_size = atoi(ptr + 1);
+	} else {
+		*right_justify = false;
+		*field_size = 20;
+	}
+}
+
 /* print the parameters specified */
 void _print_options( void )
 {
@@ -901,6 +1223,8 @@ void _print_options( void )
 	printf("verbose     = %d\n", params.verbose);
 	printf("-----------------------------\n");
 	printf("all_flag        = %s\n", params.all_flag ? "true" : "false");
+	printf("alloc_mem_flag  = %s\n", params.match_flags.alloc_mem_flag ?
+			"true" : "false");
 	printf("avail_flag      = %s\n", params.match_flags.avail_flag ?
 			"true" : "false");
 	printf("bg_flag         = %s\n", params.bg_flag ? "true" : "false");
@@ -949,7 +1273,7 @@ static void _usage( void )
 {
 	printf("\
 Usage: sinfo [-abdelNRrsTv] [-i seconds] [-t states] [-p partition] [-n nodes]\n\
-             [-S fields] [-o format] \n");
+             [-S fields] [-o format] [-O Format]\n");
 }
 
 static void _help( void )
@@ -957,7 +1281,7 @@ static void _help( void )
 	printf ("\
 Usage: sinfo [OPTIONS]\n\
   -a, --all                  show all partitions (including hidden and those\n\
-                             not accessible)\n\
+			     not accessible)\n\
   -b, --bg                   show bgblocks (on Blue Gene systems)\n\
   -d, --dead                 show only non-responding nodes\n\
   -e, --exact                group nodes only on exact match of configuration\n\
@@ -966,8 +1290,11 @@ Usage: sinfo [OPTIONS]\n\
   -i, --iterate=seconds      specify an iteration period\n\
   -l, --long                 long output - displays more information\n\
   -n, --nodes=NODES          report on specific node(s)\n\
+  --noconvert                don't convert units from their original type\n\
+			     (e.g. 2048M won't be converted to 2G).\n\
   -N, --Node                 Node-centric format\n\
   -o, --format=format        format specification\n\
+  -O, --Format=format        long format specification\n\
   -p, --partition=PARTITION  report on specific partition\n\
   -r, --responding           report only responding nodes\n\
   -R, --list-reasons         list reason nodes are down or drained\n\
diff --git a/src/sinfo/print.c b/src/sinfo/print.c
index 79af2817e..da3c8c86b 100644
--- a/src/sinfo/print.c
+++ b/src/sinfo/print.c
@@ -46,11 +46,13 @@
 #include <pwd.h>
 #include <sys/types.h>
 
-#include "src/common/list.h"
 #include "src/common/hostlist.h"
+#include "src/common/list.h"
+#include "src/common/parse_time.h"
+#include "src/common/slurm_time.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
-#include "src/common/parse_time.h"
+
 #include "src/sinfo/print.h"
 #include "src/sinfo/sinfo.h"
 
@@ -65,6 +67,9 @@ static int   _build_min_max_32_string(char *buffer, int buf_size,
 static int   _build_cpu_load_min_max_32(char *buffer, int buf_size,
 					uint32_t min, uint32_t max,
 					bool range);
+static int   _build_free_mem_min_max_32(char *buffer, int buf_size,
+					uint32_t min, uint32_t max,
+					bool range);
 static void  _print_reservation(reserve_info_t *resv_ptr, int width);
 static int   _print_secs(long time, int width, bool right, bool cut_output);
 static int   _print_str(char *str, int width, bool right, bool cut_output);
@@ -81,7 +86,7 @@ void print_date(void)
 	time_t now;
 
 	now = time(NULL);
-	printf("%s", ctime(&now));
+	printf("%s", slurm_ctime(&now));
 }
 
 int print_sinfo_list(List sinfo_list)
@@ -237,8 +242,10 @@ _build_min_max_16_string(char *buffer, int buf_size, uint16_t min, uint16_t max,
 {
 	char tmp_min[8];
 	char tmp_max[8];
-	convert_num_unit((float)min, tmp_min, sizeof(tmp_min), UNIT_NONE);
-	convert_num_unit((float)max, tmp_max, sizeof(tmp_max), UNIT_NONE);
+	convert_num_unit((float)min, tmp_min, sizeof(tmp_min), UNIT_NONE,
+			 params.convert_flags);
+	convert_num_unit((float)max, tmp_max, sizeof(tmp_max), UNIT_NONE,
+			 params.convert_flags);
 
 	if (max == min)
 		return snprintf(buffer, buf_size, "%s", tmp_max);
@@ -263,9 +270,9 @@ _build_min_max_32_string(char *buffer, int buf_size,
 
 	if (use_suffix) {
 		convert_num_unit((float)min, tmp_min, sizeof(tmp_min),
-				 UNIT_NONE);
+				 UNIT_NONE, params.convert_flags);
 		convert_num_unit((float)max, tmp_max, sizeof(tmp_max),
-				 UNIT_NONE);
+				 UNIT_NONE, params.convert_flags);
 	} else {
 		snprintf(tmp_min, sizeof(tmp_min), "%u", min);
 		snprintf(tmp_max, sizeof(tmp_max), "%u", max);
@@ -315,6 +322,35 @@ _build_cpu_load_min_max_32(char *buffer, int buf_size,
 		return snprintf(buffer, buf_size, "%s+", tmp_min);
 }
 
+static int
+_build_free_mem_min_max_32(char *buffer, int buf_size,
+			    uint32_t min, uint32_t max,
+			    bool range)
+{
+
+	char tmp_min[16];
+	char tmp_max[16];
+
+	if (min == NO_VAL) {
+		strcpy(tmp_min, "N/A");
+	} else {
+		snprintf(tmp_min, sizeof(tmp_min), "%u", min);
+	}
+
+	if (max == NO_VAL) {
+		strcpy(tmp_max, "N/A");
+	} else {
+		snprintf(tmp_max, sizeof(tmp_max), "%u", max);
+	}
+
+	if (max == min)
+		return snprintf(buffer, buf_size, "%s", tmp_max);
+	else if (range)
+		return snprintf(buffer, buf_size, "%s-%s", tmp_min, tmp_max);
+	else
+		return snprintf(buffer, buf_size, "%s+", tmp_min);
+}
+
 int
 format_add_function(List list, int width, bool right, char *suffix,
 			int (*function) (sinfo_data_t *, int, bool, char*))
@@ -442,13 +478,17 @@ int _print_cpus_aiot(sinfo_data_t * sinfo_data, int width,
 	if (sinfo_data) {
 		if (params.cluster_flags & CLUSTER_FLAG_BG) {
 			convert_num_unit((float)sinfo_data->cpus_alloc,
-					 tmpa, sizeof(tmpa), UNIT_NONE);
+					 tmpa, sizeof(tmpa), UNIT_NONE,
+					 params.convert_flags);
 			convert_num_unit((float)sinfo_data->cpus_idle,
-					 tmpi, sizeof(tmpi), UNIT_NONE);
+					 tmpi, sizeof(tmpi), UNIT_NONE,
+					 params.convert_flags);
 			convert_num_unit((float)sinfo_data->cpus_other,
-					 tmpo, sizeof(tmpo), UNIT_NONE);
+					 tmpo, sizeof(tmpo), UNIT_NONE,
+					 params.convert_flags);
 			convert_num_unit((float)sinfo_data->cpus_total,
-					 tmpt, sizeof(tmpt), UNIT_NONE);
+					 tmpt, sizeof(tmpt), UNIT_NONE,
+					 params.convert_flags);
 		} else {
 			snprintf(tmpa, sizeof(tmpa), "%u",
 				 sinfo_data->cpus_alloc);
@@ -730,7 +770,8 @@ int _print_nodes_t(sinfo_data_t * sinfo_data, int width,
 	if (sinfo_data) {
 		if (params.cluster_flags & CLUSTER_FLAG_BG)
 			convert_num_unit((float)sinfo_data->nodes_total,
-					 tmp, sizeof(tmp), UNIT_NONE);
+					 tmp, sizeof(tmp), UNIT_NONE,
+					 params.convert_flags);
 		else
 			snprintf(tmp, sizeof(tmp), "%d",
 				 sinfo_data->nodes_total);
@@ -753,9 +794,11 @@ int _print_nodes_ai(sinfo_data_t * sinfo_data, int width,
 	if (sinfo_data) {
 		if (params.cluster_flags & CLUSTER_FLAG_BG) {
 			convert_num_unit((float)sinfo_data->nodes_alloc,
-					 tmpa, sizeof(tmpa), UNIT_NONE);
+					 tmpa, sizeof(tmpa), UNIT_NONE,
+					 params.convert_flags);
 			convert_num_unit((float)sinfo_data->nodes_idle,
-					 tmpi, sizeof(tmpi), UNIT_NONE);
+					 tmpi, sizeof(tmpi), UNIT_NONE,
+					 params.convert_flags);
 		} else {
 			snprintf(tmpa, sizeof(tmpa), "%d",
 				 sinfo_data->nodes_alloc);
@@ -784,13 +827,17 @@ int _print_nodes_aiot(sinfo_data_t * sinfo_data, int width,
 	if (sinfo_data) {
 		if (params.cluster_flags & CLUSTER_FLAG_BG) {
 			convert_num_unit((float)sinfo_data->nodes_alloc,
-					 tmpa, sizeof(tmpa), UNIT_NONE);
+					 tmpa, sizeof(tmpa), UNIT_NONE,
+					 params.convert_flags);
 			convert_num_unit((float)sinfo_data->nodes_idle,
-					 tmpi, sizeof(tmpi), UNIT_NONE);
+					 tmpi, sizeof(tmpi), UNIT_NONE,
+					 params.convert_flags);
 			convert_num_unit((float)sinfo_data->nodes_other,
-					 tmpo, sizeof(tmpo), UNIT_NONE);
+					 tmpo, sizeof(tmpo), UNIT_NONE,
+					 params.convert_flags);
 			convert_num_unit((float)sinfo_data->nodes_total,
-					 tmpt, sizeof(tmpt), UNIT_NONE);
+					 tmpt, sizeof(tmpt), UNIT_NONE,
+					 params.convert_flags);
 		} else {
 			snprintf(tmpa, sizeof(tmpa), "%u",
 				 sinfo_data->nodes_alloc);
@@ -1188,6 +1235,26 @@ int _print_cpu_load(sinfo_data_t * sinfo_data, int width,
 	return SLURM_SUCCESS;
 }
 
+int _print_free_mem(sinfo_data_t * sinfo_data, int width,
+		    bool right_justify, char *suffix)
+{
+	char id[FORMAT_STRING_SIZE];
+
+	if (sinfo_data) {
+		_build_free_mem_min_max_32(id, FORMAT_STRING_SIZE,
+					 sinfo_data->min_free_mem,
+					 sinfo_data->max_free_mem,
+					 true);
+		_print_str(id, width, right_justify, true);
+	} else {
+		_print_str("FREE_MEM", width, right_justify, true);
+	}
+
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
 int _print_max_cpus_per_node(sinfo_data_t * sinfo_data, int width,
 			     bool right_justify, char *suffix)
 {
@@ -1226,3 +1293,19 @@ int _print_version(sinfo_data_t * sinfo_data, int width,
 	return SLURM_SUCCESS;
 
 }
+
+int _print_alloc_mem(sinfo_data_t * sinfo_data, int width,
+		     bool right_justify, char *suffix)
+{
+	char tmp_line[32];
+	if (sinfo_data) {
+		sprintf(tmp_line, "%u", sinfo_data->alloc_memory);
+		_print_str(tmp_line, width, right_justify, true);
+	} else {
+		_print_str("ALLOCMEM", width, right_justify, true);
+	}
+	if (suffix) {
+		printf ("%s", suffix);
+	}
+	return SLURM_SUCCESS;
+}
diff --git a/src/sinfo/print.h b/src/sinfo/print.h
index 4fdb6bcc1..2100c834c 100644
--- a/src/sinfo/print.h
+++ b/src/sinfo/print.h
@@ -146,10 +146,14 @@ void print_sinfo_reservation(reserve_info_msg_t *resv_ptr);
 	format_add_function(list,wid,right,suffix,_print_com_invalid)
 #define format_add_cpu_load(list,wid,right,suffix) \
 	format_add_function(list,wid,right,suffix,_print_cpu_load)
+#define format_add_free_mem(list,wid,right,suffix) \
+	format_add_function(list,wid,right,suffix,_print_free_mem)
 #define format_add_max_cpus_per_node(list,wid,right,suffix) \
 	format_add_function(list,wid,right,suffix,_print_max_cpus_per_node)
 #define format_add_version(list,wid,right,suffix) \
 	format_add_function(list,wid,right,suffix,_print_version)
+#define format_add_alloc_mem(list,wid,right,suffix) \
+	format_add_function(list,wid,right,suffix,_print_alloc_mem)
 
 /*****************************************************************************
  * Print Field Functions
@@ -231,8 +235,12 @@ int _print_com_invalid(sinfo_data_t * sinfo_data, int width,
 		       bool right_justify, char *suffix);
 int _print_cpu_load(sinfo_data_t * node_ptr, int width,
 		    bool right_justify, char *suffix);
+int _print_free_mem(sinfo_data_t * node_ptr, int width,
+		    bool right_justify, char *suffix);
 int _print_max_cpus_per_node(sinfo_data_t * sinfo_data, int width,
 			     bool right_justify, char *suffix);
 int _print_version(sinfo_data_t * sinfo_data, int width,
 		   bool right_justify, char *suffix);
+int _print_alloc_mem(sinfo_data_t * sinfo_data, int width,
+		     bool right_justify, char *suffix);
 #endif
diff --git a/src/sinfo/sinfo.c b/src/sinfo/sinfo.c
index 2b36fc3c2..815d5ec7e 100644
--- a/src/sinfo/sinfo.c
+++ b/src/sinfo/sinfo.c
@@ -93,7 +93,6 @@ static int  _query_server(partition_info_msg_t ** part_pptr,
 static int _reservation_report(reserve_info_msg_t *resv_ptr);
 static bool _serial_part_data(void);
 static void _sort_hostlist(List sinfo_list);
-static int  _strcmp(char *data1, char *data2);
 static void _update_sinfo(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr,
 			  uint32_t node_scaling);
 
@@ -735,23 +734,25 @@ static void _sort_hostlist(List sinfo_list)
 
 static bool _match_node_data(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr)
 {
+	uint32_t tmp = 0;
+
 	if (params.match_flags.hostnames_flag ||
 	    params.match_flags.node_addr_flag)
 		return false;
 
 	if (sinfo_ptr->nodes &&
 	    params.match_flags.features_flag &&
-	    (_strcmp(node_ptr->features, sinfo_ptr->features)))
+	    (xstrcmp(node_ptr->features, sinfo_ptr->features)))
 		return false;
 
 	if (sinfo_ptr->nodes &&
 	    params.match_flags.gres_flag &&
-	    (_strcmp(node_ptr->gres, sinfo_ptr->gres)))
+	    (xstrcmp(node_ptr->gres, sinfo_ptr->gres)))
 		return false;
 
 	if (sinfo_ptr->nodes &&
 	    params.match_flags.reason_flag &&
-	    (_strcmp(node_ptr->reason, sinfo_ptr->reason)))
+	    (xstrcmp(node_ptr->reason, sinfo_ptr->reason)))
 		return false;
 
 	if (sinfo_ptr->nodes &&
@@ -769,10 +770,18 @@ static bool _match_node_data(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr)
 		char *state1, *state2;
 		state1 = node_state_string(node_ptr->node_state);
 		state2 = node_state_string(sinfo_ptr->node_state);
-		if (strcmp(state1, state2))
+		if (xstrcmp(state1, state2))
 			return false;
 	}
 
+	select_g_select_nodeinfo_get(node_ptr->select_nodeinfo,
+				     SELECT_NODEDATA_MEM_ALLOC,
+				     NODE_STATE_ALLOCATED,
+				     &tmp);
+	if (params.match_flags.alloc_mem_flag &&
+	    (tmp != sinfo_ptr->alloc_memory))
+		return false;
+
 	/* If no need to exactly match sizes, just return here
 	 * otherwise check cpus, disk, memory and weigth individually */
 	if (!params.exact_match)
@@ -808,6 +817,9 @@ static bool _match_node_data(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr)
 	if (params.match_flags.cpu_load_flag &&
 	    (node_ptr->cpu_load        != sinfo_ptr->min_cpu_load))
 		return false;
+	if (params.match_flags.free_mem_flag &&
+	    (node_ptr->free_mem        != sinfo_ptr->min_free_mem))
+		return false;
 	if (params.match_flags.version_flag &&
 	    (node_ptr->version     != sinfo_ptr->version))
 		return false;
@@ -839,7 +851,7 @@ static bool _match_part_data(sinfo_data_t *sinfo_ptr,
 		return false;
 
 	if (params.match_flags.partition_flag
-	    && (_strcmp(part_ptr->name, sinfo_ptr->part_info->name)))
+	    && (xstrcmp(part_ptr->name, sinfo_ptr->part_info->name)))
 		return false;
 
 	if (params.match_flags.avail_flag &&
@@ -847,7 +859,7 @@ static bool _match_part_data(sinfo_data_t *sinfo_ptr,
 		return false;
 
 	if (params.match_flags.groups_flag &&
-	    (_strcmp(part_ptr->allow_groups,
+	    (xstrcmp(part_ptr->allow_groups,
 		     sinfo_ptr->part_info->allow_groups)))
 		return false;
 
@@ -895,14 +907,14 @@ static bool _match_part_data(sinfo_data_t *sinfo_ptr,
 static void _update_sinfo(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr,
 			  uint32_t node_scaling)
 {
-	uint32_t base_state;
+	uint32_t base_state, alloc_mem = 0;
 	uint16_t used_cpus = 0, error_cpus = 0;
 	int total_cpus = 0, total_nodes = 0;
 	/* since node_scaling could be less here, we need to use the
 	 * global node scaling which should never change. */
 	int single_node_cpus = (node_ptr->cpus / g_node_scaling);
 
- 	base_state = node_ptr->node_state & NODE_STATE_BASE;
+	base_state = node_ptr->node_state & NODE_STATE_BASE;
 
 	if (sinfo_ptr->nodes_total == 0) {	/* first node added */
 		sinfo_ptr->node_state = node_ptr->node_state;
@@ -927,6 +939,8 @@ static void _update_sinfo(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr,
 		sinfo_ptr->max_weight = node_ptr->weight;
 		sinfo_ptr->min_cpu_load = node_ptr->cpu_load;
 		sinfo_ptr->max_cpu_load = node_ptr->cpu_load;
+		sinfo_ptr->min_free_mem = node_ptr->free_mem;
+		sinfo_ptr->max_free_mem = node_ptr->free_mem;
 		sinfo_ptr->max_cpus_per_node = sinfo_ptr->part_info->
 					       max_cpus_per_node;
 		sinfo_ptr->version    = node_ptr->version;
@@ -974,6 +988,11 @@ static void _update_sinfo(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr,
 			sinfo_ptr->min_cpu_load = node_ptr->cpu_load;
 		if (sinfo_ptr->max_cpu_load < node_ptr->cpu_load)
 			sinfo_ptr->max_cpu_load = node_ptr->cpu_load;
+
+		if (sinfo_ptr->min_free_mem > node_ptr->free_mem)
+			sinfo_ptr->min_free_mem = node_ptr->free_mem;
+		if (sinfo_ptr->max_free_mem < node_ptr->free_mem)
+			sinfo_ptr->max_free_mem = node_ptr->free_mem;
 	}
 
 	hostlist_push_host(sinfo_ptr->nodes, node_ptr->name);
@@ -993,6 +1012,10 @@ static void _update_sinfo(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr,
 				     SELECT_NODEDATA_SUBCNT,
 				     NODE_STATE_ERROR,
 				     &error_cpus);
+	select_g_select_nodeinfo_get(node_ptr->select_nodeinfo,
+				     SELECT_NODEDATA_MEM_ALLOC,
+				     NODE_STATE_ALLOCATED,
+				     &alloc_mem);
 
 	if (params.cluster_flags & CLUSTER_FLAG_BG) {
 		if (!params.match_flags.state_flag &&
@@ -1046,10 +1069,10 @@ static void _update_sinfo(sinfo_data_t *sinfo_ptr, node_info_t *node_ptr,
 
 	sinfo_ptr->nodes_total += total_nodes;
 
-
 	sinfo_ptr->cpus_alloc += used_cpus;
 	sinfo_ptr->cpus_total += total_cpus;
 	total_cpus -= used_cpus + error_cpus;
+	sinfo_ptr->alloc_memory = alloc_mem;
 
 	if (error_cpus) {
 		sinfo_ptr->cpus_idle += total_cpus;
@@ -1213,19 +1236,6 @@ static void _sinfo_list_delete(void *data)
 	xfree(sinfo_ptr);
 }
 
-/* like strcmp, but works with NULL pointers */
-static int _strcmp(char *data1, char *data2)
-{
-	static char null_str[] = "(null)";
-
-	if (data1 == NULL)
-		data1 = null_str;
-	if (data2 == NULL)
-		data2 = null_str;
-	return strcmp(data1, data2);
-}
-
-
 /* Find the given partition name in the list */
 static int _find_part_list(void *x, void *key)
 {
diff --git a/src/sinfo/sinfo.h b/src/sinfo/sinfo.h
index cf6be03ca..5652fe1ea 100644
--- a/src/sinfo/sinfo.h
+++ b/src/sinfo/sinfo.h
@@ -100,8 +100,11 @@ typedef struct {
 	uint32_t max_weight;
 	uint32_t min_cpu_load;
 	uint32_t max_cpu_load;
+	uint32_t min_free_mem;
+	uint32_t max_free_mem;
 
 	uint32_t max_cpus_per_node;
+	uint32_t alloc_memory;
 
 	char *version;
 	char *features;
@@ -124,6 +127,7 @@ typedef struct {
 /* Identify what fields must match for a node's information to be
  * combined into a single sinfo_data entry based upon output format */
 struct sinfo_match_flags {
+	bool alloc_mem_flag;
 	bool avail_flag;
 	bool cpus_flag;
 	bool sockets_flag;
@@ -151,6 +155,7 @@ struct sinfo_match_flags {
 	bool reason_timestamp_flag;
 	bool reason_user_flag;
 	bool cpu_load_flag;
+	bool free_mem_flag;
 	bool max_cpus_per_node_flag;
 	bool version_flag;
 };
@@ -160,6 +165,7 @@ struct sinfo_parameters {
 	bool all_flag;
 	List clusters;
 	uint32_t cluster_flags;
+	uint32_t convert_flags;
 	bool bg_flag;
 	bool dead_nodes;
 	bool exact_match;
diff --git a/src/sinfo/sort.c b/src/sinfo/sort.c
index 4d98937a1..12ca54ad7 100644
--- a/src/sinfo/sort.c
+++ b/src/sinfo/sort.c
@@ -54,6 +54,7 @@ static void _get_sinfo_from_void(sinfo_data_t **s1, sinfo_data_t **s2,
 				 void *v1, void *v2);
 static int _sort_by_avail(void *void1, void *void2);
 static int _sort_by_cpu_load(void *void1, void *void2);
+static int _sort_by_free_mem(void *void1, void *void2);
 static int _sort_by_cpus(void *void1, void *void2);
 static int _sort_by_sct(void *void1, void *void2);
 static int _sort_by_sockets(void *void1, void *void2);
@@ -143,6 +144,8 @@ void sort_sinfo_list(List sinfo_list)
 			list_sort(sinfo_list, _sort_by_node_addr);
 		else if (params.sort[i] == 'O')
 			list_sort(sinfo_list, _sort_by_cpu_load);
+		else if (params.sort[i] == 'e')
+			list_sort(sinfo_list, _sort_by_free_mem);
 		else if (params.sort[i] == 'p')
 			list_sort(sinfo_list, _sort_by_priority);
 		else if (params.sort[i] == 'P')
@@ -227,6 +230,21 @@ static int _sort_by_cpu_load(void *void1, void *void2)
 	return diff;
 }
 
+static int _sort_by_free_mem(void *void1, void *void2)
+{
+	int diff;
+	sinfo_data_t *sinfo1;
+	sinfo_data_t *sinfo2;
+
+	_get_sinfo_from_void(&sinfo1, &sinfo2, void1, void2);
+
+	diff = _diff_uint32(sinfo1->min_free_mem, sinfo2->min_free_mem);
+
+	if (reverse_order)
+		diff = -diff;
+	return diff;
+}
+
 static int _sort_by_cpus(void *void1, void *void2)
 {
 	int diff;
diff --git a/src/slurmctld/Makefile.am b/src/slurmctld/Makefile.am
index d2d6e9f4e..5eb2c9242 100644
--- a/src/slurmctld/Makefile.am
+++ b/src/slurmctld/Makefile.am
@@ -15,6 +15,8 @@ slurmctld_SOURCES =     \
 	agent.c  	\
 	agent.h		\
 	backup.c	\
+	burst_buffer.c	\
+	burst_buffer.h	\
 	controller.c 	\
 	front_end.c	\
 	front_end.h	\
@@ -42,6 +44,8 @@ slurmctld_SOURCES =     \
 	port_mgr.c	\
 	port_mgr.h	\
 	power_save.c	\
+	powercapping.c	\
+	powercapping.h	\
 	preempt.c	\
 	preempt.h	\
 	proc_req.c	\
@@ -52,6 +56,8 @@ slurmctld_SOURCES =     \
 	reservation.h	\
 	sched_plugin.c	\
 	sched_plugin.h	\
+	sicp.c		\
+	sicp.h		\
 	slurmctld.h	\
 	srun_comm.c	\
 	srun_comm.h	\
diff --git a/src/slurmctld/Makefile.in b/src/slurmctld/Makefile.in
index 0517581b2..37b0daf66 100644
--- a/src/slurmctld/Makefile.in
+++ b/src/slurmctld/Makefile.in
@@ -102,6 +102,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -110,10 +111,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -126,7 +129,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -136,17 +139,17 @@ CONFIG_CLEAN_VPATH_FILES =
 am__installdirs = "$(DESTDIR)$(sbindir)"
 PROGRAMS = $(sbin_PROGRAMS)
 am_slurmctld_OBJECTS = acct_policy.$(OBJEXT) agent.$(OBJEXT) \
-	backup.$(OBJEXT) controller.$(OBJEXT) front_end.$(OBJEXT) \
-	gang.$(OBJEXT) groups.$(OBJEXT) job_mgr.$(OBJEXT) \
-	job_scheduler.$(OBJEXT) job_submit.$(OBJEXT) \
+	backup.$(OBJEXT) burst_buffer.$(OBJEXT) controller.$(OBJEXT) \
+	front_end.$(OBJEXT) gang.$(OBJEXT) groups.$(OBJEXT) \
+	job_mgr.$(OBJEXT) job_scheduler.$(OBJEXT) job_submit.$(OBJEXT) \
 	licenses.$(OBJEXT) locks.$(OBJEXT) node_mgr.$(OBJEXT) \
 	node_scheduler.$(OBJEXT) partition_mgr.$(OBJEXT) \
 	ping_nodes.$(OBJEXT) slurmctld_plugstack.$(OBJEXT) \
-	port_mgr.$(OBJEXT) power_save.$(OBJEXT) preempt.$(OBJEXT) \
-	proc_req.$(OBJEXT) read_config.$(OBJEXT) reservation.$(OBJEXT) \
-	sched_plugin.$(OBJEXT) srun_comm.$(OBJEXT) \
-	state_save.$(OBJEXT) statistics.$(OBJEXT) step_mgr.$(OBJEXT) \
-	trigger_mgr.$(OBJEXT)
+	port_mgr.$(OBJEXT) power_save.$(OBJEXT) powercapping.$(OBJEXT) \
+	preempt.$(OBJEXT) proc_req.$(OBJEXT) read_config.$(OBJEXT) \
+	reservation.$(OBJEXT) sched_plugin.$(OBJEXT) sicp.$(OBJEXT) \
+	srun_comm.$(OBJEXT) state_save.$(OBJEXT) statistics.$(OBJEXT) \
+	step_mgr.$(OBJEXT) trigger_mgr.$(OBJEXT)
 slurmctld_OBJECTS = $(am_slurmctld_OBJECTS)
 am__DEPENDENCIES_1 =
 slurmctld_DEPENDENCIES = $(top_builddir)/src/common/libdaemonize.la \
@@ -260,6 +263,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -309,8 +314,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -329,6 +338,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -372,6 +384,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -395,6 +408,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -469,6 +483,8 @@ slurmctld_SOURCES = \
 	agent.c  	\
 	agent.h		\
 	backup.c	\
+	burst_buffer.c	\
+	burst_buffer.h	\
 	controller.c 	\
 	front_end.c	\
 	front_end.h	\
@@ -496,6 +512,8 @@ slurmctld_SOURCES = \
 	port_mgr.c	\
 	port_mgr.h	\
 	power_save.c	\
+	powercapping.c	\
+	powercapping.h	\
 	preempt.c	\
 	preempt.h	\
 	proc_req.c	\
@@ -506,6 +524,8 @@ slurmctld_SOURCES = \
 	reservation.h	\
 	sched_plugin.c	\
 	sched_plugin.h	\
+	sicp.c		\
+	sicp.h		\
 	slurmctld.h	\
 	srun_comm.c	\
 	srun_comm.h	\
@@ -618,6 +638,7 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/acct_policy.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/agent.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/backup.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/burst_buffer.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/controller.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/front_end.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/gang.Po@am__quote@
@@ -633,11 +654,13 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/ping_nodes.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/port_mgr.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/power_save.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/powercapping.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/preempt.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/proc_req.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/read_config.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/reservation.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sched_plugin.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sicp.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmctld_plugstack.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/srun_comm.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/state_save.Po@am__quote@
diff --git a/src/slurmctld/acct_policy.c b/src/slurmctld/acct_policy.c
index a09c69645..6dc485787 100644
--- a/src/slurmctld/acct_policy.c
+++ b/src/slurmctld/acct_policy.c
@@ -59,35 +59,301 @@ enum {
 	ACCT_POLICY_JOB_FINI
 };
 
-static slurmdb_used_limits_t *_get_used_limits_for_user(
-	List user_limit_list, uint32_t user_id)
+static int get_tres_state_reason(int tres_pos, int unk_reason)
 {
-	slurmdb_used_limits_t *used_limits = NULL;
-	ListIterator itr = NULL;
-
-	if (!user_limit_list)
-		return NULL;
-
-	itr = list_iterator_create(user_limit_list);
-	while ((used_limits = list_next(itr))) {
-		if (used_limits->uid == user_id)
+	switch (tres_pos) {
+	case TRES_ARRAY_CPU:
+		switch (unk_reason) {
+		case WAIT_ASSOC_GRP_UNK:
+			return WAIT_ASSOC_GRP_CPU;
+		case WAIT_ASSOC_GRP_UNK_MIN:
+			return WAIT_ASSOC_GRP_CPU_MIN;
+		case WAIT_ASSOC_GRP_UNK_RUN_MIN:
+			return WAIT_ASSOC_GRP_CPU_RUN_MIN;
+		case WAIT_ASSOC_MAX_UNK_PER_JOB:
+			return WAIT_ASSOC_MAX_CPU_PER_JOB;
+		case WAIT_ASSOC_MAX_UNK_MINS_PER_JOB:
+			return WAIT_ASSOC_MAX_CPU_MINS_PER_JOB;
+		case WAIT_QOS_GRP_UNK:
+			return WAIT_QOS_GRP_CPU;
+		case WAIT_QOS_GRP_UNK_MIN:
+			return WAIT_QOS_GRP_CPU_MIN;
+		case WAIT_QOS_GRP_UNK_RUN_MIN:
+			return WAIT_QOS_GRP_CPU_RUN_MIN;
+		case WAIT_QOS_MAX_UNK_PER_JOB:
+			return WAIT_QOS_MAX_CPU_PER_JOB;
+		case WAIT_QOS_MAX_UNK_PER_NODE:
+			return WAIT_QOS_MAX_CPU_PER_NODE;
+		case WAIT_QOS_MAX_UNK_PER_USER:
+			return WAIT_QOS_MAX_CPU_PER_USER;
+		case WAIT_QOS_MAX_UNK_MINS_PER_JOB:
+			return WAIT_QOS_MAX_CPU_MINS_PER_JOB;
+		case WAIT_QOS_MIN_UNK:
+			return WAIT_QOS_MIN_CPU;
+		default:
+			return unk_reason;
 			break;
+		}
+		break;
+	case TRES_ARRAY_MEM:
+		switch (unk_reason) {
+		case WAIT_ASSOC_GRP_UNK:
+			return WAIT_ASSOC_GRP_MEM;
+		case WAIT_ASSOC_GRP_UNK_MIN:
+			return WAIT_ASSOC_GRP_MEM_MIN;
+		case WAIT_ASSOC_GRP_UNK_RUN_MIN:
+			return WAIT_ASSOC_GRP_MEM_RUN_MIN;
+		case WAIT_ASSOC_MAX_UNK_PER_JOB:
+			return WAIT_ASSOC_MAX_MEM_PER_JOB;
+		case WAIT_ASSOC_MAX_UNK_MINS_PER_JOB:
+			return WAIT_ASSOC_MAX_MEM_MINS_PER_JOB;
+		case WAIT_QOS_GRP_UNK:
+			return WAIT_QOS_GRP_MEM;
+		case WAIT_QOS_GRP_UNK_MIN:
+			return WAIT_QOS_GRP_MEM_MIN;
+		case WAIT_QOS_GRP_UNK_RUN_MIN:
+			return WAIT_QOS_GRP_MEM_RUN_MIN;
+		case WAIT_QOS_MAX_UNK_PER_JOB:
+			return WAIT_QOS_MAX_MEM_PER_JOB;
+		case WAIT_QOS_MAX_UNK_PER_NODE:
+			return WAIT_QOS_MAX_MEM_PER_NODE;
+		case WAIT_QOS_MAX_UNK_PER_USER:
+			return WAIT_QOS_MAX_MEM_PER_USER;
+		case WAIT_QOS_MAX_UNK_MINS_PER_JOB:
+			return WAIT_QOS_MAX_MEM_MINS_PER_JOB;
+		case WAIT_QOS_MIN_UNK:
+			return WAIT_QOS_MIN_MEM;
+		default:
+			return unk_reason;
+			break;
+		}
+		break;
+	case TRES_ARRAY_ENEGRY:
+		switch (unk_reason) {
+		case WAIT_ASSOC_GRP_UNK:
+			return WAIT_ASSOC_GRP_ENERGY;
+		case WAIT_ASSOC_GRP_UNK_MIN:
+			return WAIT_ASSOC_GRP_ENERGY_MIN;
+		case WAIT_ASSOC_GRP_UNK_RUN_MIN:
+			return WAIT_ASSOC_GRP_ENERGY_RUN_MIN;
+		case WAIT_ASSOC_MAX_UNK_PER_JOB:
+			return WAIT_ASSOC_MAX_ENERGY_PER_JOB;
+		case WAIT_ASSOC_MAX_UNK_MINS_PER_JOB:
+			return WAIT_ASSOC_MAX_ENERGY_MINS_PER_JOB;
+		case WAIT_QOS_GRP_UNK:
+			return WAIT_QOS_GRP_ENERGY;
+		case WAIT_QOS_GRP_UNK_MIN:
+			return WAIT_QOS_GRP_ENERGY_MIN;
+		case WAIT_QOS_GRP_UNK_RUN_MIN:
+			return WAIT_QOS_GRP_ENERGY_RUN_MIN;
+		case WAIT_QOS_MAX_UNK_PER_JOB:
+			return WAIT_QOS_MAX_ENERGY_PER_JOB;
+		case WAIT_QOS_MAX_UNK_PER_NODE:
+			return WAIT_QOS_MAX_ENERGY_PER_NODE;
+		case WAIT_QOS_MAX_UNK_PER_USER:
+			return WAIT_QOS_MAX_ENERGY_PER_USER;
+		case WAIT_QOS_MAX_UNK_MINS_PER_JOB:
+			return WAIT_QOS_MAX_ENERGY_MINS_PER_JOB;
+		case WAIT_QOS_MIN_UNK:
+			return WAIT_QOS_MIN_ENERGY;
+		default:
+			return unk_reason;
+			break;
+		}
+		break;
+	case TRES_ARRAY_NODE:
+		switch (unk_reason) {
+		case WAIT_ASSOC_GRP_UNK:
+			return WAIT_ASSOC_GRP_NODE;
+		case WAIT_ASSOC_GRP_UNK_MIN:
+			return WAIT_ASSOC_GRP_NODE_MIN;
+		case WAIT_ASSOC_GRP_UNK_RUN_MIN:
+			return WAIT_ASSOC_GRP_NODE_RUN_MIN;
+		case WAIT_ASSOC_MAX_UNK_PER_JOB:
+			return WAIT_ASSOC_MAX_NODE_PER_JOB;
+		case WAIT_ASSOC_MAX_UNK_MINS_PER_JOB:
+			return WAIT_ASSOC_MAX_NODE_MINS_PER_JOB;
+		case WAIT_QOS_GRP_UNK:
+			return WAIT_QOS_GRP_NODE;
+		case WAIT_QOS_GRP_UNK_MIN:
+			return WAIT_QOS_GRP_NODE_MIN;
+		case WAIT_QOS_GRP_UNK_RUN_MIN:
+			return WAIT_QOS_GRP_NODE_RUN_MIN;
+		case WAIT_QOS_MAX_UNK_PER_JOB:
+			return WAIT_QOS_MAX_NODE_PER_JOB;
+		case WAIT_QOS_MAX_UNK_PER_USER:
+			return WAIT_QOS_MAX_NODE_PER_USER;
+		case WAIT_QOS_MAX_UNK_MINS_PER_JOB:
+			return WAIT_QOS_MAX_NODE_MINS_PER_JOB;
+		case WAIT_QOS_MIN_UNK:
+			return WAIT_QOS_MIN_NODE;
+		default:
+			return unk_reason;
+			break;
+		}
+		break;
+	default:
+		if (!xstrcmp("gres", assoc_mgr_tres_array[tres_pos]->type))
+			switch (unk_reason) {
+			case WAIT_ASSOC_GRP_UNK:
+				return WAIT_ASSOC_GRP_GRES;
+			case WAIT_ASSOC_GRP_UNK_MIN:
+				return WAIT_ASSOC_GRP_GRES_MIN;
+			case WAIT_ASSOC_GRP_UNK_RUN_MIN:
+				return WAIT_ASSOC_GRP_GRES_RUN_MIN;
+			case WAIT_ASSOC_MAX_UNK_PER_JOB:
+				return WAIT_ASSOC_MAX_GRES_PER_JOB;
+			case WAIT_ASSOC_MAX_UNK_MINS_PER_JOB:
+				return WAIT_ASSOC_MAX_GRES_MINS_PER_JOB;
+			case WAIT_QOS_GRP_UNK:
+				return WAIT_QOS_GRP_GRES;
+			case WAIT_QOS_GRP_UNK_MIN:
+				return WAIT_QOS_GRP_GRES_MIN;
+			case WAIT_QOS_GRP_UNK_RUN_MIN:
+				return WAIT_QOS_GRP_GRES_RUN_MIN;
+			case WAIT_QOS_MAX_UNK_PER_JOB:
+				return WAIT_QOS_MAX_GRES_PER_JOB;
+			case WAIT_QOS_MAX_UNK_PER_NODE:
+				return WAIT_QOS_MAX_GRES_PER_NODE;
+			case WAIT_QOS_MAX_UNK_PER_USER:
+				return WAIT_QOS_MAX_GRES_PER_USER;
+			case WAIT_QOS_MAX_UNK_MINS_PER_JOB:
+				return WAIT_QOS_MAX_GRES_MINS_PER_JOB;
+			case WAIT_QOS_MIN_UNK:
+				return WAIT_QOS_MIN_GRES;
+			default:
+				return unk_reason;
+				break;
+			}
+		else if (!xstrcmp("license",
+				  assoc_mgr_tres_array[tres_pos]->type))
+			switch (unk_reason) {
+			case WAIT_ASSOC_GRP_UNK:
+				return WAIT_ASSOC_GRP_LIC;
+			case WAIT_ASSOC_GRP_UNK_MIN:
+				return WAIT_ASSOC_GRP_LIC_MIN;
+			case WAIT_ASSOC_GRP_UNK_RUN_MIN:
+				return WAIT_ASSOC_GRP_LIC_RUN_MIN;
+			case WAIT_ASSOC_MAX_UNK_PER_JOB:
+				return WAIT_ASSOC_MAX_LIC_PER_JOB;
+			case WAIT_ASSOC_MAX_UNK_MINS_PER_JOB:
+				return WAIT_ASSOC_MAX_LIC_MINS_PER_JOB;
+			case WAIT_QOS_GRP_UNK:
+				return WAIT_QOS_GRP_LIC;
+			case WAIT_QOS_GRP_UNK_MIN:
+				return WAIT_QOS_GRP_LIC_MIN;
+			case WAIT_QOS_GRP_UNK_RUN_MIN:
+				return WAIT_QOS_GRP_LIC_RUN_MIN;
+			case WAIT_QOS_MAX_UNK_PER_JOB:
+				return WAIT_QOS_MAX_LIC_PER_JOB;
+			case WAIT_QOS_MAX_UNK_PER_USER:
+				return WAIT_QOS_MAX_LIC_PER_USER;
+			case WAIT_QOS_MAX_UNK_MINS_PER_JOB:
+				return WAIT_QOS_MAX_LIC_MINS_PER_JOB;
+			case WAIT_QOS_MIN_UNK:
+				return WAIT_QOS_MIN_LIC;
+			default:
+				return unk_reason;
+				break;
+			}
+		else if (!xstrcmp("bb", assoc_mgr_tres_array[tres_pos]->type))
+			switch (unk_reason) {
+			case WAIT_ASSOC_GRP_UNK:
+				return WAIT_ASSOC_GRP_BB;
+			case WAIT_ASSOC_GRP_UNK_MIN:
+				return WAIT_ASSOC_GRP_BB_MIN;
+			case WAIT_ASSOC_GRP_UNK_RUN_MIN:
+				return WAIT_ASSOC_GRP_BB_RUN_MIN;
+			case WAIT_ASSOC_MAX_UNK_PER_JOB:
+				return WAIT_ASSOC_MAX_BB_PER_JOB;
+			case WAIT_ASSOC_MAX_UNK_MINS_PER_JOB:
+				return WAIT_ASSOC_MAX_BB_MINS_PER_JOB;
+			case WAIT_QOS_GRP_UNK:
+				return WAIT_QOS_GRP_BB;
+			case WAIT_QOS_GRP_UNK_MIN:
+				return WAIT_QOS_GRP_BB_MIN;
+			case WAIT_QOS_GRP_UNK_RUN_MIN:
+				return WAIT_QOS_GRP_BB_RUN_MIN;
+			case WAIT_QOS_MAX_UNK_PER_JOB:
+				return WAIT_QOS_MAX_BB_PER_JOB;
+			case WAIT_QOS_MAX_UNK_PER_NODE:
+				return WAIT_QOS_MAX_BB_PER_NODE;
+			case WAIT_QOS_MAX_UNK_PER_USER:
+				return WAIT_QOS_MAX_BB_PER_USER;
+			case WAIT_QOS_MAX_UNK_MINS_PER_JOB:
+				return WAIT_QOS_MAX_BB_MINS_PER_JOB;
+			case WAIT_QOS_MIN_UNK:
+				return WAIT_QOS_MIN_BB;
+			default:
+				return unk_reason;
+				break;
+			}
+		break;
 	}
-	list_iterator_destroy(itr);
 
-	return used_limits;
+	return unk_reason;
+}
+
+static void _set_qos_order(struct job_record *job_ptr,
+			   slurmdb_qos_rec_t **qos_ptr_1,
+			   slurmdb_qos_rec_t **qos_ptr_2)
+{
+	xassert(job_ptr);
+	xassert(qos_ptr_1);
+	xassert(qos_ptr_2);
+
+	/* Initialize incoming pointers */
+	*qos_ptr_1 = NULL;
+	*qos_ptr_2 = NULL;
+
+	if (job_ptr->qos_ptr) {
+		if (job_ptr->part_ptr && job_ptr->part_ptr->qos_ptr) {
+			/* If the job's QOS has the flag to over ride the
+			 * partition then use that otherwise use the
+			 * partition's QOS as the king.
+			 */
+			if (((slurmdb_qos_rec_t *)job_ptr->qos_ptr)->flags
+			    & QOS_FLAG_OVER_PART_QOS) {
+				*qos_ptr_1 = job_ptr->qos_ptr;
+				*qos_ptr_2 = job_ptr->part_ptr->qos_ptr;
+			} else {
+				*qos_ptr_1 = job_ptr->part_ptr->qos_ptr;
+				*qos_ptr_2 = job_ptr->qos_ptr;
+			}
+
+			/* No reason to look at the same QOS twice, actually
+			 * we never want to do that ;). */
+			if (*qos_ptr_1 == *qos_ptr_2)
+				*qos_ptr_2 = NULL;
+		} else
+			*qos_ptr_1 = job_ptr->qos_ptr;
+	} else if (job_ptr->part_ptr && job_ptr->part_ptr->qos_ptr)
+		*qos_ptr_1 = job_ptr->part_ptr->qos_ptr;
+
+	return;
+}
+
+static int _find_used_limits_for_user(void *x, void *key)
+{
+	slurmdb_used_limits_t *used_limits = (slurmdb_used_limits_t *)x;
+	uint32_t user_id = *(uint32_t *)key;
+
+	if (used_limits->uid == user_id)
+		return 1;
+
+	return 0;
 }
 
 static bool _valid_job_assoc(struct job_record *job_ptr)
 {
-	slurmdb_association_rec_t assoc_rec, *assoc_ptr;
+	slurmdb_assoc_rec_t assoc_rec, *assoc_ptr;
 
-	assoc_ptr = (slurmdb_association_rec_t *)job_ptr->assoc_ptr;
+	assoc_ptr = (slurmdb_assoc_rec_t *)job_ptr->assoc_ptr;
 	if ((assoc_ptr == NULL) ||
 	    (assoc_ptr->id  != job_ptr->assoc_id) ||
 	    (assoc_ptr->uid != job_ptr->user_id)) {
 		error("Invalid assoc_ptr for jobid=%u", job_ptr->job_id);
-		memset(&assoc_rec, 0, sizeof(slurmdb_association_rec_t));
+		memset(&assoc_rec, 0, sizeof(slurmdb_assoc_rec_t));
 
 		assoc_rec.acct      = job_ptr->account;
 		if (job_ptr->part_ptr)
@@ -96,7 +362,7 @@ static bool _valid_job_assoc(struct job_record *job_ptr)
 
 		if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
 					    accounting_enforce,
-					    (slurmdb_association_rec_t **)
+					    (slurmdb_assoc_rec_t **)
 					    &job_ptr->assoc_ptr, false)) {
 			info("_validate_job_assoc: invalid account or "
 			     "partition for uid=%u jobid=%u",
@@ -108,235 +374,1512 @@ static bool _valid_job_assoc(struct job_record *job_ptr)
 	return true;
 }
 
+static void _qos_adjust_limit_usage(int type, struct job_record *job_ptr,
+				    slurmdb_qos_rec_t *qos_ptr,
+				    uint64_t *used_tres_run_secs)
+{
+	slurmdb_used_limits_t *used_limits = NULL;
+	int i;
+
+	if (!qos_ptr)
+		return;
+
+	if (!qos_ptr->usage->user_limit_list)
+		qos_ptr->usage->user_limit_list =
+			list_create(slurmdb_destroy_used_limits);
+	if (!(used_limits = list_find_first(qos_ptr->usage->user_limit_list,
+					    _find_used_limits_for_user,
+					    &job_ptr->user_id))) {
+		used_limits = xmalloc(sizeof(slurmdb_used_limits_t));
+		used_limits->uid = job_ptr->user_id;
+
+		i = sizeof(uint64_t) * slurmctld_tres_cnt;
+		used_limits->tres = xmalloc(i);
+		used_limits->tres_run_mins = xmalloc(i);
+
+		list_append(qos_ptr->usage->user_limit_list, used_limits);
+	}
+
+	switch(type) {
+	case ACCT_POLICY_ADD_SUBMIT:
+		qos_ptr->usage->grp_used_submit_jobs++;
+		used_limits->submit_jobs++;
+		break;
+	case ACCT_POLICY_REM_SUBMIT:
+		if (qos_ptr->usage->grp_used_submit_jobs)
+			qos_ptr->usage->grp_used_submit_jobs--;
+		else
+			debug2("acct_policy_remove_job_submit: "
+			       "grp_submit_jobs underflow for qos %s",
+			       qos_ptr->name);
+
+		if (used_limits->submit_jobs)
+			used_limits->submit_jobs--;
+		else
+			debug2("acct_policy_remove_job_submit: "
+			       "used_submit_jobs underflow for "
+			       "qos %s user %d",
+			       qos_ptr->name, used_limits->uid);
+		break;
+	case ACCT_POLICY_JOB_BEGIN:
+		qos_ptr->usage->grp_used_jobs++;
+		for (i=0; i<slurmctld_tres_cnt; i++) {
+			used_limits->tres[i] += job_ptr->tres_alloc_cnt[i];
+
+			qos_ptr->usage->grp_used_tres[i] +=
+				job_ptr->tres_alloc_cnt[i];
+			qos_ptr->usage->grp_used_tres_run_secs[i] +=
+				used_tres_run_secs[i];
+			debug2("acct_policy_job_begin: after "
+			       "adding job %u, qos %s "
+			       "grp_used_tres_run_secs(%s) "
+			       "is %"PRIu64,
+			       job_ptr->job_id,
+			       qos_ptr->name,
+			       assoc_mgr_tres_name_array[i],
+			       qos_ptr->usage->grp_used_tres_run_secs[i]);
+		}
+
+		used_limits->jobs++;
+		break;
+	case ACCT_POLICY_JOB_FINI:
+		qos_ptr->usage->grp_used_jobs--;
+		if ((int32_t)qos_ptr->usage->grp_used_jobs < 0) {
+			qos_ptr->usage->grp_used_jobs = 0;
+			debug2("acct_policy_job_fini: used_jobs "
+			       "underflow for qos %s", qos_ptr->name);
+		}
+
+		for (i=0; i<slurmctld_tres_cnt; i++) {
+			if (job_ptr->tres_alloc_cnt[i] >
+			    qos_ptr->usage->grp_used_tres[i]) {
+				qos_ptr->usage->grp_used_tres[i] = 0;
+				debug2("acct_policy_job_fini: "
+				       "grp_used_tres(%s) "
+				       "underflow for QOS %s",
+				       assoc_mgr_tres_name_array[i],
+				       qos_ptr->name);
+			} else
+				qos_ptr->usage->grp_used_tres[i] -=
+					job_ptr->tres_alloc_cnt[i];
+
+			if (job_ptr->tres_alloc_cnt[i] > used_limits->tres[i]) {
+				used_limits->tres[i] = 0;
+				debug2("acct_policy_job_fini: "
+				       "used_limits->tres(%s) "
+				       "underflow for qos %s user %u",
+				       assoc_mgr_tres_name_array[i],
+				       qos_ptr->name, used_limits->uid);
+			} else
+				used_limits->tres[i] -=
+					job_ptr->tres_alloc_cnt[i];
+		}
+
+		used_limits->jobs--;
+		if ((int32_t)used_limits->jobs < 0) {
+			used_limits->jobs = 0;
+			debug2("acct_policy_job_fini: used_jobs "
+			       "underflow for qos %s user %d",
+			       qos_ptr->name, used_limits->uid);
+		}
+
+		break;
+	default:
+		error("acct_policy: qos unknown type %d", type);
+		break;
+	}
+
+}
+
 static void _adjust_limit_usage(int type, struct job_record *job_ptr)
 {
-	slurmdb_association_rec_t *assoc_ptr = NULL;
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
-	uint64_t used_cpu_run_secs = 0;
-	uint32_t job_memory = 0;
-	uint32_t node_cnt;
+	slurmdb_qos_rec_t *qos_ptr_1, *qos_ptr_2;
+	slurmdb_assoc_rec_t *assoc_ptr = NULL;
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+	uint64_t used_tres_run_secs[slurmctld_tres_cnt];
+	int i;
+
+	memset(used_tres_run_secs, 0, sizeof(uint64_t) * slurmctld_tres_cnt);
 
 	if (!(accounting_enforce & ACCOUNTING_ENFORCE_LIMITS)
 	    || !_valid_job_assoc(job_ptr))
 		return;
-#ifdef HAVE_BG
-	xassert(job_ptr->select_jobinfo);
-	select_g_select_jobinfo_get(job_ptr->select_jobinfo,
-				    SELECT_JOBDATA_NODE_CNT, &node_cnt);
-	if (node_cnt == NO_VAL) {
-		/* This should never happen */
-		node_cnt = job_ptr->node_cnt;
-		error("node_cnt not available at %s:%d\n", __FILE__, __LINE__);
-	}
-#else
-	node_cnt = job_ptr->node_cnt;
-#endif
 
 	if (type == ACCT_POLICY_JOB_FINI)
 		priority_g_job_end(job_ptr);
-	else if (type == ACCT_POLICY_JOB_BEGIN)
-		used_cpu_run_secs = (uint64_t)job_ptr->total_cpus
-			* (uint64_t)job_ptr->time_limit * 60;
-
-	if (job_ptr->details && job_ptr->details->pn_min_memory) {
-		if (job_ptr->details->pn_min_memory & MEM_PER_CPU) {
-			job_memory = (job_ptr->details->pn_min_memory
-				      & (~MEM_PER_CPU))
-				* job_ptr->total_cpus;
-			debug2("_adjust_limit_usage: job %u: MPC: "
-			       "job_memory set to %u", job_ptr->job_id,
-			       job_memory);
-		} else {
-			job_memory = (job_ptr->details->pn_min_memory)
-				* node_cnt;
-			debug2("_adjust_limit_usage: job %u: MPN: "
-			       "job_memory set to %u", job_ptr->job_id,
-			       job_memory);
-		}
+	else if (type == ACCT_POLICY_JOB_BEGIN) {
+		uint64_t time_limit_secs = (uint64_t)job_ptr->time_limit * 60;
+		for (i=0; i<slurmctld_tres_cnt; i++)
+			used_tres_run_secs[i] =
+				job_ptr->tres_alloc_cnt[i] * time_limit_secs;
 	}
 
 	assoc_mgr_lock(&locks);
-	if (job_ptr->qos_ptr) {
-		slurmdb_qos_rec_t *qos_ptr = NULL;
-		slurmdb_used_limits_t *used_limits = NULL;
 
-		qos_ptr = (slurmdb_qos_rec_t *)job_ptr->qos_ptr;
-		if (!qos_ptr->usage->user_limit_list)
-			qos_ptr->usage->user_limit_list =
-				list_create(slurmdb_destroy_used_limits);
-		used_limits = _get_used_limits_for_user(
-			qos_ptr->usage->user_limit_list,
-			job_ptr->user_id);
-		if (!used_limits) {
-			used_limits = xmalloc(sizeof(slurmdb_used_limits_t));
-			used_limits->uid = job_ptr->user_id;
-			list_append(qos_ptr->usage->user_limit_list,
-				    used_limits);
-		}
+	_set_qos_order(job_ptr, &qos_ptr_1, &qos_ptr_2);
+
+	_qos_adjust_limit_usage(type, job_ptr, qos_ptr_1,
+				used_tres_run_secs);
+	_qos_adjust_limit_usage(type, job_ptr, qos_ptr_2,
+				used_tres_run_secs);
+
+	assoc_ptr = (slurmdb_assoc_rec_t *)job_ptr->assoc_ptr;
+	while (assoc_ptr) {
 		switch(type) {
 		case ACCT_POLICY_ADD_SUBMIT:
-			qos_ptr->usage->grp_used_submit_jobs++;
-			used_limits->submit_jobs++;
+			assoc_ptr->usage->used_submit_jobs++;
 			break;
 		case ACCT_POLICY_REM_SUBMIT:
-			if (qos_ptr->usage->grp_used_submit_jobs)
-				qos_ptr->usage->grp_used_submit_jobs--;
-			else
-				debug2("acct_policy_remove_job_submit: "
-				       "grp_submit_jobs underflow for qos %s",
-				       qos_ptr->name);
-
-			if (used_limits->submit_jobs)
-				used_limits->submit_jobs--;
+			if (assoc_ptr->usage->used_submit_jobs)
+				assoc_ptr->usage->used_submit_jobs--;
 			else
 				debug2("acct_policy_remove_job_submit: "
 				       "used_submit_jobs underflow for "
-				       "qos %s user %d",
-				       qos_ptr->name, used_limits->uid);
+				       "account %s",
+				       assoc_ptr->acct);
 			break;
 		case ACCT_POLICY_JOB_BEGIN:
-			qos_ptr->usage->grp_used_jobs++;
-			qos_ptr->usage->grp_used_cpus += job_ptr->total_cpus;
-			qos_ptr->usage->grp_used_mem += job_memory;
-			qos_ptr->usage->grp_used_nodes += node_cnt;
-			qos_ptr->usage->grp_used_cpu_run_secs +=
-				used_cpu_run_secs;
-			used_limits->jobs++;
-			used_limits->cpus += job_ptr->total_cpus;
-			used_limits->nodes += node_cnt;
+			assoc_ptr->usage->used_jobs++;
+			for (i=0; i<slurmctld_tres_cnt; i++) {
+				assoc_ptr->usage->grp_used_tres[i] +=
+					job_ptr->tres_alloc_cnt[i];
+				assoc_ptr->usage->grp_used_tres_run_secs[i] +=
+					used_tres_run_secs[i];
+				debug2("acct_policy_job_begin: after "
+				       "adding job %u, assoc %u(%s/%s/%s) "
+				       "grp_used_tres_run_secs(%s) "
+				       "is %"PRIu64,
+				       job_ptr->job_id,
+				       assoc_ptr->id, assoc_ptr->acct,
+				       assoc_ptr->user, assoc_ptr->partition,
+				       assoc_mgr_tres_name_array[i],
+				       assoc_ptr->usage->
+				       grp_used_tres_run_secs[i]);
+			}
 			break;
 		case ACCT_POLICY_JOB_FINI:
-
-			qos_ptr->usage->grp_used_jobs--;
-			if ((int32_t)qos_ptr->usage->grp_used_jobs < 0) {
-				qos_ptr->usage->grp_used_jobs = 0;
+			if (assoc_ptr->usage->used_jobs)
+				assoc_ptr->usage->used_jobs--;
+			else
 				debug2("acct_policy_job_fini: used_jobs "
-				       "underflow for qos %s", qos_ptr->name);
+				       "underflow for account %s",
+				       assoc_ptr->acct);
+
+			for (i=0; i<slurmctld_tres_cnt; i++) {
+				if (job_ptr->tres_alloc_cnt[i] >
+				    assoc_ptr->usage->grp_used_tres[i]) {
+					assoc_ptr->usage->grp_used_tres[i] = 0;
+					debug2("acct_policy_job_fini: "
+					       "grp_used_tres(%s) "
+					       "underflow for assoc "
+					       "%u(%s/%s/%s)",
+					       assoc_mgr_tres_name_array[i],
+					       assoc_ptr->id, assoc_ptr->acct,
+					       assoc_ptr->user,
+					       assoc_ptr->partition);
+				} else
+					assoc_ptr->usage->grp_used_tres[i] -=
+						job_ptr->tres_alloc_cnt[i];
 			}
 
-			qos_ptr->usage->grp_used_cpus -= job_ptr->total_cpus;
-			if ((int32_t)qos_ptr->usage->grp_used_cpus < 0) {
-				qos_ptr->usage->grp_used_cpus = 0;
-				debug2("acct_policy_job_fini: grp_used_cpus "
-				       "underflow for qos %s", qos_ptr->name);
+			break;
+		default:
+			error("acct_policy: association unknown type %d", type);
+			break;
+		}
+		/* now handle all the group limits of the parents */
+		assoc_ptr = assoc_ptr->usage->parent_assoc_ptr;
+	}
+	assoc_mgr_unlock(&locks);
+}
+
+static void _set_time_limit(uint32_t *time_limit, uint32_t part_max_time,
+			    uint32_t limit_max_time, uint16_t *limit_set_time)
+{
+	if ((*time_limit) == NO_VAL) {
+		if (part_max_time == INFINITE)
+			(*time_limit) = limit_max_time;
+		else
+			(*time_limit) = MIN(limit_max_time, part_max_time);
+
+		(*limit_set_time) = 1;
+	} else if ((*limit_set_time) && ((*time_limit) > limit_max_time))
+		(*time_limit) = limit_max_time;
+}
+
+static void _qos_alter_job(struct job_record *job_ptr,
+			   slurmdb_qos_rec_t *qos_ptr,
+			   uint64_t *used_tres_run_secs,
+			   uint64_t *new_used_tres_run_secs)
+{
+	int i;
+
+	if (!qos_ptr || !job_ptr)
+		return;
+
+	for (i=0; i<slurmctld_tres_cnt; i++) {
+		if (used_tres_run_secs[i] == new_used_tres_run_secs[i])
+			continue;
+		qos_ptr->usage->grp_used_tres_run_secs[i] -=
+			used_tres_run_secs[i];
+		qos_ptr->usage->grp_used_tres_run_secs[i] +=
+			new_used_tres_run_secs[i];
+		debug2("altering job %u QOS %s "
+		       "got %"PRIu64" just removed %"PRIu64
+		       " and added %"PRIu64"",
+		       job_ptr->job_id,
+		       qos_ptr->name,
+		       qos_ptr->usage->grp_used_tres_run_secs[i],
+		       used_tres_run_secs[i],
+		       new_used_tres_run_secs[i]);
+	}
+}
+
+/*
+ * _validate_tres_limits_for_assoc - validate the tres requested against limits
+ * of an association as well as qos skipping any limit an admin set
+ *
+ * OUT - tres_pos - if false is returned position in array of failed limit
+ * IN - job_tres_array - count of various tres in use
+ * IN - assoc_tres_array - limits on the association
+ * IN - qos_tres_array - limits on the qos
+ * IN - acct_policy_limit_set_array - limits that have been overridden
+ *                                    by an admin
+ * IN strick_checking - If a limit needs to be enforced now or not.
+ * IN update_call - If this is an update or a create call
+ *
+ * RET - True if no limit is violated, false otherwise with tres_pos
+ * being set to the position of the failed limit.
+ */
+static bool _validate_tres_limits_for_assoc(
+	int *tres_pos,
+	uint64_t *job_tres_array,
+	uint64_t divisor,
+	uint64_t *assoc_tres_array,
+	uint64_t *qos_tres_array,
+	uint16_t *admin_set_limit_tres_array,
+	bool strict_checking,
+	bool update_call, bool max_limit)
+{
+	int i;
+	uint64_t job_tres;
+
+	if (!strict_checking)
+		return true;
+
+	for (i = 0; i < g_tres_count; i++) {
+		(*tres_pos) = i;
+
+		if ((admin_set_limit_tres_array[i] == ADMIN_SET_LIMIT)
+		    || (qos_tres_array[i] != INFINITE64)
+		    || (assoc_tres_array[i] == INFINITE64)
+		    || (!job_tres_array[i] && !update_call))
+			continue;
+
+		job_tres = job_tres_array[i];
+
+		if (divisor)
+			job_tres /= divisor;
+
+		if (max_limit) {
+			if (job_tres > assoc_tres_array[i])
+				return false;
+		} else if (job_tres < assoc_tres_array[i])
+				return false;
+	}
+
+	return true;
+}
+
+/*
+ * _validate_tres_usage_limits_for_assoc - validate the tres requested
+ * against limits
+ * of an association as well as qos skipping any limit an admin set
+ *
+ * OUT - tres_pos - if false is returned position in array of failed limit
+ * IN - job_tres_array - count of various tres in use
+ * IN - assoc_tres_array - limits on the association
+ * IN - qos_tres_array - limits on the qos
+ * IN - acct_policy_limit_set_array - limits that have been overridden
+ *                                    by an admin
+ * IN strick_checking - If a limit needs to be enforced now or not.
+ * IN update_call - If this is an update or a create call
+ *
+ * RET - True if no limit is violated, false otherwise with tres_pos
+ * being set to the position of the failed limit.
+ */
+static int _validate_tres_usage_limits_for_assoc(
+	int *tres_pos,
+	uint64_t *tres_limit_array,
+	uint64_t *qos_tres_limit_array,
+	uint64_t *tres_req_cnt,
+	uint64_t *tres_usage,
+	uint64_t *curr_usage,
+	uint16_t *admin_limit_set,
+	bool safe_limits)
+{
+	int i;
+	uint64_t usage = 0;
+
+	xassert(tres_limit_array);
+	xassert(qos_tres_limit_array);
+
+	for (i = 0; i < g_tres_count; i++) {
+		(*tres_pos) = i;
+
+		if ((admin_limit_set
+		     && admin_limit_set[i] == ADMIN_SET_LIMIT) ||
+		    (qos_tres_limit_array[i] != INFINITE64) ||
+		    (tres_limit_array[i] == INFINITE64))
+			continue;
+
+		if (curr_usage && (curr_usage[i] >= tres_limit_array[i]))
+			return 1;
+
+		if (safe_limits) {
+			xassert(tres_req_cnt);
+			if (tres_req_cnt[i] > tres_limit_array[i])
+				return 2;
+
+			if (curr_usage)
+				usage = curr_usage[i];
+			if (tres_usage &&
+			    ((tres_req_cnt[i] + tres_usage[i]) >
+			     (tres_limit_array[i] - usage)))
+				return 3;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * _validate_tres_limits_for_qos - validate the tres requested against limits
+ * of an association as well as qos skipping any limit an admin set
+ *
+ * OUT - tres_pos - if false is returned position in array of failed limit
+ * IN - job_tres_array - count of various tres in use
+ * IN - assoc_tres_array - limits on the association
+ * IN - qos_tres_array - limits on the qos
+ * IN - acct_policy_limit_set_array - limits that have been overridden
+ *                                    by an admin
+ * IN strick_checking - If a limit needs to be enforced now or not.
+ * IN update_call - If this is an update or a create call
+ *
+ * RET - True if no limit is violated, false otherwise with tres_pos
+ * being set to the position of the failed limit.
+ */
+static bool _validate_tres_limits_for_qos(
+	int *tres_pos,
+	uint64_t *job_tres_array,
+	uint64_t divisor,
+	uint64_t *grp_tres_array,
+	uint64_t *max_tres_array,
+	uint64_t *out_grp_tres_array,
+	uint64_t *out_max_tres_array,
+	uint16_t *admin_set_limit_tres_array,
+	bool strict_checking, bool max_limit)
+{
+	uint64_t max_tres_limit, out_max_tres_limit;
+	int i;
+	uint64_t job_tres;
+
+	if (!strict_checking)
+		return true;
+
+	for (i = 0; i < g_tres_count; i++) {
+		(*tres_pos) = i;
+		if (grp_tres_array) {
+			max_tres_limit = MIN(grp_tres_array[i],
+					     max_tres_array[i]);
+			out_max_tres_limit = MIN(out_grp_tres_array[i],
+						 out_max_tres_array[i]);
+		} else {
+			max_tres_limit = max_tres_array[i];
+			out_max_tres_limit = out_max_tres_array[i];
+		}
+
+		/* we don't need to look at this limit */
+		if ((admin_set_limit_tres_array[i] == ADMIN_SET_LIMIT)
+		    || (out_max_tres_limit != INFINITE64)
+		    || (max_tres_limit == INFINITE64)
+		    || (job_tres_array[i] && (job_tres_array[i] == NO_VAL64)))
+			continue;
+
+		out_max_tres_array[i] = max_tres_array[i];
+
+		job_tres = job_tres_array[i];
+
+		if (divisor)
+			job_tres /= divisor;
+
+		if (out_grp_tres_array) {
+			if (out_grp_tres_array[i] == INFINITE64)
+				out_grp_tres_array[i] = grp_tres_array[i];
+
+			if (max_limit) {
+				if (job_tres > grp_tres_array[i])
+					return false;
+			}  else if (job_tres < grp_tres_array[i])
+				return false;
+		}
+
+		if (max_limit) {
+			if (job_tres > max_tres_array[i])
+				return false;
+		} else if (job_tres < max_tres_array[i])
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * _validate_tres_time_limits_for_qos - validate the tres requested
+ * against limits of an association as well as qos skipping any limit
+ * an admin set
+ *
+ * OUT - tres_pos - if false is returned position in array of failed limit
+ * IN - job_tres_array - count of various tres in use
+ * IN - assoc_tres_array - limits on the association
+ * IN - qos_tres_array - limits on the qos
+ * IN - acct_policy_limit_set_array - limits that have been overridden
+ *                                    by an admin
+ * IN strick_checking - If a limit needs to be enforced now or not.
+ * IN update_call - If this is an update or a create call
+ *
+ * RET - True if no limit is violated, false otherwise with tres_pos
+ * being set to the position of the failed limit.
+ */
+static bool _validate_tres_time_limits_for_qos(
+	int *tres_pos,
+	uint32_t *time_limit_in,
+	uint32_t part_max_time,
+	uint64_t *job_tres_array,
+	uint64_t *max_tres_array,
+	uint64_t *out_max_tres_array,
+	uint16_t *limit_set_time,
+	bool strict_checking)
+{
+	int i;
+	uint32_t max_time_limit;
+
+	if (!strict_checking || (*limit_set_time) == ADMIN_SET_LIMIT)
+		return true;
+
+	for (i = 0; i < g_tres_count; i++) {
+		(*tres_pos) = i;
+
+		if ((out_max_tres_array[i] != INFINITE64) ||
+		    (max_tres_array[i] == INFINITE64) ||
+		    (job_tres_array[i] == NO_VAL64))
+			continue;
+
+		max_time_limit = (uint32_t)(max_tres_array[i] /
+					    job_tres_array[i]);
+
+		_set_time_limit(time_limit_in,
+				part_max_time, max_time_limit,
+				limit_set_time);
+
+		out_max_tres_array[i] = max_tres_array[i];
+
+		if ((*time_limit_in) > max_time_limit)
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * _validate_tres_usage_limits_for_qos - validate the tres requested
+ * against limits of an association as well as qos skipping any limit
+ * an admin set
+ *
+ * OUT - tres_pos - if false is returned position in array of failed limit
+ * IN - job_tres_array - count of various tres in use
+ * IN - assoc_tres_array - limits on the association
+ * IN - qos_tres_array - limits on the qos
+ * IN - acct_policy_limit_set_array - limits that have been overridden
+ *                                    by an admin
+ * IN strick_checking - If a limit needs to be enforced now or not.
+ * IN update_call - If this is an update or a create call
+ *
+ * RET - True if no limit is violated, false otherwise with tres_pos
+ * being set to the position of the failed limit.
+ */
+static int _validate_tres_usage_limits_for_qos(
+	int *tres_pos,
+	uint64_t *tres_limit_array,
+	uint64_t *out_tres_limit_array,
+	uint64_t *tres_req_cnt,
+	uint64_t *tres_usage,
+	uint64_t *curr_usage,
+	uint16_t *admin_limit_set,
+	bool safe_limits)
+{
+	uint64_t usage = 0;
+	int i;
+
+	xassert(tres_limit_array);
+	xassert(out_tres_limit_array);
+
+	for (i = 0; i < g_tres_count; i++) {
+		(*tres_pos) = i;
+
+		if ((admin_limit_set
+		     && admin_limit_set[i] == ADMIN_SET_LIMIT) ||
+		    (out_tres_limit_array[i] != INFINITE64) ||
+		    (tres_limit_array[i] == INFINITE64))
+			continue;
+
+		out_tres_limit_array[i] = tres_limit_array[i];
+
+		if (curr_usage && (curr_usage[i] >= tres_limit_array[i]))
+			return 1;
+
+		if (safe_limits) {
+			xassert(tres_req_cnt);
+			if (tres_req_cnt[i] > tres_limit_array[i])
+				return 2;
+
+			if (curr_usage)
+				usage = curr_usage[i];
+			if (tres_usage &&
+			    ((tres_req_cnt[i] + tres_usage[i]) >
+			     (tres_limit_array[i] - usage)))
+				return 3;
+		}
+	}
+
+	return 0;
+}
+
+static int _qos_policy_validate(job_desc_msg_t *job_desc,
+				struct part_record *part_ptr,
+				slurmdb_qos_rec_t *qos_ptr,
+				slurmdb_qos_rec_t *qos_out_ptr,
+				uint32_t *reason,
+				acct_policy_limit_set_t *acct_policy_limit_set,
+				bool update_call,
+				char *user_name,
+				int job_cnt,
+				bool strict_checking)
+{
+	int rc = true;
+	int tres_pos = 0;
+
+	if (!qos_ptr || !qos_out_ptr)
+		return rc;
+
+	/* for validation we don't need to look at
+	 * qos_ptr->grp_tres_mins.
+	 */
+
+	if (!_validate_tres_limits_for_qos(&tres_pos,
+					   job_desc->tres_req_cnt, 0,
+					   qos_ptr->grp_tres_ctld,
+					   qos_ptr->max_tres_pu_ctld,
+					   qos_out_ptr->grp_tres_ctld,
+					   qos_out_ptr->max_tres_pu_ctld,
+					   acct_policy_limit_set->tres,
+					   strict_checking, 1)) {
+		if (job_desc->tres_req_cnt[tres_pos] >
+		    qos_ptr->max_tres_pu_ctld[tres_pos]) {
+			if (reason)
+				*reason = get_tres_state_reason(
+					tres_pos, WAIT_QOS_MAX_UNK_PER_USER);
+
+			debug2("job submit for user %s(%u): "
+			       "min tres(%s) request %"PRIu64" exceeds "
+			       "per-user max tres limit %"PRIu64" for qos '%s'",
+			       user_name,
+			       job_desc->user_id,
+			       assoc_mgr_tres_name_array[tres_pos],
+			       job_desc->tres_req_cnt[tres_pos],
+			       qos_ptr->max_tres_pu_ctld[tres_pos],
+			       qos_ptr->name);
+			rc = false;
+			goto end_it;
+		} else if (job_desc->tres_req_cnt[tres_pos] >
+			   qos_ptr->grp_tres_ctld[tres_pos]) {
+			if (reason)
+				*reason = get_tres_state_reason(
+					tres_pos, WAIT_QOS_GRP_UNK);
+
+			debug2("job submit for user %s(%u): "
+			       "min tres(%s) request %"PRIu64" exceeds "
+			       "group max tres limit %"PRIu64" for qos '%s'",
+			       user_name,
+			       job_desc->user_id,
+			       assoc_mgr_tres_name_array[tres_pos],
+			       job_desc->tres_req_cnt[tres_pos],
+			       qos_ptr->grp_tres_ctld[tres_pos],
+			       qos_ptr->name);
+			rc = false;
+			goto end_it;
+		}
+	}
+
+	/* for validation we don't need to look at
+	 * qos_ptr->grp_jobs.
+	 */
+
+	if ((qos_out_ptr->grp_submit_jobs == INFINITE) &&
+	    (qos_ptr->grp_submit_jobs != INFINITE)) {
+
+		qos_out_ptr->grp_submit_jobs = qos_ptr->grp_submit_jobs;
+
+		if ((qos_ptr->usage->grp_used_submit_jobs + job_cnt)
+		    > qos_ptr->grp_submit_jobs) {
+			if (reason)
+				*reason = WAIT_QOS_GRP_SUB_JOB;
+			debug2("job submit for user %s(%u): "
+			       "group max submit job limit exceeded %u "
+			       "for qos '%s'",
+			       user_name,
+			       job_desc->user_id,
+			       qos_ptr->grp_submit_jobs,
+			       qos_ptr->name);
+			rc = false;
+			goto end_it;
+		}
+	}
+
+	/* If DenyOnLimit is set we do need to check
+	 * qos_ptr->max_tres_mins_pj as well as
+	 * qos_ptr->max_wall_pj and qos_ptr->grp_wall (at
+	 * least make sure it isn't above the grp limit)
+	 * otherwise you end up in PENDING on a QOSLimit.
+	 */
+	if (strict_checking &&
+	    (acct_policy_limit_set->time != ADMIN_SET_LIMIT)) {
+		if (!_validate_tres_time_limits_for_qos(
+			    &tres_pos,
+			    &job_desc->time_limit,
+			    part_ptr->max_time,
+			    job_desc->tres_req_cnt,
+			    qos_ptr->max_tres_mins_pj_ctld,
+			    qos_out_ptr->max_tres_mins_pj_ctld,
+			    &acct_policy_limit_set->time,
+			    strict_checking)) {
+			if (reason)
+				*reason = get_tres_state_reason(
+					tres_pos, WAIT_QOS_MAX_UNK_PER_JOB);
+			debug2("job submit for user %s(%u): "
+			       "tres(%s) time limit request %"PRIu64" "
+			       "exceeds max per-job limit %"PRIu64" "
+			       "for qos '%s'",
+			       user_name,
+			       job_desc->user_id,
+			       assoc_mgr_tres_name_array[tres_pos],
+			       ((uint64_t)job_desc->time_limit *
+				job_desc->tres_req_cnt[tres_pos]),
+			       qos_ptr->max_tres_mins_pj_ctld[tres_pos],
+			       qos_ptr->name);
+			rc = false;
+			goto end_it;
+		}
+
+		if ((qos_out_ptr->max_wall_pj == INFINITE) &&
+		    (qos_ptr->max_wall_pj != INFINITE)) {
+			_set_time_limit(&job_desc->time_limit,
+					part_ptr->max_time,
+					qos_ptr->max_wall_pj,
+					&acct_policy_limit_set->time);
+			qos_out_ptr->max_wall_pj = qos_ptr->max_wall_pj;
+
+			if (job_desc->time_limit > qos_ptr->max_wall_pj) {
+				if (reason)
+					*reason = WAIT_QOS_MAX_WALL_PER_JOB;
+				debug2("job submit for user %s(%u): "
+				       "time limit %u exceeds qos max %u",
+				       user_name,
+				       job_desc->user_id,
+				       job_desc->time_limit,
+				       qos_ptr->max_wall_pj);
+				rc = false;
+				goto end_it;
 			}
+		}
+
+		if ((qos_out_ptr->grp_wall == INFINITE) &&
+		    (qos_ptr->grp_wall != INFINITE)) {
+			_set_time_limit(&job_desc->time_limit,
+					part_ptr->max_time,
+					qos_ptr->grp_wall,
+					&acct_policy_limit_set->time);
+
+			qos_out_ptr->grp_wall = qos_ptr->grp_wall;
+
+			if (job_desc->time_limit > qos_ptr->grp_wall) {
+				if (reason)
+					*reason = WAIT_ASSOC_GRP_WALL;
+				debug2("job submit for user %s(%u): "
+				       "time limit %u exceeds qos grp max %u",
+				       user_name,
+				       job_desc->user_id,
+				       job_desc->time_limit,
+				       qos_ptr->grp_wall);
+				rc = false;
+				goto end_it;
+			}
+		}
+	}
+
+	if (!_validate_tres_limits_for_qos(&tres_pos,
+					   job_desc->tres_req_cnt, 0,
+					   NULL,
+					   qos_ptr->max_tres_pj_ctld,
+					   NULL,
+					   qos_out_ptr->max_tres_pj_ctld,
+					   acct_policy_limit_set->tres,
+					   strict_checking, 1)) {
+		if (reason)
+			*reason = get_tres_state_reason(
+				tres_pos, WAIT_QOS_MAX_UNK_PER_JOB);
+
+		debug2("job submit for user %s(%u): "
+		       "min tres(%s) request %"PRIu64" exceeds "
+		       "per-job max tres limit %"PRIu64" for qos '%s'",
+		       user_name,
+		       job_desc->user_id,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       job_desc->tres_req_cnt[tres_pos],
+		       qos_ptr->max_tres_pj_ctld[tres_pos],
+		       qos_ptr->name);
+		rc = false;
+		goto end_it;
+	}
+
+	if (!_validate_tres_limits_for_qos(&tres_pos,
+					   job_desc->tres_req_cnt,
+					   job_desc->tres_req_cnt[
+						   TRES_ARRAY_NODE],
+					   NULL,
+					   qos_ptr->max_tres_pn_ctld,
+					   NULL,
+					   qos_out_ptr->max_tres_pn_ctld,
+					   acct_policy_limit_set->tres,
+					   strict_checking, 1)) {
+		if (reason)
+			*reason = get_tres_state_reason(
+				tres_pos, WAIT_QOS_MAX_UNK_PER_NODE);
+
+		debug2("job submit for user %s(%u): "
+		       "min tres(%s) request %"PRIu64" exceeds "
+		       "per-node max tres limit %"PRIu64" for qos '%s'",
+		       user_name,
+		       job_desc->user_id,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       job_desc->tres_req_cnt[tres_pos] /
+		       job_desc->tres_req_cnt[TRES_ARRAY_NODE],
+		       qos_ptr->max_tres_pn_ctld[tres_pos],
+		       qos_ptr->name);
+		rc = false;
+		goto end_it;
+	}
+
+	/* for validation we don't need to look at
+	 * qos_ptr->max_jobs.
+	 */
+
+	if ((qos_out_ptr->max_submit_jobs_pu == INFINITE) &&
+	    (qos_ptr->max_submit_jobs_pu != INFINITE)) {
+		slurmdb_used_limits_t *used_limits = NULL;
+
+		if (qos_ptr->usage->user_limit_list)
+			used_limits = list_find_first(
+				qos_ptr->usage->user_limit_list,
+				_find_used_limits_for_user,
+				&job_desc->user_id);
+
+		qos_out_ptr->max_submit_jobs_pu = qos_ptr->max_submit_jobs_pu;
+
+		if ((!used_limits &&
+		     qos_ptr->max_submit_jobs_pu == 0) ||
+		    (used_limits &&
+		     ((used_limits->submit_jobs + job_cnt) >
+		      qos_ptr->max_submit_jobs_pu))) {
+			if (reason)
+				*reason = WAIT_QOS_MAX_SUB_JOB;
+			debug2("job submit for user %s(%u): "
+			       "qos max submit job limit exceeded %u",
+			       user_name,
+			       job_desc->user_id,
+			       qos_ptr->max_submit_jobs_pu);
+			rc = false;
+			goto end_it;
+		}
+	}
+
+	if (!_validate_tres_limits_for_qos(&tres_pos,
+					   job_desc->tres_req_cnt, 0,
+					   NULL,
+					   qos_ptr->min_tres_pj_ctld,
+					   NULL,
+					   qos_out_ptr->min_tres_pj_ctld,
+					   acct_policy_limit_set->tres,
+					   strict_checking, 0)) {
+		if (reason)
+			*reason = get_tres_state_reason(
+				tres_pos, WAIT_QOS_MIN_UNK);
+
+		debug2("job submit for user %s(%u): "
+		       "min tres(%s) request %"PRIu64" exceeds "
+		       "per-job max tres limit %"PRIu64" for qos '%s'",
+		       user_name,
+		       job_desc->user_id,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       job_desc->tres_req_cnt[tres_pos],
+		       qos_ptr->min_tres_pj_ctld[tres_pos],
+		       qos_ptr->name);
+		rc = false;
+		goto end_it;
+	}
+
+end_it:
+	return rc;
+}
+
+static int _qos_job_runnable_pre_select(struct job_record *job_ptr,
+					 slurmdb_qos_rec_t *qos_ptr,
+					 slurmdb_qos_rec_t *qos_out_ptr)
+{
+	uint32_t wall_mins;
+	uint32_t time_limit;
+	int rc = true;
+	slurmdb_used_limits_t *used_limits = NULL;
+	bool free_used_limits = false;
+
+	if (!qos_ptr || !qos_out_ptr)
+		return rc;
+
+	wall_mins = qos_ptr->usage->grp_used_wall / 60;
+
+	/*
+	 * Try to get the used limits for the user or initialise a local
+	 * nullified one if not available.
+	 */
+	if (!qos_ptr->usage->user_limit_list ||
+	    !(used_limits = list_find_first(qos_ptr->usage->user_limit_list,
+					    _find_used_limits_for_user,
+					    &job_ptr->user_id))) {
+		used_limits = xmalloc(sizeof(slurmdb_used_limits_t));
+		used_limits->uid = job_ptr->user_id;
+		free_used_limits = true;
+	}
+
+	/* we don't need to check grp_tres_mins here */
+
+	/* we don't need to check grp_tres here */
+
+	/* we don't need to check grp_mem here */
+	if ((qos_out_ptr->grp_jobs == INFINITE) &&
+	    (qos_ptr->grp_jobs != INFINITE)) {
+
+		qos_out_ptr->grp_jobs = qos_ptr->grp_jobs;
+
+		if (qos_ptr->usage->grp_used_jobs >= qos_ptr->grp_jobs) {
+			xfree(job_ptr->state_desc);
+			job_ptr->state_reason = WAIT_QOS_GRP_JOB;
+			debug2("job %u being held, "
+			       "the job is at or exceeds "
+			       "group max jobs limit %u with %u for qos %s",
+			       job_ptr->job_id,
+			       qos_ptr->grp_jobs,
+			       qos_ptr->usage->grp_used_jobs, qos_ptr->name);
+
+			rc = false;
+			goto end_it;
+		}
+	}
+
+	/* we don't need to check grp_tres_run_mins here */
+
+	/* we don't need to check grp_nodes here */
+
+	/* we don't need to check submit_jobs here */
+
+	if ((qos_out_ptr->grp_wall == INFINITE)
+	    && (qos_ptr->grp_wall != INFINITE)) {
+
+		qos_out_ptr->grp_wall = qos_ptr->grp_wall;
+
+		if (wall_mins >= qos_ptr->grp_wall) {
+			xfree(job_ptr->state_desc);
+			job_ptr->state_reason = WAIT_QOS_GRP_WALL;
+			debug2("job %u being held, "
+			       "the job is at or exceeds "
+			       "group wall limit %u "
+			       "with %u for qos %s",
+			       job_ptr->job_id,
+			       qos_ptr->grp_wall,
+			       wall_mins, qos_ptr->name);
+			rc = false;
+			goto end_it;
+		}
+	}
+
+	/* we don't need to check max_tres_mins_pj here */
+
+	/* we don't need to check max_tres_pj here */
+
+	/* we don't need to check max_tres_pn here */
+
+	/* we don't need to check min_tres_pj here */
+
+	/* we don't need to check max_tres_pu here */
+
+	if ((qos_out_ptr->max_jobs_pu == INFINITE)
+	    && (qos_ptr->max_jobs_pu != INFINITE)) {
+
+		qos_out_ptr->max_jobs_pu = qos_ptr->max_jobs_pu;
+
+		if (used_limits->jobs >= qos_ptr->max_jobs_pu) {
+			xfree(job_ptr->state_desc);
+			job_ptr->state_reason =
+				WAIT_QOS_MAX_JOB_PER_USER;
+			debug2("job %u being held, "
+			       "the job is at or exceeds "
+			       "max jobs per-user limit "
+			       "%u with %u for QOS %s",
+			       job_ptr->job_id,
+			       qos_ptr->max_jobs_pu,
+			       used_limits->jobs, qos_ptr->name);
+			rc = false;
+			goto end_it;
+		}
+	}
+
+	/* we don't need to check submit_jobs_pu here */
+
+	/* if the qos limits have changed since job
+	 * submission and job can not run, then kill it */
+	if ((job_ptr->limit_set.time != ADMIN_SET_LIMIT)
+	    && (qos_out_ptr->max_wall_pj == INFINITE)
+	    && (qos_ptr->max_wall_pj != INFINITE)) {
+
+		qos_out_ptr->max_wall_pj = qos_ptr->max_wall_pj;
+
+		time_limit = qos_ptr->max_wall_pj;
+		if ((job_ptr->time_limit != NO_VAL) &&
+		    (job_ptr->time_limit > time_limit)) {
+			xfree(job_ptr->state_desc);
+			job_ptr->state_reason =
+				WAIT_QOS_MAX_WALL_PER_JOB;
+			debug2("job %u being held, "
+			       "time limit %u exceeds qos "
+			       "max wall pj %u",
+			       job_ptr->job_id,
+			       job_ptr->time_limit,
+			       time_limit);
+			rc = false;
+			goto end_it;
+		}
+	}
+end_it:
+
+	if (free_used_limits)
+		xfree(used_limits);
+
+	return rc;
+}
+
+static int _qos_job_runnable_post_select(struct job_record *job_ptr,
+					 slurmdb_qos_rec_t *qos_ptr,
+					 slurmdb_qos_rec_t *qos_out_ptr,
+					 uint64_t *tres_req_cnt,
+					 uint64_t *job_tres_time_limit)
+{
+	uint64_t tres_usage_mins[slurmctld_tres_cnt];
+	uint64_t tres_run_mins[slurmctld_tres_cnt];
+	slurmdb_used_limits_t *used_limits = NULL;
+	bool free_used_limits = false;
+	bool safe_limits = false;
+	int rc = true;
+	int i, tres_pos = 0;
+
+	if (!qos_ptr || !qos_out_ptr)
+		return rc;
+
+	/* check to see if we should be using safe limits, if so we
+	 * will only start a job if there are sufficient remaining
+	 * cpu-minutes for it to run to completion */
+	if (accounting_enforce & ACCOUNTING_ENFORCE_SAFE)
+		safe_limits = true;
+
+	/* clang needs this memset to avoid a warning */
+	memset(tres_run_mins, 0, sizeof(tres_run_mins));
+	memset(tres_usage_mins, 0, sizeof(tres_usage_mins));
+	for (i=0; i<slurmctld_tres_cnt; i++) {
+		tres_run_mins[i] =
+			qos_ptr->usage->grp_used_tres_run_secs[i] / 60;
+		tres_usage_mins[i] =
+			(uint64_t)(qos_ptr->usage->usage_tres_raw[i] / 60.0);
+	}
+
+	/*
+	 * Try to get the used limits for the user or initialize a local
+	 * nullified one if not available.
+	 */
+	if (!qos_ptr->usage->user_limit_list ||
+	    !(used_limits = list_find_first(qos_ptr->usage->user_limit_list,
+					    _find_used_limits_for_user,
+					    &job_ptr->user_id))) {
+		used_limits = xmalloc(sizeof(slurmdb_used_limits_t));
+		used_limits->uid = job_ptr->user_id;
+		free_used_limits = true;
+	}
+
+	i = _validate_tres_usage_limits_for_qos(
+		&tres_pos, qos_ptr->grp_tres_mins_ctld,
+		qos_out_ptr->grp_tres_mins_ctld, job_tres_time_limit,
+		tres_run_mins, tres_usage_mins, job_ptr->limit_set.tres,
+		safe_limits);
+	switch (i) {
+	case 1:
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = get_tres_state_reason(
+			tres_pos, WAIT_QOS_GRP_UNK_MIN);
+		debug2("Job %u being held, "
+		       "QOS %s group max tres(%s) minutes limit "
+		       "of %"PRIu64" is already at or exceeded with %"PRIu64,
+		       job_ptr->job_id,
+		       qos_ptr->name,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       qos_ptr->grp_tres_mins_ctld[tres_pos],
+		       tres_usage_mins[i]);
+		rc = false;
+		goto end_it;
+		break;
+	case 2:
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = get_tres_state_reason(
+			tres_pos, WAIT_QOS_GRP_UNK_MIN);
+		debug2("Job %u being held, "
+		       "the job is requesting more than allowed with QOS %s's "
+		       "group max tres(%s) minutes of %"PRIu64" "
+		       "with %"PRIu64,
+		       job_ptr->job_id,
+		       qos_ptr->name,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       qos_ptr->grp_tres_mins_ctld[tres_pos],
+		       job_tres_time_limit[tres_pos]);
+		rc = false;
+		goto end_it;
+		break;
+	case 3:
+		/*
+		 * If we're using safe limits start
+		 * the job only if there are
+		 * sufficient cpu-mins left such that
+		 * it will run to completion without
+		 * being killed
+		 */
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = get_tres_state_reason(
+			tres_pos, WAIT_QOS_GRP_UNK_MIN);
+		debug2("Job %u being held, "
+		       "the job is at or exceeds QOS %s's "
+		       "group max tres(%s) minutes of %"PRIu64" "
+		       "of which %"PRIu64" are still available "
+		       "but request is for %"PRIu64" "
+		       "(%"PRIu64" already used) tres "
+		       "minutes (%"PRIu64" tres count)",
+		       job_ptr->job_id,
+		       qos_ptr->name,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       qos_ptr->grp_tres_mins_ctld[tres_pos],
+		       qos_ptr->grp_tres_mins_ctld[tres_pos] -
+		       tres_usage_mins[tres_pos],
+		       job_tres_time_limit[tres_pos] + tres_run_mins[tres_pos],
+		       tres_run_mins[tres_pos],
+		       tres_req_cnt[tres_pos]);
+		rc = false;
+		goto end_it;
+		break;
+	default:
+		/* all good */
+		break;
+	}
+
+	/* If the JOB's cpu limit wasn't administratively set and the
+	 * QOS has a GrpCPU limit, cancel the job if its minimum
+	 * cpu requirement has exceeded the limit for all CPUs
+	 * usable by the QOS
+	 */
+	i = _validate_tres_usage_limits_for_qos(
+		&tres_pos,
+		qos_ptr->grp_tres_ctld,	qos_out_ptr->grp_tres_ctld,
+		tres_req_cnt, qos_ptr->usage->grp_used_tres,
+		NULL, job_ptr->limit_set.tres, 1);
+	switch (i) {
+	case 1:
+		/* not possible because the curr_usage sent in is NULL */
+		break;
+	case 2:
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = get_tres_state_reason(
+			tres_pos, WAIT_QOS_GRP_UNK);
+		debug2("job %u is being held, "
+		       "QOS %s min tres(%s) request %"PRIu64" exceeds "
+		       "group max tres limit %"PRIu64,
+		       job_ptr->job_id,
+		       qos_ptr->name,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       tres_req_cnt[tres_pos],
+		       qos_ptr->grp_tres_ctld[tres_pos]);
+		rc = false;
+		goto end_it;
+		break;
+	case 3:
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = get_tres_state_reason(
+			tres_pos, WAIT_QOS_GRP_UNK);
+		debug2("job %u being held, "
+		       "if allowed the job request will exceed "
+		       "QOS %s group max tres(%s) limit "
+		       "%"PRIu64" with already used %"PRIu64" + "
+		       "requested %"PRIu64,
+		       job_ptr->job_id,
+		       qos_ptr->name,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       qos_ptr->grp_tres_ctld[tres_pos],
+		       qos_ptr->usage->grp_used_tres[tres_pos],
+		       tres_req_cnt[tres_pos]);
+		rc = false;
+		goto end_it;
+	default:
+		/* all good */
+		break;
+	}
+
+	/* we don't need to check grp_jobs here */
+
+	i = _validate_tres_usage_limits_for_qos(
+		&tres_pos,
+		qos_ptr->grp_tres_run_mins_ctld,
+		qos_out_ptr->grp_tres_run_mins_ctld,
+		job_tres_time_limit, tres_run_mins, NULL, NULL, 1);
+	switch (i) {
+	case 1:
+		/* not possible because the curr_usage sent in is NULL */
+		break;
+	case 2:
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = get_tres_state_reason(
+			tres_pos, WAIT_QOS_GRP_UNK_RUN_MIN);
+		debug2("job %u is being held, "
+		       "QOS %s group max running tres(%s) minutes "
+		       "limit %"PRIu64" is already full with %"PRIu64,
+		       job_ptr->job_id,
+		       qos_ptr->name,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       qos_ptr->grp_tres_run_mins_ctld[tres_pos],
+		       tres_run_mins[tres_pos]);
+		rc = false;
+		goto end_it;
+		break;
+	case 3:
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = get_tres_state_reason(
+			tres_pos, WAIT_QOS_GRP_UNK_RUN_MIN);
+		debug2("job %u being held, "
+		       "if allowed the job request will exceed "
+		       "QOS %s group max running tres(%s) minutes "
+		       "limit %"PRIu64" with already "
+		       "used %"PRIu64" + requested %"PRIu64,
+		       job_ptr->job_id, qos_ptr->name,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       qos_ptr->grp_tres_run_mins_ctld[tres_pos],
+		       tres_run_mins[tres_pos],
+		       job_tres_time_limit[tres_pos]);
+		rc = false;
+		goto end_it;
+		break;
+	default:
+		/* all good */
+		break;
+	}
+
+	/* we don't need to check submit_jobs here */
+
+	/* we don't need to check grp_wall here */
+
+	if (!_validate_tres_limits_for_qos(&tres_pos,
+					   job_tres_time_limit, 0,
+					   NULL,
+					   qos_ptr->max_tres_mins_pj_ctld,
+					   NULL,
+					   qos_out_ptr->max_tres_mins_pj_ctld,
+					   job_ptr->limit_set.tres,
+					   1, 1)) {
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = get_tres_state_reason(
+			tres_pos, WAIT_QOS_MAX_UNK_MINS_PER_JOB);
+		debug2("Job %u being held, "
+		       "the job is requesting more than allowed with QOS %s's "
+		       "max tres(%s) minutes of %"PRIu64" "
+		       "with %"PRIu64,
+		       job_ptr->job_id,
+		       qos_ptr->name,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       qos_ptr->max_tres_mins_pj_ctld[tres_pos],
+		       job_tres_time_limit[tres_pos]);
+		rc = false;
+		goto end_it;
+	}
+
+	if (!_validate_tres_limits_for_qos(&tres_pos,
+					   tres_req_cnt, 0,
+					   NULL,
+					   qos_ptr->max_tres_pj_ctld,
+					   NULL,
+					   qos_out_ptr->max_tres_pj_ctld,
+					   job_ptr->limit_set.tres,
+					   1, 1)) {
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = get_tres_state_reason(
+			tres_pos, WAIT_QOS_MAX_UNK_PER_JOB);
+		debug2("job %u is being held, "
+		       "QOS %s min tres(%s) per job "
+		       "request %"PRIu64" exceeds "
+		       "max tres limit %"PRIu64,
+		       job_ptr->job_id,
+		       qos_ptr->name,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       tres_req_cnt[tres_pos],
+		       qos_ptr->max_tres_pj_ctld[tres_pos]);
+		rc = false;
+		goto end_it;
+	}
+
+	if (!_validate_tres_limits_for_qos(&tres_pos,
+					   tres_req_cnt,
+					   tres_req_cnt[TRES_ARRAY_NODE],
+					   NULL,
+					   qos_ptr->max_tres_pn_ctld,
+					   NULL,
+					   qos_out_ptr->max_tres_pn_ctld,
+					   job_ptr->limit_set.tres,
+					   1, 1)) {
+		uint64_t req_per_node;
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = get_tres_state_reason(
+			tres_pos, WAIT_QOS_MAX_UNK_PER_NODE);
+		req_per_node = tres_req_cnt[tres_pos];
+		if (tres_req_cnt[TRES_ARRAY_NODE] > 1)
+			req_per_node /= tres_req_cnt[TRES_ARRAY_NODE];
+		debug2("job %u is being held, "
+		       "QOS %s min tres(%s) per node "
+		       "request %"PRIu64" exceeds "
+		       "max tres limit %"PRIu64,
+		       job_ptr->job_id,
+		       qos_ptr->name,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       req_per_node,
+		       qos_ptr->max_tres_pn_ctld[tres_pos]);
+		rc = false;
+		goto end_it;
+	}
+
+	if (!_validate_tres_limits_for_qos(&tres_pos,
+					   tres_req_cnt, 0,
+					   NULL,
+					   qos_ptr->min_tres_pj_ctld,
+					   NULL,
+					   qos_out_ptr->min_tres_pj_ctld,
+					   job_ptr->limit_set.tres,
+					   1, 0)) {
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = get_tres_state_reason(
+			tres_pos, WAIT_QOS_MIN_UNK);
+		debug2("job %u is being held, "
+		       "QOS %s min tres(%s) per job "
+		       "request %"PRIu64" exceeds "
+		       "min tres limit %"PRIu64,
+		       job_ptr->job_id,
+		       qos_ptr->name,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       tres_req_cnt[tres_pos],
+		       qos_ptr->min_tres_pj_ctld[tres_pos]);
+		rc = false;
+		goto end_it;
+	}
+
+	i = _validate_tres_usage_limits_for_qos(
+		&tres_pos,
+		qos_ptr->max_tres_pu_ctld, qos_out_ptr->max_tres_pu_ctld,
+		tres_req_cnt, used_limits->tres,
+		NULL, job_ptr->limit_set.tres, 1);
+	switch (i) {
+	case 1:
+		/* not possible because the curr_usage sent in is NULL */
+		break;
+	case 2:
+		/* Hold the job if it exceeds the per-user
+		 * TRES limit for the given QOS
+		 */
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = get_tres_state_reason(
+			tres_pos, WAIT_QOS_MAX_UNK_PER_USER);
+		debug2("job %u is being held, "
+		       "QOS %s min tres(%s) "
+		       "request %"PRIu64" exceeds "
+		       "max tres per user limit %"PRIu64,
+		       job_ptr->job_id,
+		       qos_ptr->name,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       tres_req_cnt[tres_pos],
+		       qos_ptr->max_tres_pu_ctld[tres_pos]);
+		rc = false;
+		goto end_it;
+		break;
+	case 3:
+		/* Hold the job if the user has exceeded
+		 * the QOS per-user TRES limit with their
+		 * current usage */
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = get_tres_state_reason(
+			tres_pos, WAIT_QOS_MAX_UNK_PER_USER);
+		debug2("job %u being held, "
+		       "if allowed the job request will exceed "
+		       "QOS %s max tres(%s) per user limit "
+		       "%"PRIu64" with already used %"PRIu64" + "
+		       "requested %"PRIu64,
+		       job_ptr->job_id,
+		       qos_ptr->name,
+		       assoc_mgr_tres_name_array[tres_pos],
+		       qos_ptr->max_tres_pu_ctld[tres_pos],
+		       used_limits->tres[tres_pos],
+		       tres_req_cnt[tres_pos]);
+		rc = false;
+		goto end_it;
+	default:
+		/* all good */
+		break;
+	}
 
-			qos_ptr->usage->grp_used_mem -= job_memory;
-			if ((int32_t)qos_ptr->usage->grp_used_mem < 0) {
-				qos_ptr->usage->grp_used_mem = 0;
-				debug2("acct_policy_job_fini: grp_used_mem "
-				       "underflow for qos %s", qos_ptr->name);
-			}
+	/* We do not need to check max_jobs_pu here */
 
-			qos_ptr->usage->grp_used_nodes -= node_cnt;
-			if ((int32_t)qos_ptr->usage->grp_used_nodes < 0) {
-				qos_ptr->usage->grp_used_nodes = 0;
-				debug2("acct_policy_job_fini: grp_used_nodes "
-				       "underflow for qos %s", qos_ptr->name);
-			}
+end_it:
+	/* we don't need to check submit_jobs_pu here */
 
-			used_limits->cpus -= job_ptr->total_cpus;
-			if ((int32_t)used_limits->cpus < 0) {
-				used_limits->cpus = 0;
-				debug2("acct_policy_job_fini: "
-				       "used_limits->cpus "
-				       "underflow for qos %s user %d",
-				       qos_ptr->name, used_limits->uid);
-			}
+	/* we don't need to check max_wall_pj here */
 
-			used_limits->jobs--;
-			if ((int32_t)used_limits->jobs < 0) {
-				used_limits->jobs = 0;
-				debug2("acct_policy_job_fini: used_jobs "
-				       "underflow for qos %s user %d",
-				       qos_ptr->name, used_limits->uid);
-			}
+	if (free_used_limits)
+		xfree(used_limits);
 
-			used_limits->nodes -= node_cnt;
-			if ((int32_t)used_limits->nodes < 0) {
-				used_limits->nodes = 0;
-				debug2("acct_policy_job_fini: "
-				       "used_limits->nodes"
-				       "underflow for qos %s user %d",
-				       qos_ptr->name, used_limits->uid);
-			}
+	return rc;
+}
 
-			break;
-		default:
-			error("acct_policy: qos unknown type %d", type);
-			break;
-		}
-	}
+static int _qos_job_time_out(struct job_record *job_ptr,
+			     slurmdb_qos_rec_t *qos_ptr,
+			     slurmdb_qos_rec_t *qos_out_ptr,
+			     uint64_t *job_tres_usage_mins)
+{
+	uint64_t tres_usage_mins[slurmctld_tres_cnt];
+	uint32_t wall_mins;
+	int rc = true, tres_pos = 0, i;
+	time_t now = time(NULL);
 
-	assoc_ptr = (slurmdb_association_rec_t *)job_ptr->assoc_ptr;
-	while (assoc_ptr) {
-		switch(type) {
-		case ACCT_POLICY_ADD_SUBMIT:
-			assoc_ptr->usage->used_submit_jobs++;
-			break;
-		case ACCT_POLICY_REM_SUBMIT:
-			if (assoc_ptr->usage->used_submit_jobs)
-				assoc_ptr->usage->used_submit_jobs--;
-			else
-				debug2("acct_policy_remove_job_submit: "
-				       "used_submit_jobs underflow for "
-				       "account %s",
-				       assoc_ptr->acct);
-			break;
-		case ACCT_POLICY_JOB_BEGIN:
-			assoc_ptr->usage->used_jobs++;
-			assoc_ptr->usage->grp_used_cpus += job_ptr->total_cpus;
-			assoc_ptr->usage->grp_used_mem += job_memory;
-			assoc_ptr->usage->grp_used_nodes += node_cnt;
-			assoc_ptr->usage->grp_used_cpu_run_secs +=
-				used_cpu_run_secs;
-			debug4("acct_policy_job_begin: after adding job %i, "
-			       "assoc %s grp_used_cpu_run_secs is %"PRIu64"",
-			       job_ptr->job_id, assoc_ptr->acct,
-			       assoc_ptr->usage->grp_used_cpu_run_secs);
-			break;
-		case ACCT_POLICY_JOB_FINI:
-			if (assoc_ptr->usage->used_jobs)
-				assoc_ptr->usage->used_jobs--;
-			else
-				debug2("acct_policy_job_fini: used_jobs "
-				       "underflow for account %s",
-				       assoc_ptr->acct);
+	if (!qos_ptr || !qos_out_ptr)
+		return rc;
 
-			assoc_ptr->usage->grp_used_cpus -= job_ptr->total_cpus;
-			if ((int32_t)assoc_ptr->usage->grp_used_cpus < 0) {
-				assoc_ptr->usage->grp_used_cpus = 0;
-				debug2("acct_policy_job_fini: grp_used_cpus "
-				       "underflow for account %s",
-				       assoc_ptr->acct);
-			}
+	/* The idea here is for qos to trump what an association
+	 * has set for a limit, so if an association set of
+	 * wall 10 mins and the qos has 20 mins set and the
+	 * job has been running for 11 minutes it continues
+	 * until 20.
+	 */
+	/* clang needs this memset to avoid a warning */
+	memset(tres_usage_mins, 0, sizeof(tres_usage_mins));
+	for (i=0; i<slurmctld_tres_cnt; i++)
+		tres_usage_mins[i] =
+			(uint64_t)(qos_ptr->usage->usage_tres_raw[i] / 60.0);
+	wall_mins = qos_ptr->usage->grp_used_wall / 60;
+
+	i = _validate_tres_usage_limits_for_qos(
+		&tres_pos, qos_ptr->grp_tres_mins_ctld,
+		qos_out_ptr->grp_tres_mins_ctld, NULL,
+		NULL, tres_usage_mins, NULL, 0);
+	switch (i) {
+	case 1:
+		last_job_update = now;
+		info("Job %u timed out, "
+		     "the job is at or exceeds QOS %s's "
+		     "group max tres(%s) minutes of %"PRIu64" "
+		     "with %"PRIu64"",
+		     job_ptr->job_id,
+		     qos_ptr->name,
+		     assoc_mgr_tres_name_array[tres_pos],
+		     qos_ptr->grp_tres_mins_ctld[tres_pos],
+		     tres_usage_mins[tres_pos]);
+		job_ptr->state_reason = FAIL_TIMEOUT;
+		rc = false;
+		goto end_it;
+		break;
+	case 2:
+		/* not possible safe_limits is 0 */
+	case 3:
+		/* not possible safe_limits is 0 */
+	default:
+		/* all good */
+		break;
+	}
 
-			assoc_ptr->usage->grp_used_mem -= job_memory;
-			if ((int32_t)assoc_ptr->usage->grp_used_mem < 0) {
-				assoc_ptr->usage->grp_used_mem = 0;
-				debug2("acct_policy_job_fini: grp_used_mem "
-				       "underflow for account %s",
-				       assoc_ptr->acct);
-			}
+	if ((qos_out_ptr->grp_wall == INFINITE)
+	    && (qos_ptr->grp_wall != INFINITE)) {
 
-			assoc_ptr->usage->grp_used_nodes -= node_cnt;
-			if ((int32_t)assoc_ptr->usage->grp_used_nodes < 0) {
-				assoc_ptr->usage->grp_used_nodes = 0;
-				debug2("acct_policy_job_fini: grp_used_nodes "
-				       "underflow for account %s",
-				       assoc_ptr->acct);
-			}
+		qos_out_ptr->grp_wall = qos_ptr->grp_wall;
 
-			break;
-		default:
-			error("acct_policy: association unknown type %d", type);
-			break;
+		if (wall_mins >= qos_ptr->grp_wall) {
+			last_job_update = now;
+			info("Job %u timed out, "
+			     "the job is at or exceeds QOS %s's "
+			     "group wall limit of %u with %u",
+			     job_ptr->job_id,
+			     qos_ptr->name, qos_ptr->grp_wall,
+			     wall_mins);
+			job_ptr->state_reason = FAIL_TIMEOUT;
+			rc = false;
+			goto end_it;
 		}
-		/* now handle all the group limits of the parents */
-		assoc_ptr = assoc_ptr->usage->parent_assoc_ptr;
 	}
-	assoc_mgr_unlock(&locks);
+
+	i = _validate_tres_usage_limits_for_qos(
+		&tres_pos, qos_ptr->max_tres_mins_pj_ctld,
+		qos_out_ptr->max_tres_mins_pj_ctld, job_tres_usage_mins,
+		NULL, NULL, NULL, 1);
+	switch (i) {
+	case 1:
+		/* not possible curr_usage is NULL */
+		break;
+	case 2:
+		last_job_update = now;
+		info("Job %u timed out, "
+		     "the job is at or exceeds QOS %s's "
+		     "max tres(%s) minutes of %"PRIu64" with %"PRIu64,
+		     job_ptr->job_id,
+		     qos_ptr->name,
+		     assoc_mgr_tres_name_array[tres_pos],
+		     qos_ptr->max_tres_mins_pj_ctld[tres_pos],
+		     job_tres_usage_mins[tres_pos]);
+		job_ptr->state_reason = FAIL_TIMEOUT;
+		rc = false;
+		goto end_it;
+		break;
+	case 3:
+		/* not possible tres_usage is NULL */
+	default:
+		/* all good */
+		break;
+	}
+
+end_it:
+	return rc;
 }
 
 /*
@@ -384,10 +1927,14 @@ extern void acct_policy_job_fini(struct job_record *job_ptr)
 extern void acct_policy_alter_job(struct job_record *job_ptr,
 				  uint32_t new_time_limit)
 {
-	slurmdb_association_rec_t *assoc_ptr = NULL;
-	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK,
-				   WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
-	uint64_t used_cpu_run_secs, new_used_cpu_run_secs;
+	slurmdb_qos_rec_t *qos_ptr_1, *qos_ptr_2;
+	slurmdb_assoc_rec_t *assoc_ptr = NULL;
+	assoc_mgr_lock_t locks = { WRITE_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+	uint64_t used_tres_run_secs[slurmctld_tres_cnt];
+	uint64_t new_used_tres_run_secs[slurmctld_tres_cnt];
+	uint64_t time_limit_secs, new_time_limit_secs;
+	int i;
 
 	if (!IS_JOB_RUNNING(job_ptr) || (job_ptr->time_limit == new_time_limit))
 		return;
@@ -396,42 +1943,48 @@ extern void acct_policy_alter_job(struct job_record *job_ptr,
 	    || !_valid_job_assoc(job_ptr))
 		return;
 
-	used_cpu_run_secs = (uint64_t)job_ptr->total_cpus
-		* (uint64_t)job_ptr->time_limit * 60;
-	new_used_cpu_run_secs = (uint64_t)job_ptr->total_cpus
-		* (uint64_t)new_time_limit * 60;
+	time_limit_secs = (uint64_t)job_ptr->time_limit * 60;
+	new_time_limit_secs = (uint64_t)new_time_limit * 60;
+
+	/* clang needs these memset to avoid a warning */
+	memset(used_tres_run_secs, 0, sizeof(used_tres_run_secs));
+	memset(new_used_tres_run_secs, 0, sizeof(new_used_tres_run_secs));
+	for (i=0; i<slurmctld_tres_cnt; i++) {
+		used_tres_run_secs[i] =
+			job_ptr->tres_alloc_cnt[i] * time_limit_secs;
+		new_used_tres_run_secs[i] =
+			job_ptr->tres_alloc_cnt[i] * new_time_limit_secs;
+	}
 
 	assoc_mgr_lock(&locks);
-	if (job_ptr->qos_ptr) {
-		slurmdb_qos_rec_t *qos_ptr =
-			(slurmdb_qos_rec_t *)job_ptr->qos_ptr;
-
-		qos_ptr->usage->grp_used_cpu_run_secs -=
-			used_cpu_run_secs;
-		qos_ptr->usage->grp_used_cpu_run_secs +=
-			new_used_cpu_run_secs;
-		debug2("altering %u QOS %s got %"PRIu64" "
-		       "just removed %"PRIu64" and added %"PRIu64"",
-		       job_ptr->job_id,
-		       qos_ptr->name,
-		       qos_ptr->usage->grp_used_cpu_run_secs,
-		       used_cpu_run_secs,
-		       new_used_cpu_run_secs);
-	}
 
-	assoc_ptr = (slurmdb_association_rec_t *)job_ptr->assoc_ptr;
+	_set_qos_order(job_ptr, &qos_ptr_1, &qos_ptr_2);
+
+	_qos_alter_job(job_ptr, qos_ptr_1,
+		       used_tres_run_secs, new_used_tres_run_secs);
+	_qos_alter_job(job_ptr, qos_ptr_2,
+		       used_tres_run_secs, new_used_tres_run_secs);
+
+	assoc_ptr = (slurmdb_assoc_rec_t *)job_ptr->assoc_ptr;
 	while (assoc_ptr) {
-		assoc_ptr->usage->grp_used_cpu_run_secs -=
-			used_cpu_run_secs;
-		assoc_ptr->usage->grp_used_cpu_run_secs +=
-			new_used_cpu_run_secs;
-		debug2("altering %u acct %s got %"PRIu64" "
-		       "just removed %"PRIu64" and added %"PRIu64"",
-		       job_ptr->job_id,
-		       assoc_ptr->acct,
-		       assoc_ptr->usage->grp_used_cpu_run_secs,
-		       used_cpu_run_secs,
-		       new_used_cpu_run_secs);
+		for (i=0; i<slurmctld_tres_cnt; i++) {
+			if (used_tres_run_secs[i] == new_used_tres_run_secs[i])
+				continue;
+			assoc_ptr->usage->grp_used_tres_run_secs[i] -=
+				used_tres_run_secs[i];
+			assoc_ptr->usage->grp_used_tres_run_secs[i] +=
+				new_used_tres_run_secs[i];
+			debug2("altering job %u assoc %u(%s/%s/%s) "
+			       "got %"PRIu64" just removed %"PRIu64
+			       " and added %"PRIu64"",
+			       job_ptr->job_id,
+			       assoc_ptr->id, assoc_ptr->acct,
+			       assoc_ptr->user, assoc_ptr->partition,
+			       assoc_ptr->usage->grp_used_tres_run_secs[i],
+			       used_tres_run_secs[i],
+			       new_used_tres_run_secs[i]);
+		}
+
 		/* now handle all the group limits of the parents */
 		assoc_ptr = assoc_ptr->usage->parent_assoc_ptr;
 	}
@@ -440,24 +1993,21 @@ extern void acct_policy_alter_job(struct job_record *job_ptr,
 
 extern bool acct_policy_validate(job_desc_msg_t *job_desc,
 				 struct part_record *part_ptr,
-				 slurmdb_association_rec_t *assoc_in,
+				 slurmdb_assoc_rec_t *assoc_in,
 				 slurmdb_qos_rec_t *qos_ptr,
 				 uint32_t *reason,
 				 acct_policy_limit_set_t *acct_policy_limit_set,
 				 bool update_call)
 {
-	uint32_t time_limit;
-	slurmdb_association_rec_t *assoc_ptr = assoc_in;
+	slurmdb_qos_rec_t *qos_ptr_1, *qos_ptr_2;
+	slurmdb_qos_rec_t qos_rec;
+	slurmdb_assoc_rec_t *assoc_ptr = assoc_in;
 	int parent = 0, job_cnt = 1;
 	char *user_name = NULL;
 	bool rc = true;
-	uint32_t qos_max_cpus_limit = INFINITE;
-	uint32_t qos_max_nodes_limit = INFINITE;
-	uint32_t qos_time_limit = INFINITE;
-	uint32_t job_memory = 0;
-	bool admin_set_memory_limit = false;
-	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK,
-				   READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	struct job_record job_rec;
+	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
 	bool strict_checking;
 
 	xassert(acct_policy_limit_set);
@@ -468,315 +2018,66 @@ extern bool acct_policy_validate(job_desc_msg_t *job_desc,
 	}
 	user_name = assoc_ptr->user;
 
-	if (job_desc->pn_min_memory != NO_VAL) {
-		if ((job_desc->pn_min_memory & MEM_PER_CPU)
-		    && (job_desc->min_cpus != NO_VAL)) {
-			job_memory = (job_desc->pn_min_memory & (~MEM_PER_CPU))
-				* job_desc->min_cpus;
-			admin_set_memory_limit =
-				(acct_policy_limit_set->pn_min_memory
-				 == ADMIN_SET_LIMIT)
-				|| (acct_policy_limit_set->max_cpus
-				    == ADMIN_SET_LIMIT);
-			debug3("acct_policy_validate: MPC: "
-			       "job_memory set to %u", job_memory);
-		} else if (job_desc->min_nodes != NO_VAL) {
-			job_memory = (job_desc->pn_min_memory)
-				* job_desc->min_nodes;
-			admin_set_memory_limit =
-				(acct_policy_limit_set->pn_min_memory
-				 == ADMIN_SET_LIMIT)
-				|| (acct_policy_limit_set->max_nodes
-				    == ADMIN_SET_LIMIT);
-			debug3("acct_policy_validate: MPN: "
-			       "job_memory set to %u", job_memory);
-		}
-	}
-
 	if (job_desc->array_bitmap)
 		job_cnt = bit_set_count(job_desc->array_bitmap);
 
-	assoc_mgr_lock(&locks);
-
-	if (qos_ptr) {
-		slurmdb_used_limits_t *used_limits = NULL;
-		strict_checking =
-			(reason || (qos_ptr->flags & QOS_FLAG_DENY_LIMIT));
-		/* for validation we don't need to look at
-		 * qos_ptr->grp_cpu_mins.
-		 */
-		qos_max_cpus_limit =
-			MIN(qos_ptr->grp_cpus, qos_ptr->max_cpus_pu);
-		if ((acct_policy_limit_set->max_cpus == ADMIN_SET_LIMIT)
-		    || (qos_max_cpus_limit == INFINITE)
-		    || (update_call && (job_desc->max_cpus == NO_VAL))) {
-			/* no need to check/set */
-
-		} else if (strict_checking && (job_desc->min_cpus != NO_VAL)
-			   && (job_desc->min_cpus > qos_ptr->max_cpus_pu)) {
-			if (reason)
-				*reason = WAIT_QOS_MAX_CPU_PER_USER;
-
-			debug2("job submit for user %s(%u): "
-			       "min cpu request %u exceeds "
-			       "per-user max cpu limit %u for qos '%s'",
-			       user_name,
-			       job_desc->user_id,
-			       job_desc->min_cpus,
-			       qos_ptr->max_cpus_pu,
-			       qos_ptr->name);
-			rc = false;
-			goto end_it;
-		} else if (strict_checking && (job_desc->min_cpus != NO_VAL)
-			   && (job_desc->min_cpus > qos_ptr->grp_cpus)) {
-			if (reason)
-				*reason = WAIT_QOS_GRP_CPU;
-
-			debug2("job submit for user %s(%u): "
-			       "min cpu request %u exceeds "
-			       "group max cpu limit %u for qos '%s'",
-			       user_name,
-			       job_desc->user_id,
-			       job_desc->min_cpus,
-			       qos_ptr->grp_cpus,
-			       qos_ptr->name);
-			rc = false;
-			goto end_it;
-		}
-
-		/* for validation we don't need to look at
-		 * qos_ptr->grp_jobs.
-		 */
-		if (!admin_set_memory_limit && strict_checking
-		    && (qos_ptr->grp_mem != INFINITE)
-		    && (job_memory > qos_ptr->grp_mem)) {
-			if (reason)
-				*reason = WAIT_QOS_GRP_MEMORY;
-			debug2("job submit for user %s(%u): "
-			       "min memory request %u exceeds "
-			       "group max memory limit %u for qos '%s'",
-			       user_name,
-			       job_desc->user_id,
-			       job_memory,
-			       qos_ptr->grp_mem,
-			       qos_ptr->name);
-			rc = false;
-			goto end_it;
-		}
-
-		qos_max_nodes_limit =
-			MIN(qos_ptr->grp_nodes, qos_ptr->max_nodes_pu);
-
-		if ((acct_policy_limit_set->max_nodes == ADMIN_SET_LIMIT)
-		    || (qos_max_nodes_limit == INFINITE)
-		    || (update_call && (job_desc->max_nodes == NO_VAL))) {
-			/* no need to check/set */
-		} else if (strict_checking && (job_desc->min_nodes != NO_VAL)
-			   && (job_desc->min_nodes > qos_ptr->max_nodes_pu)) {
-			/* MaxNodesPerUser
-			 */
-			if (reason)
-				*reason = WAIT_QOS_MAX_NODE_PER_USER;
-			debug2("job submit for user %s(%u): "
-			       "min node request %u exceeds "
-			       "per-user max node limit %u for qos '%s'",
-			       user_name,
-			       job_desc->user_id,
-			       job_desc->min_nodes,
-			       qos_ptr->max_nodes_pu,
-			       qos_ptr->name);
-			rc = false;
-			goto end_it;
-		} else if (strict_checking && (job_desc->min_nodes != NO_VAL)
-			   && (job_desc->min_nodes > qos_ptr->grp_nodes)) {
-			if (reason)
-				*reason = WAIT_QOS_GRP_NODES;
-			debug2("job submit for user %s(%u): "
-			       "min node request %u exceeds "
-			       "group max node limit %u for qos '%s'",
-			       user_name,
-			       job_desc->user_id,
-			       job_desc->min_nodes,
-			       qos_ptr->grp_nodes,
-			       qos_ptr->name);
-			rc = false;
-			goto end_it;
-		}
-
-		if ((qos_ptr->grp_submit_jobs != INFINITE) &&
-		    ((qos_ptr->usage->grp_used_submit_jobs + job_cnt)
-		     > qos_ptr->grp_submit_jobs)) {
-			if (reason)
-				*reason = WAIT_QOS_GRP_SUB_JOB;
-			debug2("job submit for user %s(%u): "
-			       "group max submit job limit exceeded %u "
-			       "for qos '%s'",
-			       user_name,
-			       job_desc->user_id,
-			       qos_ptr->grp_submit_jobs,
-			       qos_ptr->name);
-			rc = false;
-			goto end_it;
-		}
+	slurmdb_init_qos_rec(&qos_rec, 0, INFINITE);
 
+	assoc_mgr_lock(&locks);
 
-		/* for validation we don't need to look at
-		 * qos_ptr->grp_wall. It is checked while the job is running.
-		 */
+	assoc_mgr_set_qos_tres_cnt(&qos_rec);
 
+	job_rec.qos_ptr = qos_ptr;
+	job_rec.part_ptr = part_ptr;
 
-		/* we do need to check qos_ptr->max_cpu_mins_pj.
-		 * if you can end up in PENDING QOSJobLimit, you need
-		 * to validate it if DenyOnLimit is set
-		 */
-		if (((job_desc->min_cpus  != NO_VAL) ||
-		     (job_desc->min_nodes != NO_VAL)) &&
-		    (qos_ptr->max_cpu_mins_pj != INFINITE)) {
-			uint32_t cpu_cnt = job_desc->min_nodes;
-			if ((job_desc->min_nodes == NO_VAL) ||
-			    (job_desc->min_cpus > job_desc->min_nodes))
-				cpu_cnt = job_desc->min_cpus;
-			qos_time_limit = qos_ptr->max_cpu_mins_pj / cpu_cnt;
-		}
+	_set_qos_order(&job_rec, &qos_ptr_1, &qos_ptr_2);
 
-		if ((acct_policy_limit_set->max_cpus == ADMIN_SET_LIMIT)
-		    || (qos_ptr->max_cpus_pj == INFINITE)
-		    || (update_call && (job_desc->max_cpus == NO_VAL))) {
-			/* no need to check/set */
-		} else if (strict_checking && (job_desc->min_cpus != NO_VAL)
-			   && (job_desc->min_cpus > qos_ptr->max_cpus_pj)) {
-			if (reason)
-				*reason = WAIT_QOS_MAX_CPUS_PER_JOB;
-			debug2("job submit for user %s(%u): "
-			       "min cpu limit %u exceeds "
-			       "qos max %u",
-			       user_name,
-			       job_desc->user_id,
-			       job_desc->min_cpus,
-			       qos_ptr->max_cpus_pj);
-			rc = false;
+	if (qos_ptr_1) {
+		strict_checking =
+			(reason || (qos_ptr_1->flags & QOS_FLAG_DENY_LIMIT));
+		if (qos_ptr_2 && !strict_checking)
+			strict_checking =
+				qos_ptr_2->flags & QOS_FLAG_DENY_LIMIT;
+
+		if (!(rc = _qos_policy_validate(
+			      job_desc, part_ptr, qos_ptr_1, &qos_rec,
+			      reason, acct_policy_limit_set, update_call,
+			      user_name, job_cnt, strict_checking)))
 			goto end_it;
-		}
-
-		/* for validation we don't need to look at
-		 * qos_ptr->max_jobs.
-		 */
-
-		if ((acct_policy_limit_set->max_nodes == ADMIN_SET_LIMIT)
-		    || (qos_ptr->max_nodes_pj == INFINITE)
-		    || (update_call && (job_desc->max_nodes == NO_VAL))) {
-			/* no need to check/set */
-		} else if (strict_checking && (job_desc->min_nodes != NO_VAL)
-			   && (job_desc->min_nodes > qos_ptr->max_nodes_pj)) {
-			if (reason)
-				*reason = WAIT_QOS_MAX_NODE_PER_JOB;
-			debug2("job submit for user %s(%u): "
-			       "min node limit %u exceeds "
-			       "qos max %u",
-			       user_name,
-			       job_desc->user_id,
-			       job_desc->min_nodes,
-			       qos_ptr->max_nodes_pj);
-			rc = false;
+		if (!(rc = _qos_policy_validate(
+			      job_desc, part_ptr, qos_ptr_2, &qos_rec,
+			      reason, acct_policy_limit_set, update_call,
+			      user_name, job_cnt, strict_checking)))
 			goto end_it;
-		}
-
-		if (qos_ptr->max_submit_jobs_pu != INFINITE) {
-			if (!used_limits)
-				used_limits = _get_used_limits_for_user(
-					qos_ptr->usage->user_limit_list,
-					job_desc->user_id);
-			if ((!used_limits &&
-			     qos_ptr->max_submit_jobs_pu == 0) ||
-			    (used_limits &&
-			     ((used_limits->submit_jobs + job_cnt) >
-			      qos_ptr->max_submit_jobs_pu))) {
-				if (reason)
-					*reason = WAIT_QOS_MAX_SUB_JOB;
-				debug2("job submit for user %s(%u): "
-				       "qos max submit job limit exceeded %u",
-				       user_name,
-				       job_desc->user_id,
-				       qos_ptr->max_submit_jobs_pu);
-				rc = false;
-				goto end_it;
-			}
-		}
-
-		if ((acct_policy_limit_set->time == ADMIN_SET_LIMIT)
-		    || (qos_ptr->max_wall_pj == INFINITE)
-		    || (update_call && (job_desc->time_limit == NO_VAL))) {
-			/* no need to check/set */
-		} else if (qos_time_limit > qos_ptr->max_wall_pj) {
-			qos_time_limit = qos_ptr->max_wall_pj;
-		}
-
-		if (qos_time_limit != INFINITE) {
-			if (job_desc->time_limit == NO_VAL) {
-				if (part_ptr->max_time == INFINITE)
-					job_desc->time_limit = qos_time_limit;
-				else {
-					job_desc->time_limit =
-						MIN(qos_time_limit,
-						    part_ptr->max_time);
-				}
-				acct_policy_limit_set->time = 1;
-			} else if (acct_policy_limit_set->time &&
-				   job_desc->time_limit > qos_time_limit) {
-				job_desc->time_limit = qos_time_limit;
-			} else if (strict_checking
-				   && job_desc->time_limit > qos_time_limit) {
-				if (reason)
-					*reason = WAIT_QOS_MAX_WALL_PER_JOB;
-				debug2("job submit for user %s(%u): "
-				       "time limit %u exceeds qos max %u",
-				       user_name,
-				       job_desc->user_id,
-				       job_desc->time_limit, qos_time_limit);
-				rc = false;
-				goto end_it;
-			}
-		}
 
-		if (strict_checking && (qos_ptr->min_cpus_pj != INFINITE)
-		    && (job_desc->min_cpus < qos_ptr->min_cpus_pj)) {
-			if (reason)
-				*reason = WAIT_QOS_MIN_CPUS;
-			debug2("job submit for user %s(%u): "
-			       "min cpus %u below "
-			       "qos min %u",
-			       user_name,
-			       job_desc->user_id,
-			       job_desc->min_cpus,
-			      qos_ptr->min_cpus_pj);
-			rc = false;
-			goto end_it;
-		}
-	} else {
+	} else
 		strict_checking = reason ? true : false;
-	}
 
 	while (assoc_ptr) {
+		int tres_pos = 0;
+
 		/* for validation we don't need to look at
 		 * assoc_ptr->grp_cpu_mins.
 		 */
 
-		if ((acct_policy_limit_set->max_cpus == ADMIN_SET_LIMIT)
-		    || (qos_ptr && (qos_ptr->grp_cpus != INFINITE))
-		    || (assoc_ptr->grp_cpus == INFINITE)
-		    || (update_call && (job_desc->max_cpus == NO_VAL))) {
-			/* no need to check/set */
-		} else if (strict_checking && (job_desc->min_cpus != NO_VAL)
-			   && (job_desc->min_cpus > assoc_ptr->grp_cpus)) {
+		if (!_validate_tres_limits_for_assoc(
+			    &tres_pos, job_desc->tres_req_cnt, 0,
+			    assoc_ptr->grp_tres_ctld,
+			    qos_rec.grp_tres_ctld,
+			    acct_policy_limit_set->tres,
+			    strict_checking, update_call, 1)) {
 			if (reason)
-				*reason = WAIT_ASSOC_GRP_CPU;
+				*reason = get_tres_state_reason(
+					tres_pos, WAIT_ASSOC_GRP_UNK);
+
 			debug2("job submit for user %s(%u): "
-			       "min cpu request %u exceeds "
-			       "group max cpu limit %u for account %s",
+			       "min tres(%s) request %"PRIu64" exceeds "
+			       "group max tres limit %"PRIu64" for account %s",
 			       user_name,
 			       job_desc->user_id,
-			       job_desc->min_cpus,
-			       assoc_ptr->grp_cpus,
+			       assoc_mgr_tres_name_array[tres_pos],
+			       job_desc->tres_req_cnt[tres_pos],
+			       assoc_ptr->grp_tres_ctld[tres_pos],
 			       assoc_ptr->acct);
 			rc = false;
 			break;
@@ -785,47 +2086,8 @@ extern bool acct_policy_validate(job_desc_msg_t *job_desc,
 		/* for validation we don't need to look at
 		 * assoc_ptr->grp_jobs.
 		 */
-		if (strict_checking && !admin_set_memory_limit
-		    && (!qos_ptr || (qos_ptr->grp_mem == INFINITE))
-		    && (assoc_ptr->grp_mem != INFINITE)
-		    && (job_memory > assoc_ptr->grp_mem)) {
-			if (reason)
-				*reason = WAIT_ASSOC_GRP_MEMORY;
-			debug2("job submit for user %s(%u): "
-			       "min memory request %u exceeds "
-			       "group max memory limit %u for account %s",
-			       user_name,
-			       job_desc->user_id,
-			       job_memory,
-			       assoc_ptr->grp_mem,
-			       assoc_ptr->acct);
-			rc = false;
-			break;
-		}
-
-		if ((acct_policy_limit_set->max_nodes == ADMIN_SET_LIMIT)
-		    || (qos_ptr && (qos_ptr->grp_nodes != INFINITE))
-		    || (assoc_ptr->grp_nodes == INFINITE)
-		    || (update_call && (job_desc->max_nodes == NO_VAL))) {
-			/* no need to check/set */
-		} else if (strict_checking && (job_desc->min_nodes != NO_VAL)
-			   && (job_desc->min_nodes > assoc_ptr->grp_nodes)) {
-			if (reason)
-				*reason = WAIT_ASSOC_GRP_NODES;
-			debug2("job submit for user %s(%u): "
-			       "min node request %u exceeds "
-			       "group max node limit %u for account %s",
-			       user_name,
-			       job_desc->user_id,
-			       job_desc->min_nodes,
-			       assoc_ptr->grp_nodes,
-			       assoc_ptr->acct);
-			rc = false;
-			break;
-		}
 
-		if ((!qos_ptr ||
-		     (qos_ptr && qos_ptr->grp_submit_jobs == INFINITE)) &&
+		if ((qos_rec.grp_submit_jobs == INFINITE) &&
 		    (assoc_ptr->grp_submit_jobs != INFINITE) &&
 		    ((assoc_ptr->usage->used_submit_jobs + job_cnt)
 		     > assoc_ptr->grp_submit_jobs)) {
@@ -860,52 +2122,63 @@ extern bool acct_policy_validate(job_desc_msg_t *job_desc,
 		 * assoc_ptr->max_cpu_mins_pj.
 		 */
 
-		if ((acct_policy_limit_set->max_cpus == ADMIN_SET_LIMIT)
-		    || (qos_ptr && (qos_ptr->max_cpus_pj != INFINITE))
-		    || (assoc_ptr->max_cpus_pj == INFINITE)
-		    || (update_call && (job_desc->max_cpus == NO_VAL))) {
-			/* no need to check/set */
-		} else if (strict_checking && (job_desc->min_cpus != NO_VAL)
-			   && (job_desc->min_cpus > assoc_ptr->max_cpus_pj)) {
+		tres_pos = 0;
+		if (!_validate_tres_limits_for_assoc(
+			    &tres_pos, job_desc->tres_req_cnt, 0,
+			    assoc_ptr->max_tres_ctld,
+			    qos_rec.max_tres_pj_ctld,
+			    acct_policy_limit_set->tres,
+			    strict_checking, update_call, 1)) {
 			if (reason)
-				*reason = WAIT_ASSOC_MAX_CPUS_PER_JOB;
+				*reason = get_tres_state_reason(
+					tres_pos, WAIT_ASSOC_MAX_UNK_PER_JOB);
+
 			debug2("job submit for user %s(%u): "
-			       "min cpu limit %u exceeds "
-			       "account max %u",
+			       "min tres(%s) request %"PRIu64" exceeds "
+			       "max tres limit %"PRIu64" for account %s",
 			       user_name,
 			       job_desc->user_id,
-			       job_desc->min_cpus,
-			       assoc_ptr->max_cpus_pj);
+			       assoc_mgr_tres_name_array[tres_pos],
+			       job_desc->tres_req_cnt[tres_pos],
+			       assoc_ptr->max_tres_ctld[tres_pos],
+			       assoc_ptr->acct);
 			rc = false;
 			break;
 		}
 
-		/* for validation we don't need to look at
-		 * assoc_ptr->max_jobs.
-		 */
-
-		if ((acct_policy_limit_set->max_nodes == ADMIN_SET_LIMIT)
-		    || (qos_ptr && (qos_ptr->max_nodes_pj != INFINITE))
-		    || (assoc_ptr->max_nodes_pj == INFINITE)
-		    || (update_call && (job_desc->max_nodes == NO_VAL))) {
-			/* no need to check/set */
-		} else if (strict_checking && (job_desc->min_nodes != NO_VAL)
-			   && (job_desc->min_nodes > assoc_ptr->max_nodes_pj)) {
+		tres_pos = 0;
+		if (!_validate_tres_limits_for_assoc(
+			    &tres_pos, job_desc->tres_req_cnt,
+			    job_desc->tres_req_cnt[TRES_ARRAY_NODE],
+			    assoc_ptr->max_tres_pn_ctld,
+			    qos_rec.max_tres_pn_ctld,
+			    acct_policy_limit_set->tres,
+			    strict_checking, update_call, 1)) {
 			if (reason)
-				*reason = WAIT_ASSOC_MAX_NODE_PER_JOB;
+				*reason = get_tres_state_reason(
+					tres_pos,
+					WAIT_ASSOC_MAX_UNK_PER_NODE);
+
 			debug2("job submit for user %s(%u): "
-			       "min node limit %u exceeds "
-			       "account max %u",
+			       "min tres(%s) request %"PRIu64" exceeds "
+			       "max tres limit %"PRIu64" per node "
+			       "for account %s",
 			       user_name,
 			       job_desc->user_id,
-			       job_desc->min_nodes,
-			       assoc_ptr->max_nodes_pj);
+			       assoc_mgr_tres_name_array[tres_pos],
+			       job_desc->tres_req_cnt[tres_pos] /
+			       job_desc->tres_req_cnt[TRES_ARRAY_NODE],
+			       assoc_ptr->max_tres_pn_ctld[tres_pos],
+			       assoc_ptr->acct);
 			rc = false;
 			break;
 		}
 
-		if ((!qos_ptr ||
-		     (qos_ptr && qos_ptr->max_submit_jobs_pu == INFINITE)) &&
+		/* for validation we don't need to look at
+		 * assoc_ptr->max_jobs.
+		 */
+
+		if ((qos_rec.max_submit_jobs_pu == INFINITE) &&
 		    (assoc_ptr->max_submit_jobs != INFINITE) &&
 		    ((assoc_ptr->usage->used_submit_jobs + job_cnt)
 		     > assoc_ptr->max_submit_jobs)) {
@@ -921,12 +2194,12 @@ extern bool acct_policy_validate(job_desc_msg_t *job_desc,
 		}
 
 		if ((acct_policy_limit_set->time == ADMIN_SET_LIMIT)
-		    || (qos_ptr && (qos_ptr->max_wall_pj != INFINITE))
+		    || (qos_rec.max_wall_pj != INFINITE)
 		    || (assoc_ptr->max_wall_pj == INFINITE)
 		    || (update_call && (job_desc->time_limit == NO_VAL))) {
 			/* no need to check/set */
 		} else {
-			time_limit = assoc_ptr->max_wall_pj;
+			uint32_t time_limit = assoc_ptr->max_wall_pj;
 			if (job_desc->time_limit == NO_VAL) {
 				if (part_ptr->max_time == INFINITE)
 					job_desc->time_limit = time_limit;
@@ -957,6 +2230,7 @@ extern bool acct_policy_validate(job_desc_msg_t *job_desc,
 	}
 end_it:
 	assoc_mgr_unlock(&locks);
+	slurmdb_free_qos_rec_members(&qos_rec);
 
 	return rc;
 }
@@ -990,18 +2264,17 @@ extern bool acct_policy_job_runnable_state(struct job_record *job_ptr)
  */
 extern bool acct_policy_job_runnable_pre_select(struct job_record *job_ptr)
 {
-	slurmdb_qos_rec_t *qos_ptr;
-	slurmdb_association_rec_t *assoc_ptr;
-	slurmdb_used_limits_t *used_limits = NULL;
-	bool free_used_limits = false;
+	slurmdb_qos_rec_t *qos_ptr_1, *qos_ptr_2;
+	slurmdb_qos_rec_t qos_rec;
+	slurmdb_assoc_rec_t *assoc_ptr;
 	uint32_t time_limit;
 	bool rc = true;
 	uint32_t wall_mins;
 	int parent = 0; /* flag to tell us if we are looking at the
 			 * parent or not
 			 */
-	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK,
-				   READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 	/* check to see if we are enforcing associations */
 	if (!accounting_enforce)
@@ -1023,121 +2296,35 @@ extern bool acct_policy_job_runnable_pre_select(struct job_record *job_ptr)
 		job_ptr->state_reason = WAIT_NO_REASON;
 	}
 
-	assoc_mgr_lock(&locks);
-	qos_ptr = job_ptr->qos_ptr;
-	if (qos_ptr) {
-		wall_mins = qos_ptr->usage->grp_used_wall / 60;
-
-		/*
-		 * Try to get the used limits for the user or initialise a local
-		 * nullified one if not available.
-		 */
-		used_limits = _get_used_limits_for_user(
-			qos_ptr->usage->user_limit_list,
-			job_ptr->user_id);
-		if (!used_limits) {
-			used_limits = xmalloc(sizeof(slurmdb_used_limits_t));
-			used_limits->uid = job_ptr->user_id;
-			free_used_limits = true;
-		}
-
-		/* we don't need to check grp_cpu_mins here */
-
-		/* we don't need to check grp_cpus here */
-
-		/* we don't need to check grp_mem here */
-
-		if ((qos_ptr->grp_jobs != INFINITE) &&
-		    (qos_ptr->usage->grp_used_jobs >= qos_ptr->grp_jobs)) {
-			xfree(job_ptr->state_desc);
-			job_ptr->state_reason = WAIT_QOS_GRP_JOB;
-			debug2("job %u being held, "
-			       "the job is at or exceeds "
-			       "group max jobs limit %u with %u for qos %s",
-			       job_ptr->job_id,
-			       qos_ptr->grp_jobs,
-			       qos_ptr->usage->grp_used_jobs, qos_ptr->name);
-
-			rc = false;
-			goto end_it;
-		}
-
-		/* we don't need to check grp_cpu_run_mins here */
-
-		/* we don't need to check grp_nodes here */
-
-		/* we don't need to check submit_jobs here */
-
-		if ((qos_ptr->grp_wall != INFINITE)
-		    && (wall_mins >= qos_ptr->grp_wall)) {
-			xfree(job_ptr->state_desc);
-			job_ptr->state_reason = WAIT_QOS_GRP_WALL;
-			debug2("job %u being held, "
-			       "the job is at or exceeds "
-			       "group wall limit %u "
-			       "with %u for qos %s",
-			       job_ptr->job_id,
-			       qos_ptr->grp_wall,
-			       wall_mins, qos_ptr->name);
-			rc = false;
-			goto end_it;
-		}
-
-		/* we don't need to check max_cpu_mins_pj here */
-
-		/* we don't need to check max_cpus_pj here */
-
-		/* we don't need to check min_cpus_pj here */
+	slurmdb_init_qos_rec(&qos_rec, 0, INFINITE);
 
-		/* we don't need to check max_cpus_pu here */
-
-		if (qos_ptr->max_jobs_pu != INFINITE) {
-			if (used_limits->jobs >= qos_ptr->max_jobs_pu) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =
-					WAIT_QOS_MAX_JOB_PER_USER;
-				debug2("job %u being held, "
-				       "the job is at or exceeds "
-				       "max jobs per-user limit "
-				       "%u with %u for QOS %s",
-				       job_ptr->job_id,
-				       qos_ptr->max_jobs_pu,
-				       used_limits->jobs, qos_ptr->name);
-				rc = false;
-				goto end_it;
-			}
-		}
+	assoc_mgr_lock(&locks);
 
-		/* we don't need to check max_nodes_pj here */
+	assoc_mgr_set_qos_tres_cnt(&qos_rec);
 
-		/* we don't need to check max_nodes_pu here */
+	_set_qos_order(job_ptr, &qos_ptr_1, &qos_ptr_2);
 
-		/* we don't need to check submit_jobs_pu here */
+	/* check the first QOS setting it's values in the qos_rec */
+	if (qos_ptr_1 &&
+	    !(rc = _qos_job_runnable_pre_select(job_ptr, qos_ptr_1,
+						 &qos_rec)))
+		goto end_it;
 
-		/* if the qos limits have changed since job
-		 * submission and job can not run, then kill it */
-		if ((job_ptr->limit_set_time != ADMIN_SET_LIMIT)
-		    && qos_ptr->max_wall_pj != INFINITE) {
-			time_limit = qos_ptr->max_wall_pj;
-			if ((job_ptr->time_limit != NO_VAL) &&
-			    (job_ptr->time_limit > time_limit)) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =
-					WAIT_QOS_MAX_WALL_PER_JOB;
-				debug2("job %u being held, "
-				       "time limit %u exceeds qos "
-				       "max wall pj %u",
-				       job_ptr->job_id,
-				       job_ptr->time_limit,
-				       time_limit);
-				rc = false;
-				goto end_it;
-			}
-		}
-	}
+	/* If qos_ptr_1 didn't set the value use the 2nd QOS to set
+	   the limit.
+	*/
+	if (qos_ptr_2 &&
+	    !(rc = _qos_job_runnable_pre_select(job_ptr, qos_ptr_2,
+						 &qos_rec)))
+		goto end_it;
 
 	assoc_ptr = job_ptr->assoc_ptr;
 	while (assoc_ptr) {
+		/* This only trips when the grp_used_wall is divisible
+		 * by 60, i.e if a limit is 1 min and you have only
+		 * accumulated 59 seconds you will still be able to
+		 * get another job in as 59/60 = 0 int wise.
+		 */
 		wall_mins = assoc_ptr->usage->grp_used_wall / 60;
 
 #if _DEBUG
@@ -1150,8 +2337,7 @@ extern bool acct_policy_job_runnable_pre_select(struct job_record *job_ptr)
 
 		/* we don't need to check grp_mem here */
 
-		if ((!qos_ptr ||
-		     (qos_ptr && qos_ptr->grp_jobs == INFINITE)) &&
+		if ((qos_rec.grp_jobs == INFINITE) &&
 		    (assoc_ptr->grp_jobs != INFINITE) &&
 		    (assoc_ptr->usage->used_jobs >= assoc_ptr->grp_jobs)) {
 			xfree(job_ptr->state_desc);
@@ -1173,8 +2359,7 @@ extern bool acct_policy_job_runnable_pre_select(struct job_record *job_ptr)
 
 		/* we don't need to check submit_jobs here */
 
-		if ((!qos_ptr ||
-		     (qos_ptr && qos_ptr->grp_wall == INFINITE))
+		if ((qos_rec.grp_wall == INFINITE)
 		    && (assoc_ptr->grp_wall != INFINITE)
 		    && (wall_mins >= assoc_ptr->grp_wall)) {
 			xfree(job_ptr->state_desc);
@@ -1204,8 +2389,7 @@ extern bool acct_policy_job_runnable_pre_select(struct job_record *job_ptr)
 
 		/* we don't need to check max_cpus_pj here */
 
-		if ((!qos_ptr ||
-		     (qos_ptr && qos_ptr->max_jobs_pu == INFINITE)) &&
+		if ((qos_rec.max_jobs_pu == INFINITE) &&
 		    (assoc_ptr->max_jobs != INFINITE) &&
 		    (assoc_ptr->usage->used_jobs >= assoc_ptr->max_jobs)) {
 			xfree(job_ptr->state_desc);
@@ -1220,15 +2404,12 @@ extern bool acct_policy_job_runnable_pre_select(struct job_record *job_ptr)
 			goto end_it;
 		}
 
-		/* we don't need to check max_nodes_pj here */
-
 		/* we don't need to check submit_jobs here */
 
 		/* if the association limits have changed since job
 		 * submission and job can not run, then kill it */
-		if ((job_ptr->limit_set_time != ADMIN_SET_LIMIT)
-		    && (!qos_ptr ||
-			(qos_ptr && qos_ptr->max_wall_pj == INFINITE))
+		if ((job_ptr->limit_set.time != ADMIN_SET_LIMIT)
+		    && (qos_rec.max_wall_pj == INFINITE)
 		    && (assoc_ptr->max_wall_pj != INFINITE)) {
 			time_limit = assoc_ptr->max_wall_pj;
 			if ((job_ptr->time_limit != NO_VAL) &&
@@ -1251,9 +2432,7 @@ extern bool acct_policy_job_runnable_pre_select(struct job_record *job_ptr)
 	}
 end_it:
 	assoc_mgr_unlock(&locks);
-
-	if (free_used_limits)
-		xfree(used_limits);
+	slurmdb_free_qos_rec_members(&qos_rec);
 
 	return rc;
 }
@@ -1263,26 +2442,25 @@ end_it:
  *	selected for the job verify the counts don't exceed aggregated limits.
  */
 extern bool acct_policy_job_runnable_post_select(
-	struct job_record *job_ptr, uint32_t node_cnt,
-	uint32_t cpu_cnt, uint32_t pn_min_memory)
+	struct job_record *job_ptr, uint64_t *tres_req_cnt)
 {
-	slurmdb_qos_rec_t *qos_ptr;
-	slurmdb_association_rec_t *assoc_ptr;
-	slurmdb_used_limits_t *used_limits = NULL;
-	bool free_used_limits = false;
-	uint64_t cpu_time_limit;
-	uint64_t job_cpu_time_limit;
-	uint64_t cpu_run_mins;
+	slurmdb_qos_rec_t *qos_ptr_1, *qos_ptr_2;
+	slurmdb_qos_rec_t qos_rec;
+	slurmdb_assoc_rec_t *assoc_ptr;
+	uint64_t tres_usage_mins[slurmctld_tres_cnt];
+	uint64_t tres_run_mins[slurmctld_tres_cnt];
+	uint64_t job_tres_time_limit[slurmctld_tres_cnt];
 	bool rc = true;
-	uint64_t usage_mins;
-	uint32_t job_memory = 0;
-	bool admin_set_memory_limit = false;
 	bool safe_limits = false;
+	int i, tres_pos;
 	int parent = 0; /* flag to tell us if we are looking at the
 			 * parent or not
 			 */
-	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK,
-				   READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+
+	xassert(job_ptr);
+	xassert(tres_req_cnt);
 
 	/* check to see if we are enforcing associations */
 	if (!accounting_enforce)
@@ -1298,410 +2476,61 @@ extern bool acct_policy_job_runnable_post_select(
 	if (!(accounting_enforce & ACCOUNTING_ENFORCE_LIMITS))
 		return true;
 
-	/* check to see if we should be using safe limits, if so we
-	 * will only start a job if there are sufficient remaining
-	 * cpu-minutes for it to run to completion */
-	if (accounting_enforce & ACCOUNTING_ENFORCE_SAFE)
-		safe_limits = true;
-
-	/* clear old state reason */
-	if (!acct_policy_job_runnable_state(job_ptr)) {
-		xfree(job_ptr->state_desc);
-		job_ptr->state_reason = WAIT_NO_REASON;
-	}
-
-	job_cpu_time_limit = (uint64_t)job_ptr->time_limit * (uint64_t)cpu_cnt;
-
-	if (pn_min_memory) {
-		char *memory_type = NULL;
-
-		admin_set_memory_limit =
-			(job_ptr->limit_set_pn_min_memory == ADMIN_SET_LIMIT)
-			|| (job_ptr->limit_set_min_cpus == ADMIN_SET_LIMIT);
-
-		if (pn_min_memory & MEM_PER_CPU) {
-			memory_type = "MPC";
-			job_memory = (pn_min_memory & (~MEM_PER_CPU)) * cpu_cnt;
-		} else {
-			memory_type = "MPN";
-			job_memory = (pn_min_memory) * node_cnt;
-		}
-		debug3("acct_policy_job_runnable_post_select: job %u: %s: "
-		       "job_memory set to %u",
-		       job_ptr->job_id, memory_type, job_memory);
-	}
-
-	assoc_mgr_lock(&locks);
-	qos_ptr = job_ptr->qos_ptr;
-	if (qos_ptr) {
-		usage_mins = (uint64_t)(qos_ptr->usage->usage_raw / 60.0);
-		cpu_run_mins = qos_ptr->usage->grp_used_cpu_run_secs / 60;
-
-		/*
-		 * Try to get the used limits for the user or initialise a local
-		 * nullified one if not available.
-		 */
-		used_limits = _get_used_limits_for_user(
-			qos_ptr->usage->user_limit_list,
-			job_ptr->user_id);
-		if (!used_limits) {
-			used_limits = xmalloc(sizeof(slurmdb_used_limits_t));
-			used_limits->uid = job_ptr->user_id;
-			free_used_limits = true;
-		}
-
-		/* If the QOS has a GrpCPUMins limit set we may hold the job */
-		if (qos_ptr->grp_cpu_mins != (uint64_t)INFINITE) {
-			if (usage_mins >= qos_ptr->grp_cpu_mins) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_QOS_GRP_CPU_MIN;
-				debug2("Job %u being held, "
-				       "the job is at or exceeds QOS %s's "
-				       "group max cpu minutes of %"PRIu64" "
-				       "with %"PRIu64"",
-				       job_ptr->job_id,
-				       qos_ptr->name,
-				       qos_ptr->grp_cpu_mins,
-				       usage_mins);
-				rc = false;
-				goto end_it;
-			} else if (safe_limits
-				   && ((job_cpu_time_limit + cpu_run_mins) >
-				       (qos_ptr->grp_cpu_mins - usage_mins))) {
-				/*
-				 * If we're using safe limits start
-				 * the job only if there are
-				 * sufficient cpu-mins left such that
-				 * it will run to completion without
-				 * being killed
-				 */
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_QOS_GRP_CPU_MIN;
-				debug2("Job %u being held, "
-				       "the job is at or exceeds QOS %s's "
-				       "group max cpu minutes of %"PRIu64" "
-				       "of which %"PRIu64" are still available "
-				       "but request is for %"PRIu64" "
-				       "(%"PRIu64" already used) cpu "
-				       "minutes (%u cpus)",
-				       job_ptr->job_id,
-				       qos_ptr->name,
-				       qos_ptr->grp_cpu_mins,
-				       qos_ptr->grp_cpu_mins - usage_mins,
-				       job_cpu_time_limit + cpu_run_mins,
-				       cpu_run_mins,
-				       cpu_cnt);
-
-				rc = false;
-				goto end_it;
-			}
-		}
-
-		/* If the JOB's cpu limit wasn't administratively set and the
-		 * QOS has a GrpCPU limit, cancel the job if its minimum
-		 * cpu requirement has exceeded the limit for all CPUs
-		 * usable by the QOS
-		 */
-		if ((job_ptr->limit_set_min_cpus != ADMIN_SET_LIMIT)
-		    && qos_ptr->grp_cpus != INFINITE) {
-			if (cpu_cnt > qos_ptr->grp_cpus) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_QOS_GRP_CPU;
-				debug2("job %u is being held, "
-				       "min cpu request %u exceeds "
-				       "group max cpu limit %u for "
-				       "qos '%s'",
-				       job_ptr->job_id,
-				       cpu_cnt,
-				       qos_ptr->grp_cpus,
-				       qos_ptr->name);
-				rc = false;
-				goto end_it;
-			}
-
-			if ((qos_ptr->usage->grp_used_cpus +
-			     cpu_cnt) > qos_ptr->grp_cpus) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =	WAIT_QOS_GRP_CPU;
-				debug2("job %u being held, "
-				       "the job is at or exceeds "
-				       "group max cpu limit %u "
-				       "with already used %u + requested %u "
-				       "for qos %s",
-				       job_ptr->job_id,
-				       qos_ptr->grp_cpus,
-				       qos_ptr->usage->grp_used_cpus,
-				       cpu_cnt,
-				       qos_ptr->name);
-				rc = false;
-				goto end_it;
-			}
-		}
-
-		if (!admin_set_memory_limit
-		    && (qos_ptr->grp_mem != INFINITE)) {
-			if (job_memory > qos_ptr->grp_mem) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_QOS_GRP_MEMORY;
-				info("job %u is being held, "
-				     "memory request %u exceeds "
-				     "group max memory limit %u for "
-				     "qos '%s'",
-				     job_ptr->job_id,
-				     job_memory,
-				     qos_ptr->grp_mem,
-				     qos_ptr->name);
-				rc = false;
-				goto end_it;
-			}
-
-			if ((qos_ptr->usage->grp_used_mem +
-			     job_memory) > qos_ptr->grp_mem) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =	WAIT_QOS_GRP_MEMORY;
-				debug2("job %u being held, "
-				       "the job is at or exceeds "
-				       "group memory limit %u "
-				       "with already used %u + requested %u "
-				       "for qos %s",
-				       job_ptr->job_id,
-				       qos_ptr->grp_mem,
-				       qos_ptr->usage->grp_used_mem,
-				       job_memory,
-				       qos_ptr->name);
-				rc = false;
-				goto end_it;
-			}
-		}
-
-		/* we don't need to check grp_jobs here */
-
-		if (qos_ptr->grp_cpu_run_mins != INFINITE) {
-			if (cpu_run_mins + job_cpu_time_limit >
-			    qos_ptr->grp_cpu_run_mins) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =
-					WAIT_QOS_GRP_CPU_RUN_MIN;
-				debug2("job %u being held, "
-				       "qos %s is at or exceeds "
-				       "group max running cpu minutes "
-				       "limit %"PRIu64" with already "
-				       "used %"PRIu64" + requested %"PRIu64" "
-				       "for qos '%s'",
-				       job_ptr->job_id, qos_ptr->name,
-				       qos_ptr->grp_cpu_run_mins,
-				       cpu_run_mins,
-				       job_cpu_time_limit,
-				       qos_ptr->name);
-				rc = false;
-				goto end_it;
-			}
-		}
-
-		if ((job_ptr->limit_set_min_nodes != ADMIN_SET_LIMIT)
-		    && qos_ptr->grp_nodes != INFINITE) {
-			if (node_cnt > qos_ptr->grp_nodes) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_QOS_GRP_NODES;
-				debug2("job %u is being held, "
-				       "min node request %u exceeds "
-				       "group max node limit %u for "
-				       "qos '%s'",
-				       job_ptr->job_id,
-				       node_cnt,
-				       qos_ptr->grp_nodes,
-				       qos_ptr->name);
-				rc = false;
-				goto end_it;
-			}
-
-			if ((qos_ptr->usage->grp_used_nodes +
-			     node_cnt) >
-			    qos_ptr->grp_nodes) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =	WAIT_QOS_GRP_NODES;
-				debug2("job %u being held, "
-				       "the job is at or exceeds "
-				       "group max node limit %u "
-				       "with already used %u + requested %u "
-				       "for qos %s",
-				       job_ptr->job_id,
-				       qos_ptr->grp_nodes,
-				       qos_ptr->usage->grp_used_nodes,
-				       node_cnt,
-				       qos_ptr->name);
-				rc = false;
-				goto end_it;
-			}
-		}
-
-		/* we don't need to check submit_jobs here */
-
-		/* we don't need to check grp_wall here */
-
-		if (qos_ptr->max_cpu_mins_pj != INFINITE) {
-			cpu_time_limit = qos_ptr->max_cpu_mins_pj;
-			if ((job_ptr->time_limit != NO_VAL) &&
-			    (job_cpu_time_limit > cpu_time_limit)) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =
-					WAIT_QOS_MAX_CPU_MINS_PER_JOB;
-				debug2("job %u being held, "
-				       "cpu time limit %"PRIu64" exceeds "
-				       "qos max per-job %"PRIu64"",
-				       job_ptr->job_id,
-				       job_cpu_time_limit,
-				       cpu_time_limit);
-				rc = false;
-				goto end_it;
-			}
-		}
-
-		if ((job_ptr->limit_set_min_cpus != ADMIN_SET_LIMIT)
-		    && qos_ptr->max_cpus_pj != INFINITE) {
-			if (cpu_cnt > qos_ptr->max_cpus_pj) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =
-					WAIT_QOS_MAX_CPUS_PER_JOB;
-				debug2("job %u being held, "
-				       "min cpu limit %u exceeds "
-				       "qos per-job max %u",
-				       job_ptr->job_id,
-				       cpu_cnt,
-				       qos_ptr->max_cpus_pj);
-				rc = false;
-				goto end_it;
-			}
-		}
-
-		if ((job_ptr->limit_set_min_cpus != ADMIN_SET_LIMIT)
-		    && qos_ptr->min_cpus_pj != INFINITE) {
-			if (cpu_cnt && cpu_cnt < qos_ptr->min_cpus_pj) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =	WAIT_QOS_MIN_CPUS;
-				debug2("%s job %u being held, "
-				       "min cpu limit %u below "
-				       "qos per-job min %u",
-				       __func__, job_ptr->job_id,
-				       cpu_cnt,
-				       qos_ptr->min_cpus_pj);
-				rc = false;
-				goto end_it;
-			}
-		}
-
-		if ((job_ptr->limit_set_min_cpus != ADMIN_SET_LIMIT) &&
-		    (qos_ptr->max_cpus_pu != INFINITE)) {
-			/* Hold the job if it exceeds the per-user
-			 * CPU limit for the given QOS
-			 */
-			if (cpu_cnt > qos_ptr->max_cpus_pu) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =
-					WAIT_QOS_MAX_CPU_PER_USER;
-				debug2("job %u being held, "
-				       "min cpu limit %u exceeds "
-				       "qos per-user max %u",
-				       job_ptr->job_id,
-				       cpu_cnt,
-				       qos_ptr->max_cpus_pu);
-				rc = false;
-				goto end_it;
-			}
-			/* Hold the job if the user has exceeded
-			 * the QOS per-user CPU limit with their
-			 * current usage */
-			if ((used_limits->cpus + cpu_cnt)
-			    > qos_ptr->max_cpus_pu) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =
-					WAIT_QOS_MAX_CPU_PER_USER;
-				debug2("job %u being held, "
-				       "the user is at or would exceed "
-				       "max cpus per-user limit "
-				       "%u with %u(+%u) for QOS %s",
-				       job_ptr->job_id,
-				       qos_ptr->max_cpus_pu,
-				       used_limits->cpus,
-				       cpu_cnt,
-				       qos_ptr->name);
-				rc = false;
-				goto end_it;
-			}
-		}
-
-		/* We do not need to check max_jobs_pu here */
-
-		if ((job_ptr->limit_set_min_nodes != ADMIN_SET_LIMIT)
-		    && qos_ptr->max_nodes_pj != INFINITE) {
-			if (node_cnt >
-			    qos_ptr->max_nodes_pj) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =
-					WAIT_QOS_MAX_NODE_PER_JOB;
-				debug2("job %u being held, "
-				       "min node limit %u exceeds "
-				       "qos max %u",
-				       job_ptr->job_id,
-				       node_cnt,
-				       qos_ptr->max_nodes_pj);
-				rc = false;
-				goto end_it;
-			}
-		}
-
-		if ((job_ptr->limit_set_min_nodes != ADMIN_SET_LIMIT) &&
-		    (qos_ptr->max_nodes_pu != INFINITE)) {
-			/* Cancel the job if it exceeds the per-user
-			 * node limit for the given QOS
-			 */
-			if (node_cnt > qos_ptr->max_nodes_pu) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =
-					WAIT_QOS_MAX_NODE_PER_USER;
-				debug2("job %u being held, "
-				       "min node per-puser limit %u exceeds "
-				       "qos max %u",
-				       job_ptr->job_id,
-				       node_cnt,
-				       qos_ptr->max_nodes_pu);
-				rc = false;
-				goto end_it;
-			}
-
-			/*
-			* Hold the job if the user has exceeded
-			* the QOS per-user CPU limit with their
-			* current usage
-			*/
-			if ((used_limits->nodes + node_cnt)
-			    > qos_ptr->max_nodes_pu) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =
-					WAIT_QOS_MAX_NODE_PER_USER;
-				debug2("job %u being held, "
-				       "the user is at or would exceed "
-				       "max nodes per-user "
-				       "limit %u with %u(+%u) for QOS %s",
-				       job_ptr->job_id,
-				       qos_ptr->max_nodes_pu,
-				       used_limits->nodes,
-				       node_cnt,
-				       qos_ptr->name);
-				rc = false;
-				goto end_it;
-			}
-		}
+	/* check to see if we should be using safe limits, if so we
+	 * will only start a job if there are sufficient remaining
+	 * cpu-minutes for it to run to completion */
+	if (accounting_enforce & ACCOUNTING_ENFORCE_SAFE)
+		safe_limits = true;
 
-		/* we don't need to check submit_jobs_pu here */
+	/* clear old state reason */
+	if (!acct_policy_job_runnable_state(job_ptr)) {
+		xfree(job_ptr->state_desc);
+		job_ptr->state_reason = WAIT_NO_REASON;
+	}
 
-		/* we don't need to check max_wall_pj here */
+	/* clang needs this memset to avoid a warning */
+	memset(tres_run_mins, 0, sizeof(tres_run_mins));
+	memset(tres_usage_mins, 0, sizeof(tres_usage_mins));
+	memset(job_tres_time_limit, 0, sizeof(job_tres_time_limit));
+	for (i=0; i<slurmctld_tres_cnt; i++) {
+		job_tres_time_limit[i] = (uint64_t)job_ptr->time_limit *
+			tres_req_cnt[i];
 	}
 
+	slurmdb_init_qos_rec(&qos_rec, 0, INFINITE);
+
+	assoc_mgr_lock(&locks);
+
+	assoc_mgr_set_qos_tres_cnt(&qos_rec);
+
+	_set_qos_order(job_ptr, &qos_ptr_1, &qos_ptr_2);
+
+	/* check the first QOS setting it's values in the qos_rec */
+	if (qos_ptr_1 &&
+	    !(rc = _qos_job_runnable_post_select(job_ptr, qos_ptr_1,
+						 &qos_rec, tres_req_cnt,
+						 job_tres_time_limit)))
+		goto end_it;
+
+	/* If qos_ptr_1 didn't set the value use the 2nd QOS to set
+	   the limit.
+	*/
+	if (qos_ptr_2 &&
+	    !(rc = _qos_job_runnable_post_select(job_ptr, qos_ptr_2,
+						 &qos_rec, tres_req_cnt,
+						 job_tres_time_limit)))
+		goto end_it;
+
 	assoc_ptr = job_ptr->assoc_ptr;
 	while (assoc_ptr) {
-		usage_mins = (uint64_t)(assoc_ptr->usage->usage_raw / 60.0);
-		cpu_run_mins = assoc_ptr->usage->grp_used_cpu_run_secs / 60;
+		for (i=0; i<slurmctld_tres_cnt; i++) {
+			tres_usage_mins[i] =
+				(uint64_t)(assoc_ptr->usage->usage_tres_raw[i]
+					   / 60);
+			tres_run_mins[i] =
+				assoc_ptr->usage->grp_used_tres_run_secs[i] /
+				60;
+		}
 
 #if _DEBUG
 		info("acct_job_limits: %u of %u",
@@ -1711,191 +2540,187 @@ extern bool acct_policy_job_runnable_post_select(
 		 * If the association has a GrpCPUMins limit set (and there
 		 * is no QOS with GrpCPUMins set) we may hold the job
 		 */
-		if ((!qos_ptr ||
-		     (qos_ptr && qos_ptr->grp_cpu_mins == (uint64_t)INFINITE))
-		    && (assoc_ptr->grp_cpu_mins != (uint64_t)INFINITE)) {
-			if (usage_mins >= assoc_ptr->grp_cpu_mins) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_ASSOC_GRP_CPU_MIN;
-				debug2("job %u being held, "
-				       "assoc %u is at or exceeds "
-				       "group max cpu minutes limit %"PRIu64" "
-				       "with %Lf for account %s",
-				       job_ptr->job_id, assoc_ptr->id,
-				       assoc_ptr->grp_cpu_mins,
-				       assoc_ptr->usage->usage_raw,
-				       assoc_ptr->acct);
-				rc = false;
-				goto end_it;
-			} else if (safe_limits
-				   && ((job_cpu_time_limit + cpu_run_mins) >
-				       (assoc_ptr->grp_cpu_mins
-					- usage_mins))) {
-				/*
-				 * If we're using safe limits start
-				 * the job only if there are
-				 * sufficient cpu-mins left such that
-				 * it will run to completion without
-				 * being killed
-				 */
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_ASSOC_GRP_CPU_MIN;
-				debug2("job %u being held, "
-				       "assoc %u is at or exceeds "
-				       "group max cpu minutes of %"PRIu64" "
-				       "of which %"PRIu64" are still available "
-				       "but request is for %"PRIu64" cpu "
-				       "minutes (%u cpus)"
-				       "for account %s",
-				       job_ptr->job_id, assoc_ptr->id,
-				       assoc_ptr->grp_cpu_mins,
-				       assoc_ptr->grp_cpu_mins - usage_mins,
-				       job_cpu_time_limit + cpu_run_mins,
-				       cpu_cnt,
-				       assoc_ptr->acct);
-				rc = false;
-				goto end_it;
-			}
-		}
-
-		if ((job_ptr->limit_set_min_cpus != ADMIN_SET_LIMIT)
-		    && (!qos_ptr || (qos_ptr && qos_ptr->grp_cpus == INFINITE))
-		    && (assoc_ptr->grp_cpus != INFINITE)) {
-			if (cpu_cnt > assoc_ptr->grp_cpus) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_ASSOC_GRP_CPU;
-				debug2("job %u being held, "
-				       "min cpu request %u exceeds "
-				       "group max cpu limit %u for "
-				       "account %s",
-				       job_ptr->job_id,
-				       cpu_cnt,
-				       assoc_ptr->grp_cpus,
-				       assoc_ptr->acct);
-				rc = false;
-				goto end_it;
-			}
-
-			if ((assoc_ptr->usage->grp_used_cpus + cpu_cnt) >
-			    assoc_ptr->grp_cpus) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_ASSOC_GRP_CPU;
-				debug2("job %u being held, "
-				       "assoc %u is at or exceeds "
-				       "group max cpu limit %u "
-				       "with already used %u + requested %u "
-				       "for account %s",
-				       job_ptr->job_id, assoc_ptr->id,
-				       assoc_ptr->grp_cpus,
-				       assoc_ptr->usage->grp_used_cpus,
-				       cpu_cnt,
-				       assoc_ptr->acct);
-				rc = false;
-				goto end_it;
-			}
+		i = _validate_tres_usage_limits_for_assoc(
+			&tres_pos, assoc_ptr->grp_tres_mins_ctld,
+			qos_rec.grp_tres_mins_ctld,
+			job_tres_time_limit, tres_run_mins,
+			tres_usage_mins, job_ptr->limit_set.tres,
+			safe_limits);
+		switch (i) {
+		case 1:
+			xfree(job_ptr->state_desc);
+			job_ptr->state_reason = get_tres_state_reason(
+				tres_pos, WAIT_ASSOC_GRP_UNK_MIN);
+			debug2("Job %u being held, "
+			       "assoc %u(%s/%s/%s) group max tres(%s) "
+			       "minutes limit of %"PRIu64" is already at or "
+			       "exceeded with %"PRIu64,
+			       job_ptr->job_id,
+			       assoc_ptr->id, assoc_ptr->acct,
+			       assoc_ptr->user, assoc_ptr->partition,
+			       assoc_mgr_tres_name_array[tres_pos],
+			       assoc_ptr->grp_tres_mins_ctld[tres_pos],
+			       tres_usage_mins[tres_pos]);
+			rc = false;
+			goto end_it;
+			break;
+		case 2:
+			xfree(job_ptr->state_desc);
+			job_ptr->state_reason = get_tres_state_reason(
+				tres_pos, WAIT_ASSOC_GRP_UNK_MIN);
+			debug2("Job %u being held, "
+			       "the job is requesting more than allowed "
+			       "with assoc %u(%s/%s/%s) "
+			       "group max tres(%s) minutes of %"PRIu64" "
+			       "with %"PRIu64,
+			       job_ptr->job_id,
+			       assoc_ptr->id, assoc_ptr->acct,
+			       assoc_ptr->user, assoc_ptr->partition,
+			       assoc_mgr_tres_name_array[tres_pos],
+			       assoc_ptr->grp_tres_mins_ctld[tres_pos],
+			       job_tres_time_limit[tres_pos]);
+			rc = false;
+			goto end_it;
+			break;
+		case 3:
+			/*
+			 * If we're using safe limits start
+			 * the job only if there are
+			 * sufficient cpu-mins left such that
+			 * it will run to completion without
+			 * being killed
+			 */
+			xfree(job_ptr->state_desc);
+			job_ptr->state_reason = get_tres_state_reason(
+				tres_pos, WAIT_ASSOC_GRP_UNK_MIN);
+			debug2("Job %u being held, "
+			       "the job is at or exceeds assoc %u(%s/%s/%s) "
+			       "group max tres(%s) minutes of %"PRIu64" "
+			       "of which %"PRIu64" are still available "
+			       "but request is for %"PRIu64" "
+			       "(%"PRIu64" already used) tres "
+			       "minutes (%"PRIu64" tres count)",
+			       job_ptr->job_id,
+			       assoc_ptr->id, assoc_ptr->acct,
+			       assoc_ptr->user, assoc_ptr->partition,
+			       assoc_mgr_tres_name_array[tres_pos],
+			       assoc_ptr->grp_tres_mins_ctld[tres_pos],
+			       assoc_ptr->grp_tres_mins_ctld[tres_pos] -
+			       tres_usage_mins[tres_pos],
+			       job_tres_time_limit[tres_pos] +
+			       tres_run_mins[tres_pos],
+			       tres_run_mins[tres_pos],
+			       tres_req_cnt[tres_pos]);
+			rc = false;
+			goto end_it;
+			break;
+		default:
+			/* all good */
+			break;
 		}
 
-		if (!admin_set_memory_limit
-		    && (!qos_ptr || (qos_ptr && qos_ptr->grp_mem == INFINITE))
-		    && (assoc_ptr->grp_mem != INFINITE)) {
-			if (job_memory > assoc_ptr->grp_mem) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_ASSOC_GRP_MEMORY;
-				info("job %u being held, "
-				     "memory request %u exceeds "
-				     "group memory limit %u for "
-				     "account %s",
-				     job_ptr->job_id,
-				     job_memory,
-				     assoc_ptr->grp_mem,
-				     assoc_ptr->acct);
-				rc = false;
-				goto end_it;
-			}
 
-			if ((assoc_ptr->usage->grp_used_mem + job_memory) >
-			    assoc_ptr->grp_mem) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_ASSOC_GRP_MEMORY;
-				debug2("job %u being held, "
-				       "assoc %u is at or exceeds "
-				       "group memory limit %u "
-				       "with already used %u + requested %u "
-				       "for account %s",
-				       job_ptr->job_id, assoc_ptr->id,
-				       assoc_ptr->grp_mem,
-				       assoc_ptr->usage->grp_used_mem,
-				       job_memory,
-				       assoc_ptr->acct);
-				rc = false;
-				goto end_it;
-			}
+		i = _validate_tres_usage_limits_for_assoc(
+			&tres_pos,
+			assoc_ptr->grp_tres_ctld, qos_rec.grp_tres_ctld,
+			tres_req_cnt, assoc_ptr->usage->grp_used_tres,
+			NULL, job_ptr->limit_set.tres, 1);
+		switch (i) {
+		case 1:
+			/* not possible because the curr_usage sent in is NULL*/
+			break;
+		case 2:
+			xfree(job_ptr->state_desc);
+			job_ptr->state_reason = get_tres_state_reason(
+				tres_pos, WAIT_ASSOC_GRP_UNK);
+			debug2("job %u is being held, "
+			       "assoc %u(%s/%s/%s) min tres(%s) "
+			       "request %"PRIu64" exceeds "
+			       "group max tres limit %"PRIu64,
+			       job_ptr->job_id,
+			       assoc_ptr->id, assoc_ptr->acct,
+			       assoc_ptr->user, assoc_ptr->partition,
+			       assoc_mgr_tres_name_array[tres_pos],
+			       tres_req_cnt[tres_pos],
+			       assoc_ptr->grp_tres_ctld[tres_pos]);
+			rc = false;
+			goto end_it;
+			break;
+		case 3:
+			xfree(job_ptr->state_desc);
+			job_ptr->state_reason = get_tres_state_reason(
+				tres_pos, WAIT_ASSOC_GRP_UNK);
+			debug2("job %u being held, "
+			       "if allowed the job request will exceed "
+			       "assoc %u(%s/%s/%s) group max "
+			       "tres(%s) limit "
+			       "%"PRIu64" with already used %"PRIu64" + "
+			       "requested %"PRIu64,
+			       job_ptr->job_id,
+			       assoc_ptr->id, assoc_ptr->acct,
+			       assoc_ptr->user, assoc_ptr->partition,
+			       assoc_mgr_tres_name_array[tres_pos],
+			       assoc_ptr->grp_tres_ctld[tres_pos],
+			       assoc_ptr->usage->grp_used_tres[tres_pos],
+			       tres_req_cnt[tres_pos]);
+			rc = false;
+			goto end_it;
+		default:
+			/* all good */
+			break;
 		}
 
 		/* we don't need to check grp_jobs here */
 
-		if ((!qos_ptr ||
-		     (qos_ptr && qos_ptr->grp_cpu_run_mins == INFINITE))
-		    && (assoc_ptr->grp_cpu_run_mins != INFINITE)) {
-			if (cpu_run_mins + job_cpu_time_limit >
-			    assoc_ptr->grp_cpu_run_mins) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =
-					WAIT_ASSOC_GRP_CPU_RUN_MIN;
-				debug2("job %u being held, "
-				       "assoc %u is at or exceeds "
-				       "group max running cpu minutes "
-				       "limit %"PRIu64" with already "
-				       "used %"PRIu64" + requested %"PRIu64" "
-				       "for account %s",
-				       job_ptr->job_id, assoc_ptr->id,
-				       assoc_ptr->grp_cpu_run_mins,
-				       cpu_run_mins,
-				       job_cpu_time_limit,
-				       assoc_ptr->acct);
-				rc = false;
-				goto end_it;
-			}
-		}
-
-		if ((job_ptr->limit_set_min_nodes != ADMIN_SET_LIMIT)
-		    && (!qos_ptr ||
-			(qos_ptr && qos_ptr->grp_nodes == INFINITE))
-		    && (assoc_ptr->grp_nodes != INFINITE)) {
-			if (node_cnt >
-			    assoc_ptr->grp_nodes) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_ASSOC_GRP_NODES;
-				debug2("job %u being held, "
-				       "min node request %u exceeds "
-				       "group max node limit %u for "
-				       "account %s",
-				       job_ptr->job_id,
-				       node_cnt,
-				       assoc_ptr->grp_nodes,
-				       assoc_ptr->acct);
-				rc = false;
-				goto end_it;
-			}
-
-			if ((assoc_ptr->usage->grp_used_nodes +
-			     node_cnt) >
-			    assoc_ptr->grp_nodes) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason = WAIT_ASSOC_GRP_NODES;
-				debug2("job %u being held, "
-				       "assoc %u is at or exceeds "
-				       "group max node limit %u "
-				       "with already used %u + requested %u "
-				       "for account %s",
-				       job_ptr->job_id, assoc_ptr->id,
-				       assoc_ptr->grp_nodes,
-				       assoc_ptr->usage->grp_used_nodes,
-				       node_cnt,
-				       assoc_ptr->acct);
-				rc = false;
-				goto end_it;
-			}
+		i = _validate_tres_usage_limits_for_assoc(
+			&tres_pos,
+			assoc_ptr->grp_tres_run_mins_ctld,
+			qos_rec.grp_tres_run_mins_ctld,
+			job_tres_time_limit, tres_run_mins, NULL, NULL, 1);
+		switch (i) {
+		case 1:
+			/* not possible because the curr_usage sent in is NULL*/
+			break;
+		case 2:
+			xfree(job_ptr->state_desc);
+			job_ptr->state_reason = get_tres_state_reason(
+				tres_pos, WAIT_ASSOC_GRP_UNK_RUN_MIN);
+			debug2("job %u is being held, "
+			       "assoc %u(%s/%s/%s) group max running "
+			       "tres(%s) minutes limit %"PRIu64
+			       " is already full with %"PRIu64,
+			       job_ptr->job_id,
+			       assoc_ptr->id, assoc_ptr->acct,
+			       assoc_ptr->user, assoc_ptr->partition,
+			       assoc_mgr_tres_name_array[tres_pos],
+			       assoc_ptr->grp_tres_run_mins_ctld[tres_pos],
+			       tres_run_mins[tres_pos]);
+			rc = false;
+			goto end_it;
+			break;
+		case 3:
+			xfree(job_ptr->state_desc);
+			job_ptr->state_reason = get_tres_state_reason(
+				tres_pos, WAIT_ASSOC_GRP_UNK_RUN_MIN);
+			debug2("job %u being held, "
+			       "if allowed the job request will exceed "
+			       "assoc %u(%s/%s/%s) group max running "
+			       "tres(%s) minutes limit %"PRIu64
+			       " with already used %"PRIu64
+			       " + requested %"PRIu64,
+			       job_ptr->job_id,
+			       assoc_ptr->id, assoc_ptr->acct,
+			       assoc_ptr->user, assoc_ptr->partition,
+			       assoc_mgr_tres_name_array[tres_pos],
+			       assoc_ptr->grp_tres_run_mins_ctld[tres_pos],
+			       tres_run_mins[tres_pos],
+			       job_tres_time_limit[tres_pos]);
+			rc = false;
+			goto end_it;
+			break;
+		default:
+			/* all good */
+			break;
 		}
 
 		/* we don't need to check submit_jobs here */
@@ -1912,65 +2737,78 @@ extern bool acct_policy_job_runnable_post_select(
 			continue;
 		}
 
-		if ((!qos_ptr ||
-		     (qos_ptr && qos_ptr->max_cpu_mins_pj == INFINITE)) &&
-		    (assoc_ptr->max_cpu_mins_pj != INFINITE)) {
-			cpu_time_limit = assoc_ptr->max_cpu_mins_pj;
-			if ((job_ptr->time_limit != NO_VAL) &&
-			    (job_cpu_time_limit > cpu_time_limit)) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =
-					WAIT_ASSOC_MAX_CPU_MINS_PER_JOB;
-				debug2("job %u being held, "
-				       "cpu time limit %"PRIu64" exceeds "
-				       "assoc max per job %"PRIu64"",
-				       job_ptr->job_id,
-				       job_cpu_time_limit,
-				       cpu_time_limit);
-				rc = false;
-				goto end_it;
-			}
+		if (!_validate_tres_limits_for_assoc(
+			    &tres_pos, job_tres_time_limit, 0,
+			    assoc_ptr->max_tres_mins_ctld,
+			    qos_rec.max_tres_mins_pj_ctld,
+			    job_ptr->limit_set.tres,
+			    1, 0, 1)) {
+			xfree(job_ptr->state_desc);
+			job_ptr->state_reason = get_tres_state_reason(
+				tres_pos, WAIT_ASSOC_MAX_UNK_MINS_PER_JOB);
+			debug2("Job %u being held, "
+			       "the job is requesting more than allowed "
+			       "with assoc %u(%s/%s/%s) max tres(%s) "
+			       "minutes of %"PRIu64" with %"PRIu64,
+			       job_ptr->job_id,
+			       assoc_ptr->id, assoc_ptr->acct,
+			       assoc_ptr->user, assoc_ptr->partition,
+			       assoc_mgr_tres_name_array[tres_pos],
+			       assoc_ptr->max_tres_mins_ctld[tres_pos],
+			       job_tres_time_limit[tres_pos]);
+			rc = false;
+			goto end_it;
 		}
 
-		if ((!qos_ptr ||
-		     (qos_ptr && qos_ptr->max_cpus_pj == INFINITE)) &&
-		    (assoc_ptr->max_cpus_pj != INFINITE)) {
-			if (cpu_cnt >
-			    assoc_ptr->max_cpus_pj) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =
-					WAIT_ASSOC_MAX_CPUS_PER_JOB;
-				debug2("job %u being held, "
-				       "min cpu limit %u exceeds "
-				       "account max %u",
-				       job_ptr->job_id,
-				       cpu_cnt,
-				       assoc_ptr->max_cpus_pj);
-				rc = false;
-				goto end_it;
-			}
+		if (!_validate_tres_limits_for_assoc(
+			    &tres_pos, tres_req_cnt, 0,
+			    assoc_ptr->max_tres_ctld,
+			    qos_rec.max_tres_pj_ctld,
+			    job_ptr->limit_set.tres,
+			    1, 0, 1)) {
+			xfree(job_ptr->state_desc);
+			job_ptr->state_reason = get_tres_state_reason(
+				tres_pos, WAIT_ASSOC_MAX_UNK_PER_JOB);
+			debug2("job %u is being held, "
+			       "the job is requesting more than allowed "
+			       "with assoc %u(%s/%s/%s) max tres(%s) "
+			       "limit of %"PRIu64" with %"PRIu64,
+			       job_ptr->job_id,
+			       assoc_ptr->id, assoc_ptr->acct,
+			       assoc_ptr->user, assoc_ptr->partition,
+			       assoc_mgr_tres_name_array[tres_pos],
+			       assoc_ptr->max_tres_ctld[tres_pos],
+			       tres_req_cnt[tres_pos]);
+			rc = false;
+			break;
 		}
 
-		/* we do not need to check max_jobs here */
-
-		if ((!qos_ptr ||
-		     (qos_ptr && qos_ptr->max_nodes_pj == INFINITE))
-		    && (assoc_ptr->max_nodes_pj != INFINITE)) {
-			if (node_cnt > assoc_ptr->max_nodes_pj) {
-				xfree(job_ptr->state_desc);
-				job_ptr->state_reason =
-					WAIT_ASSOC_MAX_NODE_PER_JOB;
-				debug2("job %u being held, "
-				       "min node limit %u exceeds "
-				       "account max %u",
-				       job_ptr->job_id,
-				       node_cnt,
-				       assoc_ptr->max_nodes_pj);
-				rc = false;
-				goto end_it;
-			}
+		if (!_validate_tres_limits_for_assoc(
+			    &tres_pos, tres_req_cnt,
+			    tres_req_cnt[TRES_ARRAY_NODE],
+			    assoc_ptr->max_tres_pn_ctld,
+			    qos_rec.max_tres_pn_ctld,
+			    job_ptr->limit_set.tres,
+			    1, 0, 1)) {
+			xfree(job_ptr->state_desc);
+			job_ptr->state_reason = get_tres_state_reason(
+				tres_pos, WAIT_ASSOC_MAX_UNK_PER_NODE);
+			debug2("job %u is being held, "
+			       "the job is requesting more than allowed "
+			       "with assoc %u(%s/%s/%s) max tres(%s) "
+			       "per node limit of %"PRIu64" with %"PRIu64,
+			       job_ptr->job_id,
+			       assoc_ptr->id, assoc_ptr->acct,
+			       assoc_ptr->user, assoc_ptr->partition,
+			       assoc_mgr_tres_name_array[tres_pos],
+			       assoc_ptr->max_tres_pn_ctld[tres_pos],
+			       tres_req_cnt[tres_pos]);
+			rc = false;
+			break;
 		}
 
+		/* we do not need to check max_jobs here */
+
 		/* we don't need to check submit_jobs here */
 
 		/* we don't need to check max_wall_pj here */
@@ -1980,9 +2818,7 @@ extern bool acct_policy_job_runnable_post_select(
 	}
 end_it:
 	assoc_mgr_unlock(&locks);
-
-	if (free_used_limits)
-		xfree(used_limits);
+	slurmdb_free_qos_rec_members(&qos_rec);
 
 	return rc;
 }
@@ -1990,11 +2826,12 @@ end_it:
 extern uint32_t acct_policy_get_max_nodes(struct job_record *job_ptr,
 					  uint32_t *wait_reason)
 {
-	uint32_t max_nodes_limit = INFINITE, qos_max_p_limit = INFINITE;
-	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK,
-				   READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
-	slurmdb_qos_rec_t *qos_ptr = job_ptr->qos_ptr;
-	slurmdb_association_rec_t *assoc_ptr = job_ptr->assoc_ptr;
+	uint64_t max_nodes_limit = INFINITE64, qos_max_p_limit = INFINITE64,
+		grp_nodes = INFINITE64;
+	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
+	slurmdb_qos_rec_t *qos_ptr_1, *qos_ptr_2;
+	slurmdb_assoc_rec_t *assoc_ptr = job_ptr->assoc_ptr;
 	bool parent = 0; /* flag to tell us if we are looking at the
 			  * parent or not
 			  */
@@ -2007,20 +2844,42 @@ extern uint32_t acct_policy_get_max_nodes(struct job_record *job_ptr,
 	xassert(wait_reason);
 
 	assoc_mgr_lock(&locks);
-	if (qos_ptr) {
-		if (qos_ptr->max_nodes_pj < qos_ptr->max_nodes_pu) {
-			max_nodes_limit = qos_ptr->max_nodes_pj;
+
+	_set_qos_order(job_ptr, &qos_ptr_1, &qos_ptr_2);
+
+	if (qos_ptr_1) {
+		uint64_t max_nodes_pj =
+			qos_ptr_1->max_tres_pj_ctld[TRES_ARRAY_NODE];
+		uint64_t max_nodes_pu =
+			qos_ptr_1->max_tres_pu_ctld[TRES_ARRAY_NODE];
+
+		grp_nodes = qos_ptr_1->grp_tres_ctld[TRES_ARRAY_NODE];
+
+		if (qos_ptr_2) {
+			if (max_nodes_pj == INFINITE64)
+				max_nodes_pj = qos_ptr_2->max_tres_pj_ctld[
+					TRES_ARRAY_NODE];
+			if (max_nodes_pu == INFINITE64)
+				max_nodes_pu = qos_ptr_2->max_tres_pu_ctld[
+					TRES_ARRAY_NODE];
+			if (grp_nodes == INFINITE64)
+				grp_nodes = qos_ptr_2->grp_tres_ctld[
+					TRES_ARRAY_NODE];
+		}
+
+		if (max_nodes_pj < max_nodes_pu) {
+			max_nodes_limit = max_nodes_pj;
 			*wait_reason = WAIT_QOS_MAX_NODE_PER_JOB;
-		} else if (qos_ptr->max_nodes_pu != INFINITE) {
-			max_nodes_limit = qos_ptr->max_nodes_pu;
+		} else if (max_nodes_pu != INFINITE64) {
+			max_nodes_limit = max_nodes_pu;
 			*wait_reason = WAIT_QOS_MAX_NODE_PER_USER;
 		}
 
 		qos_max_p_limit = max_nodes_limit;
 
-		if (qos_ptr->grp_nodes < max_nodes_limit) {
-			max_nodes_limit = qos_ptr->grp_nodes;
-			*wait_reason = WAIT_QOS_GRP_NODES;
+		if (grp_nodes < max_nodes_limit) {
+			max_nodes_limit = grp_nodes;
+			*wait_reason = WAIT_QOS_GRP_NODE;
 		}
 	}
 
@@ -2028,19 +2887,23 @@ extern uint32_t acct_policy_get_max_nodes(struct job_record *job_ptr,
 	   not override a particular limit.
 	*/
 	while (assoc_ptr) {
-		if ((!qos_ptr || (qos_ptr->grp_nodes == INFINITE))
-		    && (assoc_ptr->grp_nodes != INFINITE)
-		    && (assoc_ptr->grp_nodes < max_nodes_limit)) {
-			max_nodes_limit = assoc_ptr->grp_nodes;
-			*wait_reason = WAIT_ASSOC_GRP_NODES;
+		if ((!qos_ptr_1 || (grp_nodes == INFINITE64))
+		    && (assoc_ptr->grp_tres_ctld[TRES_ARRAY_NODE] != INFINITE64)
+		    && (assoc_ptr->grp_tres_ctld[TRES_ARRAY_NODE] <
+			max_nodes_limit)) {
+			max_nodes_limit =
+				assoc_ptr->grp_tres_ctld[TRES_ARRAY_NODE];
+			*wait_reason = WAIT_ASSOC_GRP_NODE;
 			grp_set = 1;
 		}
 
 		if (!parent
-		    && (qos_max_p_limit == INFINITE)
-		    && (assoc_ptr->max_nodes_pj != INFINITE)
-		    && (assoc_ptr->max_nodes_pj < max_nodes_limit)) {
-			max_nodes_limit = assoc_ptr->max_nodes_pj;
+		    && (qos_max_p_limit == INFINITE64)
+		    && (assoc_ptr->max_tres_ctld[TRES_ARRAY_NODE] != INFINITE64)
+		    && (assoc_ptr->max_tres_ctld[TRES_ARRAY_NODE] <
+			max_nodes_limit)) {
+			max_nodes_limit =
+				assoc_ptr->max_tres_ctld[TRES_ARRAY_NODE];
 			*wait_reason = WAIT_ASSOC_MAX_NODE_PER_JOB;
 		}
 
@@ -2070,6 +2933,7 @@ extern int acct_policy_update_pending_job(struct job_record *job_ptr)
 	bool update_accounting = false;
 	struct job_details *details_ptr;
 	int rc = SLURM_SUCCESS;
+	uint64_t tres_req_cnt[slurmctld_tres_cnt];
 
 	/* check to see if we are enforcing associations and the job
 	 * is pending or if we are even enforcing limits. */
@@ -2089,41 +2953,20 @@ extern int acct_policy_update_pending_job(struct job_record *job_ptr)
 	 */
 	slurm_init_job_desc_msg(&job_desc);
 
-	memset(&acct_policy_limit_set, 0, sizeof(acct_policy_limit_set_t));
-
-	job_desc.min_cpus = details_ptr->min_cpus;
-	/* Only set this value if not set from a limit */
-	if (job_ptr->limit_set_max_cpus == ADMIN_SET_LIMIT)
-		acct_policy_limit_set.max_cpus = job_ptr->limit_set_max_cpus;
-	else if ((details_ptr->max_cpus != NO_VAL)
-		 && !job_ptr->limit_set_max_cpus)
-		job_desc.max_cpus = details_ptr->max_cpus;
-
-	job_desc.min_nodes = details_ptr->min_nodes;
-	/* Only set this value if not set from a limit */
-	if (job_ptr->limit_set_max_nodes == ADMIN_SET_LIMIT)
-		acct_policy_limit_set.max_nodes = job_ptr->limit_set_max_nodes;
-	else if ((details_ptr->max_nodes != NO_VAL)
-		 && !job_ptr->limit_set_max_nodes)
-		job_desc.max_nodes = details_ptr->max_nodes;
-	else
-		job_desc.max_nodes = 0;
-
-	job_desc.pn_min_memory = details_ptr->pn_min_memory;
-	/* Only set this value if not set from a limit */
-	if (job_ptr->limit_set_pn_min_memory == ADMIN_SET_LIMIT)
-		acct_policy_limit_set.pn_min_memory =
-			job_ptr->limit_set_pn_min_memory;
-	else if ((details_ptr->pn_min_memory != NO_VAL)
-		 && !job_ptr->limit_set_pn_min_memory)
-		job_desc.pn_min_memory = details_ptr->pn_min_memory;
-	else
-		job_desc.pn_min_memory = 0;
+	/* copy the limits set from the job the only one that
+	 * acct_policy_validate changes is the time limit so we
+	 * should be ok with the memcpy here */
+	memcpy(&acct_policy_limit_set, &job_ptr->limit_set,
+	       sizeof(acct_policy_limit_set_t));
+	job_desc.tres_req_cnt = tres_req_cnt;
+	/* copy all the tres requests over */
+	memcpy(job_desc.tres_req_cnt, job_ptr->tres_req_cnt,
+	       sizeof(uint64_t) * slurmctld_tres_cnt);
 
 	/* Only set this value if not set from a limit */
-	if (job_ptr->limit_set_time == ADMIN_SET_LIMIT)
-		acct_policy_limit_set.time = job_ptr->limit_set_time;
-	else if ((job_ptr->time_limit != NO_VAL) && !job_ptr->limit_set_time)
+	if (job_ptr->limit_set.time == ADMIN_SET_LIMIT)
+		acct_policy_limit_set.time = job_ptr->limit_set.time;
+	else if ((job_ptr->time_limit != NO_VAL) && !job_ptr->limit_set.time)
 		job_desc.time_limit = job_ptr->time_limit;
 
 	if (!acct_policy_validate(&job_desc, job_ptr->part_ptr,
@@ -2136,53 +2979,30 @@ extern int acct_policy_update_pending_job(struct job_record *job_ptr)
 		return SLURM_ERROR;
 	}
 
-	/* If it isn't an admin set limit replace it. */
-	if (!acct_policy_limit_set.max_cpus
-	    && (job_ptr->limit_set_max_cpus == 1)) {
-		details_ptr->max_cpus = NO_VAL;
-		job_ptr->limit_set_max_cpus = 0;
-		update_accounting = true;
-	} else if (acct_policy_limit_set.max_cpus != ADMIN_SET_LIMIT) {
-		if (details_ptr->max_cpus != job_desc.max_cpus) {
-			details_ptr->max_cpus = job_desc.max_cpus;
-			update_accounting = true;
-		}
-		job_ptr->limit_set_max_cpus = acct_policy_limit_set.max_cpus;
-	}
-
-	if (!acct_policy_limit_set.max_nodes
-	    && (job_ptr->limit_set_max_nodes == 1)) {
-		details_ptr->max_nodes = 0;
-		job_ptr->limit_set_max_nodes = 0;
-		update_accounting = true;
-	} else if (acct_policy_limit_set.max_nodes != ADMIN_SET_LIMIT) {
-		if (details_ptr->max_nodes != job_desc.max_nodes) {
-			details_ptr->max_nodes = job_desc.max_nodes;
-			update_accounting = true;
-		}
-		job_ptr->limit_set_max_nodes = acct_policy_limit_set.max_nodes;
-	}
+	/* The only variable in acct_policy_limit_set that is changed
+	 * in acct_policy_validate is the time limit so only worry
+	 * about that one.
+	 */
 
-	if (!acct_policy_limit_set.time && (job_ptr->limit_set_time == 1)) {
+	/* If it isn't an admin set limit replace it. */
+	if (!acct_policy_limit_set.time && (job_ptr->limit_set.time == 1)) {
 		job_ptr->time_limit = NO_VAL;
-		job_ptr->limit_set_time = 0;
+		job_ptr->limit_set.time = 0;
 		update_accounting = true;
 	} else if (acct_policy_limit_set.time != ADMIN_SET_LIMIT) {
 		if (job_ptr->time_limit != job_desc.time_limit) {
 			job_ptr->time_limit = job_desc.time_limit;
 			update_accounting = true;
 		}
-		job_ptr->limit_set_time = acct_policy_limit_set.time;
+		job_ptr->limit_set.time = acct_policy_limit_set.time;
 	}
 
 	if (update_accounting) {
 		last_job_update = time(NULL);
 		debug("limits changed for job %u: updating accounting",
 		      job_ptr->job_id);
-		if (details_ptr->begin_time) {
-			/* Update job record in accounting to reflect changes */
-			jobacct_storage_g_job_start(acct_db_conn, job_ptr);
-		}
+		/* Update job record in accounting to reflect changes */
+		jobacct_storage_job_start_direct(acct_db_conn, job_ptr);
 	}
 
 	return rc;
@@ -2194,14 +3014,17 @@ extern int acct_policy_update_pending_job(struct job_record *job_ptr)
  */
 extern bool acct_policy_job_time_out(struct job_record *job_ptr)
 {
-	uint64_t job_cpu_usage_mins = 0;
-	uint64_t usage_mins;
+	uint64_t job_tres_usage_mins[slurmctld_tres_cnt];
+	uint64_t time_delta;
+	uint64_t tres_usage_mins[slurmctld_tres_cnt];
 	uint32_t wall_mins;
-	slurmdb_qos_rec_t *qos = NULL;
-	slurmdb_association_rec_t *assoc = NULL;
-	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK,
-				   READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	slurmdb_qos_rec_t *qos_ptr_1, *qos_ptr_2;
+	slurmdb_qos_rec_t qos_rec;
+	slurmdb_assoc_rec_t *assoc = NULL;
+	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
 	time_t now;
+	int i, tres_pos;
 
 	/* Now see if we are enforcing limits.  If Safe is set then
 	 * return false as well since we are being safe if the limit
@@ -2211,95 +3034,82 @@ extern bool acct_policy_job_time_out(struct job_record *job_ptr)
 	    || (accounting_enforce & ACCOUNTING_ENFORCE_SAFE))
 		return false;
 
+	slurmdb_init_qos_rec(&qos_rec, 0, INFINITE);
 	assoc_mgr_lock(&locks);
 
-	qos = (slurmdb_qos_rec_t *)job_ptr->qos_ptr;
-	assoc =	(slurmdb_association_rec_t *)job_ptr->assoc_ptr;
+	assoc_mgr_set_qos_tres_cnt(&qos_rec);
 
-	now = time(NULL);
+	_set_qos_order(job_ptr, &qos_ptr_1, &qos_ptr_2);
 
-	/* find out how many cpu minutes this job has been
-	 * running for. */
-	job_cpu_usage_mins = (uint64_t)
-		((((now - job_ptr->start_time)
-		   - job_ptr->tot_sus_time) / 60)
-		 * job_ptr->total_cpus);
+	assoc =	(slurmdb_assoc_rec_t *)job_ptr->assoc_ptr;
 
-	/* The idea here is for qos to trump what an association
-	 * has set for a limit, so if an association set of
-	 * wall 10 mins and the qos has 20 mins set and the
-	 * job has been running for 11 minutes it continues
-	 * until 20.
-	 */
-	if (qos) {
-		usage_mins = (uint64_t)(qos->usage->usage_raw / 60.0);
-		wall_mins = qos->usage->grp_used_wall / 60;
+	now = time(NULL);
 
-		if ((qos->grp_cpu_mins != (uint64_t)INFINITE)
-		    && (usage_mins >= qos->grp_cpu_mins)) {
-			last_job_update = now;
-			info("Job %u timed out, "
-			     "the job is at or exceeds QOS %s's "
-			     "group max cpu minutes of %"PRIu64" "
-			     "with %"PRIu64"",
-			     job_ptr->job_id,
-			     qos->name,
-			     qos->grp_cpu_mins,
-			     usage_mins);
-			job_ptr->state_reason = FAIL_TIMEOUT;
-			goto job_failed;
-		}
+	time_delta = (uint64_t)(((now - job_ptr->start_time) -
+				 job_ptr->tot_sus_time) / 60);
 
-		if ((qos->grp_wall != INFINITE)
-		    && (wall_mins >= qos->grp_wall)) {
-			last_job_update = now;
-			info("Job %u timed out, "
-			     "the job is at or exceeds QOS %s's "
-			     "group wall limit of %u with %u",
-			     job_ptr->job_id,
-			     qos->name, qos->grp_wall,
-			     wall_mins);
-			job_ptr->state_reason = FAIL_TIMEOUT;
-			goto job_failed;
-		}
+	/* clang needs this memset to avoid a warning */
+	memset(job_tres_usage_mins, 0, sizeof(tres_usage_mins));
+	memset(tres_usage_mins, 0, sizeof(tres_usage_mins));
 
-		if ((qos->max_cpu_mins_pj != (uint64_t)INFINITE)
-		    && (job_cpu_usage_mins >= qos->max_cpu_mins_pj)) {
-			last_job_update = now;
-			info("Job %u timed out, "
-			     "the job is at or exceeds QOS %s's "
-			     "max cpu minutes of %"PRIu64" "
-			     "with %"PRIu64"",
-			     job_ptr->job_id,
-			     qos->name,
-			     qos->max_cpu_mins_pj,
-			     job_cpu_usage_mins);
-			job_ptr->state_reason = FAIL_TIMEOUT;
-			goto job_failed;
-		}
-	}
+	/* find out how many cpu minutes this job has been
+	 * running for. We add 1 here to make it so we can check for
+	 * just > instead of >= in our checks */
+	for (i=0; i<slurmctld_tres_cnt; i++)
+		if (job_ptr->tres_alloc_cnt[i])
+			job_tres_usage_mins[i] =
+				(time_delta * job_ptr->tres_alloc_cnt[i]) + 1;
+
+	/* check the first QOS setting it's values in the qos_rec */
+	if (qos_ptr_1 && !_qos_job_time_out(job_ptr, qos_ptr_1,
+					    &qos_rec, job_tres_usage_mins))
+		goto job_failed;
+
+	/* If qos_ptr_1 didn't set the value use the 2nd QOS to set
+	   the limit.
+	*/
+	if (qos_ptr_2 && !_qos_job_time_out(job_ptr, qos_ptr_2,
+					    &qos_rec, job_tres_usage_mins))
+		goto job_failed;
 
 	/* handle any association stuff here */
 	while (assoc) {
-		usage_mins = (uint64_t)(assoc->usage->usage_raw / 60.0);
+		for (i=0; i<slurmctld_tres_cnt; i++)
+			tres_usage_mins[i] =
+				(uint64_t)(assoc->usage->usage_tres_raw[i]
+					   / 60.0);
 		wall_mins = assoc->usage->grp_used_wall / 60;
 
-		if ((qos && (qos->grp_cpu_mins == INFINITE))
-		    && (assoc->grp_cpu_mins != (uint64_t)INFINITE)
-		    && (usage_mins >= assoc->grp_cpu_mins)) {
+		i = _validate_tres_usage_limits_for_assoc(
+			&tres_pos, assoc->grp_tres_mins_ctld,
+			qos_rec.grp_tres_mins_ctld, NULL,
+			NULL, tres_usage_mins, NULL, 0);
+		switch (i) {
+		case 1:
+			last_job_update = now;
 			info("Job %u timed out, "
-			     "assoc %u is at or exceeds "
-			     "group max cpu minutes limit %"PRIu64" "
-			     "with %"PRIu64" for account %s",
-			     job_ptr->job_id, assoc->id,
-			     assoc->grp_cpu_mins,
-			     usage_mins,
-			     assoc->acct);
+			     "the job is at or exceeds assoc %u(%s/%s/%s) "
+			     "group max tres(%s) minutes of %"PRIu64
+			     " with %"PRIu64"",
+			     job_ptr->job_id,
+			     assoc->id, assoc->acct,
+			     assoc->user, assoc->partition,
+			     assoc_mgr_tres_name_array[tres_pos],
+			     assoc->grp_tres_mins_ctld[tres_pos],
+			     tres_usage_mins[tres_pos]);
 			job_ptr->state_reason = FAIL_TIMEOUT;
+			goto job_failed;
+			break;
+		case 2:
+			/* not possible safe_limits is 0 */
+		case 3:
+			/* not possible safe_limits is 0 */
+		default:
+			/* all good */
 			break;
 		}
 
-		if ((qos && (qos->grp_wall == INFINITE))
+		if ((qos_rec.grp_wall == INFINITE)
 		    && (assoc->grp_wall != INFINITE)
 		    && (wall_mins >= assoc->grp_wall)) {
 			info("Job %u timed out, "
@@ -2313,18 +3123,33 @@ extern bool acct_policy_job_time_out(struct job_record *job_ptr)
 			break;
 		}
 
-		if ((qos && (qos->max_cpu_mins_pj == INFINITE))
-		    && (assoc->max_cpu_mins_pj != (uint64_t)INFINITE)
-		    && (job_cpu_usage_mins >= assoc->max_cpu_mins_pj)) {
+		i = _validate_tres_usage_limits_for_assoc(
+			&tres_pos, assoc->max_tres_mins_ctld,
+			qos_rec.max_tres_mins_pj_ctld, job_tres_usage_mins,
+			NULL, NULL, NULL, 1);
+		switch (i) {
+		case 1:
+			/* not possible curr_usage is NULL */
+			break;
+		case 2:
+			last_job_update = now;
 			info("Job %u timed out, "
-			     "assoc %u is at or exceeds "
-			     "max cpu minutes limit %"PRIu64" "
-			     "with %"PRIu64" for account %s",
-			     job_ptr->job_id, assoc->id,
-			     assoc->max_cpu_mins_pj,
-			     job_cpu_usage_mins,
-			     assoc->acct);
+			     "the job is at or exceeds assoc %u(%s/%s/%s) "
+			     "max tres(%s) minutes of %"PRIu64
+			     " with %"PRIu64,
+			     job_ptr->job_id,
+			     assoc->id, assoc->acct,
+			     assoc->user, assoc->partition,
+			     assoc_mgr_tres_name_array[tres_pos],
+			     assoc->max_tres_mins_ctld[tres_pos],
+			     job_tres_usage_mins[tres_pos]);
 			job_ptr->state_reason = FAIL_TIMEOUT;
+			goto job_failed;
+			break;
+		case 3:
+			/* not possible tres_usage is NULL */
+		default:
+			/* all good */
 			break;
 		}
 
@@ -2335,6 +3160,7 @@ extern bool acct_policy_job_time_out(struct job_record *job_ptr)
 	}
 job_failed:
 	assoc_mgr_unlock(&locks);
+	slurmdb_free_qos_rec_members(&qos_rec);
 
 	if (job_ptr->state_reason == FAIL_TIMEOUT)
 		return true;
diff --git a/src/slurmctld/acct_policy.h b/src/slurmctld/acct_policy.h
index e6aad3e45..456c345f1 100644
--- a/src/slurmctld/acct_policy.h
+++ b/src/slurmctld/acct_policy.h
@@ -39,18 +39,6 @@
 #ifndef _HAVE_ACCT_POLICY_H
 #define _HAVE_ACCT_POLICY_H
 
-#define ADMIN_SET_LIMIT 0xffff
-
-typedef struct {
-	uint16_t max_cpus;
-	uint16_t max_nodes;
-	uint16_t min_cpus;
-	uint16_t min_nodes;
-	uint16_t pn_min_memory;
-	uint16_t qos;
-	uint16_t time;
-} acct_policy_limit_set_t;
-
 /*
  * acct_policy_add_job_submit - Note that a job has been submitted for
  *	accounting policy purposes.
@@ -89,7 +77,7 @@ extern void acct_policy_alter_job(struct job_record *job_ptr,
 
 extern bool acct_policy_validate(job_desc_msg_t *job_desc,
 				 struct part_record *part_ptr,
-				 slurmdb_association_rec_t *assoc_in,
+				 slurmdb_assoc_rec_t *assoc_in,
 				 slurmdb_qos_rec_t *qos_ptr,
 				 uint32_t *state_reason,
 				 acct_policy_limit_set_t *acct_policy_limit_set,
@@ -109,8 +97,7 @@ extern bool acct_policy_job_runnable_pre_select(struct job_record *job_ptr);
  *	selected for the job verify the counts don't exceed aggregated limits.
  */
 extern bool acct_policy_job_runnable_post_select(
-	struct job_record *job_ptr, uint32_t cpu_cnt,
-	uint32_t node_cnt, uint32_t pn_min_memory);
+	struct job_record *job_ptr, uint64_t *tres_req_cnt);
 
 /*
  * Determine of the specified job can execute right now or is currently
diff --git a/src/slurmctld/agent.c b/src/slurmctld/agent.c
index 26f10b7df..e02ccf914 100644
--- a/src/slurmctld/agent.c
+++ b/src/slurmctld/agent.c
@@ -4,6 +4,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2010-2015 SchedMD LLC <http://www.schedmd.com>.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>, et. al.
  *  Derived from pdsh written by Jim Garlick <garlick1@llnl.gov>
@@ -202,6 +203,7 @@ static List mail_list = NULL;		/* pending e-mail requests */
 static pthread_mutex_t agent_cnt_mutex = PTHREAD_MUTEX_INITIALIZER;
 static pthread_cond_t  agent_cnt_cond  = PTHREAD_COND_INITIALIZER;
 static int agent_cnt = 0;
+static int agent_thread_cnt = 0;
 static uint16_t message_timeout = (uint16_t) NO_VAL;
 
 static bool run_scheduler    = false;
@@ -227,6 +229,7 @@ void *agent(void *args)
 	task_info_t *task_specific_ptr;
 	time_t begin_time;
 	bool spawn_retry_agent = false;
+	int rpc_thread_cnt;
 
 #if HAVE_SYS_PRCTL_H
 	if (prctl(PR_SET_NAME, "slurmctld_agent", NULL, NULL, NULL) < 0) {
@@ -236,8 +239,9 @@ void *agent(void *args)
 #endif
 
 #if 0
-	info("Agent_cnt is %d of %d with msg_type %d",
-	     agent_cnt, MAX_AGENT_CNT, agent_arg_ptr->msg_type);
+	info("Agent_cnt=%d agent_thread_cnt=%d with msg_type=%d backlog_size=%d",
+	     agent_cnt, agent_thread_cnt, agent_arg_ptr->msg_type,
+	     list_count(retry_list));
 #endif
 	slurm_mutex_lock(&agent_cnt_mutex);
 	if (!wiki2_sched_test) {
@@ -248,10 +252,12 @@ void *agent(void *args)
 		wiki2_sched_test = true;
 	}
 
+	rpc_thread_cnt = 2 + MIN(agent_arg_ptr->node_count, AGENT_THREAD_COUNT);
 	while (1) {
 		if (slurmctld_config.shutdown_time ||
-		    (agent_cnt < MAX_AGENT_CNT)) {
+		    ((agent_thread_cnt+rpc_thread_cnt) <= MAX_SERVER_THREADS)) {
 			agent_cnt++;
+			agent_thread_cnt += rpc_thread_cnt;
 			break;
 		} else {	/* wait for state change and retry */
 			pthread_cond_wait(&agent_cnt_cond, &agent_cnt_mutex);
@@ -283,10 +289,8 @@ void *agent(void *args)
 		usleep(10000);	/* sleep and retry */
 	}
 	slurm_attr_destroy(&attr_wdog);
-#if 	AGENT_THREAD_COUNT < 1
-	fatal("AGENT_THREAD_COUNT value is invalid");
-#endif
-	debug2("got %d threads to send out",agent_info_ptr->thread_count);
+
+	debug2("got %d threads to send out", agent_info_ptr->thread_count);
 	/* start all the other threads (up to AGENT_THREAD_COUNT active) */
 	for (i = 0; i < agent_info_ptr->thread_count; i++) {
 
@@ -352,14 +356,20 @@ void *agent(void *args)
 	}
 	slurm_mutex_lock(&agent_cnt_mutex);
 
-	if (agent_cnt > 0)
+	if (agent_cnt > 0) {
 		agent_cnt--;
-	else {
+	} else {
 		error("agent_cnt underflow");
 		agent_cnt = 0;
 	}
+	if (agent_thread_cnt >= rpc_thread_cnt) {
+		agent_thread_cnt -= rpc_thread_cnt;
+	} else {
+		error("agent_thread_cnt underflow");
+		agent_thread_cnt = 0;
+	}
 
-	if (agent_cnt && (agent_cnt < MAX_AGENT_CNT))
+	if ((agent_thread_cnt + AGENT_THREAD_COUNT + 2) < MAX_SERVER_THREADS)
 		spawn_retry_agent = true;
 
 	pthread_cond_broadcast(&agent_cnt_cond);
@@ -607,8 +617,7 @@ static void *_wdog(void *args)
 	}
 
 	for (i = 0; i < agent_ptr->thread_count; i++) {
-		if (thread_ptr[i].ret_list)
-			list_destroy(thread_ptr[i].ret_list);
+		FREE_NULL_LIST(thread_ptr[i].ret_list);
 		xfree(thread_ptr[i].nodelist);
 	}
 
@@ -926,6 +935,8 @@ static void *_thread_per_group_rpc(void *args)
 			lock_slurmctld(node_write_lock);
 			reset_node_load(ret_data_info->node_name,
 					ping_resp->cpu_load);
+			reset_node_free_mem(ret_data_info->node_name,
+					    ping_resp->free_mem);
 			unlock_slurmctld(node_write_lock);
 		}
 		/* SPECIAL CASE: Mark node as IDLE if job already complete */
@@ -1004,10 +1015,8 @@ static void *_thread_per_group_rpc(void *args)
 			/* Not indicative of a real error */
 		case ESLURMD_JOB_NOTRUNNING:
 			/* Not indicative of a real error */
-			debug2("agent processed RPC to node %s: %s",
-			       ret_data_info->node_name,
-			       slurm_strerror(rc));
-
+			debug2("RPC to node %s failed, job not running",
+			       ret_data_info->node_name);
 			thread_state = DSH_DONE;
 			break;
 		default:
@@ -1195,7 +1204,7 @@ extern int agent_retry (int min_wait, bool mail_too)
 		static time_t last_msg_time = (time_t) 0;
 		uint32_t msg_type[5] = {0, 0, 0, 0, 0}, i = 0;
 		list_size = list_count(retry_list);
-		if ((list_size > MAX_AGENT_CNT) &&
+		if ((list_size > 50) &&
 		    (difftime(now, last_msg_time) > 300)) {
 			/* Note sizable backlog of work */
 			info("WARNING: agent retry_list size is %d",
@@ -1217,7 +1226,8 @@ extern int agent_retry (int min_wait, bool mail_too)
 	}
 
 	slurm_mutex_lock(&agent_cnt_mutex);
-	if (agent_cnt >= MAX_AGENT_CNT) {	/* too much work already */
+	if (agent_thread_cnt + AGENT_THREAD_COUNT + 2 > MAX_SERVER_THREADS) {
+		/* too much work already */
 		slurm_mutex_unlock(&agent_cnt_mutex);
 		slurm_mutex_unlock(&retry_mutex);
 		return list_size;
@@ -1310,6 +1320,9 @@ void agent_queue_request(agent_arg_t *agent_arg_ptr)
 {
 	queued_request_t *queued_req_ptr = NULL;
 
+	if ((AGENT_THREAD_COUNT + 2) >= MAX_SERVER_THREADS)
+		fatal("AGENT_THREAD_COUNT value is too low relative to MAX_SERVER_THREADS");
+
 	if (message_timeout == (uint16_t) NO_VAL) {
 		message_timeout = MAX(slurm_get_msg_timeout(), 30);
 	}
@@ -1398,14 +1411,12 @@ void agent_purge(void)
 {
 	if (retry_list) {
 		slurm_mutex_lock(&retry_mutex);
-		list_destroy(retry_list);
-		retry_list = NULL;
+		FREE_NULL_LIST(retry_list);
 		slurm_mutex_unlock(&retry_mutex);
 	}
 	if (mail_list) {
 		slurm_mutex_lock(&mail_mutex);
-		list_destroy(mail_list);
-		mail_list = NULL;
+		FREE_NULL_LIST(mail_list);
 		slurm_mutex_unlock(&mail_mutex);
 	}
 }
@@ -1518,6 +1529,8 @@ static char *_mail_type_str(uint16_t mail_type)
 		return "Failed";
 	if (mail_type == MAIL_JOB_REQUEUE)
 		return "Requeued";
+	if (mail_type == MAIL_JOB_STAGE_OUT)
+		return "Staged Out";
 	if (mail_type == MAIL_JOB_TIME100)
 		return "Reached time limit";
 	if (mail_type == MAIL_JOB_TIME90)
@@ -1567,6 +1580,14 @@ static void _set_job_time(struct job_record *job_ptr, uint16_t mail_type,
 			interval = time(NULL) - job_ptr->start_time;
 		snprintf(buf, buf_len, ", Run time ");
 		secs2time_str(interval, buf+11, buf_len-11);
+		return;
+	}
+
+	if ((mail_type == MAIL_JOB_STAGE_OUT) && job_ptr->end_time) {
+		interval = time(NULL) - job_ptr->end_time;
+		snprintf(buf, buf_len, ", StageOut time ");
+		secs2time_str(interval, buf+16, buf_len-16);
+		return;
 	}
 }
 
diff --git a/src/slurmctld/agent.h b/src/slurmctld/agent.h
index cc2da5b82..02ca456ae 100644
--- a/src/slurmctld/agent.h
+++ b/src/slurmctld/agent.h
@@ -46,11 +46,7 @@
 
 #define AGENT_THREAD_COUNT	10	/* maximum active threads per agent */
 #define COMMAND_TIMEOUT 	30	/* command requeue or error, seconds */
-#define MAX_AGENT_CNT		(MAX_SERVER_THREADS / (AGENT_THREAD_COUNT + 2))
-					/* maximum simultaneous agents, note
-					 *   total thread count is product of
-					 *   MAX_AGENT_CNT and
-					 *   (AGENT_THREAD_COUNT + 2) */
+
 #define LOTS_OF_AGENTS_CNT 50
 #define LOTS_OF_AGENTS ((get_agent_count() <= LOTS_OF_AGENTS_CNT) ? 0 : 1)
 
diff --git a/src/slurmctld/backup.c b/src/slurmctld/backup.c
index a62007f86..eb4cb1646 100644
--- a/src/slurmctld/backup.c
+++ b/src/slurmctld/backup.c
@@ -383,11 +383,11 @@ static void *_background_rpc_mgr(void *no_data)
 		slurm_free_msg_data(msg->msg_type, msg->data);
 		slurm_free_msg(msg);
 
-		slurm_close_accepted_conn(newsockfd);	/* close new socket */
+		slurm_close(newsockfd);	/* close new socket */
 	}
 
 	debug3("_background_rpc_mgr shutting down");
-	slurm_close_accepted_conn(sockfd);	/* close the main socket */
+	slurm_close(sockfd);	/* close the main socket */
 	pthread_exit((void *) 0);
 	return NULL;
 }
@@ -399,7 +399,8 @@ static int _background_process_msg(slurm_msg_t * msg)
 
 	if (msg->msg_type != REQUEST_PING) {
 		bool super_user = false;
-		uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+		uid_t uid = g_slurm_auth_get_uid(msg->auth_cred,
+						 slurm_get_auth_info());
 		if ((uid == 0) || (uid == getuid()))
 			super_user = true;
 
diff --git a/src/slurmctld/burst_buffer.c b/src/slurmctld/burst_buffer.c
new file mode 100644
index 000000000..6377a7196
--- /dev/null
+++ b/src/slurmctld/burst_buffer.c
@@ -0,0 +1,656 @@
+/*****************************************************************************\
+ *  burst_buffer.c - driver for burst buffer infrastructure and plugin
+ *****************************************************************************
+ *  Copyright (C) 2014-2015 SchedMD LLC.
+ *  Written by Morris Jette <jette@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#if HAVE_CONFIG_H
+#  include "config.h"
+#  if STDC_HEADERS
+#    include <string.h>
+#  endif
+#  if HAVE_SYS_TYPES_H
+#    include <sys/types.h>
+#  endif /* HAVE_SYS_TYPES_H */
+#  if HAVE_UNISTD_H
+#    include <unistd.h>
+#  endif
+#  if HAVE_INTTYPES_H
+#    include <inttypes.h>
+#  else /* ! HAVE_INTTYPES_H */
+#    if HAVE_STDINT_H
+#      include <stdint.h>
+#    endif
+#  endif /* HAVE_INTTYPES_H */
+#else /* ! HAVE_CONFIG_H */
+#  include <sys/types.h>
+#  include <unistd.h>
+#  include <stdint.h>
+#  include <string.h>
+#endif /* HAVE_CONFIG_H */
+#include <stdio.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+
+#include "src/common/list.h"
+#include "src/common/macros.h"
+#include "src/common/pack.h"
+#include "src/common/plugin.h"
+#include "src/common/plugrack.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/slurmctld/agent.h"
+#include "src/slurmctld/burst_buffer.h"
+#include "src/slurmctld/slurmctld.h"
+#include "src/slurmctld/reservation.h"
+
+typedef struct slurm_bb_ops {
+	uint64_t	(*get_system_size)	(void);
+	int		(*load_state)	(bool init_config);
+	int		(*state_pack)	(uid_t uid, Buf buffer,
+					 uint16_t protocol_version);
+	int		(*reconfig)	(void);
+	int		(*job_validate)	(struct job_descriptor *job_desc,
+					 uid_t submit_uid);
+	int		(*job_validate2)(struct job_record *job_ptr,
+					 char **err_msg);
+	void		(*job_set_tres_cnt) (struct job_record *job_ptr,
+					     uint64_t *tres_cnt, bool locked);
+	time_t		(*job_get_est_start) (struct job_record *job_ptr);
+	int		(*job_try_stage_in) (List job_queue);
+	int		(*job_test_stage_in) (struct job_record *job_ptr,
+					      bool test_only);
+	int		(*job_begin) (struct job_record *job_ptr);
+	int		(*job_start_stage_out) (struct job_record *job_ptr);
+	int		(*job_test_stage_out) (struct job_record *job_ptr);
+	int		(*job_cancel) (struct job_record *job_ptr);
+	char *		(*xlate_bb_2_tres_str) (char *burst_buffer);
+} slurm_bb_ops_t;
+
+/*
+ * Must be synchronized with slurm_bb_ops_t above.
+ */
+static const char *syms[] = {
+	"bb_p_get_system_size",
+	"bb_p_load_state",
+	"bb_p_state_pack",
+	"bb_p_reconfig",
+	"bb_p_job_validate",
+	"bb_p_job_validate2",
+	"bb_p_job_set_tres_cnt",
+	"bb_p_job_get_est_start",
+	"bb_p_job_try_stage_in",
+	"bb_p_job_test_stage_in",
+	"bb_p_job_begin",
+	"bb_p_job_start_stage_out",
+	"bb_p_job_test_stage_out",
+	"bb_p_job_cancel",
+	"bb_p_xlate_bb_2_tres_str"
+};
+
+static int g_context_cnt = -1;
+static slurm_bb_ops_t *ops = NULL;
+static plugin_context_t **g_context = NULL;
+static char *bb_plugin_list = NULL;
+static pthread_mutex_t g_context_lock = PTHREAD_MUTEX_INITIALIZER;
+static bool init_run = false;
+
+/*
+ * Initialize the burst buffer infrastructure.
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_init(void)
+{
+	int rc = SLURM_SUCCESS;
+	char *last = NULL, *names;
+	char *plugin_type = "burst_buffer";
+	char *type;
+
+	if (init_run && (g_context_cnt >= 0))
+		return rc;
+
+	slurm_mutex_lock(&g_context_lock);
+	if (g_context_cnt >= 0)
+		goto fini;
+
+	bb_plugin_list = slurm_get_bb_type();
+	g_context_cnt = 0;
+	if ((bb_plugin_list == NULL) || (bb_plugin_list[0] == '\0'))
+		goto fini;
+
+	names = bb_plugin_list;
+	while ((type = strtok_r(names, ",", &last))) {
+		xrealloc(ops, (sizeof(slurm_bb_ops_t) * (g_context_cnt + 1)));
+		xrealloc(g_context,
+			 (sizeof(plugin_context_t *) * (g_context_cnt + 1)));
+		if (strncmp(type, "burst_buffer/", 13) == 0)
+			type += 13; /* backward compatibility */
+		type = xstrdup_printf("burst_buffer/%s", type);
+		g_context[g_context_cnt] = plugin_context_create(
+			plugin_type, type, (void **)&ops[g_context_cnt],
+			syms, sizeof(syms));
+		if (!g_context[g_context_cnt]) {
+			error("cannot create %s context for %s",
+			      plugin_type, type);
+			rc = SLURM_ERROR;
+			xfree(type);
+			break;
+		}
+
+		xfree(type);
+		g_context_cnt++;
+		names = NULL; /* for next iteration */
+	}
+	init_run = true;
+
+fini:
+	slurm_mutex_unlock(&g_context_lock);
+
+	if (rc != SLURM_SUCCESS)
+		bb_g_fini();
+
+	return rc;
+}
+
+/*
+ * Terminate the burst buffer infrastructure: destroy every loaded plugin
+ * context and release all module-level state.
+ * Returns a SLURM errno.
+ */
+extern int bb_g_fini(void)
+{
+	int i, j, rc = SLURM_SUCCESS;
+
+	slurm_mutex_lock(&g_context_lock);
+	if (g_context_cnt < 0)	/* never initialized (or already finalized) */
+		goto fini;
+
+	init_run = false;
+	for (i = 0; i < g_context_cnt; i++) {
+		if (g_context[i]) {
+			j = plugin_context_destroy(g_context[i]);
+			if (j != SLURM_SUCCESS)	/* remember any failure */
+				rc = j;
+		}
+	}
+	xfree(ops);
+	xfree(g_context);
+	xfree(bb_plugin_list);
+	g_context_cnt = -1;	/* mark infrastructure as uninitialized */
+
+fini:	slurm_mutex_unlock(&g_context_lock);
+	return rc;
+}
+
+/*
+ **************************************************************************
+ *                          P L U G I N   C A L L S                       *
+ **************************************************************************
+ */
+
+/*
+ * Load the current burst buffer state (e.g. how much space is available now).
+ * Run at the beginning of each scheduling cycle in order to recognize external
+ * changes to the burst buffer state (e.g. capacity is added, removed, fails,
+ * etc.).
+ *
+ * init_config IN - true if called as part of slurmctld initialization
+ * Returns a SLURM errno.
+ */
+extern int bb_g_load_state(bool init_config)
+{
+	DEF_TIMERS;
+	int i, rc, rc2;
+
+	START_TIMER;
+	rc = bb_g_init();	/* ensure plugins are loaded */
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; ((i < g_context_cnt) && (rc == SLURM_SUCCESS)); i++) {
+		rc2 = (*(ops[i].load_state))(init_config);
+		rc = MAX(rc, rc2);	/* keep the worst (highest) errno */
+	}
+	slurm_mutex_unlock(&g_context_lock);
+	END_TIMER2(__func__);
+
+	return rc;
+}
+
+/*
+ * Pack current burst buffer state information for network transmission to
+ * user (e.g. "scontrol show burst")
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_state_pack(uid_t uid, Buf buffer, uint16_t protocol_version)
+{
+	DEF_TIMERS;
+	int i, rc, rc2;
+	uint32_t rec_count = 0;
+	int eof, last_offset, offset;
+
+	START_TIMER;
+	offset = get_buf_offset(buffer);	/* where rec_count is stored */
+	pack32(rec_count, buffer);	/* placeholder count, fixed up below */
+	rc = bb_g_init();
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++) {
+		last_offset = get_buf_offset(buffer);
+		rc2 = (*(ops[i].state_pack))(uid, buffer, protocol_version);
+		if (last_offset != get_buf_offset(buffer))	/* plugin packed data */
+			rec_count++;
+		rc = MAX(rc, rc2);	/* keep the worst (highest) errno */
+	}
+	slurm_mutex_unlock(&g_context_lock);
+	if (rec_count != 0) {	/* rewrite the placeholder record count */
+		eof = get_buf_offset(buffer);
+		set_buf_offset(buffer, offset);
+		pack32(rec_count, buffer);
+		set_buf_offset(buffer, eof);	/* restore end-of-data offset */
+	}
+	END_TIMER2(__func__);
+
+	return rc;
+}
+
+/*
+ * Note configuration may have changed. Handle changes in the
+ * BurstBufferParameters configuration value.
+ * Returns a SLURM errno.
+ */
+extern int bb_g_reconfig(void)
+{
+	DEF_TIMERS;
+	int i, rc, rc2;
+
+	START_TIMER;
+	rc = bb_g_init();	/* ensure plugins are loaded */
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; ((i < g_context_cnt) && (rc == SLURM_SUCCESS)); i++) {
+		rc2 = (*(ops[i].reconfig))();
+		rc = MAX(rc, rc2);	/* keep the worst (highest) errno */
+	}
+	slurm_mutex_unlock(&g_context_lock);
+	END_TIMER2(__func__);
+
+	return rc;
+}
+
+/*
+ * Give the total burst buffer size in MB of a given plugin name (e.g. "cray").
+ * If "name" is NULL, return the total space of all burst buffer plugins.
+ */
+extern uint64_t bb_g_get_system_size(char *name)
+{
+	uint64_t size = 0;
+	int i, offset = 0;	/* int i avoids signed/unsigned loop compare */
+
+	(void) bb_g_init();	/* ensure plugins are loaded; errors ignored */
+
+	/* Accept both the bare ("cray") and the full ("burst_buffer/cray")
+	 * plugin name forms; g_context[i]->type is always the full form.
+	 * Guard against NULL before strncmp() (NULL means "all plugins"). */
+	if (name && strncmp(name, "burst_buffer/", 13))
+		offset = 13;
+
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++) {
+		if (!g_context[i])
+			continue;
+		if (!name) {	/* sum the space of every loaded plugin */
+			size += (*(ops[i].get_system_size))();
+		} else if (!xstrcmp(g_context[i]->type + offset, name)) {
+			size = (*(ops[i].get_system_size))();
+			break;	/* plugin names are unique; stop at match */
+		}
+	}
+	slurm_mutex_unlock(&g_context_lock);
+
+	return size;
+}
+
+/*
+ * Preliminary validation of a job submit request with respect to burst buffer
+ * options. Performed after setting default account + qos, but prior to
+ * establishing job ID or creating script file.
+ *
+ * job_desc IN - Job submission request
+ * submit_uid IN - ID of the user submitting the job.
+ * Returns a SLURM errno.
+ */
+extern int bb_g_job_validate(struct job_descriptor *job_desc,
+			     uid_t submit_uid)
+{
+	DEF_TIMERS;
+	int i, rc, rc2;
+
+	START_TIMER;
+	rc = bb_g_init();	/* ensure plugins are loaded */
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++) {	/* every plugin gets a look */
+		rc2 = (*(ops[i].job_validate))(job_desc, submit_uid);
+		rc = MAX(rc, rc2);	/* keep the worst (highest) errno */
+	}
+	slurm_mutex_unlock(&g_context_lock);
+	END_TIMER2(__func__);
+
+	return rc;
+}
+
+/*
+ * Secondary validation of a job submit request with respect to burst buffer
+ * options. Performed after establishing job ID and creating script file.
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_job_validate2(struct job_record *job_ptr, char **err_msg)
+{
+	DEF_TIMERS;
+	int i, rc, rc2;
+
+	START_TIMER;
+	rc = bb_g_init();	/* ensure plugins are loaded */
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++) {	/* every plugin gets a look */
+		rc2 = (*(ops[i].job_validate2))(job_ptr, err_msg);
+		rc = MAX(rc, rc2);	/* keep the worst (highest) errno */
+	}
+	slurm_mutex_unlock(&g_context_lock);
+	END_TIMER2(__func__);
+
+	return rc;
+}
+
+/*
+ * Fill in the tres_cnt (in MB) based off the job record
+ * NOTE: Based upon job-specific burst buffers, excludes persistent buffers
+ * IN job_ptr - job record
+ * IN/OUT tres_cnt - fill in this already allocated array with tres_cnts
+ * IN locked - if the assoc_mgr tres read locked is locked or not
+ */
+extern void bb_g_job_set_tres_cnt(struct job_record *job_ptr,
+				  uint64_t *tres_cnt,
+				  bool locked)
+{
+	DEF_TIMERS;
+	int i;
+
+	START_TIMER;
+	(void) bb_g_init();	/* ensure plugins are loaded; errors ignored */
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++) {	/* each plugin fills its TRES */
+		(*(ops[i].job_set_tres_cnt))(job_ptr, tres_cnt, locked);
+	}
+	slurm_mutex_unlock(&g_context_lock);
+	END_TIMER2(__func__);
+}
+
+/* List sort helper: order jobs by ascending expected start time */
+extern int _sort_job_queue(void *x, void *y)
+{
+	struct job_record *job_ptr1 = *(struct job_record **) x;
+	struct job_record *job_ptr2 = *(struct job_record **) y;
+	time_t t1, t2;
+
+	t1 = job_ptr1->start_time;
+	t2 = job_ptr2->start_time;
+	if (t1 > t2)
+		return 1;	/* x starts later: sort after y */
+	if (t1 < t2)
+		return -1;	/* x starts sooner: sort before y */
+	return 0;	/* equal start times */
+}
+
+/*
+ * For a given job, return our best guess as to when it might be able to start
+ */
+extern time_t bb_g_job_get_est_start(struct job_record *job_ptr)
+{
+	DEF_TIMERS;
+	int i;
+	time_t start_time = time(NULL), new_time;
+
+	START_TIMER;
+	if (bb_g_init() != SLURM_SUCCESS)
+		return start_time + 24 * 60 * 60;	/* init failed: guess 1 day */
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++) {
+		new_time = (*(ops[i].job_get_est_start))(job_ptr);
+		start_time = MAX(start_time, new_time);	/* latest estimate wins */
+	}
+	slurm_mutex_unlock(&g_context_lock);
+	END_TIMER2(__func__);
+
+	return start_time;
+}
+/*
+ * Allocate burst buffers to jobs expected to start soonest
+ * Job records must be read locked
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_job_try_stage_in(void)
+{
+	DEF_TIMERS;
+	int i, rc = 1, rc2;	/* initial rc is replaced by bb_g_init() below */
+	ListIterator job_iterator;
+	struct job_record *job_ptr;
+	time_t now = time(NULL);
+	List job_queue;
+
+	START_TIMER;
+	job_queue = list_create(NULL);	/* borrowed pointers, no destructor */
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		if (!IS_JOB_PENDING(job_ptr))	/* only pending jobs stage in */
+			continue;
+		if ((job_ptr->burst_buffer == NULL) ||
+		    (job_ptr->burst_buffer[0] == '\0'))	/* no buffer request */
+			continue;
+		if ((job_ptr->start_time == 0) ||
+		    (job_ptr->start_time > now + 10 * 60 * 60))	/* ten hours */
+			continue;
+		list_push(job_queue, job_ptr);	/* candidate for stage-in */
+	}
+	list_iterator_destroy(job_iterator);
+	list_sort(job_queue, _sort_job_queue);	/* soonest start first */
+
+	rc = bb_g_init();
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++) {
+		rc2 = (*(ops[i].job_try_stage_in))(job_queue);
+		rc = MAX(rc, rc2);	/* keep the worst (highest) errno */
+	}
+	slurm_mutex_unlock(&g_context_lock);
+	FREE_NULL_LIST(job_queue);
+	END_TIMER2(__func__);
+
+	return rc;
+}
+
+/*
+ * Determine if a job's burst buffer stage-in is complete
+ * job_ptr IN - Job to test
+ * test_only IN - If false, then attempt to load burst buffer if possible
+ *
+ * RET: 0 - stage-in is underway
+ *      1 - stage-in complete
+ *     -1 - stage-in not started or burst buffer in some unexpected state
+ */
+extern int bb_g_job_test_stage_in(struct job_record *job_ptr, bool test_only)
+{
+	DEF_TIMERS;
+	int i, rc = 1, rc2;	/* assume complete until a plugin says otherwise */
+
+	START_TIMER;
+	if (bb_g_init() != SLURM_SUCCESS)
+		rc = -1;	/* infrastructure failure */
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++) {
+		rc2 = (*(ops[i].job_test_stage_in))(job_ptr, test_only);
+		rc = MIN(rc, rc2);	/* report the least-complete state */
+	}
+	slurm_mutex_unlock(&g_context_lock);
+	END_TIMER2(__func__);
+
+	return rc;
+}
+
+/* Attempt to claim burst buffer resources.
+ * At this time, bb_g_job_test_stage_in() should have been run successfully AND
+ * the compute nodes selected for the job.
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_job_begin(struct job_record *job_ptr)
+{
+	DEF_TIMERS;
+	int i, rc = SLURM_SUCCESS, rc2;
+
+	START_TIMER;
+	if (bb_g_init() != SLURM_SUCCESS)
+		rc = SLURM_ERROR;	/* infrastructure failure */
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++) {
+		rc2 = (*(ops[i].job_begin))(job_ptr);
+		if (rc2 != SLURM_SUCCESS)	/* remember any plugin failure */
+			rc = rc2;
+	}
+	slurm_mutex_unlock(&g_context_lock);
+	END_TIMER2(__func__);
+
+	return rc;
+}
+
+/*
+ * Trigger a job's burst buffer stage-out to begin
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_job_start_stage_out(struct job_record *job_ptr)
+{
+	DEF_TIMERS;
+	int i, rc, rc2;
+
+	START_TIMER;
+	rc = bb_g_init();	/* ensure plugins are loaded */
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++) {
+		rc2 = (*(ops[i].job_start_stage_out))(job_ptr);
+		rc = MAX(rc, rc2);	/* keep the worst (highest) errno */
+	}
+	slurm_mutex_unlock(&g_context_lock);
+	END_TIMER2(__func__);
+
+	return rc;
+}
+
+/*
+ * Determine if a job's burst buffer stage-out is complete; once it is no
+ * longer underway, send the MAIL_JOB_STAGE_OUT notification if requested.
+ * RET: 0 - stage-out is underway
+ *      1 - stage-out complete
+ *     -1 - fatal error
+ */
+extern int bb_g_job_test_stage_out(struct job_record *job_ptr)
+{
+	DEF_TIMERS;
+	int i, rc = 1, rc2;	/* assume complete until a plugin says otherwise */
+
+	START_TIMER;
+	if (bb_g_init() != SLURM_SUCCESS)
+		rc = -1;	/* infrastructure failure */
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++) {
+		rc2 = (*(ops[i].job_test_stage_out))(job_ptr);
+		rc = MIN(rc, rc2);	/* report the least-complete state */
+	}
+	slurm_mutex_unlock(&g_context_lock);
+	END_TIMER2(__func__);
+
+	if ((rc != 0) && (job_ptr->mail_type & MAIL_JOB_STAGE_OUT)) { /* NOTE(review): -1 also mails */
+		mail_job_info(job_ptr, MAIL_JOB_STAGE_OUT);
+		job_ptr->mail_type &= (~MAIL_JOB_STAGE_OUT); /* notify only once */
+	}
+
+	return rc;
+}
+
+/*
+ * Terminate any file staging and completely release burst buffer resources
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_job_cancel(struct job_record *job_ptr)
+{
+	DEF_TIMERS;
+	int i, rc, rc2;
+
+	START_TIMER;
+	rc = bb_g_init();	/* ensure plugins are loaded */
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++) {
+		rc2 = (*(ops[i].job_cancel))(job_ptr);
+		rc = MAX(rc, rc2);	/* keep the worst (highest) errno */
+	}
+	slurm_mutex_unlock(&g_context_lock);
+	END_TIMER2(__func__);
+
+	return rc;
+}
+
+/*
+ * Translate a burst buffer string to its equivalent TRES string
+ * (e.g. "cray:2G,generic:4M" -> "1004=2048,1005=4")
+ * Caller must xfree the return value
+ */
+extern char *bb_g_xlate_bb_2_tres_str(char *burst_buffer)
+{
+	DEF_TIMERS;
+	int i;
+	char *tmp = NULL, *tmp2;
+
+	START_TIMER;
+	(void) bb_g_init();	/* ensure plugins are loaded; errors ignored */
+	slurm_mutex_lock(&g_context_lock);
+	for (i = 0; i < g_context_cnt; i++) {
+		tmp2 = (*(ops[i].xlate_bb_2_tres_str))(burst_buffer);
+		if (!tmp) {	/* first plugin's contribution */
+			tmp = tmp2;
+		} else {	/* append later contributions, comma separated */
+			xstrcat(tmp, ",");
+			xstrcat(tmp, tmp2);
+			xfree(tmp2);
+		}
+	}
+	slurm_mutex_unlock(&g_context_lock);
+	END_TIMER2(__func__);
+
+	return tmp;
+}
diff --git a/src/slurmctld/burst_buffer.h b/src/slurmctld/burst_buffer.h
new file mode 100644
index 000000000..443965d85
--- /dev/null
+++ b/src/slurmctld/burst_buffer.h
@@ -0,0 +1,188 @@
+/*****************************************************************************\
+ *  burst_buffer.h - driver for burst buffer infrastructure and plugin
+ *****************************************************************************
+ *  Copyright (C) 2014 SchedMD LLC.
+ *  Written by Morris Jette <jette@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _SLURM_BURST_BUFFER_H
+#define _SLURM_BURST_BUFFER_H
+
+#include "slurm/slurm.h"
+#include "src/common/pack.h"
+#include "src/slurmctld/slurmctld.h"
+
+/*
+ * Initialize the burst buffer infrastructure.
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_init(void);
+
+/*
+ * Terminate the burst buffer infrastructure. Free memory.
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_fini(void);
+
+/*
+ **************************************************************************
+ *                          P L U G I N   C A L L S                       *
+ **************************************************************************
+ */
+
+/*
+ * Load the current burst buffer state (e.g. how much space is available now).
+ * Run at the beginning of each scheduling cycle in order to recognize external
+ * changes to the burst buffer state (e.g. capacity is added, removed, fails,
+ * etc.)
+ *
+ * init_config IN - true if called as part of slurmctld initialization
+ * Returns a SLURM errno.
+ */
+extern int bb_g_load_state(bool init_config);
+
+/*
+ * Pack current burst buffer state information for network transmission to
+ * user (e.g. "scontrol show burst")
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_state_pack(uid_t uid, Buf buffer, uint16_t protocol_version);
+
+/*
+ * Note configuration may have changed. Handle changes in BurstBufferParameters.
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_reconfig(void);
+
+/*
+ * Give the total burst buffer size in MB of a given plugin name (e.g. "cray").
+ * If "name" is NULL, return the total space of all burst buffer plugins.
+ */
+extern uint64_t bb_g_get_system_size(char *name);
+
+/*
+ * Preliminary validation of a job submit request with respect to burst buffer
+ * options. Performed after setting default account + qos, but prior to
+ * establishing job ID or creating script file.
+ *
+ * job_desc IN - Job submission request
+ * submit_uid IN - ID of the user submitting the job.
+ * Returns a SLURM errno.
+ */
+extern int bb_g_job_validate(struct job_descriptor *job_desc,
+			     uid_t submit_uid);
+
+/*
+ * Secondary validation of a job submit request with respect to burst buffer
+ * options. Performed after establishing job ID and creating script file.
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_job_validate2(struct job_record *job_ptr, char **err_msg);
+
+/*
+ * Fill in the tres_cnt (in MB) based off the job record
+ * NOTE: Based upon job-specific burst buffers, excludes persistent buffers
+ * IN job_ptr - job record
+ * IN/OUT tres_cnt - fill in this already allocated array with tres_cnts
+ * IN locked - if the assoc_mgr tres read locked is locked or not
+ */
+extern void bb_g_job_set_tres_cnt(struct job_record *job_ptr,
+				  uint64_t *tres_cnt, bool locked);
+
+/*
+ * For a given job, return our best guess as to when it might be able to start
+ */
+extern time_t bb_g_job_get_est_start(struct job_record *job_ptr);
+
+/*
+ * Allocate burst buffers to jobs expected to start soonest
+ * Job records must be read locked
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_job_try_stage_in(void);
+
+/*
+ * Determine if a job's burst buffer stage-in is complete
+ * job_ptr IN - Job to test
+ * test_only IN - If false, then attempt to load burst buffer if possible
+ *
+ * RET: 0 - stage-in is underway
+ *      1 - stage-in complete
+ *     -1 - stage-in not started or burst buffer in some unexpected state
+ */
+extern int bb_g_job_test_stage_in(struct job_record *job_ptr, bool test_only);
+
+/* Attempt to claim burst buffer resources.
+ * At this time, bb_g_job_test_stage_in() should have been run successfully AND
+ * the compute nodes selected for the job.
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_job_begin(struct job_record *job_ptr);
+
+/*
+ * Trigger a job's burst buffer stage-out to begin
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_job_start_stage_out(struct job_record *job_ptr);
+
+/*
+ * Determine if a job's burst buffer stage-out is complete
+ *
+ * RET: 0 - stage-out is underway
+ *      1 - stage-out complete
+ *     -1 - fatal error
+ */
+extern int bb_g_job_test_stage_out(struct job_record *job_ptr);
+
+/*
+ * Terminate any file staging and completely release burst buffer resources
+ *
+ * Returns a SLURM errno.
+ */
+extern int bb_g_job_cancel(struct job_record *job_ptr);
+
+/*
+ * Translate a burst buffer string to its equivalent TRES string
+ * (e.g. "cray:2G,generic:4M" -> "1004=2048,1005=4")
+ * Caller must xfree the return value
+ */
+extern char *bb_g_xlate_bb_2_tres_str(char *burst_buffer);
+
+#endif /* !_SLURM_BURST_BUFFER_H */
diff --git a/src/slurmctld/controller.c b/src/slurmctld/controller.c
index 7ddb2558b..47f321b05 100644
--- a/src/slurmctld/controller.c
+++ b/src/slurmctld/controller.c
@@ -3,7 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
- *  Portions Copyright (C) 2010-2014 SchedMD LLC.
+ *  Portions Copyright (C) 2010-2015 SchedMD LLC.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>, Kevin Tew <tew1@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
@@ -73,27 +73,29 @@
 #include "src/common/macros.h"
 #include "src/common/node_select.h"
 #include "src/common/pack.h"
+#include "src/common/power.h"
 #include "src/common/proc_args.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_acct_gather_profile.h"
-#include "src/common/slurm_jobacct_gather.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/slurm_auth.h"
 #include "src/common/slurm_ext_sensors.h"
+#include "src/common/slurm_jobacct_gather.h"
 #include "src/common/slurm_jobcomp.h"
-#include "src/common/slurm_route.h"
-#include "src/common/slurm_topology.h"
 #include "src/common/slurm_priority.h"
 #include "src/common/slurm_protocol_api.h"
+#include "src/common/slurm_protocol_interface.h"
+#include "src/common/slurm_route.h"
+#include "src/common/slurm_topology.h"
 #include "src/common/switch.h"
 #include "src/common/timers.h"
 #include "src/common/uid.h"
 #include "src/common/xsignal.h"
 #include "src/common/xstring.h"
-#include "src/common/slurm_protocol_interface.h"
 
 #include "src/slurmctld/acct_policy.h"
 #include "src/slurmctld/agent.h"
+#include "src/slurmctld/burst_buffer.h"
 #include "src/slurmctld/front_end.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/job_submit.h"
@@ -101,10 +103,12 @@
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/ping_nodes.h"
 #include "src/slurmctld/port_mgr.h"
+#include "src/slurmctld/powercapping.h"
 #include "src/slurmctld/preempt.h"
 #include "src/slurmctld/proc_req.h"
 #include "src/slurmctld/read_config.h"
 #include "src/slurmctld/reservation.h"
+#include "src/slurmctld/sicp.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/slurmctld_plugstack.h"
 #include "src/slurmctld/sched_plugin.h"
@@ -180,7 +184,7 @@ slurmctld_config_t slurmctld_config;
 diag_stats_t slurmctld_diag_stats;
 int	slurmctld_primary = 1;
 bool	want_nodes_reboot = true;
-int	with_slurmdbd = 0;
+int   slurmctld_tres_cnt = 0;
 
 /* Local variables */
 static pthread_t assoc_cache_thread = (pthread_t) 0;
@@ -214,16 +218,18 @@ static int          _accounting_mark_all_nodes_down(char *reason);
 static void *       _assoc_cache_mgr(void *no_data);
 static void         _become_slurm_user(void);
 static void         _default_sigaction(int sig);
-inline static void  _free_server_thread(void);
 static void         _init_config(void);
 static void         _init_pidfile(void);
 static void         _kill_old_slurmctld(void);
 static void         _parse_commandline(int argc, char *argv[]);
 inline static int   _ping_backup_controller(void);
-static void         _remove_assoc(slurmdb_association_rec_t *rec);
+static void         _remove_assoc(slurmdb_assoc_rec_t *rec);
 static void         _remove_qos(slurmdb_qos_rec_t *rec);
-static void         _update_assoc(slurmdb_association_rec_t *rec);
+static void         _update_assoc(slurmdb_assoc_rec_t *rec);
 static void         _update_qos(slurmdb_qos_rec_t *rec);
+static int          _init_tres(void);
+static void         _update_cluster_tres(void);
+
 inline static int   _report_locks_set(void);
 static void *       _service_connection(void *arg);
 static void         _set_work_dir(void);
@@ -304,12 +310,6 @@ int main(int argc, char *argv[])
 	if (daemonize)
 		_set_work_dir();
 
-	/* load old config */
-	load_config_state_lite();
-
-	/* store new config */
-	dump_config_state_lite();
-
 	if (stat(slurmctld_conf.mail_prog, &stat_buf) != 0)
 		error("Configured MailProg is invalid");
 
@@ -396,6 +396,25 @@ int main(int argc, char *argv[])
 			slurmctld_conf.job_credential_private_key);
 	}
 
+	/* Must set before plugins are loaded. */
+	if (slurmctld_conf.backup_controller &&
+	    (strcmp(node_name, slurmctld_conf.backup_controller) == 0)) {
+#ifndef HAVE_ALPS_CRAY
+		char *sched_params = NULL;
+#endif
+		slurmctld_primary = 0;
+
+#ifdef HAVE_ALPS_CRAY
+		slurmctld_config.scheduling_disabled = true;
+#else
+		sched_params = slurm_get_sched_params();
+		if (sched_params &&
+		    strstr(sched_params, "no_backup_scheduling"))
+			slurmctld_config.scheduling_disabled = true;
+		xfree(sched_params);
+#endif
+	}
+
 
 	/* Not used in creator
 	 *
@@ -431,11 +450,8 @@ int main(int argc, char *argv[])
 		slurmctld_config.resume_backup = false;
 
 		/* start in primary or backup mode */
-		if (slurmctld_conf.backup_controller &&
-		    (strcmp(node_name,
-			    slurmctld_conf.backup_controller) == 0)) {
+		if (!slurmctld_primary) {
 			slurm_sched_fini();	/* make sure shutdown */
-			slurmctld_primary = 0;
 			run_backup(&callbacks);
 			if (slurm_acct_storage_init(NULL) != SLURM_SUCCESS )
 				fatal("failed to initialize "
@@ -464,9 +480,6 @@ int main(int argc, char *argv[])
 				slurmctld_init_db = 1;
 				_accounting_mark_all_nodes_down("cold-start");
 			}
-
-			slurmctld_primary = 1;
-
 		} else {
 			error("this host (%s) not valid controller (%s or %s)",
 				node_name, slurmctld_conf.control_machine,
@@ -510,13 +523,17 @@ int main(int argc, char *argv[])
 			fatal("failed to initialize scheduling plugin");
 		if (slurmctld_plugstack_init())
 			fatal("failed to initialize slurmctld_plugstack");
+		if (bb_g_init() != SLURM_SUCCESS )
+			fatal( "failed to initialize burst buffer plugin");
+		if (power_g_init() != SLURM_SUCCESS )
+			fatal( "failed to initialize power management plugin");
+		sicp_init();
+
 
 		/*
 		 * create attached thread to process RPCs
 		 */
-		slurm_mutex_lock(&slurmctld_config.thread_count_lock);
-		slurmctld_config.server_thread_count++;
-		slurm_mutex_unlock(&slurmctld_config.thread_count_lock);
+		server_thread_incr();
 		slurm_attr_init(&thread_attr);
 		while (pthread_create(&slurmctld_config.thread_id_rpc,
 				      &thread_attr, _slurmctld_rpc_mgr,
@@ -573,6 +590,9 @@ int main(int argc, char *argv[])
 		slurmctld_config.thread_id_sig  = (pthread_t) 0;
 		slurmctld_config.thread_id_rpc  = (pthread_t) 0;
 		slurmctld_config.thread_id_save = (pthread_t) 0;
+		bb_g_fini();
+		power_g_fini();
+		sicp_fini();
 
 		if (running_cache) {
 			/* break out and end the association cache
@@ -605,7 +625,7 @@ int main(int argc, char *argv[])
 		recover = 2;
 	}
 
-	slurm_layouts_fini();
+	layouts_fini();
 
 	/* Since pidfile is created as user root (its owner is
 	 *   changed to SlurmUser) SlurmUser may not be able to
@@ -741,6 +761,7 @@ static void  _init_config(void)
 	slurmctld_config.server_thread_count = 0;
 	slurmctld_config.shutdown_time  = (time_t) 0;
 	slurmctld_config.thread_id_main = pthread_self();
+	slurmctld_config.scheduling_disabled = false;
 #ifdef WITH_PTHREADS
 	pthread_mutex_init(&slurmctld_config.thread_count_lock, NULL);
 	slurmctld_config.thread_id_main    = (pthread_t) 0;
@@ -965,7 +986,7 @@ static void *_slurmctld_rpc_mgr(void *no_data)
 		if (select(max_fd+1, &rfds, NULL, NULL, NULL) == -1) {
 			if (errno != EINTR)
 				error("slurm_accept_msg_conn select: %m");
-			_free_server_thread();
+			server_thread_decr();
 			continue;
 		}
 		/* find one to process */
@@ -986,7 +1007,7 @@ static void *_slurmctld_rpc_mgr(void *no_data)
 		    SLURM_SOCKET_ERROR) {
 			if (errno != EINTR)
 				error("slurm_accept_msg_conn: %m");
-			_free_server_thread();
+			server_thread_decr();
 			continue;
 		}
 		fd_set_close_on_exec(newsockfd);
@@ -997,7 +1018,7 @@ static void *_slurmctld_rpc_mgr(void *no_data)
 		if (slurmctld_conf.debug_flags & DEBUG_FLAG_PROTOCOL) {
 			char inetbuf[64];
 
-			_slurm_print_slurm_addr(&cli_addr,
+			slurm_print_slurm_addr(&cli_addr,
 						inetbuf,
 						sizeof(inetbuf));
 			info("%s: accept() connection from %s", __func__, inetbuf);
@@ -1025,7 +1046,7 @@ static void *_slurmctld_rpc_mgr(void *no_data)
 	for (i=0; i<nports; i++)
 		(void) slurm_shutdown_msg_engine(sockfd[i]);
 	xfree(sockfd);
-	_free_server_thread();
+	server_thread_decr();
 	pthread_exit((void *) 0);
 	return NULL;
 }
@@ -1056,7 +1077,7 @@ static void *_service_connection(void *arg)
 	if (slurm_receive_msg(conn->newsockfd, msg, 0) != 0) {
 		error("slurm_receive_msg: %m");
 		/* close the new socket */
-		slurm_close_accepted_conn(conn->newsockfd);
+		slurm_close(conn->newsockfd);
 		goto cleanup;
 	}
 
@@ -1070,13 +1091,13 @@ static void *_service_connection(void *arg)
 		slurmctld_req(msg, conn);
 	}
 	if ((conn->newsockfd >= 0)
-	    && slurm_close_accepted_conn(conn->newsockfd) < 0)
+	    && slurm_close(conn->newsockfd) < 0)
 		error ("close(%d): %m",  conn->newsockfd);
 
 cleanup:
 	slurm_free_msg(msg);
 	xfree(arg);
-	_free_server_thread();
+	server_thread_decr();
 	return return_code;
 }
 
@@ -1123,7 +1144,8 @@ static bool _wait_for_server_thread(void)
 	return rc;
 }
 
-static void _free_server_thread(void)
+/* Decrement slurmctld thread count (as applies to thread limit) */
+extern void server_thread_decr(void)
 {
 	slurm_mutex_lock(&slurmctld_config.thread_count_lock);
 	if (slurmctld_config.server_thread_count > 0)
@@ -1134,19 +1156,26 @@ static void _free_server_thread(void)
 	slurm_mutex_unlock(&slurmctld_config.thread_count_lock);
 }
 
-static int _accounting_cluster_ready()
+/* Increment slurmctld thread count (as applies to thread limit) */
+extern void server_thread_incr(void)
+{
+	slurm_mutex_lock(&slurmctld_config.thread_count_lock);
+	slurmctld_config.server_thread_count++;
+	slurm_mutex_unlock(&slurmctld_config.thread_count_lock);
+}
+
+static int _accounting_cluster_ready(void)
 {
 	int rc = SLURM_ERROR;
 	time_t event_time = time(NULL);
 	bitstr_t *total_node_bitmap = NULL;
-	char *cluster_nodes = NULL;
+	char *cluster_nodes = NULL, *cluster_tres_str;
 	slurmctld_lock_t node_read_lock = {
 		NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   WRITE_LOCK, NO_LOCK, NO_LOCK };
 
 	lock_slurmctld(node_read_lock);
-
-	set_cluster_cpus();
-
 	/* Now get the names of all the nodes on the cluster at this
 	   time and send it also.
 	*/
@@ -1154,12 +1183,24 @@ static int _accounting_cluster_ready()
 	bit_nset(total_node_bitmap, 0, node_record_count-1);
 	cluster_nodes = bitmap2node_name_sortable(total_node_bitmap, 0);
 	FREE_NULL_BITMAP(total_node_bitmap);
+
+	assoc_mgr_lock(&locks);
+
+	set_cluster_tres(true);
+
+	cluster_tres_str = slurmdb_make_tres_string(
+		assoc_mgr_tres_list, TRES_STR_FLAG_SIMPLE);
+	assoc_mgr_unlock(&locks);
+
 	unlock_slurmctld(node_read_lock);
 
-	rc = clusteracct_storage_g_cluster_cpus(acct_db_conn,
+	rc = clusteracct_storage_g_cluster_tres(acct_db_conn,
 						cluster_nodes,
-						cluster_cpus, event_time);
+						cluster_tres_str, event_time);
+
 	xfree(cluster_nodes);
+	xfree(cluster_tres_str);
+
 	if (rc == ACCOUNTING_FIRST_REG) {
 		/* see if we are running directly to a database
 		 * instead of a slurmdbd.
@@ -1210,7 +1251,7 @@ static int _accounting_mark_all_nodes_down(char *reason)
 	return rc;
 }
 
-static void _remove_assoc(slurmdb_association_rec_t *rec)
+static void _remove_assoc(slurmdb_assoc_rec_t *rec)
 {
 	int cnt = 0;
 
@@ -1226,6 +1267,25 @@ static void _remove_assoc(slurmdb_association_rec_t *rec)
 static void _remove_qos(slurmdb_qos_rec_t *rec)
 {
 	int cnt = 0;
+	ListIterator itr;
+	struct part_record *part_ptr;
+	slurmctld_lock_t part_write_lock =
+		{ NO_LOCK, NO_LOCK, NO_LOCK, WRITE_LOCK };
+
+	lock_slurmctld(part_write_lock);
+	if (part_list) {
+		itr = list_iterator_create(part_list);
+		while ((part_ptr = list_next(itr))) {
+			if (part_ptr->qos_ptr != rec)
+				continue;
+			info("Partition %s's QOS %s was just removed, "
+			     "you probably didn't mean for this to happen "
+			     "unless you are also removing the partition.",
+			     part_ptr->name, rec->name);
+			part_ptr->qos_ptr = NULL;
+		}
+	}
+	unlock_slurmctld(part_write_lock);
 
 	cnt = job_hold_by_qos_id(rec->id);
 
@@ -1235,7 +1295,7 @@ static void _remove_qos(slurmdb_qos_rec_t *rec)
 		debug("Removed QOS:%s", rec->name);
 }
 
-static void _update_assoc(slurmdb_association_rec_t *rec)
+static void _update_assoc(slurmdb_assoc_rec_t *rec)
 {
 	ListIterator job_iterator;
 	struct job_record *job_ptr;
@@ -1283,6 +1343,172 @@ static void _update_qos(slurmdb_qos_rec_t *rec)
 	unlock_slurmctld(job_write_lock);
 }
 
+static int _init_tres(void)
+{
+	char *temp_char = slurm_get_accounting_storage_tres();
+	List char_list;
+	List add_list = NULL;
+	slurmdb_tres_rec_t *tres_rec;
+	slurmdb_update_object_t update_object;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+
+	if (!temp_char) {
+		error("No tres defined, this should never happen");
+		return SLURM_ERROR;
+	}
+
+	char_list = list_create(slurm_destroy_char);
+	slurm_addto_char_list(char_list, temp_char);
+	xfree(temp_char);
+
+	memset(&update_object, 0, sizeof(slurmdb_update_object_t));
+	if (!association_based_accounting) {
+		update_object.type = SLURMDB_ADD_TRES;
+		update_object.objects = list_create(slurmdb_destroy_tres_rec);
+	} else if (!g_tres_count)
+		fatal("You are running with a database but for some reason "
+		      "we have no TRES from it.  This should only happen if "
+		      "the database is down and you don't have "
+		      "any state files.");
+
+	while ((temp_char = list_pop(char_list))) {
+		tres_rec = xmalloc(sizeof(slurmdb_tres_rec_t));
+
+		tres_rec->type = temp_char;
+
+		if (!strcasecmp(temp_char, "cpu"))
+			tres_rec->id = TRES_CPU;
+		else if (!strcasecmp(temp_char, "mem"))
+			tres_rec->id = TRES_MEM;
+		else if (!strcasecmp(temp_char, "energy"))
+			tres_rec->id = TRES_ENERGY;
+		else if (!strcasecmp(temp_char, "node"))
+			tres_rec->id = TRES_NODE;
+		else if (!strncasecmp(temp_char, "bb/", 3)) {
+			tres_rec->type[2] = '\0';
+			tres_rec->name = xstrdup(temp_char+3);
+			if (!tres_rec->name)
+				fatal("Burst Buffer type tres need to have a "
+				      "name, (i.e. bb/cray).  You gave %s",
+				      temp_char);
+		} else if (!strncasecmp(temp_char, "gres/", 5)) {
+			tres_rec->type[4] = '\0';
+			tres_rec->name = xstrdup(temp_char+5);
+			if (!tres_rec->name)
+				fatal("Gres type tres need to have a name, "
+				      "(i.e. Gres/GPU).  You gave %s",
+				      temp_char);
+		} else if (!strncasecmp(temp_char, "license/", 8)) {
+			tres_rec->type[7] = '\0';
+			tres_rec->name = xstrdup(temp_char+8);
+			if (!tres_rec->name)
+				fatal("License type tres need to "
+				      "have a name, (i.e. License/Foo).  "
+				      "You gave %s",
+				      temp_char);
+		} else {
+			fatal("%s: Unknown tres type '%s', acceptable "
+			      "types are CPU,Gres/,License/,Mem",
+			      __func__, temp_char);
+			xfree(tres_rec->type);
+			xfree(tres_rec);
+		}
+
+		if (!association_based_accounting) {
+			if (!tres_rec->id)
+				fatal("Unless running with a database you "
+				      "can only run with certain TRES, "
+				      "%s%s%s is not one of them.  "
+				      "Either set up "
+				      "a database preferably with a slurmdbd "
+				      "or remove this TRES from your "
+				      "configuration.",
+				      tres_rec->type, tres_rec->name ? "/" : "",
+				      tres_rec->name ? tres_rec->name : "");
+			list_append(update_object.objects, tres_rec);
+		} else if (!tres_rec->id &&
+			   assoc_mgr_fill_in_tres(
+				   acct_db_conn, tres_rec,
+				   ACCOUNTING_ENFORCE_TRES, NULL, 0)
+			   != SLURM_SUCCESS) {
+			if (!add_list)
+				add_list = list_create(
+					slurmdb_destroy_tres_rec);
+			info("Couldn't find tres %s%s%s in the database, "
+			     "creating.",
+			     tres_rec->type, tres_rec->name ? "/" : "",
+			     tres_rec->name ? tres_rec->name : "");
+			list_append(add_list, tres_rec);
+		} else
+			slurmdb_destroy_tres_rec(tres_rec);
+	}
+	FREE_NULL_LIST(char_list);
+
+	if (add_list) {
+		if (acct_storage_g_add_tres(acct_db_conn, getuid(), add_list)
+		    != SLURM_SUCCESS)
+			fatal("Problem adding tres to the database, "
+			      "can't continue until database is able to "
+			      "make new tres");
+		/* refresh list here since the updates are not
+		   sent dynamically */
+		assoc_mgr_refresh_lists(acct_db_conn, ASSOC_MGR_CACHE_TRES);
+		FREE_NULL_LIST(add_list);
+	}
+
+	if (!association_based_accounting) {
+		assoc_mgr_update_tres(&update_object, false);
+		list_destroy(update_object.objects);
+	}
+
+	/* Set up slurmctld_tres_cnt here (the current code assumes
+	 * this count never changes after initialization).
+	 */
+	assoc_mgr_lock(&locks);
+	slurmctld_tres_cnt = g_tres_count;
+	assoc_mgr_unlock(&locks);
+
+	return SLURM_SUCCESS;
+}
+
+/* Any association manager locks should be unlocked beforehand. */
+static void _update_cluster_tres(void)
+{
+	ListIterator job_iterator;
+	struct job_record *job_ptr;
+	/* Write lock on jobs */
+	slurmctld_lock_t job_write_lock =
+		{ NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+
+	if (!job_list)
+		return;
+
+	lock_slurmctld(job_write_lock);
+	assoc_mgr_lock(&locks);
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = list_next(job_iterator))) {
+		/* If this returns 1 it means the positions were
+		   altered so just rebuild it.
+		*/
+		if (assoc_mgr_set_tres_cnt_array(&job_ptr->tres_req_cnt,
+						 job_ptr->tres_req_str,
+						 0, true))
+			job_set_req_tres(job_ptr, true);
+		if (assoc_mgr_set_tres_cnt_array(&job_ptr->tres_alloc_cnt,
+						 job_ptr->tres_alloc_str,
+						 0, true))
+			job_set_alloc_tres(job_ptr, true);
+	}
+	list_iterator_destroy(job_iterator);
+
+	assoc_mgr_unlock(&locks);
+	unlock_slurmctld(job_write_lock);
+}
+
+
 static void _queue_reboot_msg(void)
 {
 	agent_arg_t *reboot_agent_args = NULL;
@@ -1306,8 +1532,14 @@ static void _queue_reboot_msg(void)
 			want_nodes_reboot = true;
 			continue;
 		}
-		if (IS_NODE_IDLE(node_ptr) && !IS_NODE_NO_RESPOND(node_ptr) &&
-		    !IS_NODE_POWER_UP(node_ptr)) /* only active idle nodes */
+                /* only active idle nodes, don't reboot
+                 * nodes that are idle but have suspended
+                 * jobs on them
+                 */
+		if (IS_NODE_IDLE(node_ptr)
+                    && !IS_NODE_NO_RESPOND(node_ptr)
+                    && !IS_NODE_POWER_UP(node_ptr)
+                    && node_ptr->sus_job_cnt == 0)
 			want_reboot = true;
 		else if (IS_NODE_FUTURE(node_ptr) &&
 			 (node_ptr->last_response == (time_t) 0))
@@ -1394,6 +1626,9 @@ static void *_slurmctld_background(void *no_data)
 	/* Locks: Read config, write job, write node, read partition */
 	slurmctld_lock_t job_write_lock = {
 		READ_LOCK, WRITE_LOCK, WRITE_LOCK, READ_LOCK };
+	/* Locks: Write job */
+	slurmctld_lock_t job_write_lock2 = {
+		NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
 	/* Locks: Read config, write job, write node
 	 * (Might kill jobs on nodes set DOWN) */
 	slurmctld_lock_t node_write_lock = {
@@ -1459,15 +1694,24 @@ static void *_slurmctld_background(void *no_data)
 			no_resp_msg_interval = 1;
 
 		if (slurmctld_config.shutdown_time) {
+			struct timespec ts = {0, 0};
+			struct timeval now;
 			/* wait for RPC's to complete */
-			for (i = 1; i < (CONTROL_TIMEOUT * 10); i++) {
-				if (slurmctld_config.server_thread_count == 0)
-					break;
-				usleep(100000);
+			gettimeofday(&now, NULL);
+			ts.tv_sec = now.tv_sec + CONTROL_TIMEOUT;
+			ts.tv_nsec = now.tv_usec * 1000;
+			slurm_mutex_lock(&slurmctld_config.thread_count_lock);
+			while (slurmctld_config.server_thread_count > 0) {
+				pthread_cond_timedwait(&server_thread_cond,
+					&slurmctld_config.thread_count_lock,
+					&ts);
 			}
-			if (slurmctld_config.server_thread_count)
+			if (slurmctld_config.server_thread_count) {
 				info("shutdown server_thread_count=%d",
 					slurmctld_config.server_thread_count);
+			}
+			slurm_mutex_unlock(&slurmctld_config.thread_count_lock);
+
 			if (_report_locks_set() == 0) {
 				info("Saving all slurm state");
 				save_all_state();
@@ -1620,6 +1864,9 @@ static void *_slurmctld_background(void *no_data)
 		if (job_limit != NO_VAL) {
 			now = time(NULL);
 			last_sched_time = now;
+			lock_slurmctld(job_write_lock2);
+			bb_g_load_state(false);	/* May alter job nice/prio */
+			unlock_slurmctld(job_write_lock2);
 			if (schedule(job_limit))
 				last_checkpoint_time = 0; /* force state save */
 			set_job_elig_time();
@@ -1740,11 +1987,13 @@ extern void ctld_assoc_mgr_init(slurm_trigger_callbacks_t *callbacks)
 	assoc_init_arg.update_assoc_notify = _update_assoc;
 	assoc_init_arg.update_license_notify = license_update_remote;
 	assoc_init_arg.update_qos_notify = _update_qos;
+	assoc_init_arg.update_cluster_tres = _update_cluster_tres;
 	assoc_init_arg.update_resvs = update_assocs_in_resvs;
 	assoc_init_arg.cache_level = ASSOC_MGR_CACHE_ASSOC |
 				     ASSOC_MGR_CACHE_USER  |
 				     ASSOC_MGR_CACHE_QOS   |
-				     ASSOC_MGR_CACHE_RES;
+				     ASSOC_MGR_CACHE_RES   |
+                         	     ASSOC_MGR_CACHE_TRES;
 	if (slurmctld_conf.track_wckey)
 		assoc_init_arg.cache_level |= ASSOC_MGR_CACHE_WCKEY;
 
@@ -1788,6 +2037,8 @@ extern void ctld_assoc_mgr_init(slurm_trigger_callbacks_t *callbacks)
 		num_jobs = list_count(job_list);
 	unlock_slurmctld(job_read_lock);
 
+	_init_tres();
+
 	/* This thread is looking for when we get correct data from
 	   the database so we can update the assoc_ptr's in the jobs
 	*/
@@ -1818,33 +2069,130 @@ extern void send_all_to_accounting(time_t event_time)
 	send_resvs_to_accounting();
 }
 
+static int _add_node_gres_tres(void *x, void *arg)
+{
+	uint64_t gres_cnt;
+	int tres_pos;
+	slurmdb_tres_rec_t *tres_rec_in = (slurmdb_tres_rec_t *)x;
+	struct node_record *node_ptr = (struct node_record *)arg;
+
+	xassert(tres_rec_in);
+
+	if (xstrcmp(tres_rec_in->type, "gres"))
+		return 0;
+
+	gres_cnt = gres_plugin_node_config_cnt(node_ptr->gres_list,
+					       tres_rec_in->name);
+	if ((tres_pos = assoc_mgr_find_tres_pos(tres_rec_in, true)) != -1)
+		node_ptr->tres_cnt[tres_pos] = gres_cnt;
+
+	return 0;
+}
+
 /* A slurmctld lock needs to at least have a node read lock set before
  * this is called */
-extern void set_cluster_cpus(void)
+extern void set_cluster_tres(bool assoc_mgr_locked)
 {
-	uint32_t cpus = 0;
 	struct node_record *node_ptr;
+	slurmdb_tres_rec_t *tres_rec, *cpu_tres = NULL, *mem_tres = NULL;
 	int i;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   WRITE_LOCK, NO_LOCK, NO_LOCK };
+
+	if (!assoc_mgr_locked)
+		assoc_mgr_lock(&locks);
+
+	xassert(assoc_mgr_tres_array);
+
+	for (i=0; i < g_tres_count; i++) {
+		tres_rec = assoc_mgr_tres_array[i];
+
+		if (!tres_rec->type) {
+			error("TRES %d doesn't have a type given, "
+			      "this should never happen",
+			      tres_rec->id);
+			continue; /* this should never happen */
+		}
+		/* reset them now since we are about to add to them */
+		tres_rec->count = 0;
+		if (tres_rec->id == TRES_CPU) {
+			cpu_tres = tres_rec;
+			continue;
+		} else if (tres_rec->id == TRES_MEM) {
+			mem_tres = tres_rec;
+			continue;
+		} else if (!strcmp(tres_rec->type, "bb")) {
+			tres_rec->count = bb_g_get_system_size(tres_rec->name);
+			continue;
+		} else if (!strcmp(tres_rec->type, "gres")) {
+			tres_rec->count = gres_get_system_cnt(tres_rec->name);
+			continue;
+		} else if (!strcmp(tres_rec->type, "license")) {
+			tres_rec->count = get_total_license_cnt(
+				tres_rec->name);
+			continue;
+		}
+		/* FIXME: set up the other tres here that aren't specific */
+	}
+
+	cluster_cpus = 0;
 
 	node_ptr = node_record_table_ptr;
 	for (i = 0; i < node_record_count; i++, node_ptr++) {
+		uint64_t cpu_count = 0, mem_count = 0;
 		if (node_ptr->name == '\0')
 			continue;
+
 #ifdef SLURM_NODE_ACCT_REGISTER
-		if (slurmctld_conf.fast_schedule)
-			cpus += node_ptr->config_ptr->cpus;
-		else
-			cpus += node_ptr->cpus;
+		if (slurmctld_conf.fast_schedule) {
+			cpu_count += node_ptr->config_ptr->cpus;
+			mem_count += node_ptr->config_ptr->real_memory;
+		} else {
+			cpu_count += node_ptr->cpus;
+			mem_count += node_ptr->real_memory;
+		}
 #else
-		cpus += node_ptr->config_ptr->cpus;
+		cpu_count += node_ptr->config_ptr->cpus;
+		mem_count += node_ptr->config_ptr->real_memory;
+
 #endif
-	}
+		cluster_cpus += cpu_count;
+		if (mem_tres)
+			mem_tres->count += mem_count;
+
+		if (!node_ptr->tres_cnt)
+			node_ptr->tres_cnt = xmalloc(sizeof(uint64_t) *
+						     slurmctld_tres_cnt);
+		node_ptr->tres_cnt[TRES_ARRAY_CPU] = cpu_count;
+		node_ptr->tres_cnt[TRES_ARRAY_MEM] = mem_count;
+
+		list_for_each(assoc_mgr_tres_list,
+			      _add_node_gres_tres, node_ptr);
+
+		xfree(node_ptr->tres_str);
+		node_ptr->tres_str =
+			assoc_mgr_make_tres_str_from_array(node_ptr->tres_cnt,
+							   TRES_STR_FLAG_SIMPLE,
+							   true);
+		xfree(node_ptr->tres_fmt_str);
+		node_ptr->tres_fmt_str =
+			assoc_mgr_make_tres_str_from_array(node_ptr->tres_cnt,
+							   0,
+							   true);
+	}
+
+	/* FIXME: cluster_cpus probably needs to be removed and handled
+	 * differently in the spots this is used.
+	 */
+	if (cpu_tres)
+		cpu_tres->count = cluster_cpus;
 
-	/* Since cluster_cpus is used else where we need to keep a
-	   local var here to avoid race conditions on cluster_cpus
-	   not being correct.
-	*/
-	cluster_cpus = cpus;
+	assoc_mgr_tres_array[TRES_ARRAY_NODE]->count = node_record_count;
+
+	set_partition_tres();
+
+	if (!assoc_mgr_locked)
+		assoc_mgr_unlock(&locks);
 }
 
 /*
@@ -2245,7 +2593,7 @@ static void *_assoc_cache_mgr(void *no_data)
 	struct job_record *job_ptr = NULL;
 	struct part_record *part_ptr = NULL;
 	slurmdb_qos_rec_t qos_rec;
-	slurmdb_association_rec_t assoc_rec;
+	slurmdb_assoc_rec_t assoc_rec;
 	/* Write lock on jobs, read lock on nodes and partitions */
 	slurmctld_lock_t job_write_lock =
 		{ NO_LOCK, WRITE_LOCK, READ_LOCK, WRITE_LOCK };
@@ -2264,7 +2612,7 @@ static void *_assoc_cache_mgr(void *no_data)
 			return NULL;
 		}
 		lock_slurmctld(job_write_lock);
-		assoc_mgr_refresh_lists(acct_db_conn);
+		assoc_mgr_refresh_lists(acct_db_conn, 0);
 		if (running_cache)
 			unlock_slurmctld(job_write_lock);
 		slurm_mutex_unlock(&assoc_cache_mutex);
@@ -2286,7 +2634,7 @@ static void *_assoc_cache_mgr(void *no_data)
 	while ((job_ptr = list_next(itr))) {
 		if (job_ptr->assoc_id) {
 			memset(&assoc_rec, 0,
-			       sizeof(slurmdb_association_rec_t));
+			       sizeof(slurmdb_assoc_rec_t));
 			assoc_rec.id = job_ptr->assoc_id;
 
 			debug("assoc is %zx (%d) for job %u",
@@ -2296,7 +2644,7 @@ static void *_assoc_cache_mgr(void *no_data)
 			if (assoc_mgr_fill_in_assoc(
 				    acct_db_conn, &assoc_rec,
 				    accounting_enforce,
-				    (slurmdb_association_rec_t **)
+				    (slurmdb_assoc_rec_t **)
 				    &job_ptr->assoc_ptr, false)) {
 				verbose("Invalid association id %u "
 					"for job id %u",
@@ -2345,10 +2693,30 @@ handle_parts:
 		if (part_ptr->deny_qos)
 			qos_list_build(part_ptr->deny_qos,
 				       &part_ptr->deny_qos_bitstr);
+
+		if (part_ptr->qos_char) {
+			slurmdb_qos_rec_t qos_rec;
+
+			memset(&qos_rec, 0, sizeof(slurmdb_qos_rec_t));
+			qos_rec.name = part_ptr->qos_char;
+			part_ptr->qos_ptr = NULL;
+			if (assoc_mgr_fill_in_qos(
+				    acct_db_conn, &qos_rec, accounting_enforce,
+				    (slurmdb_qos_rec_t **)&part_ptr->qos_ptr, 0)
+			    != SLURM_SUCCESS) {
+				fatal("Partition %s has an invalid qos (%s), "
+				      "please check your configuration",
+				      part_ptr->name, qos_rec.name);
+			}
+		}
 	}
 	list_iterator_destroy(itr);
 
 end_it:
+	/* issuing a reconfig will reset the pointers on the burst
+	   buffers */
+	bb_g_reconfig();
+
 	unlock_slurmctld(job_write_lock);
 	/* This needs to be after the lock and after we update the
 	   jobs so if we need to send them we are set. */
diff --git a/src/slurmctld/front_end.c b/src/slurmctld/front_end.c
index 050293f5c..2fe61400c 100644
--- a/src/slurmctld/front_end.c
+++ b/src/slurmctld/front_end.c
@@ -56,9 +56,8 @@
 #include "src/slurmctld/state_save.h"
 #include "src/slurmctld/trigger_mgr.h"
 
-/* Change FRONT_END_STATE_VERSION value when changing the state save format */
+/* No need to change we always pack SLURM_PROTOCOL_VERSION */
 #define FRONT_END_STATE_VERSION        "PROTOCOL_VERSION"
-#define FRONT_END_2_6_STATE_VERSION    "VER001"	/* SLURM version 2.6 */
 
 front_end_record_t *front_end_nodes = NULL;
 uint16_t front_end_node_cnt = 0;
@@ -157,20 +156,6 @@ static void _pack_front_end(struct front_end_record *dump_front_end_ptr,
 		pack_time(dump_front_end_ptr->reason_time, buffer);
 		pack32(dump_front_end_ptr->reason_uid, buffer);
 
-		pack_time(dump_front_end_ptr->slurmd_start_time, buffer);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		packstr(dump_front_end_ptr->allow_groups, buffer);
-		packstr(dump_front_end_ptr->allow_users, buffer);
-		pack_time(dump_front_end_ptr->boot_time, buffer);
-		packstr(dump_front_end_ptr->deny_groups, buffer);
-		packstr(dump_front_end_ptr->deny_users, buffer);
-		packstr(dump_front_end_ptr->name, buffer);
-		pack16(dump_front_end_ptr->node_state, buffer);
-
-		packstr(dump_front_end_ptr->reason, buffer);
-		pack_time(dump_front_end_ptr->reason_time, buffer);
-		pack32(dump_front_end_ptr->reason_uid, buffer);
-
 		pack_time(dump_front_end_ptr->slurmd_start_time, buffer);
 	} else {
 		error("_pack_front_end: Unsupported slurm version %u",
@@ -234,24 +219,41 @@ extern front_end_record_t *assign_front_end(struct job_record *job_ptr)
 	uint32_t state_flags;
 	int i;
 
-	for (i = 0, front_end_ptr = front_end_nodes; i < front_end_node_cnt;
-	     i++, front_end_ptr++) {
-		if (job_ptr->batch_host) {   /* Find specific front-end node */
-			if (strcmp(job_ptr->batch_host, front_end_ptr->name))
-				continue;
-			if (!_front_end_access(front_end_ptr, job_ptr))
-				break;
-		} else {		/* Find some usable front-end node */
-			if (IS_NODE_DOWN(front_end_ptr) ||
-			    IS_NODE_DRAIN(front_end_ptr) ||
-			    IS_NODE_NO_RESPOND(front_end_ptr))
-				continue;
-			if (!_front_end_access(front_end_ptr, job_ptr))
-				continue;
-		}
-		if ((best_front_end == NULL) ||
-		    (front_end_ptr->job_cnt_run < best_front_end->job_cnt_run))
+	if (!job_ptr->batch_host && (job_ptr->batch_flag == 0) &&
+	    (front_end_ptr = find_front_end_record(job_ptr->alloc_node))) {
+		/* Use submit host for interactive job */
+		if (!IS_NODE_DOWN(front_end_ptr)  &&
+		    !IS_NODE_DRAIN(front_end_ptr) &&
+		    !IS_NODE_NO_RESPOND(front_end_ptr) &&
+		    _front_end_access(front_end_ptr, job_ptr)) {
 			best_front_end = front_end_ptr;
+		} else {
+			info("%s: front-end node %s not available for job %u",
+			     __func__, job_ptr->alloc_node, job_ptr->job_id);
+			return NULL;
+		}
+	} else {
+		for (i = 0, front_end_ptr = front_end_nodes;
+		     i < front_end_node_cnt; i++, front_end_ptr++) {
+			if (job_ptr->batch_host) { /* Find specific front-end */
+				if (strcmp(job_ptr->batch_host,
+					   front_end_ptr->name))
+					continue;
+				if (!_front_end_access(front_end_ptr, job_ptr))
+					break;
+			} else {	      /* Find a usable front-end node */
+				if (IS_NODE_DOWN(front_end_ptr) ||
+				    IS_NODE_DRAIN(front_end_ptr) ||
+				    IS_NODE_NO_RESPOND(front_end_ptr))
+					continue;
+				if (!_front_end_access(front_end_ptr, job_ptr))
+					continue;
+			}
+			if ((best_front_end == NULL) ||
+			    (front_end_ptr->job_cnt_run <
+			     best_front_end->job_cnt_run))
+				best_front_end = front_end_ptr;
+		}
 	}
 
 	if (best_front_end) {
@@ -830,12 +832,8 @@ extern int load_all_front_end_state(bool state_only)
 
 	safe_unpackstr_xmalloc( &ver_str, &name_len, buffer);
 	debug3("Version string in front_end_state header is %s", ver_str);
-	if (ver_str) {
-		if (!strcmp(ver_str, FRONT_END_STATE_VERSION)) {
-			safe_unpack16(&protocol_version, buffer);
-		} else
-			protocol_version = SLURM_2_6_PROTOCOL_VERSION;
-	}
+	if (ver_str && !strcmp(ver_str, FRONT_END_STATE_VERSION))
+		safe_unpack16(&protocol_version, buffer);
 
 	if (protocol_version == (uint16_t) NO_VAL) {
 		error("*****************************************************");
@@ -871,14 +869,6 @@ extern int load_all_front_end_state(bool state_only)
 			safe_unpack16 (&obj_protocol_version, buffer);
 			node_state = tmp_state;
 			base_state = node_state & NODE_STATE_BASE;
-		} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-			safe_unpackstr_xmalloc (&node_name, &name_len, buffer);
-			safe_unpack16 (&tmp_state,  buffer);
-			safe_unpackstr_xmalloc (&reason,    &name_len, buffer);
-			safe_unpack_time (&reason_time, buffer);
-			safe_unpack32 (&reason_uid,  buffer);
-			node_state = tmp_state;
-			base_state = node_state & NODE_STATE_BASE;
 		} else
 			goto unpack_error;
 
diff --git a/src/slurmctld/gang.c b/src/slurmctld/gang.c
index 4a9c50c34..2d3fd5602 100644
--- a/src/slurmctld/gang.c
+++ b/src/slurmctld/gang.c
@@ -225,6 +225,24 @@ static uint16_t _get_gr_type(void)
 	return GS_NODE;
 }
 
+static uint16_t _get_part_gr_type(struct part_record *part_ptr)
+{
+	if (part_ptr) {
+		if (part_ptr->cr_type & CR_CORE)
+			return GS_CORE;
+		if (part_ptr->cr_type & CR_CPU) {
+			if (!strcmp(slurmctld_conf.task_plugin, "task/none"))
+				return GS_CPU;
+			return GS_CPU2;
+		}
+		if (part_ptr->cr_type & CR_SOCKET)
+			return GS_SOCKET;
+	}
+
+	/* Use global configuration */
+	return gr_type;
+}
+
 /* For GS_CPU and GS_CPU2 gs_bits_per_node is the total number of CPUs per node.
  * For GS_CORE and GS_SOCKET gs_bits_per_node is the total number of
  *	cores per per node.
@@ -320,10 +338,7 @@ static void _build_parts(void)
 	struct gs_part *gs_part_ptr;
 	int num_parts;
 
-	if (gs_part_list) {
-		list_destroy(gs_part_list);
-		gs_part_list = NULL;
-	}
+	FREE_NULL_LIST(gs_part_list);
 
 	/* reset the sorted list, since it's currently
 	 * pointing to partitions we just destroyed */
@@ -401,17 +416,19 @@ static int _job_fits_in_active_row(struct job_record *job_ptr,
 	job_resources_t *job_res = job_ptr->job_resrcs;
 	int count;
 	bitstr_t *job_map;
+	uint16_t job_gr_type;
 
 	if ((p_ptr->active_resmap == NULL) || (p_ptr->jobs_active == 0))
 		return 1;
 
-	if ((gr_type == GS_CPU2) || (gr_type == GS_CORE) ||
-	    (gr_type == GS_SOCKET)) {
+	job_gr_type = _get_part_gr_type(job_ptr->part_ptr);
+	if ((job_gr_type == GS_CPU2) || (job_gr_type == GS_CORE) ||
+	    (job_gr_type == GS_SOCKET)) {
 		return job_fits_into_cores(job_res, p_ptr->active_resmap,
 					   gs_bits_per_node);
 	}
 
-	/* gr_type == GS_NODE || gr_type == GS_CPU */
+	/* job_gr_type == GS_NODE || job_gr_type == GS_CPU */
 	job_map = bit_copy(job_res->node_bitmap);
 	bit_and(job_map, p_ptr->active_resmap);
 	/* any set bits indicate contention for the same resource */
@@ -421,7 +438,7 @@ static int _job_fits_in_active_row(struct job_record *job_ptr,
 	FREE_NULL_BITMAP(job_map);
 	if (count == 0)
 		return 1;
-	if (gr_type == GS_CPU) {
+	if (job_gr_type == GS_CPU) {
 		/* For GS_CPU we check the CPU arrays */
 		return _can_cpus_fit(job_ptr, p_ptr);
 	}
@@ -478,17 +495,19 @@ static void _add_job_to_active(struct job_record *job_ptr,
 			       struct gs_part *p_ptr)
 {
 	job_resources_t *job_res = job_ptr->job_resrcs;
+	uint16_t job_gr_type;
 
 	/* add job to active_resmap */
-	if ((gr_type == GS_CPU2) || (gr_type == GS_CORE) ||
-	    (gr_type == GS_SOCKET)) {
+	job_gr_type = _get_part_gr_type(job_ptr->part_ptr);
+	if ((job_gr_type == GS_CPU2) || (job_gr_type == GS_CORE) ||
+	    (job_gr_type == GS_SOCKET)) {
 		if (p_ptr->jobs_active == 0 && p_ptr->active_resmap) {
 			uint32_t size = bit_size(p_ptr->active_resmap);
 			bit_nclear(p_ptr->active_resmap, 0, size-1);
 		}
 		add_job_to_cores(job_res, &(p_ptr->active_resmap),
 				 gs_bits_per_node);
-		if (gr_type == GS_SOCKET)
+		if (job_gr_type == GS_SOCKET)
 			_fill_sockets(job_res->node_bitmap, p_ptr);
 	} else { /* GS_NODE or GS_CPU */
 		if (!p_ptr->active_resmap) {
@@ -514,7 +533,7 @@ static void _add_job_to_active(struct job_record *job_ptr,
 	}
 
 	/* add job to the active_cpus array */
-	if (gr_type == GS_CPU) {
+	if (job_gr_type == GS_CPU) {
 		uint32_t i, a, sz = bit_size(p_ptr->active_resmap);
 		if (!p_ptr->active_cpus) {
 			/* create active_cpus array */
@@ -661,16 +680,21 @@ static void _preempt_job_dequeue(void)
 				error("preempted job %u could not be "
 				      "requeued: %s",
 				      job_ptr->job_id, slurm_strerror(rc));
+		} else if (preempt_mode == PREEMPT_MODE_OFF) {
+			error("Invalid preempt_mode %u for job %u",
+			      preempt_mode, job_ptr->job_id);
+			continue;
 		}
 
 		if (rc != SLURM_SUCCESS) {
 			rc = job_signal(job_ptr->job_id, SIGKILL, 0, 0, true);
 			if (rc == SLURM_SUCCESS)
-				info("preempted job %u had to be killed",
-				     job_ptr->job_id);
+				info("%s: preempted job %u had to be killed",
+				     __func__,job_ptr->job_id);
 			else {
-				info("preempted job %u kill failure %s",
-				     job_ptr->job_id, slurm_strerror(rc));
+				info("%s: preempted job %u kill failure %s",
+				     __func__, job_ptr->job_id,
+				     slurm_strerror(rc));
 			}
 		}
 	}
@@ -777,6 +801,7 @@ static void _update_active_row(struct gs_part *p_ptr, int add_new_jobs)
 {
 	int i;
 	struct gs_job *j_ptr;
+	uint16_t preempt_mode;
 
 	if (slurmctld_conf.debug_flags & DEBUG_FLAG_GANG) {
 		info("gang: update_active_row: rebuilding part %s...",
@@ -801,9 +826,11 @@ static void _update_active_row(struct gs_part *p_ptr, int add_new_jobs)
 			/* this job has been preempted by a shadow job.
 			 * suspend it and preserve it's job_list order */
 			if (j_ptr->sig_state != GS_SUSPEND) {
+				preempt_mode =
+					slurm_job_preempt_mode(j_ptr->job_ptr);
 				if (p_ptr->num_shadows &&
-				    (slurm_job_preempt_mode(j_ptr->job_ptr) !=
-				     PREEMPT_MODE_SUSPEND)) {
+				    (preempt_mode != PREEMPT_MODE_OFF) &&
+				    (preempt_mode != PREEMPT_MODE_SUSPEND)) {
 					_preempt_job_queue(j_ptr->job_id);
 				} else
 					_suspend_job(j_ptr->job_id);
@@ -825,9 +852,11 @@ static void _update_active_row(struct gs_part *p_ptr, int add_new_jobs)
 			/* this job has been preempted by a shadow job.
 			 * suspend it and preserve it's job_list order */
 			if (j_ptr->sig_state != GS_SUSPEND) {
+				preempt_mode =
+					slurm_job_preempt_mode(j_ptr->job_ptr);
 				if (p_ptr->num_shadows &&
-				    (slurm_job_preempt_mode(j_ptr->job_ptr) !=
-				     PREEMPT_MODE_SUSPEND)) {
+				    (preempt_mode != PREEMPT_MODE_OFF) &&
+				    (preempt_mode != PREEMPT_MODE_SUSPEND)) {
 					_preempt_job_queue(j_ptr->job_id);
 				} else
 					_suspend_job(j_ptr->job_id);
@@ -943,6 +972,7 @@ static uint16_t _add_job_to_part(struct gs_part *p_ptr,
 {
 	int i;
 	struct gs_job *j_ptr;
+	uint16_t preempt_mode;
 
 	xassert(p_ptr);
 	xassert(job_ptr->job_id > 0);
@@ -1017,9 +1047,10 @@ static uint16_t _add_job_to_part(struct gs_part *p_ptr,
 			info("gang: _add_job_to_part: suspending job %u",
 			     job_ptr->job_id);
 		}
+		preempt_mode = slurm_job_preempt_mode(job_ptr);
 		if (p_ptr->num_shadows &&
-		    (slurm_job_preempt_mode(job_ptr) !=
-		     PREEMPT_MODE_SUSPEND)) {
+		    (preempt_mode != PREEMPT_MODE_OFF) &&
+		    (preempt_mode != PREEMPT_MODE_SUSPEND)) {
 			_preempt_job_queue(job_ptr->job_id);
 		} else
 			_suspend_job(job_ptr->job_id);
@@ -1201,10 +1232,10 @@ extern int gs_fini(void)
 	}
 	pthread_mutex_unlock(&thread_flag_mutex);
 
-	list_destroy(preempt_job_list);
+	FREE_NULL_LIST(preempt_job_list);
 
 	pthread_mutex_lock(&data_mutex);
-	list_destroy(gs_part_list);
+	FREE_NULL_LIST(gs_part_list);
 	gs_part_list = NULL;
 	xfree(gs_bits_per_node);
 	pthread_mutex_unlock(&data_mutex);
@@ -1218,7 +1249,7 @@ extern int gs_fini(void)
 extern int gs_job_start(struct job_record *job_ptr)
 {
 	struct gs_part *p_ptr;
-	uint16_t job_state;
+	uint16_t job_sig_state;
 	char *part_name;
 
 	if (slurmctld_conf.debug_flags & DEBUG_FLAG_GANG)
@@ -1231,9 +1262,9 @@ extern int gs_job_start(struct job_record *job_ptr)
 	pthread_mutex_lock(&data_mutex);
 	p_ptr = list_find_first(gs_part_list, _find_gs_part, part_name);
 	if (p_ptr) {
-		job_state = _add_job_to_part(p_ptr, job_ptr);
+		job_sig_state = _add_job_to_part(p_ptr, job_ptr);
 		/* if this job is running then check for preemption */
-		if (job_state == GS_RESUME)
+		if (job_sig_state == GS_RESUME)
 			_update_all_active_rows();
 	}
 	pthread_mutex_unlock(&data_mutex);
@@ -1435,7 +1466,7 @@ extern int gs_reconfig(void)
 	 * are tracking all jobs */
 	_scan_slurm_job_list();
 
-	list_destroy(old_part_list);
+	FREE_NULL_LIST(old_part_list);
 	pthread_mutex_unlock(&data_mutex);
 
 	_preempt_job_dequeue();	/* MUST BE OUTSIDE OF data_mutex lock */
@@ -1499,6 +1530,7 @@ static void _cycle_job_list(struct gs_part *p_ptr)
 {
 	int i, j;
 	struct gs_job *j_ptr;
+	uint16_t preempt_mode;
 
 	if (slurmctld_conf.debug_flags & DEBUG_FLAG_GANG)
 		info("gang: entering _cycle_job_list");
@@ -1534,9 +1566,10 @@ static void _cycle_job_list(struct gs_part *p_ptr)
 		    		info("gang: _cycle_job_list: suspending job %u",
 				     j_ptr->job_id);
 			}
+			preempt_mode = slurm_job_preempt_mode(j_ptr->job_ptr);
 			if (p_ptr->num_shadows &&
-			    (slurm_job_preempt_mode(j_ptr->job_ptr) !=
-			     PREEMPT_MODE_SUSPEND)) {
+			    (preempt_mode != PREEMPT_MODE_OFF) &&
+			    (preempt_mode != PREEMPT_MODE_SUSPEND)) {
 				_preempt_job_queue(j_ptr->job_id);
 			} else
 				_suspend_job(j_ptr->job_id);
diff --git a/src/slurmctld/groups.c b/src/slurmctld/groups.c
index ad3a237d9..dffb434a2 100644
--- a/src/slurmctld/groups.c
+++ b/src/slurmctld/groups.c
@@ -138,6 +138,26 @@ extern uid_t *get_group_members(char *group_name)
 
 	j = 0;
 	uid_cnt = 0;
+
+	/* Get the members from the getgrnam_r() call.
+	 */
+	for (i = 0; grp_result->gr_mem[i]; i++) {
+
+		if (uid_from_string(grp_result->gr_mem[i],
+				    &my_uid) < 0) {
+			continue;
+		}
+		if (my_uid == 0)
+			continue;
+		if (j + 1 >= uid_cnt) {
+			uid_cnt += 100;
+			xrealloc(group_uids,
+				 (sizeof(uid_t) * uid_cnt));
+		}
+
+		group_uids[j++] = my_uid;
+	}
+
 #ifdef HAVE_AIX
 	setgrent_r(&fp);
 	while (1) {
@@ -160,6 +180,9 @@ extern uid_t *get_group_members(char *group_name)
 #else
 	setgrent();
 	while (1) {
+		/* MH-CEA workaround to handle different group entries with
+		 * the same gid
+		 */
 		slurm_seterrno(0);
 		res = getgrent_r(&grp, grp_buffer, buflen, &grp_result);
 		if (res != 0 || grp_result == NULL) {
@@ -242,10 +265,7 @@ extern uid_t *get_group_members(char *group_name)
 extern void clear_group_cache(void)
 {
 	pthread_mutex_lock(&group_cache_mutex);
-	if (group_cache_list) {
-		list_destroy(group_cache_list);
-		group_cache_list = NULL;
-	}
+	FREE_NULL_LIST(group_cache_list);
 	pthread_mutex_unlock(&group_cache_mutex);
 }
 
diff --git a/src/slurmctld/job_mgr.c b/src/slurmctld/job_mgr.c
index 34d256771..1bd7a6382 100644
--- a/src/slurmctld/job_mgr.c
+++ b/src/slurmctld/job_mgr.c
@@ -64,12 +64,14 @@
 #include "src/common/slurm_acct_gather.h"
 #include "src/common/assoc_mgr.h"
 #include "src/common/bitstring.h"
+#include "src/common/cpu_frequency.h"
 #include "src/common/fd.h"
 #include "src/common/forward.h"
 #include "src/common/gres.h"
 #include "src/common/hostlist.h"
 #include "src/common/node_select.h"
 #include "src/common/parse_time.h"
+#include "src/common/power.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/slurm_jobcomp.h"
 #include "src/common/slurm_priority.h"
@@ -81,6 +83,7 @@
 
 #include "src/slurmctld/acct_policy.h"
 #include "src/slurmctld/agent.h"
+#include "src/slurmctld/burst_buffer.h"
 #include "src/slurmctld/front_end.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/job_submit.h"
@@ -99,6 +102,7 @@
 
 #define ARRAY_ID_BUF_SIZE 32
 #define DETAILS_FLAG 0xdddd
+#define MAX_EXIT_VAL 255	/* Maximum value returned by WIFEXITED() */
 #define SLURM_CREATE_JOB_FLAG_NO_ALLOCATE_0 0
 #define STEP_FLAG 0xbbbb
 #define TOP_PRIORITY 0xffff0000	/* large, but leave headroom for higher */
@@ -107,9 +111,8 @@
 #define JOB_ARRAY_HASH_INX(_job_id, _task_id) \
 	((_job_id + _task_id) % hash_table_size)
 
-/* Change JOB_STATE_VERSION value when changing the state save format */
+/* No need to change we always pack SLURM_PROTOCOL_VERSION */
 #define JOB_STATE_VERSION       "PROTOCOL_VERSION"
-#define JOB_2_6_STATE_VERSION   "VER014"	/* SLURM version 2.6 */
 
 #define JOB_CKPT_VERSION      "PROTOCOL_VERSION"
 
@@ -134,17 +137,15 @@ static uint32_t job_id_sequence = 0;	/* first job_id to assign new job */
 static struct   job_record **job_hash = NULL;
 static struct   job_record **job_array_hash_j = NULL;
 static struct   job_record **job_array_hash_t = NULL;
+static bool     kill_invalid_dep;
 static time_t   last_file_write_time = (time_t) 0;
 static uint32_t max_array_size = NO_VAL;
+static bitstr_t *requeue_exit = NULL;
+static bitstr_t *requeue_exit_hold = NULL;
 static int	select_serial = -1;
 static bool     wiki_sched = false;
 static bool     wiki2_sched = false;
 static bool     wiki_sched_test = false;
-static uint32_t num_exit;
-static int32_t  *requeue_exit;
-static uint32_t num_hold;
-static int32_t  *requeue_exit_hold;
-static bool     kill_invalid_dep;
 
 /* Local functions */
 static void _add_job_hash(struct job_record *job_ptr);
@@ -167,19 +168,17 @@ static struct job_record *_create_job_record(int *error_code,
 static void _del_batch_list_rec(void *x);
 static void _delete_job_desc_files(uint32_t job_id);
 static slurmdb_qos_rec_t *_determine_and_validate_qos(
-	char *resv_name, slurmdb_association_rec_t *assoc_ptr,
-	bool admin, slurmdb_qos_rec_t *qos_rec,	int *error_code);
-static void _dump_job_details(struct job_details *detail_ptr,
-			      Buf buffer);
+	char *resv_name, slurmdb_assoc_rec_t *assoc_ptr,
+	bool admin, slurmdb_qos_rec_t *qos_rec,	int *error_code, bool locked);
+static void _dump_job_details(struct job_details *detail_ptr, Buf buffer);
 static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer);
 static int  _find_batch_dir(void *x, void *key);
 static void _get_batch_job_dir_ids(List batch_dirs);
 static time_t _get_last_state_write_time(void);
-static struct job_record *_job_rec_copy(struct job_record *job_ptr);
-static void _job_timed_out(struct job_record *job_ptr);
 static int  _job_create(job_desc_msg_t * job_specs, int allocate, int will_run,
 			struct job_record **job_rec_ptr, uid_t submit_uid,
 			char **err_msg, uint16_t protocol_version);
+static void _job_timed_out(struct job_record *job_ptr);
 static void _kill_dependent(struct job_record *job_ptr);
 static void _list_delete_job(void *job_entry);
 static int  _list_find_job_id(void *job_entry, void *key);
@@ -187,7 +186,7 @@ static int  _list_find_job_old(void *job_entry, void *key);
 static int  _load_job_details(struct job_record *job_ptr, Buf buffer,
 			      uint16_t protocol_version);
 static int  _load_job_state(Buf buffer,	uint16_t protocol_version);
-static int32_t *_make_requeue_array(char *conf_buf, uint32_t *num);
+static bitstr_t *_make_requeue_array(char *conf_buf);
 static uint32_t _max_switch_wait(uint32_t input_wait);
 static void _notify_srun_missing_step(struct job_record *job_ptr, int node_inx,
 				      time_t now, time_t node_boot_time);
@@ -220,7 +219,7 @@ static job_array_resp_msg_t *_resp_array_xlate(resp_array_struct_t *resp,
 					       uint32_t job_id);
 static int  _resume_job_nodes(struct job_record *job_ptr, bool indf_susp);
 static void _send_job_kill(struct job_record *job_ptr);
-static int  _set_job_id(struct job_record *job_ptr);
+static int  _set_job_id(struct job_record *job_ptr, bool global_job);
 static void _set_job_requeue_exit_value(struct job_record *job_ptr);
 static void _signal_batch_job(struct job_record *job_ptr,
 			      uint16_t signal,
@@ -234,15 +233,15 @@ static int  _valid_job_part(job_desc_msg_t * job_desc,
 			    uid_t submit_uid, bitstr_t *req_bitmap,
 			    struct part_record **part_pptr,
 			    List part_ptr_list,
-			    slurmdb_association_rec_t *assoc_ptr,
+			    slurmdb_assoc_rec_t *assoc_ptr,
 			    slurmdb_qos_rec_t *qos_ptr);
 static int  _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate,
-                               uid_t submit_uid, struct part_record *part_ptr,
-                               List part_list);
+			       uid_t submit_uid, struct part_record *part_ptr,
+			       List part_list);
 static void _validate_job_files(List batch_dirs);
 static bool _validate_min_mem_partition(job_desc_msg_t *job_desc_msg,
-                                        struct part_record *,
-                                        List part_list);
+					struct part_record *part_ptr,
+					List part_list);
 static int  _write_data_to_file(char *file_name, char *data);
 static int  _write_data_array_to_file(char *file_name, char **data,
 				      uint32_t size);
@@ -469,6 +468,7 @@ static struct job_record *_create_job_record(int *error_code, uint32_t num_jobs)
 	detail_ptr->submit_time = time(NULL);
 	job_ptr->requid = -1; /* force to -1 for sacct to know this
 			       * hasn't been set yet  */
+	job_ptr->billable_tres = (double)NO_VAL;
 	(void) list_append(job_list, job_ptr);
 
 	return job_ptr;
@@ -498,8 +498,7 @@ void delete_job_details(struct job_record *job_entry)
 	xfree(job_entry->details->argv);
 	xfree(job_entry->details->ckpt_dir);
 	xfree(job_entry->details->cpu_bind);
-	if (job_entry->details->depend_list)
-		list_destroy(job_entry->details->depend_list);
+	FREE_NULL_LIST(job_entry->details->depend_list);
 	xfree(job_entry->details->dependency);
 	xfree(job_entry->details->orig_dependency);
 	for (i=0; i<job_entry->details->env_cnt; i++)
@@ -508,8 +507,7 @@ void delete_job_details(struct job_record *job_entry)
 	xfree(job_entry->details->std_err);
 	FREE_NULL_BITMAP(job_entry->details->exc_node_bitmap);
 	xfree(job_entry->details->exc_nodes);
-	if (job_entry->details->feature_list)
-		list_destroy(job_entry->details->feature_list);
+	FREE_NULL_LIST(job_entry->details->feature_list);
 	xfree(job_entry->details->features);
 	xfree(job_entry->details->std_in);
 	xfree(job_entry->details->mc_ptr);
@@ -526,9 +524,11 @@ void delete_job_details(struct job_record *job_entry)
 /* _delete_job_desc_files - delete job descriptor related files */
 static void _delete_job_desc_files(uint32_t job_id)
 {
-	char *dir_name = NULL, *file_name;
+	char *dir_name = NULL, *file_name = NULL;
 	struct stat sbuf;
 	int hash = job_id % 10, stat_rc;
+	DIR *f_dir;
+	struct dirent *dir_ent;
 
 	dir_name = slurm_get_state_save_location();
 	xstrfmtcat(dir_name, "/hash.%d/job.%u", hash, job_id);
@@ -545,15 +545,18 @@ static void _delete_job_desc_files(uint32_t job_id)
 		}
 	}
 
-	file_name = xstrdup(dir_name);
-	xstrcat(file_name, "/environment");
-	(void) unlink(file_name);
-	xfree(file_name);
-
-	file_name = xstrdup(dir_name);
-	xstrcat(file_name, "/script");
-	(void) unlink(file_name);
-	xfree(file_name);
+	f_dir = opendir(dir_name);
+	if (f_dir) {
+		while ((dir_ent = readdir(f_dir))) {
+			xstrfmtcat(file_name, "%s/%s", dir_name,
+				   dir_ent->d_name);
+			(void) unlink(file_name);
+			xfree(file_name);
+		}
+		closedir(f_dir);
+	} else {
+		error("opendir(%s): %m", dir_name);
+	}
 
 	(void) rmdir(dir_name);
 	xfree(dir_name);
@@ -588,8 +591,9 @@ static uint32_t _max_switch_wait(uint32_t input_wait)
 }
 
 static slurmdb_qos_rec_t *_determine_and_validate_qos(
-	char *resv_name, slurmdb_association_rec_t *assoc_ptr,
-	bool admin, slurmdb_qos_rec_t *qos_rec, int *error_code)
+	char *resv_name, slurmdb_assoc_rec_t *assoc_ptr,
+	bool admin, slurmdb_qos_rec_t *qos_rec, int *error_code,
+	bool locked)
 {
 	slurmdb_qos_rec_t *qos_ptr = NULL;
 
@@ -599,28 +603,9 @@ static slurmdb_qos_rec_t *_determine_and_validate_qos(
 
 	xassert(qos_rec);
 
-	if (!qos_rec->name && !qos_rec->id) {
-		if (assoc_ptr && assoc_ptr->usage->valid_qos) {
-			if (assoc_ptr->def_qos_id)
-				qos_rec->id = assoc_ptr->def_qos_id;
-			else if (bit_set_count(assoc_ptr->usage->valid_qos)
-				 == 1)
-				qos_rec->id =
-					bit_ffs(assoc_ptr->usage->valid_qos);
-			else if (assoc_mgr_root_assoc
-				 && assoc_mgr_root_assoc->def_qos_id)
-				qos_rec->id = assoc_mgr_root_assoc->def_qos_id;
-			else
-				qos_rec->name = "normal";
-		} else if (assoc_mgr_root_assoc
-			   && assoc_mgr_root_assoc->def_qos_id)
-			qos_rec->id = assoc_mgr_root_assoc->def_qos_id;
-		else
-			qos_rec->name = "normal";
-	}
-
+	assoc_mgr_get_default_qos_info(assoc_ptr, qos_rec);
 	if (assoc_mgr_fill_in_qos(acct_db_conn, qos_rec, accounting_enforce,
-				  &qos_ptr, 0) != SLURM_SUCCESS) {
+				  &qos_ptr, locked) != SLURM_SUCCESS) {
 		error("Invalid qos (%s)", qos_rec->name);
 		*error_code = ESLURM_INVALID_QOS;
 		return NULL;
@@ -670,7 +655,7 @@ int dump_all_job_state(void)
 	ListIterator job_iterator;
 	struct job_record *job_ptr;
 	Buf buffer = init_buf(high_buffer_size);
-	time_t min_age = 0, now = time(NULL);
+	time_t now = time(NULL);
 	time_t last_state_file_time;
 	DEF_TIMERS;
 
@@ -697,9 +682,6 @@ int dump_all_job_state(void)
 	pack16(SLURM_PROTOCOL_VERSION, buffer);
 	pack_time(now, buffer);
 
-	if (slurmctld_conf.min_job_age > 0)
-		min_age = now  - slurmctld_conf.min_job_age;
-
 	/*
 	 * write header: job id
 	 * This is needed so that the job id remains persistent even after
@@ -715,10 +697,6 @@ int dump_all_job_state(void)
 	job_iterator = list_iterator_create(job_list);
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
 		xassert (job_ptr->magic == JOB_MAGIC);
-		if ((min_age > 0) && (job_ptr->end_time < min_age) &&
-		    (! IS_JOB_COMPLETING(job_ptr)) && IS_JOB_FINISHED(job_ptr))
-			continue;	/* job ready for purging, don't dump */
-
 		_dump_job_state(job_ptr, buffer);
 	}
 	list_iterator_destroy(job_iterator);
@@ -820,7 +798,7 @@ static int _open_job_state_file(char **state_file)
 	} else if (stat_buf.st_size < 10) {
 		error("Job state file %s too small", *state_file);
 		(void) close(state_fd);
-	} else 	/* Success */
+	} else	/* Success */
 		return state_fd;
 
 	error("NOTE: Trying backup state save file. Jobs may be lost!");
@@ -882,12 +860,8 @@ static time_t _get_last_state_write_time(void)
 
 	buffer = create_buf(data, data_size);
 	safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer);
-	if (ver_str) {
-		if (!strcmp(ver_str, JOB_STATE_VERSION))
-			safe_unpack16(&protocol_version, buffer);
-		else if (!strcmp(ver_str, JOB_2_6_STATE_VERSION))
-			protocol_version = SLURM_2_6_PROTOCOL_VERSION;
-	}
+	if (ver_str && !strcmp(ver_str, JOB_STATE_VERSION))
+		safe_unpack16(&protocol_version, buffer);
 	safe_unpack_time(&buf_time, buffer);
 
 unpack_error:
@@ -914,6 +888,8 @@ extern int load_all_job_state(void)
 	char *ver_str = NULL;
 	uint32_t ver_str_len;
 	uint16_t protocol_version = (uint16_t)NO_VAL;
+	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
 
 	/* read the file */
 	lock_state_files();
@@ -953,34 +929,32 @@ extern int load_all_job_state(void)
 	buffer = create_buf(data, data_size);
 	safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer);
 	debug3("Version string in job_state header is %s", ver_str);
-	if (ver_str) {
-		if (!strcmp(ver_str, JOB_STATE_VERSION))
-			safe_unpack16(&protocol_version, buffer);
-		else if (!strcmp(ver_str, JOB_2_6_STATE_VERSION))
-			protocol_version = SLURM_2_6_PROTOCOL_VERSION;
-	}
+	if (ver_str && !strcmp(ver_str, JOB_STATE_VERSION))
+		safe_unpack16(&protocol_version, buffer);
+	xfree(ver_str);
 
 	if (protocol_version == (uint16_t)NO_VAL) {
 		error("***********************************************");
 		error("Can not recover job state, incompatible version");
 		error("***********************************************");
-		xfree(ver_str);
 		free_buf(buffer);
 		return EFAULT;
 	}
-	xfree(ver_str);
 
 	safe_unpack_time(&buf_time, buffer);
-	safe_unpack32( &saved_job_id, buffer);
-	job_id_sequence = MAX(saved_job_id, job_id_sequence);
+	safe_unpack32(&saved_job_id, buffer);
+	if (saved_job_id <= slurmctld_conf.max_job_id)
+		job_id_sequence = MAX(saved_job_id, job_id_sequence);
 	debug3("Job id in job_state header is %u", saved_job_id);
 
+	assoc_mgr_lock(&locks);
 	while (remaining_buf(buffer) > 0) {
 		error_code = _load_job_state(buffer, protocol_version);
 		if (error_code != SLURM_SUCCESS)
 			goto unpack_error;
 		job_cnt++;
 	}
+	assoc_mgr_unlock(&locks);
 	debug3("Set job_id_sequence to %u", job_id_sequence);
 
 	free_buf(buffer);
@@ -988,7 +962,8 @@ extern int load_all_job_state(void)
 	return error_code;
 
 unpack_error:
-	error("Incomplete job data checkpoint file");
+	assoc_mgr_unlock(&locks);
+	error("Incomplete job state save file");
 	info("Recovered information about %d jobs", job_cnt);
 	free_buf(buffer);
 	return SLURM_FAILURE;
@@ -1050,12 +1025,8 @@ extern int load_last_job_id( void )
 	buffer = create_buf(data, data_size);
 	safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer);
 	debug3("Version string in job_state header is %s", ver_str);
-	if (ver_str) {
-		if (!strcmp(ver_str, JOB_STATE_VERSION))
-			safe_unpack16(&protocol_version, buffer);
-		else if (!strcmp(ver_str, JOB_2_6_STATE_VERSION))
-			protocol_version = SLURM_2_6_PROTOCOL_VERSION;
-	}
+	if (ver_str && !strcmp(ver_str, JOB_STATE_VERSION))
+		safe_unpack16(&protocol_version, buffer);
 	xfree(ver_str);
 
 	if (protocol_version == (uint16_t)NO_VAL) {
@@ -1081,6 +1052,37 @@ unpack_error:
 	return SLURM_FAILURE;
 }
 
+static void _pack_acct_policy_limit(acct_policy_limit_set_t *limit_set,
+				    Buf buffer, uint16_t protocol_version)
+{
+	xassert(limit_set);
+
+	pack16(limit_set->qos, buffer);
+	pack16(limit_set->time, buffer);
+	pack16_array(limit_set->tres, slurmctld_tres_cnt, buffer);
+}
+
+static int _unpack_acct_policy_limit_members(
+	acct_policy_limit_set_t *limit_set,
+	Buf buffer, uint16_t protocol_version)
+{
+	uint32_t tmp32;
+
+	xassert(limit_set);
+
+	safe_unpack16(&limit_set->qos, buffer);
+	safe_unpack16(&limit_set->time, buffer);
+	xfree(limit_set->tres);
+	safe_unpack16_array(&limit_set->tres, &tmp32, buffer);
+
+	return SLURM_SUCCESS;
+
+unpack_error:
+	xfree(limit_set->tres);
+
+	return SLURM_ERROR;
+}
+
 /*
  * _dump_job_state - dump the state of a specific job, its details, and
  *	steps to a buffer
@@ -1117,6 +1119,7 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer)
 		tmp_32 = NO_VAL;
 		pack32(tmp_32, buffer);
 	}
+
 	pack32(dump_job_ptr->assoc_id, buffer);
 	pack32(dump_job_ptr->job_id, buffer);
 	pack32(dump_job_ptr->user_id, buffer);
@@ -1150,7 +1153,7 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer)
 	pack_time(dump_job_ptr->tot_sus_time, buffer);
 
 	pack16(dump_job_ptr->direct_set_prio, buffer);
-	pack16(dump_job_ptr->job_state, buffer);
+	pack32(dump_job_ptr->job_state, buffer);
 	pack16(dump_job_ptr->kill_on_node_fail, buffer);
 	pack16(dump_job_ptr->batch_flag, buffer);
 	pack16(dump_job_ptr->mail_type, buffer);
@@ -1161,20 +1164,19 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer)
 	pack16(dump_job_ptr->warn_flags, buffer);
 	pack16(dump_job_ptr->warn_signal, buffer);
 	pack16(dump_job_ptr->warn_time, buffer);
-	pack16(dump_job_ptr->limit_set_max_cpus, buffer);
-	pack16(dump_job_ptr->limit_set_max_nodes, buffer);
-	pack16(dump_job_ptr->limit_set_min_cpus, buffer);
-	pack16(dump_job_ptr->limit_set_min_nodes, buffer);
-	pack16(dump_job_ptr->limit_set_pn_min_memory, buffer);
-	pack16(dump_job_ptr->limit_set_time, buffer);
-	pack16(dump_job_ptr->limit_set_qos, buffer);
+
+	_pack_acct_policy_limit(&dump_job_ptr->limit_set, buffer,
+				SLURM_PROTOCOL_VERSION);
 
 	packstr(dump_job_ptr->state_desc, buffer);
 	packstr(dump_job_ptr->resp_host, buffer);
 
 	pack16(dump_job_ptr->alloc_resp_port, buffer);
 	pack16(dump_job_ptr->other_port, buffer);
+	pack8(dump_job_ptr->power_flags, buffer);
+	pack8(dump_job_ptr->sicp_mode, buffer);
 	pack16(dump_job_ptr->start_protocol_ver, buffer);
+	packdouble(dump_job_ptr->billable_tres, buffer);
 
 	if (IS_JOB_COMPLETING(dump_job_ptr)) {
 		if (dump_job_ptr->nodes_completing == NULL) {
@@ -1199,6 +1201,7 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer)
 	packstr(dump_job_ptr->mail_user, buffer);
 	packstr(dump_job_ptr->resv_name, buffer);
 	packstr(dump_job_ptr->batch_host, buffer);
+	packstr(dump_job_ptr->burst_buffer, buffer);
 
 	select_g_select_jobinfo_pack(dump_job_ptr->select_jobinfo,
 				     buffer, SLURM_PROTOCOL_VERSION);
@@ -1235,9 +1238,15 @@ static void _dump_job_state(struct job_record *dump_job_ptr, Buf buffer)
 	}
 	list_iterator_destroy(step_iterator);
 	pack16((uint16_t) 0, buffer);	/* no step flag */
+	pack32(dump_job_ptr->bit_flags, buffer);
+	packstr(dump_job_ptr->tres_alloc_str, buffer);
+	packstr(dump_job_ptr->tres_fmt_alloc_str, buffer);
+	packstr(dump_job_ptr->tres_req_str, buffer);
+	packstr(dump_job_ptr->tres_fmt_req_str, buffer);
 }
 
 /* Unpack a job's state information from a buffer */
+/* NOTE: assoc_mgr tres and assoc read lock must be locked before calling */
 static int _load_job_state(Buf buffer, uint16_t protocol_version)
 {
 	uint32_t job_id, user_id, group_id, time_limit, priority, alloc_sid;
@@ -1246,31 +1255,29 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 	uint32_t resv_id, spank_job_env_size = 0, qos_id, derived_ec = 0;
 	uint32_t array_job_id = 0, req_switch = 0, wait4switch = 0;
 	uint32_t profile = ACCT_GATHER_PROFILE_NOT_SET;
+	uint32_t job_state;
 	time_t start_time, end_time, suspend_time, pre_sus_time, tot_sus_time;
 	time_t preempt_time = 0;
 	time_t resize_time = 0, now = time(NULL);
-	uint8_t reboot = 0;
+	uint8_t reboot = 0, power_flags = 0, sicp_mode = 0;
 	uint32_t array_task_id = NO_VAL;
 	uint32_t array_flags = 0, max_run_tasks = 0, tot_run_tasks = 0;
 	uint32_t min_exit_code = 0, max_exit_code = 0, tot_comp_tasks = 0;
-	uint16_t job_state, details, batch_flag, step_flag;
+	uint16_t details, batch_flag, step_flag;
 	uint16_t kill_on_node_fail, direct_set_prio;
 	uint16_t alloc_resp_port, other_port, mail_type, state_reason;
 	uint16_t restart_cnt, ckpt_interval;
 	uint16_t wait_all_nodes, warn_flags = 0, warn_signal, warn_time;
-	uint16_t limit_set_max_cpus = 0, limit_set_max_nodes = 0;
-	uint16_t limit_set_min_cpus = 0, limit_set_min_nodes = 0;
-	uint16_t limit_set_pn_min_memory = 0;
-	uint16_t limit_set_time = 0, limit_set_qos = 0;
-	uint16_t uint16_tmp;
+	acct_policy_limit_set_t limit_set;
 	uint16_t start_protocol_ver = SLURM_MIN_PROTOCOL_VERSION;
+	uint16_t uint16_tmp;
 	char *nodes = NULL, *partition = NULL, *name = NULL, *resp_host = NULL;
 	char *account = NULL, *network = NULL, *mail_user = NULL;
 	char *comment = NULL, *nodes_completing = NULL, *alloc_node = NULL;
 	char *licenses = NULL, *state_desc = NULL, *wckey = NULL;
 	char *resv_name = NULL, *gres = NULL, *batch_host = NULL;
 	char *gres_alloc = NULL, *gres_req = NULL, *gres_used = NULL;
-	char *task_id_str = NULL;
+	char *burst_buffer = NULL, *task_id_str = NULL;
 	uint32_t task_id_size = NO_VAL;
 	char **spank_job_env = (char **) NULL;
 	List gres_list = NULL, part_ptr_list = NULL;
@@ -1280,12 +1287,18 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 	dynamic_plugin_data_t *select_jobinfo = NULL;
 	job_resources_t *job_resources = NULL;
 	check_jobinfo_t check_job = NULL;
-	slurmdb_association_rec_t assoc_rec;
+	slurmdb_assoc_rec_t assoc_rec;
 	slurmdb_qos_rec_t qos_rec;
 	bool job_finished = false;
 	char jbuf[JBUFSIZ];
+	double billable_tres = (double)NO_VAL;
+	char *tres_alloc_str = NULL, *tres_fmt_alloc_str = NULL,
+		*tres_req_str = NULL, *tres_fmt_req_str = NULL;
 
-	if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	memset(&limit_set, 0, sizeof(acct_policy_limit_set_t));
+	limit_set.tres = xmalloc(sizeof(uint16_t) * slurmctld_tres_cnt);
+
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack32(&array_job_id, buffer);
 		safe_unpack32(&array_task_id, buffer);
 
@@ -1354,7 +1367,7 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 		safe_unpack_time(&tot_sus_time, buffer);
 
 		safe_unpack16(&direct_set_prio, buffer);
-		safe_unpack16(&job_state, buffer);
+		safe_unpack32(&job_state, buffer);
 		safe_unpack16(&kill_on_node_fail, buffer);
 		safe_unpack16(&batch_flag, buffer);
 		safe_unpack16(&mail_type, buffer);
@@ -1365,20 +1378,19 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 		safe_unpack16(&warn_flags, buffer);
 		safe_unpack16(&warn_signal, buffer);
 		safe_unpack16(&warn_time, buffer);
-		safe_unpack16(&limit_set_max_cpus, buffer);
-		safe_unpack16(&limit_set_max_nodes, buffer);
-		safe_unpack16(&limit_set_min_cpus, buffer);
-		safe_unpack16(&limit_set_min_nodes, buffer);
-		safe_unpack16(&limit_set_pn_min_memory, buffer);
-		safe_unpack16(&limit_set_time, buffer);
-		safe_unpack16(&limit_set_qos, buffer);
+
+		_unpack_acct_policy_limit_members(&limit_set, buffer,
+						  protocol_version);
 
 		safe_unpackstr_xmalloc(&state_desc, &name_len, buffer);
 		safe_unpackstr_xmalloc(&resp_host, &name_len, buffer);
 
 		safe_unpack16(&alloc_resp_port, buffer);
 		safe_unpack16(&other_port, buffer);
+		safe_unpack8(&power_flags, buffer);
+		safe_unpack8(&sicp_mode, buffer);
 		safe_unpack16(&start_protocol_ver, buffer);
+		safe_unpackdouble(&billable_tres, buffer);
 
 		if (job_state & JOB_COMPLETING) {
 			safe_unpackstr_xmalloc(&nodes_completing,
@@ -1392,12 +1404,14 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 		}
 		part_ptr = find_part_record (partition);
 		if (part_ptr == NULL) {
-			part_ptr_list = get_part_list(partition);
+			char *err_part = NULL;
+			part_ptr_list = get_part_list(partition, &err_part);
 			if (part_ptr_list) {
 				part_ptr = list_peek(part_ptr_list);
 			} else {
 				verbose("Invalid partition (%s) for job_id %u",
-					partition, job_id);
+					err_part, job_id);
+				xfree(err_part);
 				/* not fatal error, partition could have been
 				 * removed, reset_job_bitmaps() will clean-up
 				 * this job */
@@ -1418,6 +1432,7 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 		safe_unpackstr_xmalloc(&mail_user, &name_len, buffer);
 		safe_unpackstr_xmalloc(&resv_name, &name_len, buffer);
 		safe_unpackstr_xmalloc(&batch_host, &name_len, buffer);
+		safe_unpackstr_xmalloc(&burst_buffer, &name_len, buffer);
 
 		if (select_g_select_jobinfo_unpack(&select_jobinfo, buffer,
 						   protocol_version))
@@ -1463,9 +1478,29 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 				goto unpack_error;
 			safe_unpack16(&step_flag, buffer);
 		}
-	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		safe_unpack32(&job_ptr->bit_flags, buffer);
+		safe_unpackstr_xmalloc(&tres_alloc_str,
+				       &name_len, buffer);
+		safe_unpackstr_xmalloc(&tres_fmt_alloc_str,
+				       &name_len, buffer);
+		safe_unpackstr_xmalloc(&tres_req_str, &name_len, buffer);
+		safe_unpackstr_xmalloc(&tres_fmt_req_str, &name_len, buffer);
+	} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
 		safe_unpack32(&array_job_id, buffer);
 		safe_unpack32(&array_task_id, buffer);
+
+		/* Job Array record */
+		safe_unpack32(&task_id_size, buffer);
+		if (task_id_size != NO_VAL) {
+			safe_unpackstr_xmalloc(&task_id_str, &name_len, buffer);
+			safe_unpack32(&array_flags,    buffer);
+			safe_unpack32(&max_run_tasks,  buffer);
+			safe_unpack32(&tot_run_tasks,  buffer);
+			safe_unpack32(&min_exit_code,  buffer);
+			safe_unpack32(&max_exit_code,  buffer);
+			safe_unpack32(&tot_comp_tasks, buffer);
+		}
+
 		safe_unpack32(&assoc_id, buffer);
 		safe_unpack32(&job_id, buffer);
 
@@ -1516,29 +1551,40 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 		safe_unpack_time(&tot_sus_time, buffer);
 
 		safe_unpack16(&direct_set_prio, buffer);
-		safe_unpack16(&job_state, buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		job_state = uint16_tmp;
 		safe_unpack16(&kill_on_node_fail, buffer);
 		safe_unpack16(&batch_flag, buffer);
 		safe_unpack16(&mail_type, buffer);
 		safe_unpack16(&state_reason, buffer);
+		safe_unpack8 (&reboot, buffer);
 		safe_unpack16(&restart_cnt, buffer);
 		safe_unpack16(&wait_all_nodes, buffer);
 		safe_unpack16(&warn_flags, buffer);
 		safe_unpack16(&warn_signal, buffer);
 		safe_unpack16(&warn_time, buffer);
-		safe_unpack16(&limit_set_max_cpus, buffer);
-		safe_unpack16(&limit_set_max_nodes, buffer);
-		safe_unpack16(&limit_set_min_cpus, buffer);
-		safe_unpack16(&limit_set_min_nodes, buffer);
-		safe_unpack16(&limit_set_pn_min_memory, buffer);
-		safe_unpack16(&limit_set_time, buffer);
-		safe_unpack16(&limit_set_qos, buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		limit_set.tres[TRES_ARRAY_CPU] = uint16_tmp;
+		safe_unpack16(&uint16_tmp, buffer);
+		limit_set.tres[TRES_ARRAY_NODE] = uint16_tmp;
+		safe_unpack16(&uint16_tmp, buffer);
+		if (!limit_set.tres[TRES_ARRAY_CPU] && uint16_tmp)
+			limit_set.tres[TRES_ARRAY_CPU] = uint16_tmp;
+		safe_unpack16(&uint16_tmp, buffer);
+		if (!limit_set.tres[TRES_ARRAY_NODE] && uint16_tmp)
+			limit_set.tres[TRES_ARRAY_NODE] = uint16_tmp;
+		safe_unpack16(&uint16_tmp, buffer);
+		limit_set.tres[TRES_ARRAY_MEM] = uint16_tmp;
+		safe_unpack16(&limit_set.time, buffer);
+		safe_unpack16(&limit_set.qos, buffer);
+
 
 		safe_unpackstr_xmalloc(&state_desc, &name_len, buffer);
 		safe_unpackstr_xmalloc(&resp_host, &name_len, buffer);
 
 		safe_unpack16(&alloc_resp_port, buffer);
 		safe_unpack16(&other_port, buffer);
+		safe_unpack16(&start_protocol_ver, buffer);
 
 		if (job_state & JOB_COMPLETING) {
 			safe_unpackstr_xmalloc(&nodes_completing,
@@ -1552,12 +1598,14 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 		}
 		part_ptr = find_part_record (partition);
 		if (part_ptr == NULL) {
-			part_ptr_list = get_part_list(partition);
+			char *err_part = NULL;
+			part_ptr_list = get_part_list(partition, &err_part);
 			if (part_ptr_list) {
 				part_ptr = list_peek(part_ptr_list);
 			} else {
 				verbose("Invalid partition (%s) for job_id %u",
-					partition, job_id);
+					err_part, job_id);
+				xfree(err_part);
 				/* not fatal error, partition could have been
 				 * removed, reset_job_bitmaps() will clean-up
 				 * this job */
@@ -1623,13 +1671,18 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 				goto unpack_error;
 			safe_unpack16(&step_flag, buffer);
 		}
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+		if (job_id > 0x7fffffff) {
+			error("JobID %u can not be recovered, JobID too high",
+			      job_id);
+			job_ptr->job_state = JOB_FAILED;
+			job_ptr->exit_code = 1;
+			job_ptr->state_reason = FAIL_SYSTEM;
+			xfree(job_ptr->state_desc);
+			job_ptr->end_time = now;
+		}
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		safe_unpack32(&array_job_id, buffer);
-		safe_unpack16(&uint16_tmp, buffer);
-		if (uint16_tmp == (uint16_t) NO_VAL)
-			array_task_id = NO_VAL;
-		else
-			array_task_id = (uint32_t) uint16_tmp;
+		safe_unpack32(&array_task_id, buffer);
 		safe_unpack32(&assoc_id, buffer);
 		safe_unpack32(&job_id, buffer);
 
@@ -1680,23 +1733,31 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 		safe_unpack_time(&tot_sus_time, buffer);
 
 		safe_unpack16(&direct_set_prio, buffer);
-		safe_unpack16(&job_state, buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		job_state = uint16_tmp;
 		safe_unpack16(&kill_on_node_fail, buffer);
 		safe_unpack16(&batch_flag, buffer);
 		safe_unpack16(&mail_type, buffer);
 		safe_unpack16(&state_reason, buffer);
 		safe_unpack16(&restart_cnt, buffer);
-		safe_unpack16(&uint16_tmp, buffer);	/* Was resv_flags */
 		safe_unpack16(&wait_all_nodes, buffer);
+		safe_unpack16(&warn_flags, buffer);
 		safe_unpack16(&warn_signal, buffer);
 		safe_unpack16(&warn_time, buffer);
-		safe_unpack16(&limit_set_max_cpus, buffer);
-		safe_unpack16(&limit_set_max_nodes, buffer);
-		safe_unpack16(&limit_set_min_cpus, buffer);
-		safe_unpack16(&limit_set_min_nodes, buffer);
-		safe_unpack16(&limit_set_pn_min_memory, buffer);
-		safe_unpack16(&limit_set_time, buffer);
-		safe_unpack16(&limit_set_qos, buffer);
+		safe_unpack16(&uint16_tmp, buffer);
+		limit_set.tres[TRES_ARRAY_CPU] = uint16_tmp;
+		safe_unpack16(&uint16_tmp, buffer);
+		limit_set.tres[TRES_ARRAY_NODE] = uint16_tmp;
+		safe_unpack16(&uint16_tmp, buffer);
+		if (!limit_set.tres[TRES_ARRAY_CPU] && uint16_tmp)
+			limit_set.tres[TRES_ARRAY_CPU] = uint16_tmp;
+		safe_unpack16(&uint16_tmp, buffer);
+		if (!limit_set.tres[TRES_ARRAY_NODE] && uint16_tmp)
+			limit_set.tres[TRES_ARRAY_NODE] = uint16_tmp;
+		safe_unpack16(&uint16_tmp, buffer);
+		limit_set.tres[TRES_ARRAY_MEM] = uint16_tmp;
+		safe_unpack16(&limit_set.time, buffer);
+		safe_unpack16(&limit_set.qos, buffer);
 
 		safe_unpackstr_xmalloc(&state_desc, &name_len, buffer);
 		safe_unpackstr_xmalloc(&resp_host, &name_len, buffer);
@@ -1716,12 +1777,14 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 		}
 		part_ptr = find_part_record (partition);
 		if (part_ptr == NULL) {
-			part_ptr_list = get_part_list(partition);
+			char *err_part = NULL;
+			part_ptr_list = get_part_list(partition, &err_part);
 			if (part_ptr_list) {
 				part_ptr = list_peek(part_ptr_list);
 			} else {
 				verbose("Invalid partition (%s) for job_id %u",
-					partition, job_id);
+					err_part, job_id);
+				xfree(err_part);
 				/* not fatal error, partition could have been
 				 * removed, reset_job_bitmaps() will clean-up
 				 * this job */
@@ -1787,6 +1850,15 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 				goto unpack_error;
 			safe_unpack16(&step_flag, buffer);
 		}
+		if (job_id > 0x7fffffff) {
+			error("JobID %u can not be recovered, JobID too high",
+			      job_id);
+			job_ptr->job_state = JOB_FAILED;
+			job_ptr->exit_code = 1;
+			job_ptr->state_reason = FAIL_SYSTEM;
+			xfree(job_ptr->state_desc);
+			job_ptr->end_time = now;
+		}
 	} else {
 		error("_load_job_state: protocol_version "
 		      "%hu not supported", protocol_version);
@@ -1813,6 +1885,22 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 	if (job_id_sequence <= job_id)
 		job_id_sequence = job_id + 1;
 
+	xfree(job_ptr->tres_alloc_str);
+	job_ptr->tres_alloc_str = tres_alloc_str;
+	tres_alloc_str = NULL;
+
+	xfree(job_ptr->tres_req_str);
+	job_ptr->tres_req_str = tres_req_str;
+	tres_req_str = NULL;
+
+	xfree(job_ptr->tres_fmt_alloc_str);
+	job_ptr->tres_fmt_alloc_str = tres_fmt_alloc_str;
+	tres_fmt_alloc_str = NULL;
+
+	xfree(job_ptr->tres_fmt_req_str);
+	job_ptr->tres_fmt_req_str = tres_fmt_req_str;
+	tres_fmt_req_str = NULL;
+
 	xfree(job_ptr->account);
 	job_ptr->account = account;
 	xstrtolower(job_ptr->account);
@@ -1827,9 +1915,13 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 	xfree(job_ptr->batch_host);
 	job_ptr->batch_host   = batch_host;
 	batch_host            = NULL;  /* reused, nothing left to free */
+	xfree(job_ptr->burst_buffer);
+	job_ptr->burst_buffer = burst_buffer;
+	burst_buffer          = NULL;  /* reused, nothing left to free */
 	xfree(job_ptr->comment);
 	job_ptr->comment      = comment;
 	comment               = NULL;  /* reused, nothing left to free */
+	job_ptr->billable_tres = billable_tres;
 	xfree(job_ptr->gres);
 	job_ptr->gres         = gres;
 	gres                  = NULL;  /* reused, nothing left to free */
@@ -1878,6 +1970,8 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 		nodes_completing = NULL;  /* reused, nothing left to free */
 	}
 	job_ptr->other_port   = other_port;
+	job_ptr->power_flags  = power_flags;
+	job_ptr->sicp_mode    = sicp_mode;
 	xfree(job_ptr->partition);
 	job_ptr->partition    = partition;
 	partition             = NULL;	/* reused, nothing left to free */
@@ -1910,20 +2004,25 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 		if (!job_ptr->array_recs)
 			job_ptr->array_recs=xmalloc(sizeof(job_array_struct_t));
 		FREE_NULL_BITMAP(job_ptr->array_recs->task_id_bitmap);
-		job_ptr->array_recs->task_id_bitmap = bit_alloc(task_id_size);
 		xfree(job_ptr->array_recs->task_id_str);
-		if (task_id_str) {
-			bit_unfmt_hexmask(job_ptr->array_recs->task_id_bitmap,
-					  task_id_str);
-			job_ptr->array_recs->task_id_str = task_id_str;
-			task_id_str = NULL;
-		}
-		job_ptr->array_recs->task_cnt =
-			bit_set_count(job_ptr->array_recs->task_id_bitmap);
-
-		if (job_ptr->array_recs->task_cnt > 1)
-			job_count += (job_ptr->array_recs->task_cnt - 1);
+		if (task_id_size) {
+			job_ptr->array_recs->task_id_bitmap =
+				bit_alloc(task_id_size);
+			if (task_id_str) {
+				bit_unfmt_hexmask(
+					job_ptr->array_recs->task_id_bitmap,
+					task_id_str);
+				job_ptr->array_recs->task_id_str = task_id_str;
+				task_id_str = NULL;
+			}
+			job_ptr->array_recs->task_cnt =
+				bit_set_count(job_ptr->array_recs->
+					      task_id_bitmap);
 
+			if (job_ptr->array_recs->task_cnt > 1)
+				job_count += (job_ptr->array_recs->task_cnt-1);
+		} else
+			xfree(task_id_str);
 		job_ptr->array_recs->array_flags    = array_flags;
 		job_ptr->array_recs->max_run_tasks  = max_run_tasks;
 		job_ptr->array_recs->tot_run_tasks  = tot_run_tasks;
@@ -1951,13 +2050,11 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 	job_ptr->warn_flags   = warn_flags;
 	job_ptr->warn_signal  = warn_signal;
 	job_ptr->warn_time    = warn_time;
-	job_ptr->limit_set_max_cpus  = limit_set_max_cpus;
-	job_ptr->limit_set_max_nodes = limit_set_max_nodes;
-	job_ptr->limit_set_min_cpus  = limit_set_min_cpus;
-	job_ptr->limit_set_min_nodes = limit_set_min_nodes;
-	job_ptr->limit_set_pn_min_memory = limit_set_pn_min_memory;
-	job_ptr->limit_set_time      = limit_set_time;
-	job_ptr->limit_set_qos       = limit_set_qos;
+
+	memcpy(&job_ptr->limit_set, &limit_set,
+	       sizeof(acct_policy_limit_set_t));
+	limit_set.tres = NULL;
+
 	job_ptr->req_switch      = req_switch;
 	job_ptr->wait4switch     = wait4switch;
 	job_ptr->profile         = profile;
@@ -1971,7 +2068,7 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 	_add_job_hash(job_ptr);
 	_add_job_array_hash(job_ptr);
 
-	memset(&assoc_rec, 0, sizeof(slurmdb_association_rec_t));
+	memset(&assoc_rec, 0, sizeof(slurmdb_assoc_rec_t));
 
 	/*
 	 * For speed and accurracy we will first see if we once had an
@@ -1989,8 +2086,8 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 
 	if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
 				    accounting_enforce,
-				    (slurmdb_association_rec_t **)
-				    &job_ptr->assoc_ptr, false) &&
+				    (slurmdb_assoc_rec_t **)
+				    &job_ptr->assoc_ptr, true) &&
 	    (accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS)
 	    && (!IS_JOB_FINISHED(job_ptr))) {
 		info("Holding job %u with invalid association", job_id);
@@ -1999,7 +2096,7 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 	} else {
 		job_ptr->assoc_id = assoc_rec.id;
 		info("Recovered %s Assoc=%u",
-		     jobid2str(job_ptr, jbuf), job_ptr->assoc_id);
+		     jobid2str(job_ptr, jbuf, sizeof(jbuf)), job_ptr->assoc_id);
 
 		/* make sure we have started this job in accounting */
 		if (!job_ptr->db_index) {
@@ -2030,9 +2127,9 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 		qos_rec.id = job_ptr->qos_id;
 		job_ptr->qos_ptr = _determine_and_validate_qos(
 			job_ptr->resv_name, job_ptr->assoc_ptr,
-			job_ptr->limit_set_qos, &qos_rec,
-			&qos_error);
-		if ((qos_error != SLURM_SUCCESS) && !job_ptr->limit_set_qos) {
+			job_ptr->limit_set.qos, &qos_rec,
+			&qos_error, true);
+		if ((qos_error != SLURM_SUCCESS) && !job_ptr->limit_set.qos) {
 			info("Holding job %u with invalid qos", job_id);
 			xfree(job_ptr->state_desc);
 			job_ptr->state_reason = FAIL_QOS;
@@ -2040,6 +2137,22 @@ static int _load_job_state(Buf buffer, uint16_t protocol_version)
 		} else
 			job_ptr->qos_id = qos_rec.id;
 	}
+
+	/* do this after the format string just incase for some
+	 * reason the tres_alloc_str is NULL but not the fmt_str */
+	if (job_ptr->tres_alloc_str)
+		assoc_mgr_set_tres_cnt_array(
+			&job_ptr->tres_alloc_cnt, job_ptr->tres_alloc_str,
+			0, true);
+	else
+		job_set_alloc_tres(job_ptr, true);
+
+	if (job_ptr->tres_req_str)
+		assoc_mgr_set_tres_cnt_array(
+			&job_ptr->tres_req_cnt, job_ptr->tres_req_str, 0, true);
+	else
+		job_set_req_tres(job_ptr, true);
+
 	build_node_details(job_ptr, false);	/* set node_addr */
 	return SLURM_SUCCESS;
 
@@ -2048,6 +2161,7 @@ unpack_error:
 	xfree(alloc_node);
 	xfree(account);
 	xfree(batch_host);
+	xfree(burst_buffer);
 	xfree(comment);
 	xfree(gres);
 	xfree(gres_alloc);
@@ -2055,6 +2169,7 @@ unpack_error:
 	xfree(gres_used);
 	xfree(resp_host);
 	xfree(licenses);
+	xfree(limit_set.tres);
 	xfree(mail_user);
 	xfree(name);
 	xfree(nodes);
@@ -2067,6 +2182,10 @@ unpack_error:
 	xfree(spank_job_env);
 	xfree(state_desc);
 	xfree(task_id_str);
+	xfree(tres_alloc_str);
+	xfree(tres_fmt_alloc_str);
+	xfree(tres_fmt_req_str);
+	xfree(tres_req_str);
 	xfree(wckey);
 	select_g_select_jobinfo_free(select_jobinfo);
 	checkpoint_free_jobinfo(check_job);
@@ -2099,7 +2218,7 @@ void _dump_job_details(struct job_details *detail_ptr, Buf buffer)
 	pack16(detail_ptr->nice, buffer);
 	pack16(detail_ptr->ntasks_per_node, buffer);
 	pack16(detail_ptr->requeue, buffer);
-	pack16(detail_ptr->task_dist, buffer);
+	pack32(detail_ptr->task_dist, buffer);
 
 	pack8(detail_ptr->share_res, buffer);
 	pack8(detail_ptr->whole_node, buffer);
@@ -2117,6 +2236,9 @@ void _dump_job_details(struct job_details *detail_ptr, Buf buffer)
 	pack32(detail_ptr->pn_min_cpus, buffer);
 	pack32(detail_ptr->pn_min_memory, buffer);
 	pack32(detail_ptr->pn_min_tmp_disk, buffer);
+	pack32(detail_ptr->cpu_freq_min, buffer);
+	pack32(detail_ptr->cpu_freq_max, buffer);
+	pack32(detail_ptr->cpu_freq_gov, buffer);
 	pack_time(detail_ptr->begin_time, buffer);
 	pack_time(detail_ptr->submit_time, buffer);
 
@@ -2152,9 +2274,12 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 	uint32_t min_nodes, max_nodes;
 	uint32_t min_cpus = 1, max_cpus = NO_VAL;
 	uint32_t pn_min_cpus, pn_min_memory, pn_min_tmp_disk;
-	uint32_t num_tasks, name_len, argc = 0, env_cnt = 0;
+	uint32_t cpu_freq_min = NO_VAL;
+	uint32_t cpu_freq_max = NO_VAL;
+	uint32_t cpu_freq_gov = NO_VAL;
+	uint32_t num_tasks, name_len, argc = 0, env_cnt = 0, task_dist;
 	uint16_t contiguous, core_spec = (uint16_t) NO_VAL, nice;
-	uint16_t ntasks_per_node, cpus_per_task, requeue, task_dist;
+	uint16_t ntasks_per_node, cpus_per_task, requeue;
 	uint16_t cpu_bind_type, mem_bind_type, plane_size;
 	uint8_t open_mode, overcommit, prolog_running;
 	uint8_t share_res, whole_node;
@@ -2163,7 +2288,7 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 	multi_core_data_t *mc_ptr;
 
 	/* unpack the job's details from the buffer */
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack32(&min_cpus, buffer);
 		safe_unpack32(&max_cpus, buffer);
 		safe_unpack32(&min_nodes, buffer);
@@ -2171,19 +2296,13 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 		safe_unpack32(&num_tasks, buffer);
 
 		safe_unpackstr_xmalloc(&acctg_freq, &name_len, buffer);
-		if (acctg_freq && !strcmp(acctg_freq, "65534")) {
-			/* This fixes job state generated by version 2.6.0,
-			 * in which a version 2.5 value of NO_VAL was converted
-			 * from uint16_t to a string. */
-			xfree(acctg_freq);
-		}
 		safe_unpack16(&contiguous, buffer);
 		safe_unpack16(&core_spec, buffer);
 		safe_unpack16(&cpus_per_task, buffer);
 		safe_unpack16(&nice, buffer);
 		safe_unpack16(&ntasks_per_node, buffer);
 		safe_unpack16(&requeue, buffer);
-		safe_unpack16(&task_dist, buffer);
+		safe_unpack32(&task_dist, buffer);
 
 		safe_unpack8(&share_res, buffer);
 		safe_unpack8(&whole_node, buffer);
@@ -2201,6 +2320,9 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 		safe_unpack32(&pn_min_cpus, buffer);
 		safe_unpack32(&pn_min_memory, buffer);
 		safe_unpack32(&pn_min_tmp_disk, buffer);
+		safe_unpack32(&cpu_freq_min, buffer);
+		safe_unpack32(&cpu_freq_max, buffer);
+		safe_unpack32(&cpu_freq_gov, buffer);
 		safe_unpack_time(&begin_time, buffer);
 		safe_unpack_time(&submit_time, buffer);
 
@@ -2221,8 +2343,8 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 			goto unpack_error;
 		safe_unpackstr_array(&argv, &argc, buffer);
 		safe_unpackstr_array(&env_sup, &env_cnt, buffer);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		uint16_t tmp_uint16;
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		uint16_t old_task_dist = 0;
 		safe_unpack32(&min_cpus, buffer);
 		safe_unpack32(&max_cpus, buffer);
 		safe_unpack32(&min_nodes, buffer);
@@ -2230,29 +2352,17 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 		safe_unpack32(&num_tasks, buffer);
 
 		safe_unpackstr_xmalloc(&acctg_freq, &name_len, buffer);
-		if (acctg_freq && !strcmp(acctg_freq, "65534")) {
-			/* This fixes job state generated by version 2.6.0,
-			 * in which a version 2.5 value of NO_VAL was converted
-			 * from uint16_t to a string. */
-			xfree(acctg_freq);
-		}
 		safe_unpack16(&contiguous, buffer);
+		safe_unpack16(&core_spec, buffer);
 		safe_unpack16(&cpus_per_task, buffer);
 		safe_unpack16(&nice, buffer);
 		safe_unpack16(&ntasks_per_node, buffer);
 		safe_unpack16(&requeue, buffer);
-		safe_unpack16(&tmp_uint16, buffer);
-		if (tmp_uint16 == 0) {
-			share_res = 0;
-			whole_node = 1;
-		} else if ((tmp_uint16 == 1) || (tmp_uint16 == 2)) {
-			share_res = 1;
-			whole_node = 0;
-		} else {
-			share_res = (uint8_t) NO_VAL;
-			whole_node = 0;
-		}
-		safe_unpack16(&task_dist, buffer);
+		safe_unpack16(&old_task_dist, buffer);
+		task_dist = task_dist_old2new(old_task_dist);
+
+		safe_unpack8(&share_res, buffer);
+		safe_unpack8(&whole_node, buffer);
 
 		safe_unpackstr_xmalloc(&cpu_bind, &name_len, buffer);
 		safe_unpack16(&cpu_bind_type, buffer);
@@ -2341,6 +2451,9 @@ static int _load_job_details(struct job_record *job_ptr, Buf buffer,
 	job_ptr->details->core_spec = core_spec;
 	job_ptr->details->cpu_bind = cpu_bind;
 	job_ptr->details->cpu_bind_type = cpu_bind_type;
+	job_ptr->details->cpu_freq_min = cpu_freq_min;
+	job_ptr->details->cpu_freq_max = cpu_freq_max;
+	job_ptr->details->cpu_freq_gov = cpu_freq_gov;
 	job_ptr->details->cpus_per_task = cpus_per_task;
 	job_ptr->details->dependency = dependency;
 	job_ptr->details->orig_dependency = orig_dependency;
@@ -2428,15 +2541,15 @@ static void _remove_job_hash(struct job_record *job_entry)
 {
 	struct job_record *job_ptr, **job_pptr;
 
-        job_pptr = &job_hash[JOB_HASH_INX(job_entry->job_id)];
-        while ((job_pptr != NULL) &&
-               ((job_ptr = *job_pptr) != job_entry)) {
-                job_pptr = &job_ptr->job_next;
-        }
-        if (job_pptr == NULL) {
-                fatal("job hash error");
-                return; /* Fix CLANG false positive error */
-        }
+	job_pptr = &job_hash[JOB_HASH_INX(job_entry->job_id)];
+	while ((job_pptr != NULL) &&
+	       ((job_ptr = *job_pptr) != job_entry)) {
+		job_pptr = &job_ptr->job_next;
+	}
+	if (job_pptr == NULL) {
+		fatal("job hash error");
+		return; /* Fix CLANG false positive error */
+	}
 	*job_pptr = job_entry->job_next;
 	job_entry->job_next = NULL;
 }
@@ -2469,17 +2582,33 @@ extern void build_array_str(struct job_record *job_ptr)
 {
 	job_array_struct_t *array_recs = job_ptr->array_recs;
 
-	if (!array_recs || array_recs->task_id_str || !array_recs->task_cnt ||
-	    !array_recs->task_id_bitmap)
+	if (!array_recs || array_recs->task_id_str ||
+	    !array_recs->task_id_bitmap ||
+	    (job_ptr->array_task_id != NO_VAL) ||
+	    (bit_ffs(job_ptr->array_recs->task_id_bitmap) == -1))
 		return;
 
+
 	array_recs->task_id_str = bit_fmt_hexmask(array_recs->task_id_bitmap);
-	/* Here we set the db_index to 0 so we resend the start of the
+
+	/* While it is efficient to set the db_index to 0 here
+	 * to get the database to update the record for
+	 * pending tasks it also creates a window in which if
+	 * the association id is changed (different account or
+	 * partition) instead of returning the previous
+	 * db_index (expected) it would create a new one
+	 * leaving the other orphaned.  Setting the job_state
+	 * sets things up so the db_index isn't lost but the
+	 * start message is still sent to get the desired behavior. */
+
+	/* Here we set the JOB_UPDATE_DB flag so we resend the start of the
 	 * job updating the array task string and count of pending
 	 * jobs.  This is faster than sending the start again since
-	 * this could happen many times instead of just ever so often.
+	 * this could happen many times (like lots of array elements
+	 * starting at once) instead of just ever so often.
 	 */
-	job_ptr->db_index = 0;
+
+	job_ptr->job_state |= JOB_UPDATE_DB;
 }
 
 /* Return true if ALL tasks of specific array job ID are complete */
@@ -2534,6 +2663,31 @@ extern bool test_job_array_completed(uint32_t array_job_id)
 	return true;
 }
 
+/* Return true if ALL tasks of specific array job ID are finished */
+extern bool test_job_array_finished(uint32_t array_job_id)
+{
+	struct job_record *job_ptr;
+	int inx;
+
+	job_ptr = find_job_record(array_job_id);
+	if (job_ptr) {
+		if (!IS_JOB_FINISHED(job_ptr))
+			return false;
+	}
+
+	/* Need to test individual job array records */
+	inx = JOB_HASH_INX(array_job_id);
+	job_ptr = job_array_hash_j[inx];
+	while (job_ptr) {
+		if (job_ptr->array_job_id == array_job_id) {
+			if (!IS_JOB_FINISHED(job_ptr))
+				return false;
+		}
+		job_ptr = job_ptr->job_array_next_j;
+	}
+	return true;
+}
+
 /* Return true if ANY tasks of specific array job ID are pending */
 extern bool test_job_array_pending(uint32_t array_job_id)
 {
@@ -2561,6 +2715,25 @@ extern bool test_job_array_pending(uint32_t array_job_id)
 	return false;
 }
 
+/* For a given job ID return the number of PENDING tasks which have their
+ * own separate job_record (do not count tasks in pending META job record) */
+extern int num_pending_job_array_tasks(uint32_t array_job_id)
+{
+	struct job_record *job_ptr;
+	int count = 0, inx;
+
+	inx = JOB_HASH_INX(array_job_id);
+	job_ptr = job_array_hash_j[inx];
+	while (job_ptr) {
+		if ((job_ptr->array_job_id == array_job_id) &&
+		    IS_JOB_PENDING(job_ptr))
+			count++;
+		job_ptr = job_ptr->job_array_next_j;
+	}
+
+	return count;
+}
+
 /*
  * find_job_array_rec - return a pointer to the job record with the given
  *	array_job_id/array_task_id
@@ -2728,7 +2901,7 @@ extern int kill_job_by_part_name(char *part_name)
 			continue;
 
 		if (IS_JOB_SUSPENDED(job_ptr)) {
-			enum job_states suspend_job_state = job_ptr->job_state;
+			uint32_t suspend_job_state = job_ptr->job_state;
 			/* we can't have it as suspended when we call the
 			 * accounting stuff.
 			 */
@@ -2807,7 +2980,7 @@ extern int kill_job_by_front_end_name(char *node_name)
 			continue;	/* no match on node name */
 
 		if (IS_JOB_SUSPENDED(job_ptr)) {
-			enum job_states suspend_job_state = job_ptr->job_state;
+			uint32_t suspend_job_state = job_ptr->job_state;
 			/* we can't have it as suspended when we call the
 			 * accounting stuff.
 			 */
@@ -2820,13 +2993,13 @@ extern int kill_job_by_front_end_name(char *node_name)
 			kill_job_cnt++;
 			while ((i = bit_ffs(job_ptr->node_bitmap_cg)) >= 0) {
 				bit_clear(job_ptr->node_bitmap_cg, i);
-				job_update_cpu_cnt(job_ptr, i);
 				if (job_ptr->node_cnt)
 					(job_ptr->node_cnt)--;
 				else {
 					error("node_cnt underflow on JobId=%u",
 					      job_ptr->job_id);
 				}
+				job_update_tres_cnt(job_ptr, i);
 				if (job_ptr->node_cnt == 0) {
 					delete_step_records(job_ptr);
 					job_ptr->job_state &= (~JOB_COMPLETING);
@@ -3035,7 +3208,7 @@ extern int kill_running_job_by_node_name(char *node_name)
 		if (nonstop_ops.node_fail)
 			(nonstop_ops.node_fail)(job_ptr, node_ptr);
 		if (IS_JOB_SUSPENDED(job_ptr)) {
-			enum job_states suspend_job_state = job_ptr->job_state;
+			uint32_t suspend_job_state = job_ptr->job_state;
 			/* we can't have it as suspended when we call the
 			 * accounting stuff.
 			 */
@@ -3050,7 +3223,7 @@ extern int kill_running_job_by_node_name(char *node_name)
 				continue;
 			kill_job_cnt++;
 			bit_clear(job_ptr->node_bitmap_cg, bit_position);
-			job_update_cpu_cnt(job_ptr, bit_position);
+			job_update_tres_cnt(job_ptr, bit_position);
 			if (job_ptr->node_cnt)
 				(job_ptr->node_cnt)--;
 			else {
@@ -3222,8 +3395,8 @@ void dump_job_desc(job_desc_msg_t * job_specs)
 	long kill_on_node_fail, shared, immediate, wait_all_nodes;
 	long cpus_per_task, requeue, num_tasks, overcommit;
 	long ntasks_per_node, ntasks_per_socket, ntasks_per_core;
-	int core_spec;
-	char *mem_type, buf[100], *signal_flags, *job_id;
+	int spec_count;
+	char *mem_type, buf[100], *signal_flags, *spec_type, *job_id;
 
 	if (job_specs == NULL)
 		return;
@@ -3244,10 +3417,19 @@ void dump_job_desc(job_desc_msg_t * job_specs)
 		(long) job_specs->min_cpus : -1L;
 	pn_min_cpus    = (job_specs->pn_min_cpus != (uint16_t) NO_VAL) ?
 		(long) job_specs->pn_min_cpus : -1L;
-	core_spec = (job_specs->core_spec != (uint16_t) NO_VAL) ?
-		    job_specs->core_spec : -1;
-	debug3("   cpus=%ld-%u pn_min_cpus=%ld core_spec=%d",
-	       min_cpus, job_specs->max_cpus, pn_min_cpus, core_spec);
+	if (job_specs->core_spec == (uint16_t) NO_VAL) {
+		spec_type  = "core";
+		spec_count = -1;
+	} else if (job_specs->core_spec & CORE_SPEC_THREAD) {
+		spec_type  = "thread";
+		spec_count = job_specs->core_spec & (~CORE_SPEC_THREAD);
+	} else {
+		spec_type  = "core";
+		spec_count = job_specs->core_spec;
+	}
+	debug3("   cpus=%ld-%u pn_min_cpus=%ld %s_spec=%d",
+	       min_cpus, job_specs->max_cpus, pn_min_cpus,
+	       spec_type, spec_count);
 
 	debug3("   -N min-[max]: %u-[%u]:%u:%u:%u",
 	       job_specs->min_nodes,   job_specs->max_nodes,
@@ -3345,7 +3527,10 @@ void dump_job_desc(job_desc_msg_t * job_specs)
 	       job_specs->work_dir,
 	       job_specs->alloc_node, job_specs->alloc_sid);
 
-	debug3("   resp_host=%s alloc_resp_port=%u  other_port=%u",
+	debug3("   sicp_mode=%u power_flags=%s",
+	       job_specs->sicp_mode, power_flags_str(job_specs->power_flags));
+
+	debug3("   resp_host=%s alloc_resp_port=%u other_port=%u",
 	       job_specs->resp_host,
 	       job_specs->alloc_resp_port, job_specs->other_port);
 	debug3("   dependency=%s account=%s qos=%s comment=%s",
@@ -3380,9 +3565,12 @@ void dump_job_desc(job_desc_msg_t * job_specs)
 		signal_flags = "B:";
 	else
 		signal_flags = "";
-	debug3("   end_time=%s signal=%s%u@%u wait_all_nodes=%ld",
+	cpu_freq_debug(NULL, NULL, buf, sizeof(buf), job_specs->cpu_freq_gov,
+		       job_specs->cpu_freq_min, job_specs->cpu_freq_max,
+		       NO_VAL);
+	debug3("   end_time=%s signal=%s%u@%u wait_all_nodes=%ld cpu_freq=%s",
 	       buf, signal_flags, job_specs->warn_signal, job_specs->warn_time,
-	       wait_all_nodes);
+	       wait_all_nodes, buf);
 
 	ntasks_per_node = (job_specs->ntasks_per_node != (uint16_t) NO_VAL) ?
 		(long) job_specs->ntasks_per_node : -1L;
@@ -3399,6 +3587,7 @@ void dump_job_desc(job_desc_msg_t * job_specs)
 	       job_specs->mem_bind_type, job_specs->mem_bind,
 	       job_specs->plane_size);
 	debug3("   array_inx=%s", job_specs->array_inx);
+	debug3("   burst_buffer=%s", job_specs->burst_buffer);
 
 	select_g_select_jobinfo_sprint(job_specs->select_jobinfo,
 				       buf, sizeof(buf), SELECT_PRINT_MIXED);
@@ -3453,8 +3642,11 @@ extern void rehash_jobs(void)
 }
 
 /* Create an exact copy of an existing job record for a job array.
- * The array_recs structure is moved to the new job record copy */
-struct job_record *_job_rec_copy(struct job_record *job_ptr)
+ * IN job_ptr - META job record for a job array, which is to become an
+ *		individial task of the job array.
+ *		Set the job's array_task_id to the task to be split out.
+ * RET - The new job record, which is the new META job record. */
+extern struct job_record *job_array_split(struct job_record *job_ptr)
 {
 	struct job_record *job_ptr_pend = NULL, *save_job_next;
 	struct job_details *job_details, *details_new, *save_details;
@@ -3462,6 +3654,7 @@ struct job_record *_job_rec_copy(struct job_record *job_ptr)
 	priority_factors_object_t *save_prio_factors;
 	List save_step_list;
 	int error_code = SLURM_SUCCESS;
+	bool global_job = false;
 	int i;
 
 	job_ptr_pend = _create_job_record(&error_code, 0);
@@ -3472,7 +3665,9 @@ struct job_record *_job_rec_copy(struct job_record *job_ptr)
 
 	_remove_job_hash(job_ptr);
 	job_ptr_pend->job_id = job_ptr->job_id;
-	if (_set_job_id(job_ptr) != SLURM_SUCCESS)
+	if (job_ptr->sicp_mode)
+		global_job = true;
+	if (_set_job_id(job_ptr, global_job) != SLURM_SUCCESS)
 		fatal("%s: _set_job_id error", __func__);
 	if (!job_ptr->array_recs) {
 		fatal("%s: job %u record lacks array structure",
@@ -3500,9 +3695,8 @@ struct job_record *_job_rec_copy(struct job_record *job_ptr)
 	job_ptr_pend->db_index = save_db_index;
 
 	job_ptr_pend->prio_factors = save_prio_factors;
-	if (job_ptr_pend->prio_factors && job_ptr->prio_factors)
-		memcpy(job_ptr_pend->prio_factors, job_ptr->prio_factors,
-				sizeof(priority_factors_object_t));
+	slurm_copy_priority_factors_object(job_ptr_pend->prio_factors,
+					   job_ptr->prio_factors);
 
 	job_ptr_pend->account = xstrdup(job_ptr->account);
 	job_ptr_pend->alias_list = xstrdup(job_ptr->alias_list);
@@ -3517,7 +3711,12 @@ struct job_record *_job_rec_copy(struct job_record *job_ptr)
 			  job_ptr_pend->array_task_id);
 	}
 	xfree(job_ptr_pend->array_recs->task_id_str);
-	job_ptr_pend->array_recs->task_cnt--;
+	if (job_ptr_pend->array_recs->task_cnt) {
+		job_ptr_pend->array_recs->task_cnt--;
+	} else {
+		error("Job %u array_recs->task_cnt underflow",
+		      job_ptr->array_job_id);
+	}
 	job_ptr_pend->array_task_id = NO_VAL;
 
 	job_ptr_pend->batch_host = NULL;
@@ -3525,6 +3724,7 @@ struct job_record *_job_rec_copy(struct job_record *job_ptr)
 		job_ptr_pend->check_job =
 			checkpoint_copy_jobinfo(job_ptr->check_job);
 	}
+	job_ptr_pend->burst_buffer = xstrdup(job_ptr->burst_buffer);
 	job_ptr_pend->comment = xstrdup(job_ptr->comment);
 
 	job_ptr_pend->front_end_ptr = NULL;
@@ -3538,6 +3738,11 @@ struct job_record *_job_rec_copy(struct job_record *job_ptr)
 	job_ptr_pend->gres_req = NULL;
 	job_ptr_pend->gres_used = NULL;
 
+	job_ptr_pend->limit_set.tres =
+		xmalloc(sizeof(uint16_t) * slurmctld_tres_cnt);
+	memcpy(job_ptr_pend->limit_set.tres, job_ptr->limit_set.tres,
+	       sizeof(uint16_t) * slurmctld_tres_cnt);
+
 	_add_job_hash(job_ptr);		/* Sets job_next */
 	_add_job_hash(job_ptr_pend);	/* Sets job_next */
 	_add_job_array_hash(job_ptr);
@@ -3580,6 +3785,15 @@ struct job_record *_job_rec_copy(struct job_record *job_ptr)
 		}
 	}
 	job_ptr_pend->state_desc = xstrdup(job_ptr->state_desc);
+
+	i = sizeof(uint64_t) * slurmctld_tres_cnt;
+	job_ptr_pend->tres_req_cnt = xmalloc(i);
+	memcpy(job_ptr_pend->tres_req_cnt, job_ptr->tres_req_cnt, i);
+	job_ptr_pend->tres_req_str = xstrdup(job_ptr->tres_req_str);
+	job_ptr_pend->tres_fmt_req_str = xstrdup(job_ptr->tres_fmt_req_str);
+	job_ptr_pend->tres_alloc_str = NULL;
+	job_ptr_pend->tres_fmt_alloc_str = NULL;
+
 	job_ptr_pend->wckey = xstrdup(job_ptr->wckey);
 
 	job_details = job_ptr->details;
@@ -3596,6 +3810,9 @@ struct job_record *_job_rec_copy(struct job_record *job_ptr)
 	details_new->ckpt_dir = xstrdup(job_details->ckpt_dir);
 	details_new->cpu_bind = xstrdup(job_details->cpu_bind);
 	details_new->cpu_bind_type = job_details->cpu_bind_type;
+	details_new->cpu_freq_min = job_details->cpu_freq_min;
+	details_new->cpu_freq_max = job_details->cpu_freq_max;
+	details_new->cpu_freq_gov = job_details->cpu_freq_gov;
 	details_new->depend_list = depended_list_copy(job_details->depend_list);
 	details_new->dependency = xstrdup(job_details->dependency);
 	details_new->orig_dependency = xstrdup(job_details->orig_dependency);
@@ -3647,8 +3864,9 @@ struct job_record *_job_rec_copy(struct job_record *job_ptr)
 static void _create_job_array(struct job_record *job_ptr,
 			      job_desc_msg_t *job_specs)
 {
+	struct job_details *details;
 	char *sep = NULL;
-	int max_run_tasks;
+	int max_run_tasks, min_task_id, max_task_id, step_task_id = 1;
 	uint32_t i_cnt;
 
 	if (!job_specs->array_bitmap)
@@ -3663,7 +3881,9 @@ static void _create_job_array(struct job_record *job_ptr,
 
 	job_ptr->array_job_id = job_ptr->job_id;
 	job_ptr->array_recs = xmalloc(sizeof(job_array_struct_t));
-	i_cnt = bit_fls(job_specs->array_bitmap) + 1;
+	min_task_id = bit_ffs(job_specs->array_bitmap);
+	max_task_id = bit_fls(job_specs->array_bitmap);
+	i_cnt = max_task_id + 1;
 	job_specs->array_bitmap = bit_realloc(job_specs->array_bitmap, i_cnt);
 	job_ptr->array_recs->task_id_bitmap = job_specs->array_bitmap;
 	job_specs->array_bitmap = NULL;
@@ -3679,6 +3899,24 @@ static void _create_job_array(struct job_record *job_ptr,
 		if (max_run_tasks > 0)
 			job_ptr->array_recs->max_run_tasks = max_run_tasks;
 	}
+
+	details = job_ptr->details;
+	if (details) {
+		if (job_specs->array_inx) {
+			sep = strchr(job_specs->array_inx, ':');
+			if (sep)
+				step_task_id = atoi(sep + 1);
+		}
+		details->env_sup = xrealloc(details->env_sup,
+					    (sizeof(char *) *
+					    (details->env_cnt + 3)));
+		xstrfmtcat(details->env_sup[details->env_cnt++],
+			   "SLURM_ARRAY_TASK_MIN=%d", min_task_id);
+		xstrfmtcat(details->env_sup[details->env_cnt++],
+			   "SLURM_ARRAY_TASK_MAX=%d", max_task_id);
+		xstrfmtcat(details->env_sup[details->env_cnt++],
+			   "SLURM_ARRAY_TASK_STEP=%d", step_task_id);
+	}
 }
 
 /*
@@ -3882,6 +4120,8 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 
 	no_alloc = test_only || too_fragmented ||
 		   (!top_prio) || (!independent) || !avail_front_end(job_ptr);
+
+	no_alloc = no_alloc || (bb_g_job_test_stage_in(job_ptr, no_alloc) != 1);
 	error_code = _select_nodes_parts(job_ptr, no_alloc, NULL, err_msg);
 	if (!test_only) {
 		last_job_update = now;
@@ -3898,7 +4138,9 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 	    (error_code == ESLURM_QOS_THRES) ||
 	    (error_code == ESLURM_ACCOUNTING_POLICY) ||
 	    (error_code == ESLURM_RESERVATION_NOT_USABLE) ||
-	    (error_code == ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE)) {
+	    (error_code == ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE) ||
+	    (error_code == ESLURM_POWER_NOT_AVAIL) ||
+	    (error_code == ESLURM_POWER_RESERVED)) {
 		/* Not fatal error, but job can't be scheduled right now */
 		if (immediate) {
 			job_ptr->job_state  = JOB_FAILED;
@@ -3933,7 +4175,7 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
 		job_ptr->exit_code  = 1;
 		job_ptr->start_time = job_ptr->end_time = now;
 		_purge_job_record(job_ptr->job_id);
-	} else if (!with_slurmdbd && !job_ptr->db_index)
+	} else if (!with_slurmdbd)
 		jobacct_storage_g_job_start(acct_db_conn, job_ptr);
 
 	if (!will_run) {
@@ -3952,7 +4194,7 @@ extern int job_allocate(job_desc_msg_t * job_specs, int immediate,
  * IN job_state - desired job state (JOB_BOOT_FAIL, JOB_NODE_FAIL, etc.)
  * RET 0 on success, otherwise ESLURM error code
  */
-extern int job_fail(uint32_t job_id, uint16_t job_state)
+extern int job_fail(uint32_t job_id, uint32_t job_state)
 {
 	struct job_record *job_ptr;
 	time_t now = time(NULL);
@@ -3967,7 +4209,7 @@ extern int job_fail(uint32_t job_id, uint16_t job_state)
 	if (IS_JOB_FINISHED(job_ptr))
 		return ESLURM_ALREADY_DONE;
 	if (IS_JOB_SUSPENDED(job_ptr)) {
-		enum job_states suspend_job_state = job_ptr->job_state;
+		uint32_t suspend_job_state = job_ptr->job_state;
 		/* we can't have it as suspended when we call the
 		 * accounting stuff.
 		 */
@@ -4034,7 +4276,7 @@ static int _job_signal(struct job_record *job_ptr, uint16_t signal,
 		}
 		/* build_cg_bitmap() not needed, job already completing */
 		verbose("%s: of requeuing %s successful",
-			__func__, jobid2str(job_ptr, jbuf));
+			__func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)));
 		return SLURM_SUCCESS;
 	}
 
@@ -4045,7 +4287,7 @@ static int _job_signal(struct job_record *job_ptr, uint16_t signal,
 		srun_allocate_abort(job_ptr);
 		job_completion_logger(job_ptr, false);
 		verbose("%s: of pending %s successful",
-			__func__, jobid2str(job_ptr, jbuf));
+			__func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)));
 		return SLURM_SUCCESS;
 	}
 
@@ -4063,11 +4305,17 @@ static int _job_signal(struct job_record *job_ptr, uint16_t signal,
 		job_completion_logger(job_ptr, false);
 		deallocate_nodes(job_ptr, false, true, preempt);
 		verbose("%s: %u of suspended %s successful",
-			__func__, signal, jobid2str(job_ptr, jbuf));
+			__func__, signal, jobid2str(job_ptr, jbuf,
+						    sizeof(jbuf)));
 		return SLURM_SUCCESS;
 	}
 
 	if (IS_JOB_RUNNING(job_ptr)) {
+		if (signal == SIGSTOP)
+			job_ptr->job_state |= JOB_STOPPED;
+		else if (signal == SIGCONT)
+			job_ptr->job_state &= (~JOB_STOPPED);
+
 		if ((signal == SIGKILL)
 		    && !(flags & KILL_STEPS_ONLY)
 		    && !(flags & KILL_JOB_BATCH)) {
@@ -4090,13 +4338,14 @@ static int _job_signal(struct job_record *job_ptr, uint16_t signal,
 			_signal_job(job_ptr, signal);
 		}
 		verbose("%s: %u of running %s successful 0x%x",
-			__func__, signal, jobid2str(job_ptr, jbuf),
+			__func__, signal, jobid2str(job_ptr, jbuf,
+						    sizeof(jbuf)),
 			job_ptr->job_state);
 		return SLURM_SUCCESS;
 	}
 
 	verbose("%s: %s can't be sent signal %u from state=%s",
-		__func__, jobid2str(job_ptr, jbuf), signal,
+		__func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)), signal,
 		job_state_string(job_ptr->job_state));
 
 	trace_job(job_ptr, __func__, "return");
@@ -4494,7 +4743,7 @@ extern int prolog_complete(uint32_t job_id,
  * IN job_id - id of the job which completed
  * IN uid - user id of user issuing the RPC
  * IN requeue - job should be run again if possible
- * IN node_fail - true of job terminated due to node failure
+ * IN node_fail - true if job terminated due to node failure
  * IN job_return_code - job's return code, if set then set state to FAILED
  * RET - 0 on success, otherwise ESLURM error code
  * global: job_list - pointer global job list
@@ -4519,7 +4768,7 @@ extern int job_complete(uint32_t job_id, uid_t uid, bool requeue,
 	}
 
 	info("%s: %s WIFEXITED %d WEXITSTATUS %d",
-	     __func__, jobid2str(job_ptr, jbuf),
+	     __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)),
 	     WIFEXITED(job_return_code), WEXITSTATUS(job_return_code));
 
 	if (IS_JOB_FINISHED(job_ptr)) {
@@ -4546,12 +4795,18 @@ extern int job_complete(uint32_t job_id, uid_t uid, bool requeue,
 	}
 
 	if ((job_return_code == NO_VAL) &&
-	    (IS_JOB_RUNNING(job_ptr) || IS_JOB_PENDING(job_ptr)))
-		info("%s: %s cancelled from interactive user or node failure",
-		     __func__, jobid2str(job_ptr, jbuf));
+	    (IS_JOB_RUNNING(job_ptr) || IS_JOB_PENDING(job_ptr))) {
+		if (node_fail) {
+			info("%s: %s cancelled by node failure",
+			     __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)));
+		} else {
+			info("%s: %s cancelled by interactive user",
+			     __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)));
+		}
+	}
 
 	if (IS_JOB_SUSPENDED(job_ptr)) {
-		enum job_states suspend_job_state = job_ptr->job_state;
+		uint32_t suspend_job_state = job_ptr->job_state;
 		/* we can't have it as suspended when we call the
 		 * accounting stuff.
 		 */
@@ -4591,10 +4846,10 @@ extern int job_complete(uint32_t job_id, uid_t uid, bool requeue,
 		acct_policy_add_job_submit(job_ptr);
 		if (node_fail) {
 			info("%s: requeue %s due to node failure",
-			     __func__, jobid2str(job_ptr, jbuf));
+			     __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)));
 		} else {
 			info("%s: requeue %s per user/system request",
-			     __func__, jobid2str(job_ptr, jbuf));
+			     __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)));
 		}
 		/* We have reached the maximum number of requeue
 		 * attempts hold the job with HoldMaxRequeue reason.
@@ -4627,8 +4882,8 @@ extern int job_complete(uint32_t job_id, uid_t uid, bool requeue,
 			job_ptr->state_reason = FAIL_EXIT_CODE;
 			xfree(job_ptr->state_desc);
 		} else if (job_comp_flag
-		           && ((job_ptr->end_time
-		                + slurmctld_conf.over_time_limit * 60) < now)) {
+			   && ((job_ptr->end_time
+				+ slurmctld_conf.over_time_limit * 60) < now)) {
 			/* Test if the job has finished before its allowed
 			 * over time has expired.
 			 */
@@ -4659,7 +4914,7 @@ extern int job_complete(uint32_t job_id, uid_t uid, bool requeue,
 		deallocate_nodes(job_ptr, false, suspended, false);
 	}
 
-	info("%s: %s done", __func__, jobid2str(job_ptr, jbuf));
+	info("%s: %s done", __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)));
 
 	return SLURM_SUCCESS;
 }
@@ -4806,7 +5061,8 @@ fini:
 
 static int _get_job_parts(job_desc_msg_t * job_desc,
 			  struct part_record **part_pptr,
-			  List *part_pptr_list)
+			  List *part_pptr_list,
+			  char **err_msg)
 {
 	struct part_record *part_ptr = NULL, *part_ptr_new = NULL;
 	List part_ptr_list = NULL;
@@ -4814,15 +5070,24 @@ static int _get_job_parts(job_desc_msg_t * job_desc,
 
 	/* Identify partition(s) and set pointer(s) to their struct */
 	if (job_desc->partition) {
+		char *err_part = NULL;
 		part_ptr = find_part_record(job_desc->partition);
 		if (part_ptr == NULL) {
-			part_ptr_list = get_part_list(job_desc->partition);
+			part_ptr_list = get_part_list(job_desc->partition,
+						      &err_part);
 			if (part_ptr_list)
 				part_ptr = list_peek(part_ptr_list);
 		}
 		if (part_ptr == NULL) {
 			info("%s: invalid partition specified: %s",
 			     __func__, job_desc->partition);
+			if (err_msg) {
+				xfree(*err_msg);
+				xstrfmtcat(*err_msg,
+					"invalid partition specified: %s",
+					err_part);
+				xfree(err_part);
+			}
 			return ESLURM_INVALID_PARTITION_NAME;
 		}
 	} else {
@@ -4900,12 +5165,12 @@ static int _valid_job_part(job_desc_msg_t * job_desc,
 			   uid_t submit_uid, bitstr_t *req_bitmap,
 			   struct part_record **part_pptr,
 			   List part_ptr_list,
-			   slurmdb_association_rec_t *assoc_ptr,
+			   slurmdb_assoc_rec_t *assoc_ptr,
 			   slurmdb_qos_rec_t *qos_ptr)
 {
 	int rc = SLURM_SUCCESS;
 	struct part_record *part_ptr = *part_pptr, *part_ptr_tmp;
-	slurmdb_association_rec_t assoc_rec;
+	slurmdb_assoc_rec_t assoc_rec;
 	uint32_t min_nodes_orig = INFINITE, max_nodes_orig = 1;
 	uint32_t max_time = 0;
 
@@ -4921,7 +5186,7 @@ static int _valid_job_part(job_desc_msg_t * job_desc,
 			 * associations.
 			 */
 			memset(&assoc_rec, 0,
-			       sizeof(slurmdb_association_rec_t));
+			       sizeof(slurmdb_assoc_rec_t));
 			if (assoc_ptr) {
 				assoc_rec.acct      = assoc_ptr->acct;
 				assoc_rec.partition = part_ptr_tmp->name;
@@ -4933,9 +5198,9 @@ static int _valid_job_part(job_desc_msg_t * job_desc,
 			}
 
 			if (assoc_ptr && assoc_rec.id != assoc_ptr->id) {
-				info("_valid_job_part: can't check multiple "
+				info("%s: can't check multiple "
 				     "partitions with partition based "
-				     "associations");
+				     "associations", __func__);
 				rc = SLURM_ERROR;
 			} else
 				rc = _part_access_check(part_ptr_tmp, job_desc,
@@ -5003,9 +5268,9 @@ static int _valid_job_part(job_desc_msg_t * job_desc,
 		   slurmctld_conf.enforce_part_limits &&
 		   (!qos_ptr || (qos_ptr && !(qos_ptr->flags &
 					      QOS_FLAG_PART_MIN_NODE)))) {
-		info("_valid_job_part: job's min nodes greater than "
+		info("%s: job's min nodes greater than "
 		     "partition's max nodes (%u > %u)",
-		     job_desc->min_nodes, max_nodes_orig);
+		     __func__, job_desc->min_nodes, max_nodes_orig);
 		rc = ESLURM_INVALID_NODE_COUNT;
 		goto fini;
 	} else if ((job_desc->min_nodes < min_nodes_orig) &&
@@ -5019,15 +5284,15 @@ static int _valid_job_part(job_desc_msg_t * job_desc,
 	    (job_desc->max_nodes < min_nodes_orig) &&
 	    (!qos_ptr || (qos_ptr && !(qos_ptr->flags
 				       & QOS_FLAG_PART_MAX_NODE)))) {
-		info("_valid_job_part: job's max nodes less than partition's "
+		info("%s: job's max nodes less than partition's "
 		     "min nodes (%u < %u)",
-		     job_desc->max_nodes, min_nodes_orig);
+		     __func__, job_desc->max_nodes, min_nodes_orig);
 		rc = ESLURM_INVALID_NODE_COUNT;
 		goto fini;
 	}
 #ifndef HAVE_FRONT_END
 	if ((job_desc->min_nodes == 0) && (job_desc->script == NULL)) {
-		info("_valid_job_part: min_nodes==0 for non-batch job");
+		info("%s: min_nodes==0 for non-batch job", __func__);
 		rc = ESLURM_INVALID_NODE_COUNT;
 		goto fini;
 	}
@@ -5035,7 +5300,7 @@ static int _valid_job_part(job_desc_msg_t * job_desc,
 
 	if ((job_desc->time_limit   == NO_VAL) &&
 	    (part_ptr->default_time == 0)) {
-		info("_valid_job_part: job's default time is 0");
+		info("%s: job's default time is 0", __func__);
 		rc = ESLURM_INVALID_TIME_LIMIT;
 		goto fini;
 	}
@@ -5048,9 +5313,9 @@ static int _valid_job_part(job_desc_msg_t * job_desc,
 	    (job_desc->time_min >  max_time) &&
 	    (!qos_ptr || (qos_ptr && !(qos_ptr->flags &
 				       QOS_FLAG_PART_TIME_LIMIT)))) {
-		info("_valid_job_part: job's min time greater than "
+		info("%s: job's min time greater than "
 		     "partition's (%u > %u)",
-		     job_desc->time_min, max_time);
+		     __func__, job_desc->time_min, max_time);
 		rc = ESLURM_INVALID_TIME_LIMIT;
 		goto fini;
 	}
@@ -5060,9 +5325,9 @@ static int _valid_job_part(job_desc_msg_t * job_desc,
 	    slurmctld_conf.enforce_part_limits &&
 	    (!qos_ptr || (qos_ptr && !(qos_ptr->flags &
 				       QOS_FLAG_PART_TIME_LIMIT)))) {
-		info("_valid_job_part: job's time limit greater than "
+		info("%s: job's time limit greater than "
 		     "partition's (%u > %u)",
-		     job_desc->time_limit, max_time);
+		     __func__, job_desc->time_limit, max_time);
 		rc = ESLURM_INVALID_TIME_LIMIT;
 		goto fini;
 	}
@@ -5070,9 +5335,9 @@ static int _valid_job_part(job_desc_msg_t * job_desc,
 	    (job_desc->time_min >  job_desc->time_limit) &&
 	    (!qos_ptr || (qos_ptr && !(qos_ptr->flags &
 				       QOS_FLAG_PART_TIME_LIMIT)))) {
-		info("_valid_job_part: job's min_time greater time limit "
+		info("%s: job's min_time greater time limit "
 		     "(%u > %u)",
-		     job_desc->time_min, job_desc->time_limit);
+		     __func__, job_desc->time_min, job_desc->time_limit);
 		rc = ESLURM_INVALID_TIME_LIMIT;
 		goto fini;
 	}
@@ -5095,7 +5360,7 @@ extern int job_limits_check(struct job_record **job_pptr, bool check_min_time)
 	struct part_record *part_ptr = NULL;
 	struct job_record *job_ptr = NULL;
 	slurmdb_qos_rec_t  *qos_ptr;
-	slurmdb_association_rec_t *assoc_ptr;
+	slurmdb_assoc_rec_t *assoc_ptr;
 	uint32_t job_min_nodes, job_max_nodes;
 	uint32_t part_min_nodes, part_max_nodes;
 	uint32_t time_check;
@@ -5206,7 +5471,7 @@ extern int job_limits_check(struct job_record **job_pptr, bool check_min_time)
  *	ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE is returned
  */
 
-static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
+static int _job_create(job_desc_msg_t *job_desc, int allocate, int will_run,
 		       struct job_record **job_pptr, uid_t submit_uid,
 		       char **err_msg, uint16_t protocol_version)
 {
@@ -5216,8 +5481,8 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	List part_ptr_list = NULL;
 	bitstr_t *req_bitmap = NULL, *exc_bitmap = NULL;
 	struct job_record *job_ptr = NULL;
-	slurmdb_association_rec_t assoc_rec, *assoc_ptr = NULL;
-	List license_list = NULL;
+	slurmdb_assoc_rec_t assoc_rec, *assoc_ptr = NULL;
+	List license_list = NULL, gres_list = NULL;
 	bool valid;
 	slurmdb_qos_rec_t qos_rec, *qos_ptr;
 	uint32_t user_submit_priority;
@@ -5250,6 +5515,8 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	}
 
 	memset(&acct_policy_limit_set, 0, sizeof(acct_policy_limit_set_t));
+	acct_policy_limit_set.tres =
+		xmalloc(sizeof(uint16_t) * slurmctld_tres_cnt);
 
 	*job_pptr = (struct job_record *) NULL;
 	/*
@@ -5302,12 +5569,13 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		goto cleanup_fail;
 	}
 
-	error_code = _get_job_parts(job_desc, &part_ptr, &part_ptr_list);
+	error_code = _get_job_parts(job_desc, &part_ptr, &part_ptr_list,
+				    err_msg);
 	if (error_code != SLURM_SUCCESS)
 		goto cleanup_fail;
 
 
-	memset(&assoc_rec, 0, sizeof(slurmdb_association_rec_t));
+	memset(&assoc_rec, 0, sizeof(slurmdb_assoc_rec_t));
 	assoc_rec.acct      = job_desc->account;
 	assoc_rec.partition = part_ptr->name;
 	assoc_rec.uid       = job_desc->user_id;
@@ -5354,7 +5622,8 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	}
 
 	qos_ptr = _determine_and_validate_qos(
-		job_desc->reservation, assoc_ptr, false, &qos_rec, &qos_error);
+		job_desc->reservation, assoc_ptr, false, &qos_rec, &qos_error,
+		false);
 
 	if (qos_error != SLURM_SUCCESS) {
 		error_code = qos_error;
@@ -5369,10 +5638,41 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		goto cleanup_fail;
 
 	if ((error_code = _validate_job_desc(job_desc, allocate, submit_uid,
-	                                     part_ptr, part_ptr_list))) {
+					     part_ptr, part_ptr_list))) {
+		goto cleanup_fail;
+	}
+
+	job_desc->tres_req_cnt = xmalloc(sizeof(uint64_t) * slurmctld_tres_cnt);
+	job_desc->tres_req_cnt[TRES_ARRAY_NODE] = job_desc->min_nodes;
+	job_desc->tres_req_cnt[TRES_ARRAY_CPU] = job_desc->min_cpus;
+	job_desc->tres_req_cnt[TRES_ARRAY_MEM] =  job_get_tres_mem(
+		job_desc->pn_min_memory,
+		job_desc->tres_req_cnt[TRES_ARRAY_CPU],
+		job_desc->min_nodes);
+
+	license_list = license_validate(job_desc->licenses,
+					job_desc->tres_req_cnt, &valid);
+	if (!valid) {
+		info("Job's requested licenses are invalid: %s",
+		     job_desc->licenses);
+		error_code = ESLURM_INVALID_LICENSES;
+		goto cleanup_fail;
+	}
+
+	if (gres_plugin_job_state_validate(job_desc->gres, &gres_list)) {
+		error_code = ESLURM_INVALID_GRES;
 		goto cleanup_fail;
 	}
 
+	gres_set_job_tres_cnt(gres_list,
+			      job_desc->min_nodes,
+			      job_desc->tres_req_cnt,
+			      false);
+
+	if ((error_code = bb_g_job_validate(job_desc, submit_uid))
+	    != SLURM_SUCCESS)
+		goto cleanup_fail;
+
 	if ((accounting_enforce & ACCOUNTING_ENFORCE_LIMITS) &&
 	    (!acct_policy_validate(job_desc, part_ptr,
 				   assoc_ptr, qos_ptr, NULL,
@@ -5498,14 +5798,6 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		goto cleanup_fail;
 	}
 
-	license_list = license_validate(job_desc->licenses, &valid);
-	if (!valid) {
-		info("Job's requested licenses are invalid: %s",
-		     job_desc->licenses);
-		error_code = ESLURM_INVALID_LICENSES;
-		goto cleanup_fail;
-	}
-
 	if ((error_code = _copy_job_desc_to_job_record(job_desc,
 						       job_pptr,
 						       &req_bitmap,
@@ -5526,13 +5818,9 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		goto cleanup_fail;
 	}
 
-	job_ptr->limit_set_max_cpus = acct_policy_limit_set.max_cpus;
-	job_ptr->limit_set_max_nodes = acct_policy_limit_set.max_nodes;
-	job_ptr->limit_set_min_cpus = acct_policy_limit_set.min_cpus;
-	job_ptr->limit_set_min_nodes = acct_policy_limit_set.min_nodes;
-	job_ptr->limit_set_pn_min_memory = acct_policy_limit_set.pn_min_memory;
-	job_ptr->limit_set_time = acct_policy_limit_set.time;
-	job_ptr->limit_set_qos = acct_policy_limit_set.qos;
+	memcpy(&job_ptr->limit_set, &acct_policy_limit_set,
+	       sizeof(acct_policy_limit_set_t));
+	acct_policy_limit_set.tres = NULL;
 
 	job_ptr->assoc_id = assoc_rec.id;
 	job_ptr->assoc_ptr = (void *) assoc_ptr;
@@ -5579,10 +5867,9 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	/* NOTE: If this job is being used to expand another job, this job's
 	 * gres_list has already been filled in with a copy of gres_list job
 	 * to be expanded by update_job_dependency() */
-	if ((job_ptr->details->expanding_jobid == 0) &&
-	    gres_plugin_job_state_validate(job_ptr->gres, &job_ptr->gres_list)){
-		error_code = ESLURM_INVALID_GRES;
-		goto cleanup_fail;
+	if (!job_ptr->details->expanding_jobid) {
+		job_ptr->gres_list = gres_list;
+		gres_list = NULL;
 	}
 	gres_plugin_job_state_log(job_ptr->gres_list, job_ptr->job_id);
 
@@ -5599,6 +5886,9 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 		job_ptr->batch_flag = 1;
 	} else
 		job_ptr->batch_flag = 0;
+	if (!will_run &&
+	    (error_code = bb_g_job_validate2(job_ptr, err_msg)))
+		goto cleanup_fail;
 
 	job_ptr->license_list = license_list;
 	license_list = NULL;
@@ -5614,6 +5904,7 @@ static int _job_create(job_desc_msg_t * job_desc, int allocate, int will_run,
 	job_ptr->best_switch = true;
 
 	FREE_NULL_LIST(license_list);
+	FREE_NULL_LIST(gres_list);
 	FREE_NULL_BITMAP(req_bitmap);
 	FREE_NULL_BITMAP(exc_bitmap);
 	return error_code;
@@ -5629,6 +5920,8 @@ cleanup_fail:
 		*job_pptr = (struct job_record *) NULL;
 	}
 	FREE_NULL_LIST(license_list);
+	xfree(acct_policy_limit_set.tres);
+	FREE_NULL_LIST(gres_list);
 	FREE_NULL_LIST(part_ptr_list);
 	FREE_NULL_BITMAP(req_bitmap);
 	FREE_NULL_BITMAP(exc_bitmap);
@@ -5735,30 +6028,19 @@ static bool _valid_array_inx(job_desc_msg_t *job_desc)
 	return valid;
 }
 
-/* Perform some size checks on strings we store to prevent
- * malicious user filling slurmctld's memory
- * IN job_desc   - user job submit request
- * IN submit_uid - UID making job submit request
- * OUT err_msg   - custom error message to return
- * RET 0 or error code */
-extern int validate_job_create_req(job_desc_msg_t * job_desc, uid_t submit_uid,
-				   char **err_msg)
+/* Make sure a job descriptor's strings are not huge, which could result in
+ * a denial of service attack due to memory demands by the slurmctld */
+static int _test_job_desc_fields(job_desc_msg_t * job_desc)
 {
-	int rc;
-
-	rc = job_submit_plugin_submit(job_desc, (uint32_t) submit_uid, err_msg);
-	if (rc != SLURM_SUCCESS)
-		return rc;
-
 	if (_test_strlen(job_desc->account, "account", 1024)		||
 	    _test_strlen(job_desc->alloc_node, "alloc_node", 1024)	||
 	    _test_strlen(job_desc->array_inx, "array_inx", 1024 * 4)	||
 	    _test_strlen(job_desc->blrtsimage, "blrtsimage", 1024)	||
+	    _test_strlen(job_desc->burst_buffer, "burst_buffer",1024*8) ||
 	    _test_strlen(job_desc->ckpt_dir, "ckpt_dir", 1024)		||
 	    _test_strlen(job_desc->comment, "comment", 1024)		||
 	    _test_strlen(job_desc->cpu_bind, "cpu_bind", 1024)		||
 	    _test_strlen(job_desc->dependency, "dependency", 1024*128)	||
-	    _test_strlen(job_desc->exc_nodes, "exc_nodes", 1024*64)	||
 	    _test_strlen(job_desc->features, "features", 1024)		||
 	    _test_strlen(job_desc->gres, "gres", 1024)			||
 	    _test_strlen(job_desc->licenses, "licenses", 1024)		||
@@ -5771,7 +6053,6 @@ extern int validate_job_create_req(job_desc_msg_t * job_desc, uid_t submit_uid,
 	    _test_strlen(job_desc->partition, "partition", 1024)	||
 	    _test_strlen(job_desc->qos, "qos", 1024)			||
 	    _test_strlen(job_desc->ramdiskimage, "ramdiskimage", 1024)	||
-	    _test_strlen(job_desc->req_nodes, "req_nodes", 1024*64)	||
 	    _test_strlen(job_desc->reservation, "reservation", 1024)	||
 	    _test_strlen(job_desc->script, "script", 1024 * 1024 * 4)	||
 	    _test_strlen(job_desc->std_err, "std_err", MAXPATHLEN)	||
@@ -5781,6 +6062,28 @@ extern int validate_job_create_req(job_desc_msg_t * job_desc, uid_t submit_uid,
 	    _test_strlen(job_desc->work_dir, "work_dir", MAXPATHLEN))
 		return ESLURM_PATHNAME_TOO_LONG;
 
+	return SLURM_SUCCESS;
+}
+
+/* Perform some size checks on strings we store to prevent
+ * malicious user filling slurmctld's memory
+ * IN job_desc   - user job submit request
+ * IN submit_uid - UID making job submit request
+ * OUT err_msg   - custom error message to return
+ * RET 0 or error code */
+extern int validate_job_create_req(job_desc_msg_t * job_desc, uid_t submit_uid,
+				   char **err_msg)
+{
+	int rc;
+
+	rc = job_submit_plugin_submit(job_desc, (uint32_t) submit_uid, err_msg);
+	if (rc != SLURM_SUCCESS)
+		return rc;
+
+	rc = _test_job_desc_fields(job_desc);
+	if (rc != SLURM_SUCCESS)
+		return rc;
+
 	if (!_valid_array_inx(job_desc))
 		return ESLURM_INVALID_ARRAY;
 
@@ -6370,6 +6673,7 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	int error_code;
 	struct job_details *detail_ptr;
 	struct job_record *job_ptr;
+	bool global_job = false;
 
 	if (slurm_get_track_wckey()) {
 		if (!job_desc->wckey) {
@@ -6429,7 +6733,9 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	if (job_desc->job_id != NO_VAL) {	/* already confirmed unique */
 		job_ptr->job_id = job_desc->job_id;
 	} else {
-		error_code = _set_job_id(job_ptr);
+		if (job_desc->sicp_mode || job_desc->clusters)
+			global_job = true;
+		error_code = _set_job_id(job_ptr, global_job);
 		if (error_code)
 			return error_code;
 	}
@@ -6439,6 +6745,15 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	if (job_desc->wckey)
 		job_ptr->wckey = xstrdup(job_desc->wckey);
 
+	/* Since this is only used in the slurmctld copy it now.
+	 */
+	job_ptr->tres_req_cnt = job_desc->tres_req_cnt;
+	job_desc->tres_req_cnt = NULL;
+	job_ptr->tres_req_str = assoc_mgr_make_tres_str_from_array(
+		job_ptr->tres_req_cnt, TRES_STR_FLAG_SIMPLE, false);
+	job_ptr->tres_fmt_req_str = assoc_mgr_make_tres_str_from_array(
+		job_ptr->tres_req_cnt, 0, false);
+
 	_add_job_hash(job_ptr);
 
 	job_ptr->user_id    = (uid_t) job_desc->user_id;
@@ -6450,6 +6765,7 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	job_ptr->alloc_sid  = job_desc->alloc_sid;
 	job_ptr->alloc_node = xstrdup(job_desc->alloc_node);
 	job_ptr->account    = xstrdup(job_desc->account);
+	job_ptr->burst_buffer = xstrdup(job_desc->burst_buffer);
 	job_ptr->gres       = xstrdup(job_desc->gres);
 	job_ptr->network    = xstrdup(job_desc->network);
 	job_ptr->resv_name  = xstrdup(job_desc->reservation);
@@ -6472,6 +6788,8 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	job_ptr->resp_host = xstrdup(job_desc->resp_host);
 	job_ptr->alloc_resp_port = job_desc->alloc_resp_port;
 	job_ptr->other_port = job_desc->other_port;
+	job_ptr->power_flags = job_desc->power_flags;
+	job_ptr->sicp_mode = job_desc->sicp_mode;
 	job_ptr->time_last_active = time(NULL);
 	job_ptr->cr_enabled = 0;
 	job_ptr->derived_ec = 0;
@@ -6479,7 +6797,7 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	job_ptr->licenses  = xstrdup(job_desc->licenses);
 	job_ptr->mail_type = job_desc->mail_type;
 	job_ptr->mail_user = xstrdup(job_desc->mail_user);
-
+	job_ptr->bit_flags = job_desc->bitflags;
 	job_ptr->ckpt_interval = job_desc->ckpt_interval;
 	job_ptr->spank_job_env = job_desc->spank_job_env;
 	job_ptr->spank_job_env_size = job_desc->spank_job_env_size;
@@ -6502,6 +6820,9 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	detail_ptr->acctg_freq = xstrdup(job_desc->acctg_freq);
 	detail_ptr->cpu_bind_type = job_desc->cpu_bind_type;
 	detail_ptr->cpu_bind   = xstrdup(job_desc->cpu_bind);
+	detail_ptr->cpu_freq_gov = job_desc->cpu_freq_gov;
+	detail_ptr->cpu_freq_max = job_desc->cpu_freq_max;
+	detail_ptr->cpu_freq_min = job_desc->cpu_freq_min;
 	detail_ptr->nice       = job_desc->nice;
 	detail_ptr->open_mode  = job_desc->open_mode;
 	detail_ptr->min_cpus   = job_desc->min_cpus;
@@ -6530,6 +6851,9 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	} else if (job_desc->shared == 1) {
 		detail_ptr->share_res  = 1;
 		detail_ptr->whole_node = 0;
+	} else if (job_desc->shared == 2) {
+		detail_ptr->share_res  = (uint8_t) NO_VAL;
+		detail_ptr->whole_node = 2;
 	} else {
 		detail_ptr->share_res  = (uint8_t) NO_VAL;
 		detail_ptr->whole_node = 0;
@@ -6542,7 +6866,7 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 		detail_ptr->core_spec = (uint16_t) NO_VAL;
 	if (detail_ptr->core_spec != (uint16_t) NO_VAL)
 		detail_ptr->whole_node = 1;
-	if (job_desc->task_dist != (uint16_t) NO_VAL)
+	if (job_desc->task_dist != NO_VAL)
 		detail_ptr->task_dist = job_desc->task_dist;
 	if (job_desc->cpus_per_task != (uint16_t) NO_VAL)
 		detail_ptr->cpus_per_task = MAX(job_desc->cpus_per_task, 1);
@@ -6606,8 +6930,8 @@ _copy_job_desc_to_job_record(job_desc_msg_t * job_desc,
 	/* The priority needs to be set after this since we don't have
 	 * an association rec yet
 	 */
-
 	detail_ptr->mc_ptr = _set_multi_core_data(job_desc);
+
 	return SLURM_SUCCESS;
 }
 
@@ -6721,7 +7045,9 @@ void job_time_limit(void)
 			    slurmctld_conf.msg_timeout + 1);
 	time_t over_run;
 	int resv_status = 0;
-
+#ifndef HAVE_BG
+	uint8_t prolog;
+#endif
 	if (slurmctld_conf.over_time_limit == (uint16_t) INFINITE)
 		over_run = now - (365 * 24 * 60 * 60);	/* one year */
 	else
@@ -6740,7 +7066,12 @@ void job_time_limit(void)
 		 * power_node_bitmap so bit_overlap always returns 0
 		 * and erroneously removes the flag.
 		 */
-		if (IS_JOB_CONFIGURING(job_ptr)) {
+		prolog = 0;
+		if (job_ptr->details)
+			prolog = job_ptr->details->prolog_running;
+
+		if (prolog == 0
+		    && IS_JOB_CONFIGURING(job_ptr)) {
 			if (!IS_JOB_RUNNING(job_ptr) ||
 			    (bit_overlap(job_ptr->node_bitmap,
 					 power_node_bitmap) == 0)) {
@@ -6888,18 +7219,155 @@ void job_time_limit(void)
 	fini_job_resv_check();
 }
 
-extern int job_update_cpu_cnt(struct job_record *job_ptr, int node_inx)
+/* job write lock must be locked before calling this */
+extern void job_set_req_tres(
+	struct job_record *job_ptr, bool assoc_mgr_locked)
 {
-	int cnt, offset, rc = SLURM_SUCCESS;
+	uint32_t cpu_cnt = 0, mem_cnt = 0, node_cnt = 0;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
 
-	xassert(job_ptr);
+	xfree(job_ptr->tres_req_str);
+	xfree(job_ptr->tres_fmt_req_str);
+	xfree(job_ptr->tres_req_cnt);
+
+	if (!assoc_mgr_locked)
+		assoc_mgr_lock(&locks);
+
+	xfree(job_ptr->tres_req_cnt);
+	job_ptr->tres_req_cnt = xmalloc(sizeof(uint64_t) * g_tres_count);
+
+	if (job_ptr->details) {
+		node_cnt = job_ptr->details->min_nodes;
+		cpu_cnt = job_ptr->details->min_cpus;
+		if (job_ptr->details->pn_min_memory)
+			mem_cnt = job_ptr->details->pn_min_memory;
+	}
+
+	/* if this is set just override */
+	if (job_ptr->total_cpus)
+		cpu_cnt = job_ptr->total_cpus;
+
+#ifdef HAVE_BG
+	select_g_select_jobinfo_get(job_ptr->select_jobinfo,
+				    SELECT_JOBDATA_NODE_CNT,
+				    &node_cnt);
+#else
+	if (job_ptr->node_cnt)
+		node_cnt = job_ptr->node_cnt;
+#endif
+
+	job_ptr->tres_req_cnt[TRES_ARRAY_NODE] = (uint64_t)node_cnt;
+	job_ptr->tres_req_cnt[TRES_ARRAY_CPU] = (uint64_t)cpu_cnt;
+	job_ptr->tres_req_cnt[TRES_ARRAY_MEM] = (uint64_t)mem_cnt;
+
+	license_set_job_tres_cnt(job_ptr->license_list,
+				 job_ptr->tres_req_cnt,
+				 true);
+
+	/* FIXME: this assumes that all nodes have equal TRES */
+	gres_set_job_tres_cnt(job_ptr->gres_list,
+			      node_cnt,
+			      job_ptr->tres_req_cnt,
+			      true);
+
+	bb_g_job_set_tres_cnt(job_ptr,
+			      job_ptr->tres_req_cnt,
+			      true);
+
+	/* now that the array is filled lets make the string from it */
+	job_ptr->tres_req_str =	assoc_mgr_make_tres_str_from_array(
+		job_ptr->tres_req_cnt, TRES_STR_FLAG_SIMPLE, true);
+
+	job_ptr->tres_fmt_req_str = assoc_mgr_make_tres_str_from_array(
+		job_ptr->tres_req_cnt, 0, true);
+
+	if (!assoc_mgr_locked)
+		assoc_mgr_unlock(&locks);
+}
+
+extern void job_set_alloc_tres(struct job_record *job_ptr,
+			       bool assoc_mgr_locked)
+{
+	uint64_t tres_count;
+	uint32_t alloc_nodes = 0;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+
+	xfree(job_ptr->tres_alloc_str);
+	xfree(job_ptr->tres_alloc_cnt);
+
+	/* We only need to do this on non-pending jobs */
+	if (IS_JOB_PENDING(job_ptr))
+		return;
+
+	if (!assoc_mgr_locked)
+		assoc_mgr_lock(&locks);
+	xfree(job_ptr->tres_alloc_cnt);
+
+	job_ptr->tres_alloc_cnt = xmalloc(
+		sizeof(uint64_t) * slurmctld_tres_cnt);
+
+	job_ptr->tres_alloc_cnt[TRES_ARRAY_CPU] = (uint64_t)job_ptr->total_cpus;
+
+#ifdef HAVE_BG
+	select_g_select_jobinfo_get(job_ptr->select_jobinfo,
+				    SELECT_JOBDATA_NODE_CNT,
+				    &alloc_nodes);
+#else
+	alloc_nodes = job_ptr->node_cnt;
+#endif
+	job_ptr->tres_alloc_cnt[TRES_ARRAY_NODE] = (uint64_t)alloc_nodes;
+
+	tres_count = (uint64_t)job_ptr->details->pn_min_memory;
+	if (tres_count & MEM_PER_CPU) {
+		tres_count &= (~MEM_PER_CPU);
+		tres_count *= job_ptr->tres_alloc_cnt[TRES_ARRAY_CPU];
+	} else {
+		tres_count *= job_ptr->tres_alloc_cnt[TRES_ARRAY_NODE];
+	}
+	job_ptr->tres_alloc_cnt[TRES_ARRAY_MEM] = tres_count;
+
+	license_set_job_tres_cnt(job_ptr->license_list,
+				 job_ptr->tres_alloc_cnt,
+				 true);
+
+	gres_set_job_tres_cnt(job_ptr->gres_list,
+			      alloc_nodes,
+			      job_ptr->tres_alloc_cnt,
+			      true);
+
+	bb_g_job_set_tres_cnt(job_ptr,
+			      job_ptr->tres_alloc_cnt,
+			      true);
+
+	/* now that the array is filled lets make the string from it */
+	xfree(job_ptr->tres_alloc_str);
+	job_ptr->tres_alloc_str = assoc_mgr_make_tres_str_from_array(
+		job_ptr->tres_alloc_cnt, TRES_STR_FLAG_SIMPLE, true);
+
+	xfree(job_ptr->tres_fmt_alloc_str);
+	job_ptr->tres_fmt_alloc_str = assoc_mgr_make_tres_str_from_array(
+		job_ptr->tres_alloc_cnt, 0, true);
+
+	if (!assoc_mgr_locked)
+		assoc_mgr_unlock(&locks);
+
+	return;
+}
+
+extern int job_update_tres_cnt(struct job_record *job_ptr, int node_inx)
+{
+	int cpu_cnt, offset = -1, rc = SLURM_SUCCESS;
+
+	xassert(job_ptr);
 
 #ifdef HAVE_BG
 	/* This function doesn't apply to a bluegene system since the
 	 * cpu count isn't set up on that system. */
 	return SLURM_SUCCESS;
 #endif
-	if (job_ptr->details->whole_node) {
+	if (job_ptr->details->whole_node == 1) {
 		/* Since we are allocating whole nodes don't rely on
 		 * the job_resrcs since it could be less because the
 		 * node could of only used 1 thread per core.
@@ -6907,38 +7375,40 @@ extern int job_update_cpu_cnt(struct job_record *job_ptr, int node_inx)
 		struct node_record *node_ptr =
 			node_record_table_ptr + node_inx;
 		if (slurmctld_conf.fast_schedule)
-			cnt = node_ptr->config_ptr->cpus;
+			cpu_cnt = node_ptr->config_ptr->cpus;
 		else
-			cnt = node_ptr->cpus;
+			cpu_cnt = node_ptr->cpus;
 	} else {
 		if ((offset = job_resources_node_inx_to_cpu_inx(
 			     job_ptr->job_resrcs, node_inx)) < 0) {
-			error("job_update_cpu_cnt: problem getting "
+			error("job_update_tres_cnt: problem getting "
 			      "offset of job %u",
 			      job_ptr->job_id);
 			job_ptr->cpu_cnt = 0;
 			return SLURM_ERROR;
 		}
 
-		cnt = job_ptr->job_resrcs->cpus[offset];
+		cpu_cnt = job_ptr->job_resrcs->cpus[offset];
 	}
-	if (cnt > job_ptr->cpu_cnt) {
-		error("job_update_cpu_cnt: cpu_cnt underflow on job_id %u",
+	if (cpu_cnt > job_ptr->cpu_cnt) {
+		error("job_update_tres_cnt: cpu_cnt underflow on job_id %u",
 		      job_ptr->job_id);
 		job_ptr->cpu_cnt = 0;
 		rc = SLURM_ERROR;
 	} else
-		job_ptr->cpu_cnt -= cnt;
+		job_ptr->cpu_cnt -= cpu_cnt;
 
 	if (IS_JOB_RESIZING(job_ptr)) {
-		if (cnt > job_ptr->total_cpus) {
-			error("job_update_cpu_cnt: total_cpus "
+		if (cpu_cnt > job_ptr->total_cpus) {
+			error("job_update_tres_cnt: total_cpus "
 			      "underflow on job_id %u",
 			      job_ptr->job_id);
 			job_ptr->total_cpus = 0;
 			rc = SLURM_ERROR;
 		} else
-			job_ptr->total_cpus -= cnt;
+			job_ptr->total_cpus -= cpu_cnt;
+
+		job_set_alloc_tres(job_ptr, false);
 	}
 	return rc;
 }
@@ -6970,8 +7440,8 @@ static void _job_timed_out(struct job_record *job_ptr)
  * IN submit_uid - who request originated
  */
 static int _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate,
-                              uid_t submit_uid, struct part_record *part_ptr,
-                              List part_list)
+			      uid_t submit_uid, struct part_record *part_ptr,
+			      List part_list)
 {
 	if ((job_desc_msg->min_cpus  == NO_VAL) &&
 	    (job_desc_msg->min_nodes == NO_VAL) &&
@@ -6995,7 +7465,7 @@ static int _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate,
 	if (job_desc_msg->contiguous == (uint16_t) NO_VAL)
 		job_desc_msg->contiguous = 0;
 
-	if (job_desc_msg->task_dist == (uint16_t) NO_VAL) {
+	if (job_desc_msg->task_dist == NO_VAL) {
 		/* not typically set by salloc or sbatch */
 		job_desc_msg->task_dist = SLURM_DIST_CYCLIC;
 	}
@@ -7070,7 +7540,7 @@ static int _validate_job_desc(job_desc_msg_t * job_desc_msg, int allocate,
  */
 static bool
 _validate_min_mem_partition(job_desc_msg_t *job_desc_msg,
-                            struct part_record *part_ptr, List part_list)
+			    struct part_record *part_ptr, List part_list)
 {
 	ListIterator iter;
 	struct part_record *part;
@@ -7163,6 +7633,7 @@ static void _list_delete_job(void *job_entry)
 		xfree(job_ptr->array_recs);
 	}
 	xfree(job_ptr->batch_host);
+	xfree(job_ptr->burst_buffer);
 	xfree(job_ptr->comment);
 	free_job_resources(&job_ptr->job_resrcs);
 	xfree(job_ptr->gres);
@@ -7172,6 +7643,7 @@ static void _list_delete_job(void *job_entry)
 	FREE_NULL_LIST(job_ptr->gres_list);
 	xfree(job_ptr->licenses);
 	FREE_NULL_LIST(job_ptr->license_list);
+	xfree(job_ptr->limit_set.tres);
 	xfree(job_ptr->mail_user);
 	xfree(job_ptr->name);
 	xfree(job_ptr->network);
@@ -7191,6 +7663,12 @@ static void _list_delete_job(void *job_entry)
 		xfree(job_ptr->spank_job_env[i]);
 	xfree(job_ptr->spank_job_env);
 	xfree(job_ptr->state_desc);
+	xfree(job_ptr->tres_alloc_cnt);
+	xfree(job_ptr->tres_alloc_str);
+	xfree(job_ptr->tres_fmt_alloc_str);
+	xfree(job_ptr->tres_req_cnt);
+	xfree(job_ptr->tres_req_str);
+	xfree(job_ptr->tres_fmt_req_str);
 	step_list_purge(job_ptr);
 	select_g_select_jobinfo_free(job_ptr->select_jobinfo);
 	xfree(job_ptr->wckey);
@@ -7252,7 +7730,7 @@ static int _list_find_job_old(void *job_entry, void *key)
 	if (job_ptr->end_time > min_age)
 		return 0;	/* Too new to purge */
 
-	if (!(IS_JOB_FINISHED(job_ptr)))
+	if (!(IS_JOB_COMPLETED(job_ptr)))
 		return 0;	/* Job still active */
 
 	if (job_ptr->step_list && list_count(job_ptr->step_list)) {
@@ -7275,10 +7753,14 @@ static int _list_find_job_old(void *job_entry, void *key)
 	if (cleaning)
 		return 0;      /* Job hasn't finished yet */
 
+	if (bb_g_job_test_stage_out(job_ptr) != 1)
+		return 0;      /* Stage out in progress */
+
 	/* If we don't have a db_index by now and we are running with
-	   the slurmdbd lets put it on the list to be handled later
-	   when it comes back up since we won't get another chance.
-	*/
+	 * the slurmdbd, lets put it on the list to be handled later
+	 * when slurmdbd comes back up since we won't get another chance.
+	 * job_start won't pend for job_db_inx when the job is finished.
+	 */
 	if (with_slurmdbd && !job_ptr->db_index)
 		jobacct_storage_g_job_start(acct_db_conn, job_ptr);
 
@@ -7322,6 +7804,61 @@ static bool _hide_job(struct job_record *job_ptr, uid_t uid)
 	return false;
 }
 
+/*
+ * pack_all_sicp - dump inter-cluster job state information
+ * OUT buffer_ptr - the pointer is set to the allocated buffer.
+ * OUT buffer_size - set to size of the buffer in bytes
+ * IN uid - uid of user making request (for job/partition filtering)
+ * NOTE: the buffer at *buffer_ptr must be xfreed by the caller
+ * NOTE: change _unpack_sicp_msg() in common/slurm_protocol_pack.c
+ *	whenever the data format changes
+ */
+extern void pack_all_sicp(char **buffer_ptr, int *buffer_size,
+			  uid_t uid, uint16_t protocol_version)
+{
+	ListIterator job_iterator;
+	struct job_record *job_ptr;
+	uint32_t jobs_packed = 0, tmp_offset;
+	Buf buffer;
+
+	buffer_ptr[0] = NULL;
+	*buffer_size = 0;
+
+	buffer = init_buf(BUF_SIZE);
+
+	/* write message body header : size */
+	/* put in a place holder job record count of 0 for now */
+	pack32(jobs_packed, buffer);
+
+	/* write individual job records */
+	part_filter_set(uid);
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		xassert (job_ptr->magic == JOB_MAGIC);
+		if ((job_ptr->job_id & 0x80000000) == 0)
+			continue;
+		if ((uid != 0) && _all_parts_hidden(job_ptr))
+			continue;
+		if (_hide_job(job_ptr, uid))
+			continue;
+
+		pack32(job_ptr->job_id,    buffer);
+		pack32(job_ptr->job_state, buffer);
+		jobs_packed++;
+	}
+	list_iterator_destroy(job_iterator);
+	part_filter_clear();
+
+	/* put the real record count in the message body header */
+	tmp_offset = get_buf_offset(buffer);
+	set_buf_offset(buffer, 0);
+	pack32(jobs_packed, buffer);
+	set_buf_offset(buffer, tmp_offset);
+
+	*buffer_size = get_buf_offset(buffer);
+	buffer_ptr[0] = xfer_buf_data(buffer);
+}
+
 /*
  * pack_all_jobs - dump all job information for all jobs in
  *	machine independent form (for network transmission)
@@ -7373,8 +7910,8 @@ extern void pack_all_jobs(char **buffer_ptr, int *buffer_size,
 		pack_job(job_ptr, show_flags, buffer, protocol_version, uid);
 		jobs_packed++;
 	}
-	part_filter_clear();
 	list_iterator_destroy(job_iterator);
+	part_filter_clear();
 
 	/* put the real record count in the message body header */
 	tmp_offset = get_buf_offset(buffer);
@@ -7486,10 +8023,10 @@ void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer,
 	struct job_details *detail_ptr;
 	time_t begin_time = 0;
 	char *nodelist = NULL;
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
-	if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		detail_ptr = dump_job_ptr->details;
 		pack32(dump_job_ptr->array_job_id, buffer);
 		pack32(dump_job_ptr->array_task_id, buffer);
@@ -7501,16 +8038,19 @@ void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer,
 			packnull(buffer);
 			pack32((uint32_t) 0, buffer);
 		}
+
 		pack32(dump_job_ptr->assoc_id, buffer);
 		pack32(dump_job_ptr->job_id,   buffer);
 		pack32(dump_job_ptr->user_id,  buffer);
 		pack32(dump_job_ptr->group_id, buffer);
 		pack32(dump_job_ptr->profile,  buffer);
 
-		pack16(dump_job_ptr->job_state,    buffer);
+		pack32(dump_job_ptr->job_state,    buffer);
 		pack16(dump_job_ptr->batch_flag,   buffer);
 		pack16(dump_job_ptr->state_reason, buffer);
+		pack8(dump_job_ptr->power_flags,   buffer);
 		pack8(dump_job_ptr->reboot,        buffer);
+		pack8(dump_job_ptr->sicp_mode,     buffer);
 		pack16(dump_job_ptr->restart_cnt,  buffer);
 		pack16(show_flags,  buffer);
 
@@ -7545,6 +8085,7 @@ void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer,
 		pack_time(dump_job_ptr->resize_time, buffer);
 		pack_time(dump_job_ptr->preempt_time, buffer);
 		pack32(dump_job_ptr->priority, buffer);
+		packdouble(dump_job_ptr->billable_tres, buffer);
 
 		/* Only send the allocated nodelist since we are only sending
 		 * the number of cpus and nodes that are currently allocated. */
@@ -7578,6 +8119,7 @@ void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer,
 		} else {
 			packnull(buffer);
 		}
+		packstr(dump_job_ptr->burst_buffer, buffer);
 
 		assoc_mgr_lock(&locks);
 		if (assoc_mgr_qos_list) {
@@ -7628,19 +8170,31 @@ void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer,
 		else
 			_pack_pending_job_details(NULL, buffer,
 						  protocol_version);
-	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		pack32(dump_job_ptr->bit_flags, buffer);
+		packstr(dump_job_ptr->tres_fmt_alloc_str, buffer);
+		packstr(dump_job_ptr->tres_fmt_req_str, buffer);
+	} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
 		detail_ptr = dump_job_ptr->details;
 		pack32(dump_job_ptr->array_job_id, buffer);
 		pack32(dump_job_ptr->array_task_id, buffer);
+		if (dump_job_ptr->array_recs) {
+			build_array_str(dump_job_ptr);
+			packstr(dump_job_ptr->array_recs->task_id_str, buffer);
+			pack32(dump_job_ptr->array_recs->max_run_tasks, buffer);
+		} else {
+			packnull(buffer);
+			pack32((uint32_t) 0, buffer);
+		}
 		pack32(dump_job_ptr->assoc_id, buffer);
-		pack32(dump_job_ptr->job_id, buffer);
-		pack32(dump_job_ptr->user_id, buffer);
+		pack32(dump_job_ptr->job_id,   buffer);
+		pack32(dump_job_ptr->user_id,  buffer);
 		pack32(dump_job_ptr->group_id, buffer);
-		pack32(dump_job_ptr->profile, buffer);
+		pack32(dump_job_ptr->profile,  buffer);
 
 		pack16(dump_job_ptr->job_state,    buffer);
 		pack16(dump_job_ptr->batch_flag,   buffer);
 		pack16(dump_job_ptr->state_reason, buffer);
+		pack8(dump_job_ptr->reboot,        buffer);
 		pack16(dump_job_ptr->restart_cnt,  buffer);
 		pack16(show_flags,  buffer);
 
@@ -7687,6 +8241,8 @@ void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer,
 			xfree(nodelist);
 		}
 
+		packstr(dump_job_ptr->sched_nodes, buffer);
+
 		if (!IS_JOB_PENDING(dump_job_ptr) && dump_job_ptr->part_ptr)
 			packstr(dump_job_ptr->part_ptr->name, buffer);
 		else
@@ -7756,9 +8312,10 @@ void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer,
 		else
 			_pack_pending_job_details(NULL, buffer,
 						  protocol_version);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		detail_ptr = dump_job_ptr->details;
 		pack32(dump_job_ptr->array_job_id, buffer);
-		pack16((uint16_t) dump_job_ptr->array_task_id, buffer);
+		pack32(dump_job_ptr->array_task_id, buffer);
 		pack32(dump_job_ptr->assoc_id, buffer);
 		pack32(dump_job_ptr->job_id, buffer);
 		pack32(dump_job_ptr->user_id, buffer);
@@ -7871,7 +8428,6 @@ void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer,
 		select_g_select_jobinfo_pack(dump_job_ptr->select_jobinfo,
 					     buffer, protocol_version);
 
-		detail_ptr = dump_job_ptr->details;
 		/* A few details are always dumped here */
 		_pack_default_job_details(dump_job_ptr, buffer,
 					  protocol_version);
@@ -7892,9 +8448,16 @@ void pack_job(struct job_record *dump_job_ptr, uint16_t show_flags, Buf buffer,
 
 static void _find_node_config(int *cpu_cnt_ptr, int *core_cnt_ptr)
 {
-	int i, max_cpu_cnt = 1, max_core_cnt = 1;
+	static int max_cpu_cnt = -1, max_core_cnt = -1;
+	int i;
 	struct node_record *node_ptr = node_record_table_ptr;
 
+	*cpu_cnt_ptr  = max_cpu_cnt;
+	*core_cnt_ptr = max_core_cnt;
+
+	if (max_cpu_cnt != -1)
+		return;
+
 	for (i = 0; i < node_record_count; i++, node_ptr++) {
 #ifndef HAVE_BG
 		if (slurmctld_conf.fast_schedule) {
@@ -7912,15 +8475,13 @@ static void _find_node_config(int *cpu_cnt_ptr, int *core_cnt_ptr)
 		}
 #endif
 	}
-	*cpu_cnt_ptr  = max_cpu_cnt;
-	*core_cnt_ptr = max_core_cnt;
 }
 
 /* pack default job details for "get_job_info" RPC */
 static void _pack_default_job_details(struct job_record *job_ptr,
 				      Buf buffer, uint16_t protocol_version)
 {
-	static int max_cpu_cnt = -1, max_core_cnt = -1;
+	int max_cpu_cnt = -1, max_core_cnt = -1;
 	int i;
 	struct job_details *detail_ptr = job_ptr->details;
 	char *cmd_line = NULL;
@@ -7935,10 +8496,14 @@ static void _pack_default_job_details(struct job_record *job_ptr,
 	else if ((detail_ptr->share_res == 0) ||
 		 (detail_ptr->whole_node == 1))	/* User --exclusive */
 		shared = 0;
+	else if (detail_ptr->whole_node == 2)	/* User --exclusive=user */
+		shared = 2;
 	else if (job_ptr->part_ptr) {
 		/* Report shared status based upon latest partition info */
-		if ((job_ptr->part_ptr->max_share & SHARED_FORCE) &&
-		    ((job_ptr->part_ptr->max_share & (~SHARED_FORCE)) > 1))
+		if (job_ptr->part_ptr->flags & PART_FLAG_EXCLUSIVE_USER)
+			shared = 2;
+		else if ((job_ptr->part_ptr->max_share & SHARED_FORCE) &&
+			 ((job_ptr->part_ptr->max_share & (~SHARED_FORCE)) > 1))
 			shared = 1;		/* Partition Shared=force */
 		else if (job_ptr->part_ptr->max_share == 0)
 			shared = 0;		/* Partition Shared=exclusive */
@@ -7947,10 +8512,154 @@ static void _pack_default_job_details(struct job_record *job_ptr,
 	} else
 		shared = (uint16_t) NO_VAL;	/* No user or partition info */
 
-	if (max_cpu_cnt == -1)
+	if (job_ptr->part_ptr && job_ptr->part_ptr->max_cpu_cnt) {
+		max_cpu_cnt  = job_ptr->part_ptr->max_cpu_cnt;
+		max_core_cnt = job_ptr->part_ptr->max_core_cnt;
+	} else
 		_find_node_config(&max_cpu_cnt, &max_core_cnt);
 
-	if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		if (detail_ptr) {
+			packstr(detail_ptr->features,   buffer);
+			packstr(detail_ptr->work_dir,   buffer);
+			packstr(detail_ptr->dependency, buffer);
+
+			if (detail_ptr->argv) {
+				/* Determine size needed for a string
+				 * containing all arguments */
+				for (i =0; detail_ptr->argv[i]; i++) {
+					len += strlen(detail_ptr->argv[i]);
+				}
+				len += i;
+
+				cmd_line = xmalloc(len*sizeof(char));
+				tmp = cmd_line;
+				for (i = 0; detail_ptr->argv[i]; i++) {
+					if (i != 0) {
+						*tmp = ' ';
+						tmp++;
+					}
+					strcpy(tmp,detail_ptr->argv[i]);
+					tmp += strlen(detail_ptr->argv[i]);
+				}
+				packstr(cmd_line, buffer);
+				xfree(cmd_line);
+			} else
+				packnull(buffer);
+
+			if (IS_JOB_COMPLETING(job_ptr) && job_ptr->cpu_cnt) {
+				pack32(job_ptr->cpu_cnt, buffer);
+				pack32((uint32_t) 0, buffer);
+			} else if (job_ptr->total_cpus &&
+				   !IS_JOB_PENDING(job_ptr)) {
+				/* If job is PENDING ignore total_cpus,
+				 * which may have been set by previous run
+				 * followed by job requeue. */
+				pack32(job_ptr->total_cpus, buffer);
+				pack32((uint32_t) 0, buffer);
+			} else {
+				pack32(detail_ptr->min_cpus, buffer);
+				if (detail_ptr->max_cpus != NO_VAL)
+					pack32(detail_ptr->max_cpus, buffer);
+				else
+					pack32((uint32_t) 0, buffer);
+
+			}
+
+			if (IS_JOB_COMPLETING(job_ptr) && job_ptr->node_cnt) {
+				pack32(job_ptr->node_cnt, buffer);
+				pack32((uint32_t) 0, buffer);
+			} else if (job_ptr->total_nodes) {
+				pack32(job_ptr->total_nodes, buffer);
+				pack32((uint32_t) 0, buffer);
+			} else if (job_ptr->node_cnt_wag) {
+				/* This should catch everything else, but
+				 * just incase this is 0 (startup or
+				 * whatever) we will keep the rest of
+				 * this if statement around.
+				 */
+				pack32(job_ptr->node_cnt_wag, buffer);
+				pack32((uint32_t) detail_ptr->max_nodes,
+				       buffer);
+			} else if (detail_ptr->ntasks_per_node) {
+				/* min_nodes based upon task count and ntasks
+				 * per node */
+				uint32_t min_nodes;
+				min_nodes = detail_ptr->num_tasks /
+					    detail_ptr->ntasks_per_node;
+				min_nodes = MAX(min_nodes,
+						detail_ptr->min_nodes);
+				pack32(min_nodes, buffer);
+				pack32(detail_ptr->max_nodes, buffer);
+			} else if (detail_ptr->cpus_per_task > 1) {
+				/* min_nodes based upon task count and cpus
+				 * per task */
+				uint32_t min_cpus, min_nodes;
+				min_cpus = detail_ptr->num_tasks *
+					   detail_ptr->cpus_per_task;
+				min_nodes = min_cpus + max_cpu_cnt - 1;
+				min_nodes /= max_cpu_cnt;
+				min_nodes = MAX(min_nodes,
+						detail_ptr->min_nodes);
+				pack32(min_nodes, buffer);
+				pack32(detail_ptr->max_nodes, buffer);
+			} else if (detail_ptr->mc_ptr &&
+				   detail_ptr->mc_ptr->ntasks_per_core &&
+				   (detail_ptr->mc_ptr->ntasks_per_core
+				    != (uint16_t)INFINITE)) {
+				/* min_nodes based upon task count and ntasks
+				 * per core */
+				uint32_t min_cores, min_nodes;
+				min_cores = detail_ptr->num_tasks +
+					    detail_ptr->mc_ptr->ntasks_per_core
+					    - 1;
+				min_cores /= detail_ptr->mc_ptr->ntasks_per_core;
+
+				min_nodes = min_cores + max_core_cnt - 1;
+				min_nodes /= max_core_cnt;
+				min_nodes = MAX(min_nodes,
+						detail_ptr->min_nodes);
+				pack32(min_nodes, buffer);
+				pack32(detail_ptr->max_nodes, buffer);
+			} else {
+				/* min_nodes based upon task count only */
+				uint32_t min_nodes;
+				min_nodes = detail_ptr->num_tasks +
+					    max_cpu_cnt - 1;
+				min_nodes /= max_cpu_cnt;
+				min_nodes = MAX(min_nodes,
+						detail_ptr->min_nodes);
+				pack32(min_nodes, buffer);
+				pack32(detail_ptr->max_nodes, buffer);
+			}
+			pack16(detail_ptr->requeue,   buffer);
+			pack16(detail_ptr->ntasks_per_node, buffer);
+			pack16(shared, buffer);
+			pack32(detail_ptr->cpu_freq_min, buffer);
+			pack32(detail_ptr->cpu_freq_max, buffer);
+			pack32(detail_ptr->cpu_freq_gov, buffer);
+		} else {
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+
+			if (job_ptr->total_cpus)
+				pack32(job_ptr->total_cpus, buffer);
+			else
+				pack32(job_ptr->cpu_cnt, buffer);
+			pack32((uint32_t) 0, buffer);
+
+			pack32(job_ptr->node_cnt, buffer);
+			pack32((uint32_t) 0, buffer);
+			pack16((uint16_t) 0, buffer);
+			pack16((uint16_t) 0, buffer);
+			pack16((uint16_t) 0, buffer);
+			pack32((uint32_t) 0, buffer);
+			pack32((uint32_t) 0, buffer);
+			pack32((uint32_t) 0, buffer);
+		}
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		if (detail_ptr) {
 			packstr(detail_ptr->features,   buffer);
 			packstr(detail_ptr->work_dir,   buffer);
@@ -8073,29 +8782,71 @@ static void _pack_default_job_details(struct job_record *job_ptr,
 			packnull(buffer);
 			packnull(buffer);
 
-			if (job_ptr->total_cpus)
-				pack32(job_ptr->total_cpus, buffer);
-			else
-				pack32(job_ptr->cpu_cnt, buffer);
-			pack32((uint32_t) 0, buffer);
+			if (job_ptr->total_cpus)
+				pack32(job_ptr->total_cpus, buffer);
+			else
+				pack32(job_ptr->cpu_cnt, buffer);
+			pack32((uint32_t) 0, buffer);
+
+			pack32(job_ptr->node_cnt, buffer);
+			pack32((uint32_t) 0, buffer);
+			pack16((uint16_t) 0, buffer);
+			pack16((uint16_t) 0, buffer);
+			pack16((uint16_t) 0, buffer);
+		}
+	} else {
+		error("_pack_default_job_details: protocol_version "
+		      "%hu not supported", protocol_version);
+	}
+}
+
+/* pack pending job details for "get_job_info" RPC */
+static void _pack_pending_job_details(struct job_details *detail_ptr,
+				      Buf buffer, uint16_t protocol_version)
+{
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		if (detail_ptr) {
+			pack16(detail_ptr->contiguous, buffer);
+			pack16(detail_ptr->core_spec, buffer);
+			pack16(detail_ptr->cpus_per_task, buffer);
+			pack16(detail_ptr->pn_min_cpus, buffer);
+
+			pack32(detail_ptr->pn_min_memory, buffer);
+			pack32(detail_ptr->pn_min_tmp_disk, buffer);
+
+			packstr(detail_ptr->req_nodes, buffer);
+			pack_bit_fmt(detail_ptr->req_node_bitmap, buffer);
+			/* detail_ptr->req_node_layout is not packed */
+			packstr(detail_ptr->exc_nodes, buffer);
+			pack_bit_fmt(detail_ptr->exc_node_bitmap, buffer);
+
+			packstr(detail_ptr->std_err, buffer);
+			packstr(detail_ptr->std_in, buffer);
+			packstr(detail_ptr->std_out, buffer);
+
+			pack_multi_core_data(detail_ptr->mc_ptr, buffer,
+					     protocol_version);
+		} else {
+			pack16((uint16_t) 0, buffer);
+			pack16((uint16_t) 0, buffer);
+			pack16((uint16_t) 0, buffer);
+			pack16((uint16_t) 0, buffer);
+
+			pack32((uint32_t) 0, buffer);
+			pack32((uint32_t) 0, buffer);
+
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
 
-			pack32(job_ptr->node_cnt, buffer);
-			pack32((uint32_t) 0, buffer);
-			pack16((uint16_t) 0, buffer);
-			pack16((uint16_t) 0, buffer);
-			pack16((uint16_t) 0, buffer);
+			pack_multi_core_data(NULL, buffer, protocol_version);
 		}
-	} else {
-		error("_pack_default_job_details: protocol_version "
-		      "%hu not supported", protocol_version);
-	}
-}
-
-/* pack pending job details for "get_job_info" RPC */
-static void _pack_pending_job_details(struct job_details *detail_ptr,
-				      Buf buffer, uint16_t protocol_version)
-{
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		if (detail_ptr) {
 			pack16(detail_ptr->contiguous, buffer);
 			pack16(detail_ptr->core_spec, buffer);
@@ -8137,9 +8888,10 @@ static void _pack_pending_job_details(struct job_details *detail_ptr,
 
 			pack_multi_core_data(NULL, buffer, protocol_version);
 		}
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		if (detail_ptr) {
 			pack16(detail_ptr->contiguous, buffer);
+			pack16(detail_ptr->core_spec, buffer);
 			pack16(detail_ptr->cpus_per_task, buffer);
 			pack16(detail_ptr->pn_min_cpus, buffer);
 
@@ -8152,12 +8904,17 @@ static void _pack_pending_job_details(struct job_details *detail_ptr,
 			packstr(detail_ptr->exc_nodes, buffer);
 			pack_bit_fmt(detail_ptr->exc_node_bitmap, buffer);
 
+			packstr(detail_ptr->std_err, buffer);
+			packstr(detail_ptr->std_in, buffer);
+			packstr(detail_ptr->std_out, buffer);
+
 			pack_multi_core_data(detail_ptr->mc_ptr, buffer,
 					     protocol_version);
 		} else {
 			pack16((uint16_t) 0, buffer);
 			pack16((uint16_t) 0, buffer);
 			pack16((uint16_t) 0, buffer);
+			pack16((uint16_t) 0, buffer);
 
 			pack32((uint32_t) 0, buffer);
 			pack32((uint32_t) 0, buffer);
@@ -8167,6 +8924,10 @@ static void _pack_pending_job_details(struct job_details *detail_ptr,
 			packnull(buffer);
 			packnull(buffer);
 
+			packnull(buffer);
+			packnull(buffer);
+			packnull(buffer);
+
 			pack_multi_core_data(NULL, buffer, protocol_version);
 		}
 	} else {
@@ -8195,21 +8956,39 @@ void purge_old_job(void)
 		if (test_job_dependency(job_ptr) == 2) {
 			char jbuf[JBUFSIZ];
 
-			if (kill_invalid_dep) {
+			/* Check what are the job disposition
+			 * to deal with invalid dependecies
+			 */
+			if (job_ptr->bit_flags & KILL_INV_DEP) {
+				_kill_dependent(job_ptr);
+			} else if (job_ptr->bit_flags & NO_KILL_INV_DEP) {
+				debug("\
+%s: %s job dependency condition never satisfied", __func__,
+				      jobid2str(job_ptr, jbuf, sizeof(jbuf)));
+				job_ptr->state_reason = WAIT_DEP_INVALID;
+				xfree(job_ptr->state_desc);
+			} else if (kill_invalid_dep) {
 				_kill_dependent(job_ptr);
 			} else {
-				debug("%s: %s dependency condition never satisfied",
-				      __func__, jobid2str(job_ptr, jbuf));
+				debug("\
+%s: %s dependency condition never satisfied", __func__,
+				      jobid2str(job_ptr, jbuf, sizeof(jbuf)));
 				job_ptr->state_reason = WAIT_DEP_INVALID;
 				xfree(job_ptr->state_desc);
 			}
 		}
-		if (job_ptr->state_reason == WAIT_DEP_INVALID
-		    && kill_invalid_dep) {
-			/* The job got the WAIT_DEP_INVALID
-			 * before slurmctld was reconfigured.
-			 */
-			_kill_dependent(job_ptr);
+
+		if (job_ptr->state_reason == WAIT_DEP_INVALID) {
+			if (job_ptr->bit_flags & KILL_INV_DEP) {
+				/* The job got the WAIT_DEP_INVALID
+				 * before slurmctld was reconfigured.
+				 */
+				_kill_dependent(job_ptr);
+			} else if (job_ptr->bit_flags & NO_KILL_INV_DEP) {
+				continue;
+			} else if (kill_invalid_dep) {
+				_kill_dependent(job_ptr);
+			}
 		}
 	}
 	list_iterator_destroy(job_iterator);
@@ -8217,7 +8996,7 @@ void purge_old_job(void)
 	i = list_delete_all(job_list, &_list_find_job_old, "");
 	if (i) {
 		debug2("purge_old_job: purged %d old job records", i);
-/*		last_job_update = now;		don't worry about state save */
+		last_job_update = time(NULL);
 	}
 }
 
@@ -8278,16 +9057,19 @@ void reset_job_bitmaps(void)
 			part_ptr = NULL;
 			job_fail = true;
 		} else {
+			char *err_part = NULL;
 			part_ptr = find_part_record(job_ptr->partition);
 			if (part_ptr == NULL) {
-				part_ptr_list = get_part_list(job_ptr->
-							      partition);
+				part_ptr_list = get_part_list(
+						job_ptr->partition,
+						&err_part);
 				if (part_ptr_list)
 					part_ptr = list_peek(part_ptr_list);
 			}
 			if (part_ptr == NULL) {
 				error("Invalid partition (%s) for job %u",
-				      job_ptr->partition, job_ptr->job_id);
+				      err_part, job_ptr->job_id);
+				xfree(err_part);
 				job_fail = true;
 			}
 		}
@@ -8433,6 +9215,7 @@ static void _reset_step_bitmaps(struct job_record *job_ptr)
 			delete_step_record (job_ptr, step_ptr->step_id);
 		}
 		if ((step_ptr->step_node_bitmap == NULL) &&
+		    (step_ptr->step_id != SLURM_EXTERN_CONT) &&
 		    (step_ptr->batch_step == 0)) {
 			error("Missing node_list for step_id %u.%u",
 			      job_ptr->job_id, step_ptr->step_id);
@@ -8470,40 +9253,70 @@ extern uint32_t get_next_job_id(void)
  * _set_job_id - set a default job_id, insure that it is unique
  * IN job_ptr - pointer to the job_record
  */
-static int _set_job_id(struct job_record *job_ptr)
+static int _set_job_id(struct job_record *job_ptr, bool global_job)
 {
 	int i;
-	uint32_t new_id, max_jobs;
+	uint32_t global_base, new_id, max_jobs;
 
 	xassert(job_ptr);
 	xassert (job_ptr->magic == JOB_MAGIC);
 
-	job_id_sequence = MAX(job_id_sequence, slurmctld_conf.first_job_id);
 	max_jobs = slurmctld_conf.max_job_id - slurmctld_conf.first_job_id;
+	if (global_job) {
+uint16_t cluster_id = 0;	/* FIXME: Temporary value */
+		/* 0x80000000 set for global jobs
+		 * 0x7E000000 contains the cluster ID (0 t0 63)
+		 * 0x01ffffff contains a sequence number (1 to 33,554,431) */
+		global_base = 0x80000000 | (cluster_id << 25);
+		max_jobs = MIN(max_jobs, 0x01ffffff);
+		for (i = 0; i < max_jobs; i++) {
+			if (++job_id_sequence >= slurmctld_conf.max_job_id)
+				job_id_sequence = slurmctld_conf.first_job_id;
+			new_id = job_id_sequence + global_base;
+			if (find_job_record(new_id))
+				continue;
+			if (_dup_job_file_test(new_id))
+				continue;
 
-	/* Insure no conflict in job id if we roll over 32 bits */
-	for (i = 0; i < max_jobs; i++) {
-		if (++job_id_sequence >= slurmctld_conf.max_job_id)
-			job_id_sequence = slurmctld_conf.first_job_id;
-		new_id = job_id_sequence;
-		if (find_job_record(new_id))
-			continue;
-		if (_dup_job_file_test(new_id))
-			continue;
+			job_ptr->job_id = new_id;
+			/* When we get a new job id might as well make sure
+			 * the db_index is 0 since there is no way it will be
+			 * correct otherwise :).
+			 */
+			job_ptr->db_index = 0;
+			return SLURM_SUCCESS;
+		}
+		error("We have exhausted our supply of global job id values");
+		job_ptr->job_id = NO_VAL;
+		return EAGAIN;
+	} else {
+		job_id_sequence = MAX(job_id_sequence,
+				      slurmctld_conf.first_job_id);
+
+		/* Insure no conflict in job id if we roll over 32 bits */
+		for (i = 0; i < max_jobs; i++) {
+			if (++job_id_sequence >= slurmctld_conf.max_job_id)
+				job_id_sequence = slurmctld_conf.first_job_id;
+			new_id = job_id_sequence;
+			if (find_job_record(new_id))
+				continue;
+			if (_dup_job_file_test(new_id))
+				continue;
 
-		job_ptr->job_id = new_id;
-		/* When we get a new job id might as well make sure
-		 * the db_index is 0 since there is no way it will be
-		 * correct otherwise :).
-		 */
-		job_ptr->db_index = 0;
-		return SLURM_SUCCESS;
+			job_ptr->job_id = new_id;
+			/* When we get a new job id might as well make sure
+			 * the db_index is 0 since there is no way it will be
+			 * correct otherwise :).
+			 */
+			job_ptr->db_index = 0;
+			return SLURM_SUCCESS;
+		}
+		error("We have exhausted our supply of valid job id values. "
+		      "FirstJobId=%u MaxJobId=%u", slurmctld_conf.first_job_id,
+		      slurmctld_conf.max_job_id);
+		job_ptr->job_id = NO_VAL;
+		return EAGAIN;
 	}
-	error("We have exhausted our supply of valid job id values. "
-	      "FirstJobId=%u MaxJobId=%u", slurmctld_conf.first_job_id,
-	      slurmctld_conf.max_job_id);
-	job_ptr->job_id = NO_VAL;
-	return EAGAIN;
 }
 
 
@@ -8679,6 +9492,9 @@ static void _merge_job_licenses(struct job_record *shrink_job_ptr,
 	xassert(shrink_job_ptr);
 	xassert(expand_job_ptr);
 
+	/* FIXME: do we really need to update accounting here?  It
+	 * might already happen */
+
 	if (!shrink_job_ptr->licenses)		/* No licenses to add */
 		return;
 
@@ -8716,6 +9532,14 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 	multi_core_data_t *mc_ptr = NULL;
 	bool update_accounting = false;
 	acct_policy_limit_set_t acct_policy_limit_set;
+	uint16_t tres[slurmctld_tres_cnt];
+	bool acct_limit_already_set;
+	int tres_pos;
+	uint64_t tres_req_cnt[slurmctld_tres_cnt];
+	List gres_list = NULL;
+	List license_list = NULL;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
 
 #ifdef HAVE_BG
 	uint16_t conn_type[SYSTEM_DIMENSIONS] = {(uint16_t) NO_VAL};
@@ -8732,7 +9556,6 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 		select_g_alter_node_cnt(SELECT_GET_NODE_CPU_CNT,
 					&cpus_per_node);
 #endif
-	memset(&acct_policy_limit_set, 0, sizeof(acct_policy_limit_set_t));
 
 	if (job_specs->user_id == NO_VAL) {
 		/* Used by job_submit/lua to find default partition and
@@ -8744,15 +9567,114 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 	if (error_code != SLURM_SUCCESS)
 		return error_code;
 
+	error_code = _test_job_desc_fields(job_specs);
+	if (error_code != SLURM_SUCCESS)
+		return error_code;
+
 	admin = validate_operator(uid);
-	authorized = admin || assoc_mgr_is_user_acct_coord(
-		acct_db_conn, uid, job_ptr->account);
+
+	memset(&acct_policy_limit_set, 0, sizeof(acct_policy_limit_set_t));
+	acct_policy_limit_set.tres = tres;
+
+	if ((authorized = admin || assoc_mgr_is_user_acct_coord(
+		     acct_db_conn, uid, job_ptr->account))) {
+		/* set up the acct_policy if we are authorized */
+		for (tres_pos = 0; tres_pos < slurmctld_tres_cnt; tres_pos++)
+			acct_policy_limit_set.tres[tres_pos] = ADMIN_SET_LIMIT;
+		acct_policy_limit_set.time = ADMIN_SET_LIMIT;
+		acct_policy_limit_set.qos = ADMIN_SET_LIMIT;
+	} else
+		memset(tres, 0, sizeof(tres));
+
 	if ((job_ptr->user_id != uid) && !authorized) {
 		error("Security violation, JOB_UPDATE RPC from uid %d",
 		      uid);
 		return ESLURM_USER_ID_MISSING;
 	}
 
+	detail_ptr = job_ptr->details;
+	if (detail_ptr)
+		mc_ptr = detail_ptr->mc_ptr;
+	last_job_update = now;
+
+	memset(tres_req_cnt, 0, sizeof(tres_req_cnt));
+	job_specs->tres_req_cnt = tres_req_cnt;
+	if (job_specs->min_cpus != NO_VAL)
+		job_specs->tres_req_cnt[TRES_ARRAY_CPU] = job_specs->min_cpus;
+
+	job_specs->tres_req_cnt[TRES_ARRAY_MEM] = job_get_tres_mem(
+		job_specs->pn_min_memory,
+		job_specs->tres_req_cnt[TRES_ARRAY_CPU] ?
+		job_specs->tres_req_cnt[TRES_ARRAY_CPU] :
+		job_ptr->tres_req_cnt[TRES_ARRAY_CPU],
+		job_specs->min_nodes != NO_VAL ?
+		job_specs->min_nodes :
+		detail_ptr ? detail_ptr->min_nodes : 1);
+
+	if (job_specs->gres) {
+		if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL) ||
+		    (detail_ptr->expanding_jobid != 0)) {
+			error_code = ESLURM_JOB_NOT_PENDING;
+		} else if (gres_plugin_job_state_validate(job_specs->gres,
+							  &gres_list)) {
+			info("sched: update_job: invalid gres %s for job %u",
+			     job_specs->gres, job_ptr->job_id);
+			error_code = ESLURM_INVALID_GRES;
+		} else {
+			gres_set_job_tres_cnt(gres_list,
+					      detail_ptr->min_nodes,
+					      job_specs->tres_req_cnt,
+					      false);
+		}
+	}
+
+	if (error_code != SLURM_SUCCESS)
+		goto fini;
+
+	if (job_specs->licenses) {
+		bool valid, pending = IS_JOB_PENDING(job_ptr);
+		license_list = license_validate(job_specs->licenses,
+						pending ?
+						tres_req_cnt : NULL,
+						&valid);
+
+		if (!valid) {
+			info("sched: update_job: invalid licenses: %s",
+			     job_specs->licenses);
+			error_code = ESLURM_INVALID_LICENSES;
+		}
+	}
+
+	if (error_code != SLURM_SUCCESS)
+		goto fini;
+
+
+	/* Check to see if the requested job_specs exceeds any
+	 * existing limit.  If it passes cool, we will check the new
+	 * association/qos later in the code.  This will prevent the
+	 * update returning an error code that is confusing since many
+	 * things could successfully update and we are now just
+	 * violating a limit.  The job won't be allowed to run, but it
+	 * will allow the update to happen which is most likely what
+	 * was desired.
+	 *
+	 * FIXME: Should we really be looking at the potentially old
+	 * part, assoc, and qos pointer?  This patch is from bug 1381
+	 * for future reference.
+	 */
+
+	acct_limit_already_set = false;
+	if (!authorized && (accounting_enforce & ACCOUNTING_ENFORCE_LIMITS)) {
+		if (!acct_policy_validate(job_specs, job_ptr->part_ptr,
+					  job_ptr->assoc_ptr, job_ptr->qos_ptr,
+					  NULL, &acct_policy_limit_set, 1)) {
+			debug("%s: exceeded association's cpu, node, "
+			      "memory or time limit for user %u",
+			      __func__, job_specs->user_id);
+			acct_limit_already_set = true;
+		}
+	}
+
 	if (!wiki_sched_test) {
 		char *sched_type = slurm_get_sched_type();
 		if (strcmp(sched_type, "sched/wiki") == 0)
@@ -8764,19 +9686,12 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 		xfree(sched_type);
 		wiki_sched_test = true;
 	}
-	detail_ptr = job_ptr->details;
-	if (detail_ptr)
-		mc_ptr = detail_ptr->mc_ptr;
-	last_job_update = now;
 
 	if (job_specs->account
 	    && !xstrcmp(job_specs->account, job_ptr->account)) {
 		debug("sched: update_job: new account identical to "
 		      "old account %u", job_ptr->job_id);
-		xfree(job_specs->account);
-	}
-
-	if (job_specs->account) {
+	} else if (job_specs->account) {
 		if (!IS_JOB_PENDING(job_ptr))
 			error_code = ESLURM_JOB_NOT_PENDING;
 		else {
@@ -8809,13 +9724,12 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 			if (exc_bitmap) {
 				xfree(detail_ptr->exc_nodes);
 				detail_ptr->exc_nodes =
-					job_specs->exc_nodes;
+					xstrdup(job_specs->exc_nodes);
 				FREE_NULL_BITMAP(detail_ptr->exc_node_bitmap);
 				detail_ptr->exc_node_bitmap = exc_bitmap;
 				info("sched: update_job: setting exc_nodes to "
 				     "%s for job_id %u", job_specs->exc_nodes,
 				     job_ptr->job_id);
-				job_specs->exc_nodes = NULL;
 			}
 		}
 	}
@@ -8860,8 +9774,7 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 			update_accounting = true;
 		}
 		FREE_NULL_BITMAP(req_bitmap);
-		xfree(job_specs->req_nodes);
-	}
+	} else	/* NOTE: continues to "if" logic below */
 #endif
 
 	if (job_specs->req_nodes) {
@@ -8882,20 +9795,28 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 			if (req_bitmap) {
 				xfree(detail_ptr->req_nodes);
 				detail_ptr->req_nodes =
-					job_specs->req_nodes;
+					xstrdup(job_specs->req_nodes);
 				FREE_NULL_BITMAP(detail_ptr->req_node_bitmap);
 				xfree(detail_ptr->req_node_layout);
 				detail_ptr->req_node_bitmap = req_bitmap;
 				info("sched: update_job: setting req_nodes to "
 				     "%s for job_id %u", job_specs->req_nodes,
 				     job_ptr->job_id);
-				job_specs->req_nodes = NULL;
 			}
 		}
 	}
 	if (error_code != SLURM_SUCCESS)
 		goto fini;
 
+	if (job_specs->burst_buffer) {
+		/* burst_buffer contents are validated at job submit time and
+		 * data is possibly being staged at later times. It can not
+		 * be changed. */
+		error_code = ESLURM_NOT_SUPPORTED;
+	}
+	if (error_code != SLURM_SUCCESS)
+		goto fini;
+
 	if (job_specs->min_nodes == INFINITE) {
 		/* Used by scontrol just to get current configuration info */
 		job_specs->min_nodes = NO_VAL;
@@ -8947,21 +9868,19 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 
 			new_qos_ptr = _determine_and_validate_qos(
 				resv_name, job_ptr->assoc_ptr,
-				authorized, &qos_rec, &error_code);
+				authorized, &qos_rec, &error_code, false);
 			if (error_code == SLURM_SUCCESS) {
 				info("%s: setting QOS to %s for job_id %u",
 				     __func__, job_specs->qos, job_ptr->job_id);
 				if (job_ptr->qos_id != qos_rec.id) {
 					job_ptr->qos_id = qos_rec.id;
 					job_ptr->qos_ptr = new_qos_ptr;
-					if (authorized)
-						job_ptr->limit_set_qos =
-							ADMIN_SET_LIMIT;
-					else
-						job_ptr->limit_set_qos = 0;
+					job_ptr->limit_set.qos =
+						acct_policy_limit_set.qos;
 					update_accounting = true;
 				} else {
-					debug("sched: %s: new QOS identical to old QOS %u",
+					debug("sched: %s: new QOS identical "
+					      "to old QOS %u",
 					      __func__, job_ptr->job_id);
 				}
 			}
@@ -8974,12 +9893,10 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 	    && !xstrcmp(job_specs->partition, job_ptr->partition)) {
 		debug("sched: update_job: new partition identical to "
 		      "old partition %u", job_ptr->job_id);
-		xfree(job_specs->partition);
-	}
-
-	if (job_specs->partition) {
+	} else if (job_specs->partition) {
 		List part_ptr_list = NULL;
-		bool old_res = false;
+		bool resv_reset = false;
+		char *resv_orig = NULL;
 
 		if (!IS_JOB_PENDING(job_ptr)) {
 			error_code = ESLURM_JOB_NOT_PENDING;
@@ -9015,30 +9932,30 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 			job_specs->time_limit = job_ptr->time_limit;
 		if (!job_specs->reservation
 		    || job_specs->reservation[0] == '\0') {
-			/* just incase the reservation is '\0' */
-			xfree(job_specs->reservation);
+			resv_reset = true;
+			resv_orig = job_specs->reservation;
 			job_specs->reservation = job_ptr->resv_name;
-			old_res = true;
 		}
 
 		error_code = _get_job_parts(job_specs,
-					    &tmp_part_ptr, &part_ptr_list);
+					    &tmp_part_ptr,
+					    &part_ptr_list, NULL);
 
 		if (error_code != SLURM_SUCCESS)
 			;
 		else if ((tmp_part_ptr->state_up & PARTITION_SUBMIT) == 0)
 			error_code = ESLURM_PARTITION_NOT_AVAIL;
 		else {
-			slurmdb_association_rec_t assoc_rec;
+			slurmdb_assoc_rec_t assoc_rec;
 			memset(&assoc_rec, 0,
-			       sizeof(slurmdb_association_rec_t));
+			       sizeof(slurmdb_assoc_rec_t));
 			assoc_rec.acct      = job_ptr->account;
 			assoc_rec.partition = tmp_part_ptr->name;
 			assoc_rec.uid       = job_ptr->user_id;
 			if (assoc_mgr_fill_in_assoc(
 				    acct_db_conn, &assoc_rec,
 				    accounting_enforce,
-				    (slurmdb_association_rec_t **)
+				    (slurmdb_assoc_rec_t **)
 				    &job_ptr->assoc_ptr, false)) {
 				info("job_update: invalid account %s "
 				     "for job %u",
@@ -9073,8 +9990,8 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 		}
 		FREE_NULL_LIST(part_ptr_list);	/* error clean-up */
 
-		if (old_res)
-			job_specs->reservation = NULL;
+		if (resv_reset)
+			job_specs->reservation = resv_orig;
 
 		if (error_code != SLURM_SUCCESS)
 			goto fini;
@@ -9088,8 +10005,7 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 		error_code = ESLURM_ACCESS_DENIED;
 	} else if (job_specs->comment) {
 		xfree(job_ptr->comment);
-		job_ptr->comment = job_specs->comment;
-		job_specs->comment = NULL;	/* Nothing left to free */
+		job_ptr->comment = xstrdup(job_specs->comment);
 		info("update_job: setting comment to %s for job_id %u",
 		     job_ptr->comment, job_ptr->job_id);
 
@@ -9116,7 +10032,8 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 
 				new_qos_ptr = _determine_and_validate_qos(
 					resv_name, job_ptr->assoc_ptr,
-					authorized, &qos_rec, &error_code);
+					authorized, &qos_rec, &error_code,
+					false);
 				if (error_code == SLURM_SUCCESS) {
 					info("update_job: setting qos to %s "
 					     "for job_id %u",
@@ -9124,12 +10041,9 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 					if (job_ptr->qos_id != qos_rec.id) {
 						job_ptr->qos_id = qos_rec.id;
 						job_ptr->qos_ptr = new_qos_ptr;
-						if (authorized)
-							job_ptr->limit_set_qos =
-								ADMIN_SET_LIMIT;
-						else
-							job_ptr->limit_set_qos
-								= 0;
+						job_ptr->limit_set.qos =
+							acct_policy_limit_set.
+							qos;
 						update_accounting = true;
 					} else
 						debug("sched: update_job: "
@@ -9147,7 +10061,8 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 	if (!authorized && (accounting_enforce & ACCOUNTING_ENFORCE_LIMITS)) {
 		if (!acct_policy_validate(job_specs, job_ptr->part_ptr,
 					  job_ptr->assoc_ptr, job_ptr->qos_ptr,
-					  NULL, &acct_policy_limit_set, 1)) {
+					  NULL, &acct_policy_limit_set, 1)
+		    && acct_limit_already_set == false) {
 			info("update_job: exceeded association's cpu, node, "
 			     "memory or time limit for user %u",
 			     job_specs->user_id);
@@ -9156,36 +10071,13 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 		}
 
 		/* Perhaps the limit was removed, so we will remove it
-		   since it was imposed previously.
-		*/
-		if (!acct_policy_limit_set.max_cpus
-		    && (job_ptr->limit_set_max_cpus == 1))
-			job_ptr->details->max_cpus = NO_VAL;
-
-		if (!acct_policy_limit_set.max_nodes
-		    && (job_ptr->limit_set_max_nodes == 1))
-			job_ptr->details->max_nodes = NO_VAL;
-
-		if (!acct_policy_limit_set.time
-		    && (job_ptr->limit_set_time == 1))
-			job_ptr->time_limit = NO_VAL;
-
-		if (job_ptr->limit_set_max_cpus != ADMIN_SET_LIMIT)
-			job_ptr->limit_set_max_cpus =
-				acct_policy_limit_set.max_cpus;
-		if (job_ptr->limit_set_max_nodes != ADMIN_SET_LIMIT)
-			job_ptr->limit_set_max_nodes =
-				acct_policy_limit_set.max_nodes;
-		if (job_ptr->limit_set_time != ADMIN_SET_LIMIT)
-			job_ptr->limit_set_time = acct_policy_limit_set.time;
-	} else if (authorized) {
-		acct_policy_limit_set.max_cpus = ADMIN_SET_LIMIT;
-		acct_policy_limit_set.max_nodes = ADMIN_SET_LIMIT;
-		acct_policy_limit_set.min_cpus = ADMIN_SET_LIMIT;
-		acct_policy_limit_set.min_nodes = ADMIN_SET_LIMIT;
-		acct_policy_limit_set.pn_min_memory = ADMIN_SET_LIMIT;
-		acct_policy_limit_set.time = ADMIN_SET_LIMIT;
-		acct_policy_limit_set.qos = ADMIN_SET_LIMIT;
+		 * since it was imposed previously.
+		 *
+		 * acct_policy_validate will only set the time limit
+		 * so don't worry about any of the others
+		 */
+		if (job_ptr->limit_set.time != ADMIN_SET_LIMIT)
+			job_ptr->limit_set.time = acct_policy_limit_set.time;
 	}
 
 
@@ -9213,6 +10105,19 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 		else {
 			save_min_cpus = detail_ptr->min_cpus;
 			detail_ptr->min_cpus = job_specs->min_cpus;
+			job_ptr->tres_req_cnt[TRES_ARRAY_CPU] =
+				(uint64_t)detail_ptr->min_cpus;
+			xfree(job_ptr->tres_req_str);
+			job_ptr->tres_req_str =
+				assoc_mgr_make_tres_str_from_array(
+					job_ptr->tres_req_cnt,
+					TRES_STR_FLAG_SIMPLE, false);
+
+			xfree(job_ptr->tres_fmt_req_str);
+			job_ptr->tres_fmt_req_str =
+				assoc_mgr_make_tres_str_from_array(
+					job_ptr->tres_req_cnt,
+					0, false);
 		}
 	}
 	if (job_specs->max_cpus != NO_VAL) {
@@ -9229,12 +10134,26 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 		if (save_min_cpus) {
 			detail_ptr->min_cpus = save_min_cpus;
 			save_min_cpus = 0;
+			/* revert it */
+			job_ptr->tres_req_cnt[TRES_ARRAY_CPU] =
+				(uint64_t)detail_ptr->min_cpus;
+			xfree(job_ptr->tres_req_str);
+			job_ptr->tres_req_str =
+				assoc_mgr_make_tres_str_from_array(
+					job_ptr->tres_req_cnt,
+					TRES_STR_FLAG_SIMPLE, false);
+
+			xfree(job_ptr->tres_fmt_req_str);
+			job_ptr->tres_fmt_req_str =
+				assoc_mgr_make_tres_str_from_array(
+					job_ptr->tres_req_cnt, 0, false);
 		}
 		if (save_max_cpus) {
 			detail_ptr->max_cpus = save_max_cpus;
 			save_max_cpus = 0;
 		}
 	}
+
 	if (error_code != SLURM_SUCCESS)
 		goto fini;
 
@@ -9256,7 +10175,8 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 		info("update_job: setting min_cpus from "
 		     "%u to %u for job_id %u",
 		     save_min_cpus, detail_ptr->min_cpus, job_ptr->job_id);
-		job_ptr->limit_set_min_cpus = acct_policy_limit_set.min_cpus;
+		job_ptr->limit_set.tres[TRES_ARRAY_CPU] =
+			acct_policy_limit_set.tres[TRES_ARRAY_CPU];
 		update_accounting = true;
 	}
 	if (save_max_cpus && (detail_ptr->max_cpus != save_max_cpus)) {
@@ -9265,7 +10185,8 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 		     save_max_cpus, detail_ptr->max_cpus, job_ptr->job_id);
 		/* Always use the acct_policy_limit_set.* since if set by a
 		 * super user it be set correctly */
-		job_ptr->limit_set_max_cpus = acct_policy_limit_set.max_cpus;
+		job_ptr->limit_set.tres[TRES_ARRAY_CPU] =
+			acct_policy_limit_set.tres[TRES_ARRAY_CPU];
 		update_accounting = true;
 	}
 
@@ -9274,7 +10195,7 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 
 		if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL)) {
 			error_code = ESLURM_JOB_NOT_PENDING;
-		}else {
+		} else {
 			detail_ptr->pn_min_cpus = job_specs->pn_min_cpus;
 			info("update_job: setting pn_min_cpus to %u for "
 			     "job_id %u", job_specs->pn_min_cpus,
@@ -9320,10 +10241,10 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 					 * acct_policy_limit_set.*
 					 * since if set by a
 					 * super user it be set correctly */
-					job_ptr->limit_set_min_cpus =
-						acct_policy_limit_set.min_cpus;
-					job_ptr->limit_set_max_cpus =
-						acct_policy_limit_set.max_cpus;
+					job_ptr->limit_set.
+						tres[TRES_ARRAY_CPU] =
+						acct_policy_limit_set.
+						tres[TRES_ARRAY_CPU];
 				}
 			}
 		}
@@ -9377,7 +10298,21 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 		info("update_job: setting min_nodes from "
 		     "%u to %u for job_id %u",
 		     save_min_nodes, detail_ptr->min_nodes, job_ptr->job_id);
-		job_ptr->limit_set_min_nodes = acct_policy_limit_set.min_nodes;
+		job_ptr->limit_set.tres[TRES_ARRAY_NODE] =
+			acct_policy_limit_set.tres[TRES_ARRAY_NODE];
+		job_ptr->tres_req_cnt[TRES_ARRAY_NODE] =
+			(uint64_t)detail_ptr->min_nodes;
+		xfree(job_ptr->tres_req_str);
+		job_ptr->tres_req_str =
+			assoc_mgr_make_tres_str_from_array(
+				job_ptr->tres_req_cnt,
+				TRES_STR_FLAG_SIMPLE, false);
+
+		xfree(job_ptr->tres_fmt_req_str);
+		job_ptr->tres_fmt_req_str =
+			assoc_mgr_make_tres_str_from_array(
+				job_ptr->tres_req_cnt,
+				0, false);
 		update_accounting = true;
 	}
 	if (save_max_nodes && (save_max_nodes != detail_ptr->max_nodes)) {
@@ -9386,7 +10321,8 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 		     save_max_nodes, detail_ptr->max_nodes, job_ptr->job_id);
 		/* Always use the acct_policy_limit_set.* since if set by a
 		 * super user it be set correctly */
-		job_ptr->limit_set_max_nodes = acct_policy_limit_set.max_nodes;
+		job_ptr->limit_set.tres[TRES_ARRAY_NODE] =
+			acct_policy_limit_set.tres[TRES_ARRAY_NODE];
 		update_accounting = true;
 	}
 
@@ -9431,7 +10367,7 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 			     job_ptr->job_id);
 			/* Always use the acct_policy_limit_set.*
 			 * since if set by a super user it be set correctly */
-			job_ptr->limit_set_time = acct_policy_limit_set.time;
+			job_ptr->limit_set.time = acct_policy_limit_set.time;
 			update_accounting = true;
 		} else if (IS_JOB_PENDING(job_ptr) && job_ptr->part_ptr &&
 			   (job_ptr->part_ptr->max_time >=
@@ -9442,7 +10378,7 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 			     job_ptr->job_id);
 			/* Always use the acct_policy_limit_set.*
 			 * since if set by a super user it be set correctly */
-			job_ptr->limit_set_time = acct_policy_limit_set.time;
+			job_ptr->limit_set.time = acct_policy_limit_set.time;
 			update_accounting = true;
 		} else {
 			info("sched: Attempt to increase time limit for job %u",
@@ -9486,7 +10422,7 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 			     job_ptr->job_id);
 			/* Always use the acct_policy_limit_set.*
 			 * since if set by a super user it be set correctly */
-			job_ptr->limit_set_time = acct_policy_limit_set.time;
+			job_ptr->limit_set.time = acct_policy_limit_set.time;
 			update_accounting = true;
 		} else {
 			info("sched: Attempt to extend end time for job %u",
@@ -9497,15 +10433,12 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 	if (error_code != SLURM_SUCCESS)
 		goto fini;
 
+	/* this needs to be after partition and QOS checks */
 	if (job_specs->reservation
 	    && !xstrcmp(job_specs->reservation, job_ptr->resv_name)) {
 		debug("sched: update_job: new reservation identical to "
 		      "old reservation %u", job_ptr->job_id);
-		xfree(job_specs->reservation);
-	}
-
-	/* this needs to be after partition and qos checks */
-	if (job_specs->reservation) {
+	} else if (job_specs->reservation) {
 		if (!IS_JOB_PENDING(job_ptr) && !IS_JOB_RUNNING(job_ptr)) {
 			error_code = ESLURM_JOB_NOT_PENDING_NOR_RUNNING;
 		} else {
@@ -9513,13 +10446,10 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 			char *save_resv_name = job_ptr->resv_name;
 			slurmctld_resv_t *save_resv_ptr = job_ptr->resv_ptr;
 
-			job_ptr->resv_name = job_specs->reservation;
-			job_specs->reservation = NULL;	/* Nothing to free */
+			job_ptr->resv_name = xstrdup(job_specs->reservation);
 			rc = validate_job_resv(job_ptr);
-			/* Make sure this job isn't using a partition
-			   or qos that requires it to be in a
-			   reservation.
-			*/
+			/* Make sure this job isn't using a partition or QOS
+			 * that requires it to be in a reservation. */
 			if (rc == SLURM_SUCCESS && !job_ptr->resv_name) {
 				struct part_record *part_ptr =
 					job_ptr->part_ptr;
@@ -9543,7 +10473,6 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 				update_accounting = true;
 			} else {
 				/* Restore reservation info */
-				job_specs->reservation = job_ptr->resv_name;
 				job_ptr->resv_name = save_resv_name;
 				job_ptr->resv_ptr = save_resv_ptr;
 				error_code = rc;
@@ -9607,7 +10536,8 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 					 * persistent change to a job's
 					 * priority, except holding a job */
 					job_ptr->direct_set_prio = 1;
-				}
+				} else
+					error_code = ESLURM_PRIO_RESET_FAIL;
 				job_ptr->priority = job_specs->priority;
 			}
 			info("sched: update_job: setting priority to %u for "
@@ -9690,20 +10620,32 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 			     job_ptr->job_id);
 			/* Always use the acct_policy_limit_set.*
 			 * since if set by a super user it be set correctly */
-			job_ptr->limit_set_pn_min_memory =
-				acct_policy_limit_set.pn_min_memory;
+			job_ptr->limit_set.tres[TRES_ARRAY_MEM] =
+				acct_policy_limit_set.tres[TRES_ARRAY_MEM];
+			job_ptr->tres_req_cnt[TRES_ARRAY_MEM] =
+				(uint64_t)detail_ptr->pn_min_memory;
+			xfree(job_ptr->tres_req_str);
+			job_ptr->tres_req_str =
+				assoc_mgr_make_tres_str_from_array(
+					job_ptr->tres_req_cnt,
+					TRES_STR_FLAG_SIMPLE, false);
+
+			xfree(job_ptr->tres_fmt_req_str);
+			job_ptr->tres_fmt_req_str =
+				assoc_mgr_make_tres_str_from_array(
+					job_ptr->tres_req_cnt, 0, false);
 		}
 	}
 	if (error_code != SLURM_SUCCESS)
 		goto fini;
 
 	if (job_specs->pn_min_tmp_disk != NO_VAL) {
-
 		if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL)) {
 			error_code = ESLURM_JOB_NOT_PENDING;
 		} else {
 			detail_ptr->pn_min_tmp_disk =
 				job_specs->pn_min_tmp_disk;
+
 			info("sched: update_job: setting job_min_tmp_disk to "
 			     "%u for job_id %u", job_specs->pn_min_tmp_disk,
 			     job_ptr->job_id);
@@ -9791,7 +10733,7 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 	if (job_specs->core_spec != (uint16_t) NO_VAL) {
 		if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL))
 			error_code = ESLURM_JOB_NOT_PENDING;
-		else if (authorized) {
+		else if (authorized && slurm_get_use_spec_resources()) {
 			if (job_specs->core_spec == (uint16_t) INFINITE)
 				detail_ptr->core_spec = (uint16_t) NO_VAL;
 			else
@@ -9799,6 +10741,8 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 			info("sched: update_job: setting core_spec to %u "
 			     "for job_id %u", detail_ptr->core_spec,
 			     job_ptr->job_id);
+			if (detail_ptr->core_spec != (uint16_t) NO_VAL)
+				detail_ptr->whole_node = 1;
 		} else {
 			error("sched: Attempt to modify core_spec for job %u",
 			      job_ptr->job_id);
@@ -9814,14 +10758,13 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 		else if (job_specs->features[0] != '\0') {
 			char *old_features = detail_ptr->features;
 			List old_list = detail_ptr->feature_list;
-			detail_ptr->features = job_specs->features;
+			detail_ptr->features = xstrdup(job_specs->features);
 			detail_ptr->feature_list = NULL;
 			if (build_feature_list(job_ptr)) {
 				info("sched: update_job: invalid features"
 				     "(%s) for job_id %u",
 				     job_specs->features, job_ptr->job_id);
-				if (detail_ptr->feature_list)
-					list_destroy(detail_ptr->feature_list);
+				FREE_NULL_LIST(detail_ptr->feature_list);
 				detail_ptr->features = old_features;
 				detail_ptr->feature_list = old_list;
 				error_code = ESLURM_INVALID_FEATURE;
@@ -9830,68 +10773,57 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 				     "%s for job_id %u",
 				     job_specs->features, job_ptr->job_id);
 				xfree(old_features);
-				if (old_list)
-					list_destroy(old_list);
-				job_specs->features = NULL;
+				FREE_NULL_LIST(old_list);
 			}
 		} else {
 			info("sched: update_job: cleared features for job %u",
 			     job_ptr->job_id);
 			xfree(detail_ptr->features);
-			if (detail_ptr->feature_list) {
-				list_destroy(detail_ptr->feature_list);
-				detail_ptr->feature_list = NULL;
-			}
+			FREE_NULL_LIST(detail_ptr->feature_list);
 		}
 	}
 	if (error_code != SLURM_SUCCESS)
 		goto fini;
 
-	if (job_specs->gres) {
-		List tmp_gres_list = NULL;
-		if ((!IS_JOB_PENDING(job_ptr)) || (detail_ptr == NULL) ||
-		    (detail_ptr->expanding_jobid != 0)) {
-			error_code = ESLURM_JOB_NOT_PENDING;
-		} else if (job_specs->gres[0] == '\0') {
-			info("sched: update_job: cleared gres for job %u",
-			     job_ptr->job_id);
-			xfree(job_ptr->gres);
-			FREE_NULL_LIST(job_ptr->gres_list);
-		} else if (gres_plugin_job_state_validate(job_specs->gres,
-							  &tmp_gres_list)) {
-			info("sched: update_job: invalid gres %s for job %u",
-			     job_specs->gres, job_ptr->job_id);
-			error_code = ESLURM_INVALID_GRES;
-			FREE_NULL_LIST(tmp_gres_list);
-		} else {
-			info("sched: update_job: setting gres to "
-			     "%s for job_id %u",
-			     job_specs->gres, job_ptr->job_id);
-			xfree(job_ptr->gres);
-			job_ptr->gres = job_specs->gres;
-			job_specs->gres = NULL;
-			FREE_NULL_LIST(job_ptr->gres_list);
-			job_ptr->gres_list = tmp_gres_list;
-		}
+	if (gres_list) {
+		info("sched: update_job: setting gres to %s for job_id %u",
+		     job_specs->gres, job_ptr->job_id);
+
+		xfree(job_ptr->gres);
+		job_ptr->gres = job_specs->gres;
+		job_specs->gres = NULL;
+
+		FREE_NULL_LIST(job_ptr->gres_list);
+		job_ptr->gres_list = gres_list;
+		gres_list = NULL;
+
+		assoc_mgr_lock(&locks);
+		gres_set_job_tres_cnt(job_ptr->gres_list,
+				      job_ptr->details ?
+				      job_ptr->details->min_nodes : 0,
+				      job_ptr->tres_req_cnt,
+				      true);
+		xfree(job_ptr->tres_req_str);
+		job_ptr->tres_req_str =	assoc_mgr_make_tres_str_from_array(
+			job_ptr->tres_req_cnt, TRES_STR_FLAG_SIMPLE, true);
+
+		xfree(job_ptr->tres_fmt_req_str);
+		job_ptr->tres_fmt_req_str = assoc_mgr_make_tres_str_from_array(
+				job_ptr->tres_req_cnt, 0, false);
+		assoc_mgr_unlock(&locks);
 	}
-	if (error_code != SLURM_SUCCESS)
-		goto fini;
 
 	if (job_specs->name
 	    && !xstrcmp(job_specs->name, job_ptr->name)) {
 		debug("sched: update_job: new name identical to "
 		      "old name %u", job_ptr->job_id);
-		xfree(job_specs->name);
-	}
-
-	if (job_specs->name) {
+	} if (job_specs->name) {
 		if (IS_JOB_FINISHED(job_ptr)) {
 			error_code = ESLURM_JOB_FINISHED;
 			goto fini;
 		} else {
 			xfree(job_ptr->name);
-			job_ptr->name = job_specs->name;
-			job_specs->name = NULL;
+			job_ptr->name = xstrdup(job_specs->name);
 
 			info("sched: update_job: setting name to %s for "
 			     "job_id %u", job_ptr->name, job_ptr->job_id);
@@ -9904,8 +10836,7 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 			error_code = ESLURM_JOB_NOT_PENDING;
 		else if (detail_ptr) {
 			xfree(detail_ptr->std_out);
-			detail_ptr->std_out = job_specs->std_out;
-			job_specs->std_out = NULL;
+			detail_ptr->std_out = xstrdup(job_specs->std_out);
 		}
 	}
 	if (error_code != SLURM_SUCCESS)
@@ -9915,10 +10846,7 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 	    && !xstrcmp(job_specs->wckey, job_ptr->wckey)) {
 		debug("sched: update_job: new wckey identical to "
 		      "old wckey %u", job_ptr->job_id);
-		xfree(job_specs->wckey);
-	}
-
-	if (job_specs->wckey) {
+	} else if (job_specs->wckey) {
 		if (!IS_JOB_PENDING(job_ptr))
 			error_code = ESLURM_JOB_NOT_PENDING;
 		else {
@@ -10103,24 +11031,31 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 	}
 
 	if (job_specs->licenses) {
-		List license_list;
-		bool valid;
-
-		license_list = license_validate(job_specs->licenses, &valid);
-		if (!valid) {
-			info("sched: update_job: invalid licenses: %s",
-			     job_specs->licenses);
-			error_code = ESLURM_INVALID_LICENSES;
-		} else if (IS_JOB_PENDING(job_ptr)) {
+		if (IS_JOB_PENDING(job_ptr)) {
 			FREE_NULL_LIST(job_ptr->license_list);
 			job_ptr->license_list = license_list;
+			license_list = NULL;
 			info("sched: update_job: changing licenses from '%s' "
 			     "to '%s' for pending job %u",
 			     job_ptr->licenses, job_specs->licenses,
 			     job_ptr->job_id);
 			xfree(job_ptr->licenses);
-			job_ptr->licenses = job_specs->licenses;
-			job_specs->licenses = NULL; /* nothing to free */
+			job_ptr->licenses = xstrdup(job_specs->licenses);
+			assoc_mgr_lock(&locks);
+			license_set_job_tres_cnt(job_ptr->license_list,
+						 job_ptr->tres_req_cnt,
+						 true);
+			xfree(job_ptr->tres_req_str);
+			job_ptr->tres_req_str =
+				assoc_mgr_make_tres_str_from_array(
+					job_ptr->tres_req_cnt,
+					TRES_STR_FLAG_SIMPLE, true);
+
+			xfree(job_ptr->tres_fmt_req_str);
+			job_ptr->tres_fmt_req_str =
+				assoc_mgr_make_tres_str_from_array(
+					job_ptr->tres_req_cnt, 0, false);
+			assoc_mgr_unlock(&locks);
 		} else if (IS_JOB_RUNNING(job_ptr) &&
 			   (authorized || (license_list == NULL))) {
 			/* NOTE: This can result in oversubscription of
@@ -10133,8 +11068,7 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 			     job_ptr->licenses, job_specs->licenses,
 			     job_ptr->job_id);
 			xfree(job_ptr->licenses);
-			job_ptr->licenses = job_specs->licenses;
-			job_specs->licenses = NULL; /* nothing to free */
+			job_ptr->licenses = xstrdup(job_specs->licenses);
 			license_job_get(job_ptr);
 		} else {
 			/* licenses are valid, but job state or user not
@@ -10144,6 +11078,8 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 			error_code = ESLURM_JOB_NOT_PENDING_NOR_RUNNING;
 			FREE_NULL_LIST(license_list);
 		}
+
+		update_accounting = 1;
 	}
 	if (error_code != SLURM_SUCCESS)
 		goto fini;
@@ -10380,7 +11316,6 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 		    || !strcmp(job_specs->network, "none")) {
 			info("sched: update_job: clearing Network option "
 			     "for jobid %u", job_ptr->job_id);
-
 		} else {
 			job_ptr->network = xstrdup(job_specs->network);
 			info("sched: update_job: setting Network to %s "
@@ -10393,13 +11328,15 @@ static int _update_job(struct job_record *job_ptr, job_desc_msg_t * job_specs,
 	}
 
 fini:
+	/* This was a local variable, so set it back to NULL */
+	job_specs->tres_req_cnt = NULL;
+
+	FREE_NULL_LIST(gres_list);
+	FREE_NULL_LIST(license_list);
 	if (update_accounting) {
 		info("updating accounting");
-		if (job_ptr->details && job_ptr->details->begin_time) {
-			/* Update job record in accounting to reflect changes */
-			jobacct_storage_g_job_start(acct_db_conn,
-						    job_ptr);
-		}
+		/* Update job record in accounting to reflect changes */
+		jobacct_storage_job_start_direct(acct_db_conn, job_ptr);
 	}
 
 	/* If job update is successful and priority is calculated (not only
@@ -10579,10 +11516,10 @@ extern int update_job_str(slurm_msg_t *msg, uid_t uid)
 			else
 				i_last = -2;
 			for (i = i_first; i <= i_last; i++) {
-				if (!bit_test(array_bitmap, i))
+				if (!bit_test(tmp_bitmap, i))
 					continue;
 				job_ptr->array_task_id = i;
-				new_job_ptr = _job_rec_copy(job_ptr);
+				new_job_ptr = job_array_split(job_ptr);
 				if (!new_job_ptr) {
 					error("update_job_str: Unable to copy "
 					      "record for job %u",
@@ -10590,6 +11527,7 @@ extern int update_job_str(slurm_msg_t *msg, uid_t uid)
 				} else {
 					/* The array_recs structure is moved
 					 * to the new job record copy */
+					bb_g_job_validate2(job_ptr, NULL);
 					job_ptr = new_job_ptr;
 				}
 			}
@@ -10618,17 +11556,17 @@ extern int update_job_str(slurm_msg_t *msg, uid_t uid)
 	}
 
 reply:
-        if (msg->conn_fd >= 0) {
+	if (msg->conn_fd >= 0) {
 		slurm_msg_t_init(&resp_msg);
 		resp_msg.protocol_version = msg->protocol_version;
 		if (resp_array) {
-		        resp_array_msg = _resp_array_xlate(resp_array, job_id);
-		        resp_msg.msg_type  = RESPONSE_JOB_ARRAY_ERRORS;
-		        resp_msg.data      = resp_array_msg;
+			resp_array_msg = _resp_array_xlate(resp_array, job_id);
+			resp_msg.msg_type  = RESPONSE_JOB_ARRAY_ERRORS;
+			resp_msg.data      = resp_array_msg;
 		} else {
-		        resp_msg.msg_type  = RESPONSE_SLURM_RC;
-		        rc_msg.return_code = rc;
-		        resp_msg.data      = &rc_msg;
+			resp_msg.msg_type  = RESPONSE_SLURM_RC;
+			rc_msg.return_code = rc;
+			resp_msg.data      = &rc_msg;
 		}
 		slurm_send_node_msg(msg->conn_fd, &resp_msg);
 
@@ -10637,7 +11575,7 @@ reply:
 			resp_msg.data = NULL;
 		}
 	}
-        _resp_array_free(resp_array);
+	_resp_array_free(resp_array);
 
 	FREE_NULL_BITMAP(array_bitmap);
 
@@ -10758,13 +11696,18 @@ extern void job_post_resize_acctg(struct job_record *job_ptr)
 	   code it that way. */
 	xassert(IS_JOB_RESIZING(job_ptr));
 	acct_policy_add_job_submit(job_ptr);
+	/* job_set_alloc_tres has to be done
+	 * before acct_policy_job_begin */
+	job_set_alloc_tres(job_ptr, false);
 	acct_policy_job_begin(job_ptr);
+	job_claim_resv(job_ptr);
 
 	if (job_ptr->resize_time)
 		job_ptr->details->submit_time = job_ptr->resize_time;
 
 	job_ptr->resize_time = time(NULL);
 
+	/* FIXME: see if this can be changed to job_start_direct() */
 	jobacct_storage_g_job_start(acct_db_conn, job_ptr);
 
 	job_ptr->details->submit_time = org_submit;
@@ -10823,6 +11766,8 @@ validate_jobs_on_node(slurm_node_registration_status_msg_t *reg_msg)
 			     reg_msg->node_name);
 			continue;
 		}
+		if (reg_msg->step_id[i] == SLURM_EXTERN_CONT)
+			continue;
 
 		job_ptr = find_job_record(reg_msg->job_id[i]);
 		if (job_ptr == NULL) {
@@ -10966,11 +11911,14 @@ static void _purge_missing_jobs(int node_inx, time_t now)
 		    (job_ptr->start_time       < startup_time)	&&
 		    (node_inx == bit_ffs(job_ptr->node_bitmap))) {
 			bool requeue = false;
-			if ((job_ptr->start_time < node_ptr->boot_time) &&
-			    (job_ptr->details && job_ptr->details->requeue))
+			char *requeue_msg = "";
+			if (job_ptr->details && job_ptr->details->requeue) {
 				requeue = true;
+				requeue_msg = ", Requeuing job";
+			}
 			info("Batch JobId=%u missing from node 0 (not found "
-			     "BatchStartTime after startup)", job_ptr->job_id);
+			     "BatchStartTime after startup)%s",
+			     job_ptr->job_id, requeue_msg);
 			job_ptr->exit_code = 1;
 			job_complete(job_ptr->job_id, 0, requeue, true, NO_VAL);
 		} else {
@@ -11138,6 +12086,7 @@ extern int
 job_alloc_info(uint32_t uid, uint32_t job_id, struct job_record **job_pptr)
 {
 	struct job_record *job_ptr;
+	uint8_t prolog = 0;
 
 	job_ptr = find_job_record(job_id);
 	if (job_ptr == NULL)
@@ -11150,9 +12099,11 @@ job_alloc_info(uint32_t uid, uint32_t job_id, struct job_record **job_pptr)
 		return ESLURM_JOB_PENDING;
 	if (IS_JOB_FINISHED(job_ptr))
 		return ESLURM_ALREADY_DONE;
+	if (job_ptr->details)
+		prolog = job_ptr->details->prolog_running;
 
 	if (job_ptr->alias_list && !strcmp(job_ptr->alias_list, "TBD") &&
-	    job_ptr->node_bitmap &&
+	    (prolog == 0) && job_ptr->node_bitmap &&
 	    (bit_overlap(power_node_bitmap, job_ptr->node_bitmap) == 0)) {
 		job_ptr->job_state &= (~JOB_CONFIGURING);
 		set_job_alias_list(job_ptr);
@@ -11179,7 +12130,7 @@ int sync_job_files(void)
 	_get_batch_job_dir_ids(batch_dirs);
 	_validate_job_files(batch_dirs);
 	_remove_defunct_batch_dirs(batch_dirs);
-	list_destroy(batch_dirs);
+	FREE_NULL_LIST(batch_dirs);
 	return SLURM_SUCCESS;
 }
 
@@ -11354,6 +12305,24 @@ _xmit_new_end_time(struct job_record *job_ptr)
 	return;
 }
 
+extern uint64_t job_get_tres_mem(uint32_t pn_min_memory,
+				 uint32_t cpu_cnt, uint32_t node_cnt)
+{
+	uint64_t count = 0;
+
+	if (pn_min_memory == NO_VAL)
+		return count;
+
+	if (pn_min_memory & MEM_PER_CPU) {
+		if (cpu_cnt != NO_VAL) {
+			count = (uint64_t)(pn_min_memory & (~MEM_PER_CPU));
+			count *= cpu_cnt;
+		}
+	} else if (node_cnt != NO_VAL)
+		count = (uint64_t)(pn_min_memory * node_cnt);
+
+	return count;
+}
 
 /*
  * job_epilog_complete - Note the completion of the epilog script for a
@@ -11395,17 +12364,18 @@ extern bool job_epilog_complete(uint32_t job_id, char *node_name,
 		if (base_state == NODE_STATE_DOWN) {
 			debug("%s: %s complete response from DOWN "
 			      "node %s", __func__,
-			      jobid2str(job_ptr, jbuf), node_name);
+			      jobid2str(job_ptr, jbuf,
+					sizeof(jbuf)), node_name);
 		} else if (job_ptr->restart_cnt) {
 			/* Duplicate epilog complete can be due to race
 			 * condition, especially with select/serial */
 			debug("%s: %s duplicate epilog complete response",
-			      __func__, jobid2str(job_ptr, jbuf));
+			      __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)));
 		} else {
 
 			error("%s: %s is non-running slurmctld"
 			      "and slurmd out of sync",
-			      __func__, jobid2str(job_ptr, jbuf));
+			      __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)));
 		}
 #endif
 		return false;
@@ -11419,7 +12389,7 @@ extern bool job_epilog_complete(uint32_t job_id, char *node_name,
 	*/
 	if (return_code)
 		error("%s: %s epilog error on %s",
-		      __func__, jobid2str(job_ptr, jbuf),
+		      __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)),
 		      job_ptr->batch_host);
 
 	if (job_ptr->front_end_ptr && IS_JOB_COMPLETING(job_ptr)) {
@@ -11429,7 +12399,7 @@ extern bool job_epilog_complete(uint32_t job_id, char *node_name,
 		else {
 			error("%s: %s job_cnt_comp underflow on "
 			      "front end %s", __func__,
-			      jobid2str(job_ptr, jbuf),
+			      jobid2str(job_ptr, jbuf, sizeof(jbuf)),
 			      front_end_ptr->name);
 		}
 		if (front_end_ptr->job_cnt_comp == 0)
@@ -11467,7 +12437,8 @@ extern bool job_epilog_complete(uint32_t job_id, char *node_name,
 #else
 	if (return_code) {
 		error("%s: %s epilog error on %s, draining the node",
-		      __func__, jobid2str(job_ptr, jbuf), node_name);
+		      __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)),
+		      node_name);
 		drain_nodes(node_name, "Epilog error",
 			    slurm_get_slurm_user_id());
 	}
@@ -11491,11 +12462,13 @@ extern bool job_epilog_complete(uint32_t job_id, char *node_name,
  * subsequent jobs appear in a separate accounting record. */
 void batch_requeue_fini(struct job_record  *job_ptr)
 {
+	char jbuf[JBUFSIZ];
+
 	if (IS_JOB_COMPLETING(job_ptr) ||
 	    !IS_JOB_PENDING(job_ptr) || !job_ptr->batch_flag)
 		return;
 
-	info("requeue batch job %u", job_ptr->job_id);
+	info("Requeuing %s", jobid2str(job_ptr, jbuf, sizeof(jbuf)));
 
 	/* Clear everything so this appears to be a new job and then restart
 	 * it in accounting. */
@@ -11525,12 +12498,19 @@ void batch_requeue_fini(struct job_record  *job_ptr)
 	FREE_NULL_BITMAP(job_ptr->node_bitmap_cg);
 	if (job_ptr->details) {
 		time_t now = time(NULL);
-		/* the time stamp on the new batch launch credential must be
+		/* The time stamp on the new batch launch credential must be
 		 * larger than the time stamp on the revoke request. Also the
-		 * I/O must be all cleared out and the named socket purged,
-		 * so delay for at least ten seconds. */
-		if (job_ptr->details->begin_time <= now)
-			job_ptr->details->begin_time = now + 10;
+		 * I/O must be all cleared out, the named socket purged and
+		 * the job credential purged by slurmd. */
+		if (job_ptr->details->begin_time <= now) {
+			/* See src/common/slurm_cred.c
+			 * #define DEFAULT_EXPIRATION_WINDOW 1200 */
+			int cred_lifetime = 1200;
+			(void) slurm_cred_ctx_get(slurmctld_config.cred_ctx,
+						  SLURM_CRED_OPT_EXPIRY_WINDOW,
+						  &cred_lifetime);
+			job_ptr->details->begin_time = now + cred_lifetime + 1;
+		}
 
 		/* Since this could happen on a launch we need to make sure the
 		 * submit isn't the same as the last submit so put now + 1 so
@@ -11551,15 +12531,12 @@ void batch_requeue_fini(struct job_record  *job_ptr)
 /* job_fini - free all memory associated with job records */
 void job_fini (void)
 {
-	if (job_list) {
-		list_destroy(job_list);
-		job_list = NULL;
-	}
+	FREE_NULL_LIST(job_list);
 	xfree(job_hash);
 	xfree(job_array_hash_j);
 	xfree(job_array_hash_t);
-	xfree(requeue_exit);
-	xfree(requeue_exit_hold);
+	FREE_NULL_BITMAP(requeue_exit);
+	FREE_NULL_BITMAP(requeue_exit_hold);
 }
 
 /* Record the start of one job array task */
@@ -11641,8 +12618,11 @@ extern void job_completion_logger(struct job_record *job_ptr, bool requeue)
 	xassert(job_ptr);
 
 	acct_policy_remove_job_submit(job_ptr);
+	(void) bb_g_job_start_stage_out(job_ptr);
 
-	if (!IS_JOB_RESIZING(job_ptr)) {
+	if (!IS_JOB_RESIZING(job_ptr) &&
+	    ((job_ptr->array_task_id == NO_VAL) ||
+	     test_job_array_finished(job_ptr->array_job_id))) {
 		/* Remove configuring state just to make sure it isn't there
 		 * since it will throw off displays of the job. */
 		job_ptr->job_state &= (~JOB_CONFIGURING);
@@ -11676,9 +12656,9 @@ extern void job_completion_logger(struct job_record *job_ptr, bool requeue)
 		return;
 
 	if (!job_ptr->assoc_id) {
-		slurmdb_association_rec_t assoc_rec;
+		slurmdb_assoc_rec_t assoc_rec;
 		/* In case accounting enabled after starting the job */
-		memset(&assoc_rec, 0, sizeof(slurmdb_association_rec_t));
+		memset(&assoc_rec, 0, sizeof(slurmdb_assoc_rec_t));
 		assoc_rec.acct      = job_ptr->account;
 		if (job_ptr->part_ptr)
 			assoc_rec.partition = job_ptr->part_ptr->name;
@@ -11686,7 +12666,7 @@ extern void job_completion_logger(struct job_record *job_ptr, bool requeue)
 
 		if (!(assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
 					      accounting_enforce,
-					      (slurmdb_association_rec_t **)
+					      (slurmdb_assoc_rec_t **)
 					      &job_ptr->assoc_ptr, false))) {
 			job_ptr->assoc_id = assoc_rec.id;
 			/* we have to call job start again because the
@@ -11737,11 +12717,18 @@ extern bool job_independent(struct job_record *job_ptr, int will_run)
 	} else if (depend_rc == 2) {
 		char jbuf[JBUFSIZ];
 
-		if (kill_invalid_dep) {
+		if (job_ptr->bit_flags & KILL_INV_DEP) {
+			_kill_dependent(job_ptr);
+		} else if (job_ptr->bit_flags & NO_KILL_INV_DEP) {
+			debug("%s: %s job dependency condition never satisfied",
+			      __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)));
+			job_ptr->state_reason = WAIT_DEP_INVALID;
+			xfree(job_ptr->state_desc);
+		} else if (kill_invalid_dep) {
 			_kill_dependent(job_ptr);
 		} else {
 			debug("%s: %s dependency condition never satisfied",
-			      __func__, jobid2str(job_ptr, jbuf));
+			      __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)));
 			job_ptr->state_reason = WAIT_DEP_INVALID;
 			xfree(job_ptr->state_desc);
 		}
@@ -12200,6 +13187,7 @@ static int _job_suspend(struct job_record *job_ptr, uint16_t op, bool indf_susp)
 		if (!IS_JOB_SUSPENDED(job_ptr))
 			return ESLURM_JOB_NOT_SUSPENDED;
 		rc = _resume_job_nodes(job_ptr, indf_susp);
+		power_g_job_resume(job_ptr);
 		if (rc != SLURM_SUCCESS)
 			return rc;
 		_suspend_job(job_ptr, op, indf_susp);
@@ -12519,7 +13507,7 @@ static int _job_requeue(uid_t uid, struct job_record *job_ptr, bool preempt,
 	}
 
 	if (IS_JOB_SUSPENDED(job_ptr)) {
-		enum job_states suspend_job_state = job_ptr->job_state;
+		uint32_t suspend_job_state = job_ptr->job_state;
 		/* we can't have it as suspended when we call the
 		 * accounting stuff.
 		 */
@@ -12566,6 +13554,12 @@ static int _job_requeue(uid_t uid, struct job_record *job_ptr, bool preempt,
 	job_ptr->job_state = JOB_PENDING;
 	if (job_ptr->node_cnt)
 		job_ptr->job_state |= JOB_COMPLETING;
+	/* If we set the time limit it means the user didn't, so reset
+	   it here or we could bust some limit when we try again */
+	if (job_ptr->limit_set.time == 1) {
+		job_ptr->time_limit = NO_VAL;
+		job_ptr->limit_set.time = 0;
+	}
 
 reply:
 	job_ptr->pre_sus_time = (time_t) 0;
@@ -12588,7 +13582,12 @@ reply:
 	if (state & JOB_REQUEUE_HOLD) {
 		job_ptr->state_reason = WAIT_HELD_USER;
 		xfree(job_ptr->state_desc);
-		job_ptr->state_desc = xstrdup("job requeued in held state");
+		if (state & JOB_LAUNCH_FAILED)
+			job_ptr->state_desc
+				= xstrdup("launch failed requeued held");
+		else
+			job_ptr->state_desc
+				= xstrdup("job requeued in held state");
 		job_ptr->priority = 0;
 	}
 
@@ -12610,8 +13609,8 @@ reply:
  * RET 0 on success, otherwise ESLURM error code
  */
 extern int job_requeue(uid_t uid, uint32_t job_id,
-                       slurm_fd_t conn_fd, uint16_t protocol_version,
-                       bool preempt, uint32_t state)
+		       slurm_fd_t conn_fd, uint16_t protocol_version,
+		       bool preempt, uint32_t state)
 {
 	int rc = SLURM_SUCCESS;
 	struct job_record *job_ptr = NULL;
@@ -12647,8 +13646,8 @@ extern int job_requeue(uid_t uid, uint32_t job_id,
  * RET 0 on success, otherwise ESLURM error code
  */
 extern int job_requeue2(uid_t uid, requeue_msg_t *req_ptr,
-                       slurm_fd_t conn_fd, uint16_t protocol_version,
-                       bool preempt)
+		       slurm_fd_t conn_fd, uint16_t protocol_version,
+		       bool preempt)
 {
 	slurm_ctl_conf_t *conf;
 	int rc = SLURM_SUCCESS, rc2;
@@ -12873,11 +13872,11 @@ extern int job_hold_by_assoc_id(uint32_t assoc_id)
 			}
 
 			job_ptr->assoc_ptr =
-				((slurmdb_association_rec_t *)
+				((slurmdb_assoc_rec_t *)
 				 job_ptr->assoc_ptr)->usage->parent_assoc_ptr;
 			if (job_ptr->assoc_ptr)
 				job_ptr->assoc_id =
-					((slurmdb_association_rec_t *)
+					((slurmdb_assoc_rec_t *)
 					 job_ptr->assoc_ptr)->id;
 		}
 
@@ -12956,7 +13955,7 @@ extern int job_hold_by_qos_id(uint32_t qos_id)
 extern int update_job_account(char *module, struct job_record *job_ptr,
 			      char *new_account)
 {
-	slurmdb_association_rec_t assoc_rec;
+	slurmdb_assoc_rec_t assoc_rec;
 
 	if ((!IS_JOB_PENDING(job_ptr)) || (job_ptr->details == NULL)) {
 		info("%s: attempt to modify account for non-pending "
@@ -12965,14 +13964,14 @@ extern int update_job_account(char *module, struct job_record *job_ptr,
 	}
 
 
-	memset(&assoc_rec, 0, sizeof(slurmdb_association_rec_t));
+	memset(&assoc_rec, 0, sizeof(slurmdb_assoc_rec_t));
 	assoc_rec.acct      = new_account;
 	if (job_ptr->part_ptr)
 		assoc_rec.partition = job_ptr->part_ptr->name;
 	assoc_rec.uid       = job_ptr->user_id;
 	if (assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
 				    accounting_enforce,
-				    (slurmdb_association_rec_t **)
+				    (slurmdb_assoc_rec_t **)
 				    &job_ptr->assoc_ptr, false)) {
 		info("%s: invalid account %s for job_id %u",
 		     module, new_account, job_ptr->job_id);
@@ -12987,7 +13986,7 @@ extern int update_job_account(char *module, struct job_record *job_ptr,
 		assoc_rec.acct = NULL;
 		assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
 					accounting_enforce,
-					(slurmdb_association_rec_t **)
+					(slurmdb_assoc_rec_t **)
 					&job_ptr->assoc_ptr, false);
 		if (!job_ptr->assoc_ptr) {
 			debug("%s: we didn't have an association for account "
@@ -13095,9 +14094,9 @@ extern int send_jobs_to_accounting(void)
 	itr = list_iterator_create(job_list);
 	while ((job_ptr = list_next(itr))) {
 		if (!job_ptr->assoc_id) {
-			slurmdb_association_rec_t assoc_rec;
+			slurmdb_assoc_rec_t assoc_rec;
 			memset(&assoc_rec, 0,
-			       sizeof(slurmdb_association_rec_t));
+			       sizeof(slurmdb_assoc_rec_t));
 			assoc_rec.acct      = job_ptr->account;
 			if (job_ptr->part_ptr)
 				assoc_rec.partition = job_ptr->part_ptr->name;
@@ -13106,7 +14105,7 @@ extern int send_jobs_to_accounting(void)
 			if (assoc_mgr_fill_in_assoc(
 				   acct_db_conn, &assoc_rec,
 				   accounting_enforce,
-				   (slurmdb_association_rec_t **)
+				   (slurmdb_assoc_rec_t **)
 				   &job_ptr->assoc_ptr, false) &&
 			    (accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS)
 			    && (!IS_JOB_FINISHED(job_ptr))) {
@@ -13411,6 +14410,9 @@ _copy_job_record_to_job_desc(struct job_record *job_ptr)
 	job_desc->core_spec         = details->core_spec;
 	job_desc->cpu_bind          = xstrdup(details->cpu_bind);
 	job_desc->cpu_bind_type     = details->cpu_bind_type;
+	job_desc->cpu_freq_min      = details->cpu_freq_min;
+	job_desc->cpu_freq_max      = details->cpu_freq_max;
+	job_desc->cpu_freq_gov      = details->cpu_freq_gov;
 	job_desc->dependency        = xstrdup(details->dependency);
 	job_desc->end_time          = 0; /* Unused today */
 	job_desc->environment       = get_job_env(job_ptr,
@@ -13433,6 +14435,8 @@ _copy_job_record_to_job_desc(struct job_record *job_ptr)
 	job_desc->num_tasks         = details->num_tasks;
 	job_desc->open_mode         = details->open_mode;
 	job_desc->other_port        = job_ptr->other_port;
+	job_desc->power_flags       = job_ptr->power_flags;
+	job_desc->sicp_mode         = job_ptr->sicp_mode;
 	job_desc->overcommit        = details->overcommit;
 	job_desc->partition         = xstrdup(job_ptr->partition);
 	job_desc->plane_size        = details->plane_size;
@@ -13449,8 +14453,10 @@ _copy_job_record_to_job_desc(struct job_record *job_ptr)
 	job_desc->script            = get_job_script(job_ptr);
 	if (details->share_res == 1)
 		job_desc->shared     = 1;
-	else if (details->whole_node)
+	else if (details->whole_node == 1)
 		job_desc->shared     = 0;
+	else if (details->whole_node == 2)
+		job_desc->shared     = 2;
 	else
 		job_desc->shared     = (uint16_t) NO_VAL;
 	job_desc->spank_job_env_size = job_ptr->spank_job_env_size;
@@ -13581,12 +14587,8 @@ extern int job_restart(checkpoint_msg_t *ckpt_ptr, uid_t uid, slurm_fd_t conn_fd
 	/* unpack version string */
 	safe_unpackstr_xmalloc(&ver_str, &tmp_uint32, buffer);
 	debug3("Version string in job_ckpt header is %s", ver_str);
-	if (ver_str) {
-		if (!strcmp(ver_str, JOB_CKPT_VERSION))
-			safe_unpack16(&ckpt_version, buffer);
-		else
-			ckpt_version = SLURM_2_6_PROTOCOL_VERSION;
-	}
+	if (ver_str && !strcmp(ver_str, JOB_CKPT_VERSION))
+		safe_unpack16(&ckpt_version, buffer);
 
 	if (ckpt_version == (uint16_t)NO_VAL) {
 		error("***************************************************");
@@ -13806,24 +14808,19 @@ extern void job_hold_requeue(struct job_record *job_ptr)
 }
 
 /* init_requeue_policy()
- * Initialize the requeue exit/hold arrays.
+ * Initialize the requeue exit/hold bitmaps.
  */
-void
-init_requeue_policy(void)
+extern void init_requeue_policy(void)
 {
 	char *sched_params;
 
-	/* clean first as we can be reconfiguring
-	 */
-	num_exit = 0;
-	xfree(requeue_exit);
-	num_hold = 0;
-	xfree(requeue_exit_hold);
+	/* clean first as we can be reconfiguring */
+	FREE_NULL_BITMAP(requeue_exit);
+	FREE_NULL_BITMAP(requeue_exit_hold);
 
-	requeue_exit = _make_requeue_array(slurmctld_conf.requeue_exit,
-					   &num_exit);
+	requeue_exit = _make_requeue_array(slurmctld_conf.requeue_exit);
 	requeue_exit_hold = _make_requeue_array(
-		slurmctld_conf.requeue_exit_hold, &num_hold);
+		slurmctld_conf.requeue_exit_hold);
 	/* Check if users want to kill a job whose dependency
 	 * can never be satisfied.
 	 */
@@ -13842,88 +14839,75 @@ init_requeue_policy(void)
 /* _make_requeue_array()
  *
  * Process the RequeueExit|RequeueExitHold configuration
- * parameters creating two arrays holding the exit values
+ * parameters, creating two bitmaps holding the exit values
  * of jobs for which they have to be requeued.
  */
-static int32_t *
-_make_requeue_array(char *conf_buf, uint32_t *num)
+static bitstr_t *_make_requeue_array(char *conf_buf)
 {
 	hostset_t hs;
+	bitstr_t *bs = NULL;
 	char *tok = NULL, *end_ptr = NULL;
-	int32_t *ar = NULL, cc = 0;
 	long val;
 
-	*num = 0;
 	if (conf_buf == NULL)
-		return ar;
+		return bs;
 
 	xstrfmtcat(tok, "[%s]", conf_buf);
 	hs = hostset_create(tok);
 	xfree(tok);
 	if (!hs) {
 		error("%s: exit values: %s", __func__, conf_buf);
-		return ar;
+		return bs;
 	}
 
 	debug("%s: exit values: %s", __func__, conf_buf);
 
-	ar = xmalloc(sizeof(int32_t) * hostset_count(hs));
+	bs = bit_alloc(MAX_EXIT_VAL + 1);
 	while ((tok = hostset_shift(hs))) {
 		val = strtol(tok, &end_ptr, 10);
-		if ((end_ptr[0] == '\0') && (val >= 0)) {
-			ar[cc++] = val;
+		if ((end_ptr[0] == '\0') &&
+		    (val >= 0) && (val <= MAX_EXIT_VAL)) {
+			bit_set(bs, val);
 		} else {
 			error("%s: exit values: %s (%s)",
 			      __func__, conf_buf, tok);
 		}
 		free(tok);
 	}
-	*num = cc;
 	hostset_destroy(hs);
 
-	return ar;
+	return bs;
 }
 
 /* _set_job_requeue_exit_value()
  *
  * Compared the job exit values with the configured
- * RequeueExit and RequeueHoldExit and it mach is
- * found set the appropriate state for job_hold_requeue()
- * If RequeueExit or RequeueExitHold are not defined
- * the mum_exit and num_hold are zero.
- *
+ * RequeueExit and RequeueExitHold; if a match is
+ * found, set the appropriate state for job_hold_requeue()
  */
 static void
 _set_job_requeue_exit_value(struct job_record *job_ptr)
 {
-	int cc;
 	int exit_code;
 
-	/* Search the arrays for a matching value
-	 * based on the job exit code
-	 */
 	exit_code = WEXITSTATUS(job_ptr->exit_code);
-	for (cc = 0; cc < num_exit; cc++) {
-		if (exit_code == requeue_exit[cc]) {
-			debug2("%s: job %d exit code %d state JOB_REQUEUE",
-			       __func__, job_ptr->job_id, exit_code);
-			job_ptr->job_state |= JOB_REQUEUE;
-			return;
-		}
+	if ((exit_code < 0) || (exit_code > MAX_EXIT_VAL))
+		return;
+
+	if (requeue_exit && bit_test(requeue_exit, exit_code)) {
+		debug2("%s: job %d exit code %d state JOB_REQUEUE",
+		       __func__, job_ptr->job_id, exit_code);
+		job_ptr->job_state |= JOB_REQUEUE;
+		return;
 	}
 
-	for (cc = 0; cc < num_hold; cc++) {
-		if (exit_code == requeue_exit_hold[cc]) {
-			/* Bah... not sure if want to set special
-			 * exit state in this case, but for sure
-			 * don't want another array...
-			 */
-			debug2("%s: job %d exit code %d state JOB_SPECIAL_EXIT",
-			       __func__, job_ptr->job_id, exit_code);
-			job_ptr->job_state |= JOB_REQUEUE;
-			job_ptr->job_state |= JOB_SPECIAL_EXIT;
-			return;
-		}
+	if (requeue_exit_hold && bit_test(requeue_exit_hold, exit_code)) {
+		/* Not sure if we want to set special exit state in this case */
+		debug2("%s: job %d exit code %d state JOB_SPECIAL_EXIT",
+		       __func__, job_ptr->job_id, exit_code);
+		job_ptr->job_state |= JOB_REQUEUE;
+		job_ptr->job_state |= JOB_SPECIAL_EXIT;
+		return;
 	}
 }
 
@@ -13943,32 +14927,57 @@ extern void job_end_time_reset(struct job_record  *job_ptr)
 	job_ptr->end_time_exp = job_ptr->end_time;
 }
 
+/*
+ * jobid2fmt() - print a job ID including job array information.
+ */
+extern char *jobid2fmt(struct job_record *job_ptr, char *buf, int buf_size)
+{
+	if (job_ptr == NULL)
+		return "jobid2fmt: Invalid job_ptr argument";
+	if (buf == NULL)
+		return "jobid2fmt: Invalid buf argument";
+
+	if (job_ptr->array_recs && (job_ptr->array_task_id == NO_VAL)) {
+		snprintf(buf, buf_size, "JobID=%u_*",
+			 job_ptr->array_job_id);
+	} else if (job_ptr->array_task_id == NO_VAL) {
+		snprintf(buf, buf_size, "JobID=%u", job_ptr->job_id);
+	} else {
+		snprintf(buf, buf_size, "JobID=%u_%u(%u)",
+			 job_ptr->array_job_id, job_ptr->array_task_id,
+			 job_ptr->job_id);
+	}
+
+	return buf;
+}
+
 /*
  * jobid2str() - print all the parts that uniquely identify a job.
  */
 extern char *
-jobid2str(struct job_record *job_ptr, char *buf)
+jobid2str(struct job_record *job_ptr, char *buf, int buf_size)
 {
+
 	if (job_ptr == NULL)
 		return "jobid2str: Invalid job_ptr argument";
 	if (buf == NULL)
 		return "jobid2str: Invalid buf argument";
 
 	if (job_ptr->array_recs && (job_ptr->array_task_id == NO_VAL)) {
-		sprintf(buf, "JobID=%u_* State=0x%x NodeCnt=%u",
-			job_ptr->job_id, job_ptr->job_state,
+		snprintf(buf, buf_size, "JobID=%u_* State=0x%x NodeCnt=%u",
+			job_ptr->array_job_id, job_ptr->job_state,
 			job_ptr->node_cnt);
 	} else if (job_ptr->array_task_id == NO_VAL) {
-		sprintf(buf, "JobID=%u State=0x%x NodeCnt=%u",
+		snprintf(buf, buf_size, "JobID=%u State=0x%x NodeCnt=%u",
 			job_ptr->job_id, job_ptr->job_state,
 			job_ptr->node_cnt);
 	} else {
-		sprintf(buf, "JobID=%u_%u(%u) State=0x%x NodeCnt=%u",
+		snprintf(buf, buf_size, "JobID=%u_%u(%u) State=0x%x NodeCnt=%u",
 			job_ptr->array_job_id, job_ptr->array_task_id,
-			job_ptr->job_id, job_ptr->job_state,job_ptr->node_cnt);
+			job_ptr->job_id, job_ptr->job_state, job_ptr->node_cnt);
 	}
 
-       return buf;
+	return buf;
 }
 
 /* trace_job() - print the job details if
@@ -13979,13 +14988,16 @@ trace_job(struct job_record *job_ptr, const char *func, const char *extra)
 {
 	char jbuf[JBUFSIZ];
 
-	if (slurmctld_conf.debug_flags & DEBUG_FLAG_TRACE_JOBS)
-		info("%s: %s job %s", func, extra, jobid2str(job_ptr, jbuf));
+	if (slurmctld_conf.debug_flags & DEBUG_FLAG_TRACE_JOBS) {
+		info("%s: %s %s", func, extra, jobid2str(job_ptr, jbuf,
+							 sizeof(jbuf)));
+	}
 }
 
 /* If this is a job array meta-job, prepare it for being scheduled */
 extern void job_array_pre_sched(struct job_record *job_ptr)
 {
+	char jbuf[JBUFSIZ];
 	int32_t i;
 
 	if (!job_ptr->array_recs || !job_ptr->array_recs->task_id_bitmap)
@@ -13993,7 +15005,11 @@ extern void job_array_pre_sched(struct job_record *job_ptr)
 
 	i = bit_ffs(job_ptr->array_recs->task_id_bitmap);
 	if (i < 0) {
-		error("job %u has empty task_id_bitmap", job_ptr->job_id);
+		/* This happens if the final task in a meta-job is requeued */
+		if (job_ptr->restart_cnt == 0) {
+			error("%s has empty task_id_bitmap",
+			      jobid2str(job_ptr, jbuf, sizeof(jbuf)));
+		}
 		FREE_NULL_BITMAP(job_ptr->array_recs->task_id_bitmap);
 		return;
 	}
@@ -14006,6 +15022,7 @@ extern void job_array_pre_sched(struct job_record *job_ptr)
 extern void job_array_post_sched(struct job_record *job_ptr)
 {
 	struct job_record *new_job_ptr;
+	char jobid_buf[32];
 
 	if (!job_ptr->array_recs || !job_ptr->array_recs->task_id_bitmap)
 		return;
@@ -14021,9 +15038,19 @@ extern void job_array_post_sched(struct job_record *job_ptr)
 			      job_ptr->array_job_id, job_ptr->array_task_id);
 		}
 		xfree(job_ptr->array_recs->task_id_str);
-		/* Most efficient way to update new task_id_str to accounting
-		 * for pending tasks. */
-		job_ptr->db_index = 0;
+		if (job_ptr->array_recs->task_cnt == 0)
+			FREE_NULL_BITMAP(job_ptr->array_recs->task_id_bitmap);
+
+		/* While it is efficient to set the db_index to 0 here
+		 * to get the database to update the record for
+		 * pending tasks it also creates a window in which if
+		 * the association id is changed (different account or
+		 * partition) instead of returning the previous
+		 * db_index (expected) it would create a new one
+		 * leaving the other orphaned.  Setting the job_state
+		 * sets things up so the db_index isn't lost but the
+		 * start message is still sent to get the desired behavior. */
+		job_ptr->job_state |= JOB_UPDATE_DB;
 
 		/* If job is requeued, it will already be in the hash table */
 		if (!find_job_array_rec(job_ptr->array_job_id,
@@ -14031,12 +15058,15 @@ extern void job_array_post_sched(struct job_record *job_ptr)
 			_add_job_array_hash(job_ptr);
 		}
 	} else {
-		new_job_ptr = _job_rec_copy(job_ptr);
+		new_job_ptr = job_array_split(job_ptr);
 		if (new_job_ptr) {
 			new_job_ptr->job_state = JOB_PENDING;
 			new_job_ptr->start_time = (time_t) 0;
-			/* Do NOT clear db_index here, it is handled when
-			 * task_id_str is created elsewhere */
+			/* Do NOT set the JOB_UPDATE_DB flag here, it
+			 * is handled when task_id_str is created elsewhere */
+		} else {
+			error("%s: Unable to copy record for %s", __func__,
+			      jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
 		}
 	}
 }
@@ -14107,7 +15137,7 @@ _kill_dependent(struct job_record *job_ptr)
 	now = time(NULL);
 
 	info("%s: Job dependency can't be satisfied, cancelling "
-	     "job %s", __func__, jobid2str(job_ptr, jbuf));
+	     "job %s", __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)));
 	job_ptr->job_state = JOB_CANCELLED;
 	xfree(job_ptr->state_desc);
 	job_ptr->start_time = now;
diff --git a/src/slurmctld/job_scheduler.c b/src/slurmctld/job_scheduler.c
index feda32e0f..f7c6e1438 100644
--- a/src/slurmctld/job_scheduler.c
+++ b/src/slurmctld/job_scheduler.c
@@ -49,6 +49,7 @@
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
+#include <poll.h>
 
 #if HAVE_SYS_PRCTL_H
 #  include <sys/prctl.h>
@@ -60,6 +61,7 @@
 #include "src/common/list.h"
 #include "src/common/macros.h"
 #include "src/common/node_select.h"
+#include "src/common/power.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/slurm_acct_gather.h"
 #include "src/common/timers.h"
@@ -69,6 +71,7 @@
 
 #include "src/slurmctld/acct_policy.h"
 #include "src/slurmctld/agent.h"
+#include "src/slurmctld/burst_buffer.h"
 #include "src/slurmctld/front_end.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/licenses.h"
@@ -83,6 +86,9 @@
 #include "src/slurmctld/state_save.h"
 
 #define _DEBUG 0
+#ifndef BB_STAGE_ARRAY_TASK_CNT
+#  define BB_STAGE_ARRAY_TASK_CNT 4
+#endif
 #define BUILD_TIMEOUT 2000000	/* Max build_job_queue() run time in usec */
 #define MAX_FAILED_RESV 10
 #define MAX_RETRIES 10
@@ -113,7 +119,6 @@ static int	_valid_node_feature(char *feature);
 #ifndef HAVE_FRONT_END
 static void *	_wait_boot(void *arg);
 #endif
-
 static int	build_queue_timeout = BUILD_TIMEOUT;
 static int	save_last_part_update = 0;
 
@@ -135,7 +140,7 @@ extern diag_stats_t slurmctld_diag_stats;
  * IN  user_id - user id
  * IN  job_name - job name constraint
  * RET the job queue
- * NOTE: the caller must call list_destroy() on RET value to free memory
+ * NOTE: the caller must call FREE_NULL_LIST() on RET value to free memory
  */
 static List _build_user_job_list(uint32_t user_id, char* job_name)
 {
@@ -165,6 +170,7 @@ static void _job_queue_append(List job_queue, struct job_record *job_ptr,
 	job_queue_rec_t *job_queue_rec;
 
 	job_queue_rec = xmalloc(sizeof(job_queue_rec_t));
+	job_queue_rec->array_task_id = job_ptr->array_task_id;
 	job_queue_rec->job_id   = job_ptr->job_id;
 	job_queue_rec->job_ptr  = job_ptr;
 	job_queue_rec->part_ptr = part_ptr;
@@ -178,10 +184,11 @@ static void _job_queue_rec_del(void *x)
 }
 
 /* Job test for ability to run now, excludes partition specific tests */
-static bool _job_runnable_test1(struct job_record *job_ptr, bool clear_start)
+static bool _job_runnable_test1(struct job_record *job_ptr, bool sched_plugin)
 {
 	bool job_indepen = false;
 	uint16_t cleaning = 0;
+	time_t now = time(NULL);
 
 	xassert(job_ptr->magic == JOB_MAGIC);
 	if (!IS_JOB_PENDING(job_ptr) || IS_JOB_COMPLETING(job_ptr))
@@ -190,7 +197,8 @@ static bool _job_runnable_test1(struct job_record *job_ptr, bool clear_start)
 	select_g_select_jobinfo_get(job_ptr->select_jobinfo,
 				    SELECT_JOBDATA_CLEANING,
 				    &cleaning);
-	if (cleaning) {
+	if (cleaning ||
+	    (job_ptr->step_list && list_count(job_ptr->step_list))) {
 		/* Job's been requeued and the
 		 * previous run hasn't finished yet */
 		job_ptr->state_reason = WAIT_CLEANING;
@@ -206,12 +214,12 @@ static bool _job_runnable_test1(struct job_record *job_ptr, bool clear_start)
 	if (job_ptr->state_reason == WAIT_FRONT_END) {
 		job_ptr->state_reason = WAIT_NO_REASON;
 		xfree(job_ptr->state_desc);
-		last_job_update = time(NULL);
+		last_job_update = now;
 	}
 #endif
 
 	job_indepen = job_independent(job_ptr, 0);
-	if (clear_start)
+	if (sched_plugin)
 		job_ptr->start_time = (time_t) 0;
 	if (job_ptr->priority == 0)	{ /* held */
 		if (job_ptr->state_reason != FAIL_BAD_CONSTRAINTS
@@ -220,7 +228,7 @@ static bool _job_runnable_test1(struct job_record *job_ptr, bool clear_start)
 		    && job_ptr->state_reason != WAIT_MAX_REQUEUE) {
 			job_ptr->state_reason = WAIT_HELD;
 			xfree(job_ptr->state_desc);
-			last_job_update = time(NULL);
+			last_job_update = now;
 		}
 		debug3("sched: JobId=%u. State=%s. Reason=%s. Priority=%u.",
 		       job_ptr->job_id,
@@ -240,6 +248,7 @@ static bool _job_runnable_test1(struct job_record *job_ptr, bool clear_start)
 
 	if (!job_indepen)	/* can not run now */
 		return false;
+
 	return true;
 }
 
@@ -287,30 +296,79 @@ static int _delta_tv(struct timeval *tv)
 }
 /*
  * build_job_queue - build (non-priority ordered) list of pending jobs
- * IN clear_start - if set then clear the start_time for pending jobs
+ * IN clear_start - if set then clear the start_time for pending jobs,
+ *		    true when called from sched/backfill or sched/builtin
  * IN backfill - true if running backfill scheduler, enforce min time limit
  * RET the job queue
- * NOTE: the caller must call list_destroy() on RET value to free memory
+ * NOTE: the caller must call FREE_NULL_LIST() on RET value to free memory
  */
 extern List build_job_queue(bool clear_start, bool backfill)
 {
+	static time_t last_log_time = 0;
 	List job_queue;
 	ListIterator job_iterator, part_iterator;
-	struct job_record *job_ptr = NULL;
+	struct job_record *job_ptr = NULL, *new_job_ptr;
 	struct part_record *part_ptr;
-	int reason;
+	int i, pend_cnt, reason;
 	struct timeval start_tv = {0, 0};
 	int tested_jobs = 0;
+	char jobid_buf[32];
+	int job_part_pairs = 0;
 
+	(void) _delta_tv(&start_tv);
 	job_queue = list_create(_job_queue_rec_del);
+
+	/* Create individual job records for job arrays that need burst buffer
+	 * staging */
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		if (!job_ptr->burst_buffer || !job_ptr->array_recs ||
+		    !job_ptr->array_recs->task_id_bitmap ||
+		    (job_ptr->array_task_id != NO_VAL))
+			continue;
+		if ((i = bit_ffs(job_ptr->array_recs->task_id_bitmap)) < 0)
+			continue;
+		pend_cnt = num_pending_job_array_tasks(job_ptr->array_job_id);
+		if (pend_cnt >= BB_STAGE_ARRAY_TASK_CNT)
+			continue;
+		if (job_ptr->array_recs->task_cnt < 1)
+			continue;
+		if (job_ptr->array_recs->task_cnt == 1) {
+			job_ptr->array_task_id = i;
+			job_array_post_sched(job_ptr);
+			continue;
+		}
+		job_ptr->array_task_id = i;
+		new_job_ptr = job_array_split(job_ptr);
+		if (new_job_ptr) {
+			debug("%s: Split out %s for burst buffer use", __func__,
+			      jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+			new_job_ptr->job_state = JOB_PENDING;
+			new_job_ptr->start_time = (time_t) 0;
+			/* Do NOT clear db_index here, it is handled when
+			 * task_id_str is created elsewhere */
+			(void) bb_g_job_validate2(job_ptr, NULL);
+		} else {
+			error("%s: Unable to copy record for %s", __func__,
+			      jobid2fmt(job_ptr, jobid_buf, sizeof(jobid_buf)));
+		}
+	}
+	list_iterator_destroy(job_iterator);
+
 	job_iterator = list_iterator_create(job_list);
 	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
 		if (((tested_jobs % 100) == 0) &&
 		    (_delta_tv(&start_tv) >= build_queue_timeout)) {
-			info("build_job_queue has been running for %d usec, "
-			     "exiting with %d of %d jobs tested",
-			     build_queue_timeout, tested_jobs,
-			     list_count(job_list));
+			time_t now = time(NULL);
+			if (difftime(now, last_log_time) > 600) {
+				/* Log at most once every 10 minutes */
+				info("%s has run for %d usec, exiting with %d "
+				     "of %d jobs tested, %d job-partition "
+				     "pairs added",
+				     __func__, build_queue_timeout, tested_jobs,
+				     list_count(job_list), job_part_pairs);
+				last_log_time = now;
+			}
 			break;
 		}
 		tested_jobs++;
@@ -337,6 +395,7 @@ extern List build_job_queue(bool clear_start, bool backfill)
 				inx++;
 				if (reason != WAIT_NO_REASON)
 					continue;
+				job_part_pairs++;
 				if (job_ptr->priority_array) {
 					_job_queue_append(job_queue, job_ptr,
 							  part_ptr,
@@ -365,6 +424,7 @@ extern List build_job_queue(bool clear_start, bool backfill)
 			}
 			if (!_job_runnable_test2(job_ptr, backfill))
 				continue;
+			job_part_pairs++;
 			_job_queue_append(job_queue, job_ptr,
 					  job_ptr->part_ptr, job_ptr->priority);
 		}
@@ -499,7 +559,7 @@ static void _do_diag_stats(long delta_t)
  * fini_job_ptr IN - Pointer to job that just completed and needs replacement
  * RET true if there are pending jobs that might use the resources
  */
-extern bool replace_batch_job(slurm_msg_t * msg, void *fini_job)
+extern bool replace_batch_job(slurm_msg_t * msg, void *fini_job, bool locked)
 {
 	static int select_serial = -1;
 	/* Locks: Read config, write job, write node, read partition */
@@ -527,13 +587,15 @@ extern bool replace_batch_job(slurm_msg_t * msg, void *fini_job)
 
 	now = time(NULL);
 	min_age = now - slurmctld_conf.min_job_age;
-	lock_slurmctld(job_write_lock);
+	if (!locked)
+		lock_slurmctld(job_write_lock);
 	if (!fini_job_ptr->job_resrcs ||
 	    !fini_job_ptr->job_resrcs->node_bitmap) {
 		/* This should never happen, but if it does, avoid using
 		 * a bad pointer below. */
 		error("job_resrcs empty for job %u", fini_job_ptr->job_id);
-		unlock_slurmctld(job_write_lock);
+		if (!locked)
+			unlock_slurmctld(job_write_lock);
 		goto send_reply;
 	}
 	job_iterator = list_iterator_create(job_list);
@@ -558,11 +620,12 @@ extern bool replace_batch_job(slurm_msg_t * msg, void *fini_job)
 				/* If we don't have a db_index by now and we
 				 * are running with the slurmdbd lets put it on
 				 * the list to be handled later when it comes
-				 * back up since we won't get another chance */
-				if (with_slurmdbd && !job_ptr->db_index) {
-					jobacct_storage_g_job_start(acct_db_conn,
-								    job_ptr);
-				}
+				 * back up since we won't get another chance.
+				 * This is fine because start() doesn't wait
+				 * for db_index for a finished job.
+				 */
+				jobacct_storage_job_start_direct(acct_db_conn,
+								 job_ptr);
 				list_delete_item(job_iterator);
 			}
 			continue;
@@ -590,8 +653,8 @@ next_part:		part_ptr = (struct part_record *)
 
 		/* Test for valid account, QOS and required nodes on each pass */
 		if (job_ptr->state_reason == FAIL_ACCOUNT) {
-			slurmdb_association_rec_t assoc_rec;
-			memset(&assoc_rec, 0, sizeof(slurmdb_association_rec_t));
+			slurmdb_assoc_rec_t assoc_rec;
+			memset(&assoc_rec, 0, sizeof(slurmdb_assoc_rec_t));
 			assoc_rec.acct      = job_ptr->account;
 			if (job_ptr->part_ptr)
 				assoc_rec.partition = job_ptr->part_ptr->name;
@@ -599,7 +662,7 @@ next_part:		part_ptr = (struct part_record *)
 
 			if (!assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
 						     accounting_enforce,
-						     (slurmdb_association_rec_t **)
+						     (slurmdb_assoc_rec_t **)
 						     &job_ptr->assoc_ptr,
 						     false)) {
 				job_ptr->state_reason = WAIT_NO_REASON;
@@ -611,12 +674,12 @@ next_part:		part_ptr = (struct part_record *)
 			}
 		}
 		if (job_ptr->qos_id) {
-			slurmdb_association_rec_t *assoc_ptr =
-				(slurmdb_association_rec_t *)job_ptr->assoc_ptr;
+			slurmdb_assoc_rec_t *assoc_ptr =
+				(slurmdb_assoc_rec_t *)job_ptr->assoc_ptr;
 			if (assoc_ptr &&
 			    !bit_test(assoc_ptr->usage->valid_qos,
 				      job_ptr->qos_id) &&
-			    !job_ptr->limit_set_qos) {
+			    !job_ptr->limit_set.qos) {
 				info("sched: JobId=%u has invalid QOS",
 					job_ptr->job_id);
 				xfree(job_ptr->state_desc);
@@ -700,9 +763,9 @@ next_part:		part_ptr = (struct part_record *)
 		job_ptr->details->exc_node_bitmap = orig_exc_bitmap;
 		if (error_code == SLURM_SUCCESS) {
 			last_job_update = now;
-			info("sched: Allocate JobId=%u NodeList=%s #CPUs=%u",
-			     job_ptr->job_id, job_ptr->nodes,
-			     job_ptr->total_cpus);
+			info("sched: Allocate JobId=%u Partition=%s NodeList=%s #CPUs=%u",
+			     job_ptr->job_id, job_ptr->part_ptr->name,
+			     job_ptr->nodes, job_ptr->total_cpus);
 			if (job_ptr->details->prolog_running == 0) {
 				launch_msg = build_launch_job_msg(job_ptr,
 							msg->protocol_version);
@@ -710,7 +773,8 @@ next_part:		part_ptr = (struct part_record *)
 		}
 		break;
 	}
-	unlock_slurmctld(job_write_lock);
+	if (!locked)
+		unlock_slurmctld(job_write_lock);
 	if (job_iterator)
 		list_iterator_destroy(job_iterator);
 	if (part_iterator)
@@ -718,15 +782,33 @@ next_part:		part_ptr = (struct part_record *)
 
 send_reply:
 	if (launch_msg) {
-		slurm_msg_t response_msg;
-		slurm_msg_t_init(&response_msg);
-		response_msg.flags = msg->flags;
-		response_msg.protocol_version = msg->protocol_version;
-		response_msg.address = msg->address;
-		response_msg.msg_type = REQUEST_BATCH_JOB_LAUNCH;
-		response_msg.data = launch_msg;
-		slurm_send_node_msg(msg->conn_fd, &response_msg);
-		slurmctld_free_batch_job_launch_msg(launch_msg);
+		if (msg->msg_index && msg->ret_list) {
+			slurm_msg_t *resp_msg = xmalloc_nz(sizeof(slurm_msg_t));
+			slurm_msg_t_init(resp_msg);
+
+			resp_msg->msg_index = msg->msg_index;
+			resp_msg->ret_list = NULL;
+			/* The return list here is the list we are sending to
+			   the node, so after we attach this message to it set
+			   it to NULL to remove it.
+			*/
+			resp_msg->flags = msg->flags;
+			resp_msg->protocol_version = msg->protocol_version;
+			resp_msg->address = msg->address;
+			resp_msg->msg_type = REQUEST_BATCH_JOB_LAUNCH;
+			resp_msg->data = launch_msg;
+			list_append(msg->ret_list, resp_msg);
+		} else {
+			slurm_msg_t response_msg;
+			slurm_msg_t_init(&response_msg);
+			response_msg.flags = msg->flags;
+			response_msg.protocol_version = msg->protocol_version;
+			response_msg.address = msg->address;
+			response_msg.msg_type = REQUEST_BATCH_JOB_LAUNCH;
+			response_msg.data = launch_msg;
+			slurm_send_node_msg(msg->conn_fd, &response_msg);
+			slurmctld_free_batch_job_launch_msg(launch_msg);
+		}
 		return false;
 	}
 	slurm_send_rc_msg(msg, SLURM_SUCCESS);
@@ -780,6 +862,9 @@ extern int schedule(uint32_t job_limit)
 	struct timeval now;
 	long delta_t;
 
+	if (slurmctld_config.scheduling_disabled)
+		return 0;
+
 	gettimeofday(&now, NULL);
 	if (sched_last.tv_sec == 0) {
 		delta_t = sched_min_interval;
@@ -885,8 +970,8 @@ static int _schedule(uint32_t job_limit)
 	ListIterator job_iterator = NULL, part_iterator = NULL;
 	List job_queue = NULL;
 	int failed_part_cnt = 0, failed_resv_cnt = 0, job_cnt = 0;
-	int error_code, i, j, part_cnt, time_limit, pend_time;
-	uint32_t job_depth = 0;
+	int error_code, bb, i, j, part_cnt, time_limit, pend_time;
+	uint32_t job_depth = 0, array_task_id;
 	job_queue_rec_t *job_queue_rec;
 	struct job_record *job_ptr = NULL;
 	struct part_record *part_ptr, **failed_parts = NULL;
@@ -894,7 +979,7 @@ static int _schedule(uint32_t job_limit)
 	struct slurmctld_resv **failed_resv = NULL;
 	bitstr_t *save_avail_node_bitmap;
 	struct part_record **sched_part_ptr = NULL;
-	int *sched_part_jobs = NULL;
+	int *sched_part_jobs = NULL, bb_wait_cnt = 0;
 	/* Locks: Read config, write job, write node, read partition */
 	slurmctld_lock_t job_write_lock =
 	    { READ_LOCK, WRITE_LOCK, WRITE_LOCK, READ_LOCK };
@@ -914,21 +999,17 @@ static int _schedule(uint32_t job_limit)
 	static int def_job_limit = 100;
 	static int max_jobs_per_part = 0;
 	static int defer_rpc_cnt = 0;
-	time_t now, sched_start;
+	time_t now, last_job_sched_start, sched_start;
 	uint32_t reject_array_job_id = 0;
 	struct part_record *reject_array_part = NULL;
 	uint16_t reject_state_reason = WAIT_NO_REASON;
+	char job_id_buf[32];
 	char *unavail_node_str = NULL;
 #if HAVE_SYS_PRCTL_H
 	char get_name[16];
 #endif
 	DEF_TIMERS;
 
-#ifdef HAVE_ALPS_CRAY
-	if (!slurmctld_primary)
-		return 0;
-#endif
-
 	if (slurmctld_config.shutdown_time)
 		return 0;
 
@@ -1092,6 +1173,7 @@ static int _schedule(uint32_t job_limit)
 	lock_slurmctld(job_write_lock);
 	now = time(NULL);
 	sched_start = now;
+	last_job_sched_start = now;
 	START_TIMER;
 	if (!avail_front_end(NULL)) {
 		ListIterator job_iterator = list_iterator_create(job_list);
@@ -1101,6 +1183,8 @@ static int _schedule(uint32_t job_limit)
 				continue;
 			if ((job_ptr->state_reason != WAIT_NO_REASON) &&
 			    (job_ptr->state_reason != WAIT_RESOURCES) &&
+			    (job_ptr->state_reason != WAIT_POWER_NOT_AVAIL) &&
+			    (job_ptr->state_reason != WAIT_POWER_RESERVED) &&
 			    (job_ptr->state_reason != WAIT_NODE_NOT_AVAIL))
 				continue;
 			job_ptr->state_reason = WAIT_FRONT_END;
@@ -1213,6 +1297,7 @@ next_part:			part_ptr = (struct part_record *)
 			job_queue_rec = list_pop(job_queue);
 			if (!job_queue_rec)
 				break;
+			array_task_id = job_queue_rec->array_task_id;
 			job_ptr  = job_queue_rec->job_ptr;
 			part_ptr = job_queue_rec->part_ptr;
 			xfree(job_queue_rec);
@@ -1222,8 +1307,14 @@ next_part:			part_ptr = (struct part_record *)
 				last_job_update = now;
 				continue;
 			}
-			if (!IS_JOB_PENDING(job_ptr))
-				continue;  /* started in another partition */
+			if ((job_ptr->array_task_id != array_task_id) &&
+			    (array_task_id == NO_VAL)) {
+				/* Job array element started in other partition,
+				 * reset pointer to "master" job array record */
+				job_ptr = find_job_record(job_ptr->array_job_id);
+			}
+			if (!job_ptr || !IS_JOB_PENDING(job_ptr))
+				continue;	/* started in other partition */
 			job_ptr->part_ptr = part_ptr;
 		}
 		if (job_ptr->preempt_in_progress)
@@ -1337,8 +1428,8 @@ next_task:
 
 		/* Test for valid account, QOS and required nodes on each pass */
 		if (job_ptr->state_reason == FAIL_ACCOUNT) {
-			slurmdb_association_rec_t assoc_rec;
-			memset(&assoc_rec, 0, sizeof(slurmdb_association_rec_t));
+			slurmdb_assoc_rec_t assoc_rec;
+			memset(&assoc_rec, 0, sizeof(slurmdb_assoc_rec_t));
 			assoc_rec.acct      = job_ptr->account;
 			if (job_ptr->part_ptr)
 				assoc_rec.partition = job_ptr->part_ptr->name;
@@ -1346,7 +1437,7 @@ next_task:
 
 			if (!assoc_mgr_fill_in_assoc(acct_db_conn, &assoc_rec,
 						    accounting_enforce,
-						    (slurmdb_association_rec_t **)
+						    (slurmdb_assoc_rec_t **)
 						     &job_ptr->assoc_ptr,
 						     false)) {
 				job_ptr->state_reason = WAIT_NO_REASON;
@@ -1363,13 +1454,13 @@ next_task:
 			}
 		}
 		if (job_ptr->qos_id) {
-			slurmdb_association_rec_t *assoc_ptr;
-			assoc_ptr = (slurmdb_association_rec_t *)job_ptr->assoc_ptr;
+			slurmdb_assoc_rec_t *assoc_ptr;
+			assoc_ptr = (slurmdb_assoc_rec_t *)job_ptr->assoc_ptr;
 			if (assoc_ptr
 			    && (accounting_enforce & ACCOUNTING_ENFORCE_QOS)
 			    && !bit_test(assoc_ptr->usage->valid_qos,
 					 job_ptr->qos_id)
-			    && !job_ptr->limit_set_qos) {
+			    && !job_ptr->limit_set.qos) {
 				debug("sched: JobId=%u has invalid QOS",
 				      job_ptr->job_id);
 				xfree(job_ptr->state_desc);
@@ -1442,9 +1533,37 @@ next_task:
 			continue;
 		}
 
+		last_job_sched_start = MAX(last_job_sched_start,
+					   job_ptr->start_time);
+		bb = bb_g_job_test_stage_in(job_ptr, false);
+		if (bb != 1) {
+			if (bb == 0) {
+				job_ptr->state_reason =
+					WAIT_BURST_BUFFER_STAGING;
+			} else {
+				job_ptr->state_reason =
+					WAIT_BURST_BUFFER_RESOURCE;
+			}
+			if (job_ptr->start_time == 0) {
+				job_ptr->start_time = last_job_sched_start;
+				bb_wait_cnt++;
+			}
+			xfree(job_ptr->state_desc);
+			last_job_update = now;
+			debug3("sched: JobId=%u. State=%s. Reason=%s. "
+			       "Priority=%u.",
+			       job_ptr->job_id,
+			       job_state_string(job_ptr->job_state),
+			       job_reason_string(job_ptr->state_reason),
+			       job_ptr->priority);
+			continue;
+		}
+
 		error_code = select_nodes(job_ptr, false, NULL,
 					  unavail_node_str, NULL);
-		if (error_code == ESLURM_NODES_BUSY) {
+		if ((error_code == ESLURM_NODES_BUSY) ||
+		    (error_code == ESLURM_POWER_NOT_AVAIL) ||
+		    (error_code == ESLURM_POWER_RESERVED)) {
 			debug3("sched: JobId=%u. State=%s. Reason=%s. "
 			       "Priority=%u. Partition=%s.",
 			       job_ptr->job_id,
@@ -1555,31 +1674,15 @@ next_task:
 			} else {
 				sprintf(tmp_char,"%s",job_ptr->nodes);
 			}
-			if (job_ptr->array_task_id != NO_VAL) {
-				info("sched: Allocate JobId=%u_%u (%u) "
-				     "MidplaneList=%s",
-				     job_ptr->array_job_id,
-				     job_ptr->array_task_id,
-				     job_ptr->job_id, tmp_char);
-			} else {
-				info("sched: Allocate JobId=%u MidplaneList=%s",
-				     job_ptr->job_id, tmp_char);
-			}
+
+			info("sched: Allocate %s MidplaneList=%s",
+			     jobid2fmt(job_ptr, job_id_buf, sizeof(job_id_buf)),
+			     tmp_char);
 			xfree(ionodes);
 #else
-			if (job_ptr->array_task_id != NO_VAL) {
-				info("sched: Allocate JobId=%u_%u (%u) "
-				     "NodeList=%s #CPUs=%u",
-				     job_ptr->array_job_id,
-				     job_ptr->array_task_id,
-				     job_ptr->job_id, job_ptr->nodes,
-				     job_ptr->total_cpus);
-			} else {
-				info("sched: Allocate JobId=%u NodeList=%s "
-				     "#CPUs=%u",
-				     job_ptr->job_id, job_ptr->nodes,
-				     job_ptr->total_cpus);
-			}
+			info("sched: Allocate %s NodeList=%s #CPUs=%u",
+			     jobid2fmt(job_ptr, job_id_buf, sizeof(job_id_buf)),
+			     job_ptr->nodes, job_ptr->total_cpus);
 #endif
 			if (job_ptr->batch_flag == 0)
 				srun_allocate(job_ptr->job_id);
@@ -1591,7 +1694,8 @@ next_task:
 			    (job_ptr->array_task_id != NO_VAL)) {
 				/* Try starting another task of the job array */
 				job_ptr = find_job_record(job_ptr->array_job_id);
-				if (job_ptr && IS_JOB_PENDING(job_ptr))
+				if (job_ptr && IS_JOB_PENDING(job_ptr) &&
+				    (bb_g_job_test_stage_in(job_ptr,false) ==1))
 					goto next_task;
 			}
 			continue;
@@ -1604,9 +1708,10 @@ next_task:
 		} else if ((error_code !=
 			    ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE) &&
 			   (error_code != ESLURM_NODE_NOT_AVAIL)      &&
+			   (error_code != ESLURM_INVALID_BURST_BUFFER_REQUEST)&&
 			   (error_code != ESLURM_ACCOUNTING_POLICY)) {
 			info("sched: schedule: %s non-runnable:%s",
-			     jobid2str(job_ptr, job_id_str),
+			     jobid2str(job_ptr, job_id_str, sizeof(job_id_str)),
 			     slurm_strerror(error_code));
 			if (!wiki_sched) {
 				last_job_update = now;
@@ -1626,6 +1731,9 @@ next_task:
 		}
 	}
 
+	if (bb_wait_cnt)
+		(void) bb_g_job_try_stage_in();
+
 	save_last_part_update = last_part_update;
 	FREE_NULL_BITMAP(avail_node_bitmap);
 	avail_node_bitmap = save_avail_node_bitmap;
@@ -1675,6 +1783,7 @@ extern int sort_job_queue2(void *x, void *y)
 	bool has_resv1, has_resv2;
 	static time_t config_update = 0;
 	static bool preemption_enabled = true;
+	uint32_t job_id1, job_id2;
 	uint32_t p1, p2;
 
 	/* The following block of code is designed to minimize run time in
@@ -1726,7 +1835,21 @@ extern int sort_job_queue2(void *x, void *y)
 		return -1;
 
 	/* If the priorities are the same sort by increasing job id's */
-	if (job_rec1->job_id > job_rec2->job_id)
+	if (job_rec1->array_task_id == NO_VAL)
+		job_id1 = job_rec1->job_id;
+	else
+		job_id1 = job_rec1->job_ptr->array_job_id;
+	if (job_rec2->array_task_id == NO_VAL)
+		job_id2 = job_rec2->job_id;
+	else
+		job_id2 = job_rec2->job_ptr->array_job_id;
+	if (job_id1 > job_id2)
+		return 1;
+	else if (job_id1 < job_id2)
+		return -1;
+
+	/* If job IDs match compare task IDs */
+	if (job_rec1->array_task_id > job_rec2->array_task_id)
 		return 1;
 
 	return -1;
@@ -1846,6 +1969,22 @@ extern batch_job_launch_msg_t *build_launch_job_msg(struct job_record *job_ptr,
 	launch_msg_ptr->select_jobinfo = select_g_select_jobinfo_copy(
 					 job_ptr->select_jobinfo);
 
+	if (job_ptr->account) {
+		launch_msg_ptr->account = xstrdup(job_ptr->account);
+	}
+	if (job_ptr->qos_ptr) {
+		slurmdb_qos_rec_t *qos;
+
+		qos = (slurmdb_qos_rec_t *)job_ptr->qos_ptr;
+		if (strcmp(qos->description, "Normal QOS default") == 0)
+			launch_msg_ptr->qos = xstrdup("normal");
+		else
+			launch_msg_ptr->qos = xstrdup(qos->description);
+	}
+	if (job_ptr->resv_name) {
+		launch_msg_ptr->resv_name = xstrdup(job_ptr->resv_name);
+	}
+
 	return launch_msg_ptr;
 }
 
@@ -1917,6 +2056,7 @@ extern int make_batch_job_cred(batch_job_launch_msg_t *launch_msg_ptr,
 	cred_arg.stepid    = launch_msg_ptr->step_id;
 	cred_arg.uid       = launch_msg_ptr->uid;
 
+	cred_arg.job_constraints     = job_ptr->details->features;
 	cred_arg.job_hostlist        = job_resrcs_ptr->nodes;
 	cred_arg.job_core_bitmap     = job_resrcs_ptr->core_bitmap;
 	cred_arg.job_core_spec       = job_ptr->details->core_spec;
@@ -1982,7 +2122,7 @@ extern void print_job_dependency(struct job_record *job_ptr)
 {
 	ListIterator depend_iter;
 	struct depend_spec *dep_ptr;
-	char *array_task_id, *dep_str;
+	char *array_task_id, *dep_flags, *dep_str;
 
 	info("Dependency information for job %u", job_ptr->job_id);
 	if ((job_ptr->details == NULL) ||
@@ -1996,6 +2136,11 @@ extern void print_job_dependency(struct job_record *job_ptr)
 			continue;
 		}
 
+		if (dep_ptr->depend_flags & SLURM_FLAGS_OR)
+			dep_flags = "OR";
+		else
+			dep_flags = "";
+
 		if      (dep_ptr->depend_type == SLURM_DEPEND_AFTER)
 			dep_str = "after";
 		else if (dep_ptr->depend_type == SLURM_DEPEND_AFTER_ANY)
@@ -2012,12 +2157,13 @@ extern void print_job_dependency(struct job_record *job_ptr)
 			array_task_id = "_*";
 		else
 			array_task_id = "";
-		info("  %s:%u%s", dep_str, dep_ptr->job_id, array_task_id);
+		info("  %s:%u%s %s",
+		     dep_str, dep_ptr->job_id, array_task_id, dep_flags);
 	}
 	list_iterator_destroy(depend_iter);
 }
 
-static void _depend_list2str(struct job_record *job_ptr)
+static void _depend_list2str(struct job_record *job_ptr, bool set_or_flag)
 {
 	ListIterator depend_iter;
 	struct depend_spec *dep_ptr;
@@ -2056,7 +2202,13 @@ static void _depend_list2str(struct job_record *job_ptr)
 			array_task_id = "";
 		xstrfmtcat(job_ptr->details->dependency, "%s%s:%u%s",
 			   sep, dep_str, dep_ptr->job_id, array_task_id);
-		sep = ",";
+
+		if (set_or_flag)
+			dep_ptr->depend_flags |= SLURM_FLAGS_OR;
+		if (dep_ptr->depend_flags & SLURM_FLAGS_OR)
+			sep = "?";
+		else
+			sep = ",";
 	}
 	list_iterator_destroy(depend_iter);
 }
@@ -2072,6 +2224,7 @@ extern int test_job_dependency(struct job_record *job_ptr)
 	ListIterator depend_iter, job_iterator;
 	struct depend_spec *dep_ptr;
 	bool failure = false, depends = false, rebuild_str = false;
+	bool or_satisfied = false;
  	List job_queue = NULL;
  	bool run_now;
 	int results = 0;
@@ -2128,7 +2281,7 @@ extern int test_job_dependency(struct job_record *job_ptr)
  				}
  			}
 			list_iterator_destroy(job_iterator);
-			list_destroy(job_queue);
+			FREE_NULL_LIST(job_queue);
 			/* job can run now, delete dependency */
  			if (run_now)
  				list_delete_item(depend_iter);
@@ -2232,13 +2385,19 @@ extern int test_job_dependency(struct job_record *job_ptr)
 		} else
 			failure = true;
 		if (clear_dep) {
-			list_delete_item(depend_iter);
 			rebuild_str = true;
+			if (dep_ptr->depend_flags & SLURM_FLAGS_OR) {
+				or_satisfied = true;
+				break;
+			}
+			list_delete_item(depend_iter);
 		}
 	}
 	list_iterator_destroy(depend_iter);
+	if (or_satisfied)
+		list_flush(job_ptr->details->depend_list);
 	if (rebuild_str)
-		_depend_list2str(job_ptr);
+		_depend_list2str(job_ptr, false);
 	if (list_count(job_ptr->details->depend_list) == 0)
 		xfree(job_ptr->details->dependency);
 
@@ -2276,7 +2435,8 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 	List new_depend_list = NULL;
 	struct depend_spec *dep_ptr;
 	struct job_record *dep_job_ptr;
-	bool expand_cnt = 0;
+	int expand_cnt = 0;
+	bool or_flag = false;
 
 	if (job_ptr->details == NULL)
 		return EINVAL;
@@ -2286,10 +2446,7 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 	if ((new_depend == NULL) || (new_depend[0] == '\0') ||
 	    ((new_depend[0] == '0') && (new_depend[1] == '\0'))) {
 		xfree(job_ptr->details->dependency);
-		if (job_ptr->details->depend_list) {
-			list_destroy(job_ptr->details->depend_list);
-			job_ptr->details->depend_list = NULL;
-		}
+		FREE_NULL_LIST(job_ptr->details->depend_list);
 		return rc;
 
 	}
@@ -2412,7 +2569,7 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 			if ((sep_ptr2 == NULL) ||
 			    (job_id == 0) || (job_id == job_ptr->job_id) ||
 			    ((sep_ptr2[0] != '\0') && (sep_ptr2[0] != ',') &&
-			     (sep_ptr2[0] != ':'))) {
+			     (sep_ptr2[0] != '?')  && (sep_ptr2[0] != ':'))) {
 				rc = ESLURM_DEPENDENCY;
 				break;
 			}
@@ -2445,15 +2602,31 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 				break;
 			}
 			if (depend_type == SLURM_DEPEND_EXPAND) {
+				assoc_mgr_lock_t locks = {
+					NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+					READ_LOCK, NO_LOCK, NO_LOCK };
+
 				job_ptr->details->expanding_jobid = job_id;
 				/* GRES configuration of this job must match
 				 * the job being expanded */
 				xfree(job_ptr->gres);
 				job_ptr->gres = xstrdup(dep_job_ptr->gres);
-				if (job_ptr->gres_list)
-					list_destroy(job_ptr->gres_list);
-				gres_plugin_job_state_validate(job_ptr->gres,
-						&job_ptr->gres_list);
+				FREE_NULL_LIST(job_ptr->gres_list);
+				gres_plugin_job_state_validate(
+					job_ptr->gres, &job_ptr->gres_list);
+				assoc_mgr_lock(&locks);
+				gres_set_job_tres_cnt(job_ptr->gres_list,
+						      job_ptr->details ?
+						      job_ptr->details->
+						      min_nodes : 0,
+						      job_ptr->tres_req_cnt,
+						      true);
+				xfree(job_ptr->tres_req_str);
+				job_ptr->tres_req_str =
+					assoc_mgr_make_tres_str_from_array(
+						job_ptr->tres_req_cnt,
+						TRES_STR_FLAG_SIMPLE, true);
+				assoc_mgr_unlock(&locks);
 			}
 			if (dep_job_ptr) {	/* job still active */
 				dep_ptr = xmalloc(sizeof(struct depend_spec));
@@ -2472,10 +2645,14 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 				break;
 			sep_ptr = sep_ptr2 + 1;	/* skip over ":" */
 		}
-		if (sep_ptr2 && (sep_ptr2[0] == ','))
+		if (sep_ptr2 && (sep_ptr2[0] == ',')) {
 			tok = sep_ptr2 + 1;
-		else
+		} else if (sep_ptr2 && (sep_ptr2[0] == '?')) {
+			tok = sep_ptr2 + 1;
+			or_flag = true;
+		} else {
 			break;
+		}
 	}
 
 	if (rc == SLURM_SUCCESS) {
@@ -2486,15 +2663,14 @@ extern int update_job_dependency(struct job_record *job_ptr, char *new_depend)
 	}
 
 	if (rc == SLURM_SUCCESS) {
-		if (job_ptr->details->depend_list)
-			list_destroy(job_ptr->details->depend_list);
+		FREE_NULL_LIST(job_ptr->details->depend_list);
 		job_ptr->details->depend_list = new_depend_list;
-		_depend_list2str(job_ptr);
+		_depend_list2str(job_ptr, or_flag);
 #if _DEBUG
 		print_job_dependency(job_ptr);
 #endif
 	} else {
-		list_destroy(new_depend_list);
+		FREE_NULL_LIST(new_depend_list);
 	}
 	return rc;
 }
@@ -2704,7 +2880,7 @@ extern int job_start_data(job_desc_msg_t *job_desc_msg,
 			max_nodes = MIN(job_ptr->details->max_nodes,
 					part_ptr->max_nodes);
 		max_nodes = MIN(max_nodes, 500000);	/* prevent overflows */
-		if (!job_ptr->limit_set_max_nodes &&
+		if (!job_ptr->limit_set.tres[TRES_ARRAY_NODE] &&
 		    job_ptr->details->max_nodes)
 			req_nodes = max_nodes;
 		else
@@ -2764,10 +2940,8 @@ extern int job_start_data(job_desc_msg_t *job_desc_msg,
 		rc = ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
 	}
 
-	if (preemptee_candidates)
-		list_destroy(preemptee_candidates);
-	if (preemptee_job_list)
-		list_destroy(preemptee_job_list);
+	FREE_NULL_LIST(preemptee_candidates);
+	FREE_NULL_LIST(preemptee_job_list);
 	FREE_NULL_BITMAP(avail_bitmap);
 	return rc;
 }
@@ -2925,6 +3099,7 @@ static void *_run_epilog(void *arg)
 	pid_t cpid;
 	int i, status, wait_rc;
 	char *argv[2];
+	uint16_t tm;
 
 	argv[0] = epilog_arg->epilog_slurmctld;
 	argv[1] = NULL;
@@ -2945,15 +3120,17 @@ static void *_run_epilog(void *arg)
 		exit(127);
 	}
 
+	/* Prolog and epilog use the same timeout
+	 */
+	tm = slurm_get_prolog_timeout();
 	while (1) {
-		wait_rc = waitpid(cpid, &status, 0);
+		wait_rc = waitpid_timeout(__func__, cpid, &status, tm);
 		if (wait_rc < 0) {
 			if (errno == EINTR)
 				continue;
-			error("epilog_slurmctld waitpid error: %m");
+			error("%s: waitpid error: %m", __func__);
 			break;
 		} else if (wait_rc > 0) {
-			killpg(cpid, SIGKILL);	/* kill children too */
 			break;
 		}
 	}
@@ -3134,6 +3311,8 @@ extern int prolog_slurmctld(struct job_record *job_ptr)
 	if (job_ptr->details)
 		job_ptr->details->prolog_running++;
 
+	job_ptr->job_state |= JOB_CONFIGURING;
+
 	slurm_attr_init(&thread_attr_prolog);
 	pthread_attr_setdetachstate(&thread_attr_prolog,
 				    PTHREAD_CREATE_DETACHED);
@@ -3167,6 +3346,7 @@ static void *_run_prolog(void *arg)
 	bitstr_t *node_bitmap = NULL;
 	time_t now = time(NULL);
 	uint16_t resume_timeout = slurm_get_resume_timeout();
+	uint16_t tm;
 
 	lock_slurmctld(config_read_lock);
 	argv[0] = xstrdup(slurmctld_conf.prolog_slurmctld);
@@ -3201,18 +3381,19 @@ static void *_run_prolog(void *arg)
 		exit(127);
 	}
 
+	tm = slurm_get_prolog_timeout();
 	while (1) {
-		wait_rc = waitpid(cpid, &status, 0);
+		wait_rc = waitpid_timeout(__func__, cpid, &status, tm);
 		if (wait_rc < 0) {
 			if (errno == EINTR)
 				continue;
-			error("prolog_slurmctld waitpid error: %m");
+			error("%s: waitpid error: %m", __func__);
 			break;
 		} else if (wait_rc > 0) {
-			killpg(cpid, SIGKILL);	/* kill children too */
 			break;
 		}
 	}
+
 	if (status != 0) {
 		bool kill_job = false;
 		slurmctld_lock_t job_write_lock = {
@@ -3246,13 +3427,7 @@ static void *_run_prolog(void *arg)
 		if (job_ptr == NULL)
 			error("prolog_slurmctld job %u now defunct", job_id);
 	}
-	if (job_ptr) {
-		if (job_ptr->details)
-			job_ptr->details->prolog_running--;
-		if (job_ptr->batch_flag &&
-		    (IS_JOB_RUNNING(job_ptr) || IS_JOB_SUSPENDED(job_ptr)))
-			launch_job(job_ptr);
-	}
+	prolog_running_decr(job_ptr);
 	if (job_ptr && job_ptr->node_bitmap) {
 		for (i=0; i<node_record_count; i++) {
 			if (bit_test(job_ptr->node_bitmap, i) == 0)
@@ -3274,6 +3449,23 @@ static void *_run_prolog(void *arg)
 	return NULL;
 }
 
+/* Decrement a job's prolog_running counter and launch the job if zero */
+extern void prolog_running_decr(struct job_record *job_ptr)
+{
+	if (!job_ptr)
+		return;
+
+	if (job_ptr->details && job_ptr->details->prolog_running &&
+	    (--job_ptr->details->prolog_running > 0))
+		return;
+
+	job_ptr->job_state &= ~JOB_CONFIGURING;
+	if (job_ptr->batch_flag &&
+	    (IS_JOB_RUNNING(job_ptr) || IS_JOB_SUSPENDED(job_ptr))) {
+		launch_job(job_ptr);
+	}
+}
+
 /*
  * Copy a job's feature list
  * IN feature_list_src - a job's depend_lst
@@ -3551,3 +3743,46 @@ cleanup_completing(struct job_record *job_ptr)
 
 	slurm_sched_g_schedule();
 }
+
+/*
+ * waitpid_timeout()
+ *
+ *  Same as waitpid(2) but kill process group for pid after timeout secs.
+ */
+int
+waitpid_timeout(const char *name, pid_t pid, int *pstatus, int timeout)
+{
+	int timeout_ms = 1000 * timeout; /* timeout in ms                   */
+	int max_delay =  1000;           /* max delay between waitpid calls */
+	int delay = 10;                  /* initial delay                   */
+	int rc;
+	int options = WNOHANG;
+
+	if (timeout <= 0 || timeout == (uint16_t)NO_VAL)
+		options = 0;
+
+	while ((rc = waitpid (pid, pstatus, options)) <= 0) {
+		if (rc < 0) {
+			if (errno == EINTR)
+				continue;
+			error("waidpid: %m");
+			return -1;
+		}
+		else if (timeout_ms <= 0) {
+			info("%s%stimeout after %ds: killing pgid %d",
+			     name != NULL ? name : "",
+			     name != NULL ? ": " : "",
+			     timeout, pid);
+			killpg(pid, SIGKILL);
+			options = 0;
+		}
+		else {
+			poll(NULL, 0, delay);
+			timeout_ms -= delay;
+			delay = MIN (timeout_ms, MIN(max_delay, delay*2));
+		}
+	}
+
+	killpg(pid, SIGKILL);  /* kill children too */
+	return pid;
+}
diff --git a/src/slurmctld/job_scheduler.h b/src/slurmctld/job_scheduler.h
index aaac0bbbf..6913e9eb6 100644
--- a/src/slurmctld/job_scheduler.h
+++ b/src/slurmctld/job_scheduler.h
@@ -45,6 +45,7 @@
 #include "src/slurmctld/slurmctld.h"
 
 typedef struct job_queue_rec {
+	uint32_t array_task_id;		/* Job array, task ID */
 	uint32_t job_id;		/* Job ID */
 	struct job_record *job_ptr;	/* Pointer to job record */
 	struct part_record *part_ptr;	/* Pointer to partition record. Each
@@ -117,6 +118,9 @@ extern int make_batch_job_cred(batch_job_launch_msg_t *launch_msg_ptr,
 /* Print a job's dependency information based upon job_ptr->depend_list */
 extern void print_job_dependency(struct job_record *job_ptr);
 
+/* Decrement a job's prolog_running counter and launch the job if zero */
+extern void prolog_running_decr(struct job_record *job_ptr);
+
 /*
  * prolog_slurmctld - execute the prolog_slurmctld for a job that has just
  *	been allocated resources.
@@ -142,9 +146,10 @@ extern void rebuild_job_part_list(struct job_record *job_ptr);
  * message type, alternately send a return code fo SLURM_SUCCESS
  * msg IN - The original message from slurmd
  * fini_job_ptr IN - Pointer to job that just completed and needs replacement
+ * locked IN - whether the job_write lock is locked or not.
  * RET true if there are pending jobs that might use the resources
  */
-extern bool replace_batch_job(slurm_msg_t * msg, void *fini_job);
+extern bool replace_batch_job(slurm_msg_t * msg, void *fini_job, bool locked);
 
 /*
  * schedule - attempt to schedule all pending jobs
diff --git a/src/slurmctld/licenses.c b/src/slurmctld/licenses.c
index 62d94a4b9..85818bcf0 100644
--- a/src/slurmctld/licenses.c
+++ b/src/slurmctld/licenses.c
@@ -86,7 +86,7 @@ static inline void _licenses_print(char *header, List licenses, int job_id)
 	list_iterator_destroy(iter);
 }
 
-/* Free a license_t record (for use by list_destroy) */
+/* Free a license_t record (for use by FREE_NULL_LIST) */
 extern void license_free_rec(void *x)
 {
 	licenses_t *license_entry = (licenses_t *) x;
@@ -105,7 +105,7 @@ static int _license_find_rec(void *x, void *key)
 
 	if ((license_entry->name == NULL) || (name == NULL))
 		return 0;
-	if (strcmp(license_entry->name, name))
+	if (xstrcmp(license_entry->name, name))
 		return 0;
 	return 1;
 }
@@ -170,8 +170,7 @@ static List _build_license_list(char *licenses, bool *valid)
 	xfree(tmp_str);
 
 	if (*valid == false) {
-		list_destroy(lic_list);
-		lic_list = NULL;
+		FREE_NULL_LIST(lic_list);
 	}
 	return lic_list;
 }
@@ -313,7 +312,7 @@ extern int license_update(char *licenses)
         }
         list_iterator_destroy(iter);
 
-        list_destroy(license_list);
+        FREE_NULL_LIST(license_list);
         license_list = new_list;
         _licenses_print("update_license", license_list, 0);
         slurm_mutex_unlock(&license_mutex);
@@ -512,20 +511,34 @@ extern void license_free(void)
 /*
  * license_validate - Test if the required licenses are valid
  * IN licenses - required licenses
+ * OUT tres_req_cnt - appropriate counts for each requested license,
+ *                    since this only matters on pending jobs you can
+ *                    send in NULL otherwise
  * OUT valid - true if required licenses are valid and a sufficient number
  *             are configured (though not necessarily available now)
  * RET license_list, must be destroyed by caller
  */
-extern List license_validate(char *licenses, bool *valid)
+extern List license_validate(char *licenses,
+			     uint64_t *tres_req_cnt, bool *valid)
 {
 	ListIterator iter;
 	licenses_t *license_entry, *match;
 	List job_license_list;
+	static bool first_run = 1;
+	static slurmdb_tres_rec_t tres_req;
+	int tres_pos;
 
 	job_license_list = _build_license_list(licenses, valid);
 	if (!job_license_list)
 		return job_license_list;
 
+	/* we only need to init this once */
+	if (first_run) {
+		first_run = 0;
+		memset(&tres_req, 0, sizeof(slurmdb_tres_rec_t));
+		tres_req.type = "license";
+	}
+
 	slurm_mutex_lock(&license_mutex);
 	_licenses_print("request_license", job_license_list, 0);
 	iter = list_iterator_create(job_license_list);
@@ -548,13 +561,20 @@ extern List license_validate(char *licenses, bool *valid)
 			*valid = false;
 			break;
 		}
+
+		if (tres_req_cnt) {
+			tres_req.name = license_entry->name;
+			if ((tres_pos = assoc_mgr_find_tres_pos(
+				     &tres_req, false)) != -1)
+				tres_req_cnt[tres_pos] =
+					(uint64_t)license_entry->total;
+		}
 	}
 	list_iterator_destroy(iter);
 	slurm_mutex_unlock(&license_mutex);
 
 	if (!(*valid)) {
-		list_destroy(job_license_list);
-		job_license_list = NULL;
+		FREE_NULL_LIST(job_license_list);
 	}
 	return job_license_list;
 }
@@ -812,6 +832,124 @@ get_all_license_info(char **buffer_ptr,
 	buffer_ptr[0] = xfer_buf_data(buffer);
 }
 
+extern uint32_t get_total_license_cnt(char *name)
+{
+	uint32_t count = 0;
+	licenses_t *lic;
+
+	slurm_mutex_lock(&license_mutex);
+	if (license_list) {
+		lic = list_find_first(
+			license_list, _license_find_rec, name);
+
+		if (lic)
+			count = lic->total;
+	}
+	slurm_mutex_unlock(&license_mutex);
+
+	return count;
+}
+
+/* Get how many of a given license are in a list */
+extern uint32_t license_get_total_cnt_from_list(List license_list, char *name)
+{
+	licenses_t *license_entry;
+	uint32_t total = 0;
+
+	license_entry = list_find_first(
+		license_list, _license_find_rec, name);
+
+	if(license_entry)
+		total = license_entry->total;
+	return total;
+}
+
+/* node_read should be locked before coming in here
+ * returns tres_str built from the license_list.
+ */
+extern char *licenses_2_tres_str(List license_list)
+{
+	ListIterator itr;
+	slurmdb_tres_rec_t *tres_rec;
+	licenses_t *license_entry;
+	char *tres_str = NULL;
+	static bool first_run = 1;
+	static slurmdb_tres_rec_t tres_req;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+
+	if (!license_list)
+		return NULL;
+
+	/* we only need to init this once */
+	if (first_run) {
+		first_run = 0;
+		memset(&tres_req, 0, sizeof(slurmdb_tres_rec_t));
+		tres_req.type = "license";
+	}
+
+	assoc_mgr_lock(&locks);
+	itr = list_iterator_create(license_list);
+	while ((license_entry = list_next(itr))) {
+		tres_req.name = license_entry->name;
+		if (!(tres_rec = assoc_mgr_find_tres_rec(&tres_req)))
+			continue; /* not tracked */
+
+		if (slurmdb_find_tres_count_in_string(
+			    tres_str, tres_rec->id))
+			continue; /* already handled */
+
+		/* New license */
+		xstrfmtcat(tres_str, "%s%u=%"PRIu64,
+			   tres_str ? "," : "",
+			   tres_rec->id, (uint64_t)license_entry->total);
+	}
+	list_iterator_destroy(itr);
+	assoc_mgr_unlock(&locks);
+
+	return tres_str;
+}
+
+extern void license_set_job_tres_cnt(List license_list,
+				     uint64_t *tres_cnt,
+				     bool locked)
+{
+	ListIterator itr;
+	licenses_t *license_entry;
+	static bool first_run = 1;
+	static slurmdb_tres_rec_t tres_rec;
+	int tres_pos;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+
+	/* we only need to init this once */
+	if (first_run) {
+		first_run = 0;
+		memset(&tres_rec, 0, sizeof(slurmdb_tres_rec_t));
+		tres_rec.type = "license";
+	}
+
+	if (!license_list || !tres_cnt)
+		return;
+
+	if (!locked)
+		assoc_mgr_lock(&locks);
+
+	itr = list_iterator_create(license_list);
+	while ((license_entry = list_next(itr))) {
+		tres_rec.name = license_entry->name;
+		if ((tres_pos = assoc_mgr_find_tres_pos(
+			     &tres_rec, locked)) != -1)
+			tres_cnt[tres_pos] = (uint64_t)license_entry->total;
+	}
+	list_iterator_destroy(itr);
+
+	if (!locked)
+		assoc_mgr_unlock(&locks);
+
+	return;
+}
+
 /* pack_license()
  *
  * Encode the licenses data structure.
diff --git a/src/slurmctld/licenses.h b/src/slurmctld/licenses.h
index f5ce97814..8e08a88a7 100644
--- a/src/slurmctld/licenses.h
+++ b/src/slurmctld/licenses.h
@@ -114,11 +114,13 @@ extern int license_job_test(struct job_record *job_ptr, time_t when);
 /*
  * license_validate - Test if the required licenses are valid
  * IN licenses - required licenses
+ * OUT tres_req_cnt - appropriate counts for each requested license
  * OUT valid - true if required licenses are valid and a sufficient number
  *             are configured (though not necessarily available now)
  * RET license_list, must be destroyed by caller
  */
-extern List license_validate(char *licenses, bool *valid);
+extern List license_validate(char *licenses,
+			     uint64_t *tres_req_cnt, bool *valid);
 
 /*
  * license_list_overlap - test if there is any overlap in licenses
@@ -137,4 +139,32 @@ get_all_license_info(char **buffer_ptr,
                      uid_t uid,
                      uint16_t protocol_version);
 
+/*
+ * get_total_license_cnt - return the total configured count of a given license name.
+ *
+ */
+extern uint32_t get_total_license_cnt(char *name);
+
+/*
+ * license_get_total_cnt_from_list - Return total count of named licenses
+ * IN license_list - list containing licenses_t records
+ * IN name - name of the license
+ * RET total number of licenses of the given name in the list
+ */
+extern uint32_t license_get_total_cnt_from_list(List license_list, char *name);
+
+/* node_read should be locked before coming in here
+ * returns tres_str of the license_list.
+ */
+extern char *licenses_2_tres_str(List license_list);
+
+
+/* node_read should be locked before coming in here
+ * fills in tres_cnt of the license_list.
+ * locked - true if the caller already holds the assoc_mgr tres read lock.
+ */
+extern void license_set_job_tres_cnt(List license_list,
+				     uint64_t *tres_cnt,
+				     bool locked);
+
 #endif /* !_LICENSES_H */
diff --git a/src/slurmctld/locks.h b/src/slurmctld/locks.h
index ad38eb715..dce75c92a 100644
--- a/src/slurmctld/locks.h
+++ b/src/slurmctld/locks.h
@@ -82,6 +82,10 @@
  * For example: no lock on the config data structure, read lock on the job
  * and node data structures, and write lock on the partition data structure
  * would look like this: "{ NO_LOCK, READ_LOCK, READ_LOCK, WRITE_LOCK }"
+ *
+ * NOTE: When using lock_slurmctld() and assoc_mgr_lock(), always call
+ * lock_slurmctld() before calling assoc_mgr_lock() and then call
+ * assoc_mgr_unlock() before calling unlock_slurmctld().
 \*****************************************************************************/
 
 #ifndef _SLURMCTLD_LOCKS_H
diff --git a/src/slurmctld/node_mgr.c b/src/slurmctld/node_mgr.c
index afb68c497..0cea57247 100644
--- a/src/slurmctld/node_mgr.c
+++ b/src/slurmctld/node_mgr.c
@@ -60,13 +60,14 @@
 #include "src/common/macros.h"
 #include "src/common/pack.h"
 #include "src/common/parse_time.h"
-#include "src/common/xassert.h"
-#include "src/common/xstring.h"
+#include "src/common/power.h"
 #include "src/common/node_select.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/slurm_acct_gather_energy.h"
 #include "src/common/slurm_ext_sensors.h"
+#include "src/common/xassert.h"
+#include "src/common/xstring.h"
 #include "src/slurmctld/agent.h"
 #include "src/slurmctld/front_end.h"
 #include "src/slurmctld/locks.h"
@@ -84,9 +85,8 @@
 #define _DEBUG		0
 #define MAX_RETRIES	10
 
-/* Change NODE_STATE_VERSION value when changing the state save format */
+/* No need to change we always pack SLURM_PROTOCOL_VERSION */
 #define NODE_STATE_VERSION        "PROTOCOL_VERSION"
-#define NODE_2_6_STATE_VERSION    "VER006"	/* SLURM version 2.6 */
 
 /* Global variables */
 bitstr_t *avail_node_bitmap = NULL;	/* bitmap of available nodes */
@@ -332,12 +332,8 @@ extern int load_all_node_state ( bool state_only )
 
 	safe_unpackstr_xmalloc( &ver_str, &name_len, buffer);
 	debug3("Version string in node_state header is %s", ver_str);
-	if (ver_str) {
-		if (!strcmp(ver_str, NODE_STATE_VERSION))
-			safe_unpack16(&protocol_version, buffer);
-		else if (!strcmp(ver_str, NODE_2_6_STATE_VERSION))
-			protocol_version = SLURM_2_6_PROTOCOL_VERSION;
-	}
+	if (ver_str && !xstrcmp(ver_str, NODE_STATE_VERSION))
+		safe_unpack16(&protocol_version, buffer);
 
 	if (protocol_version == (uint16_t)NO_VAL) {
 		error("*****************************************************");
@@ -355,7 +351,7 @@ extern int load_all_node_state ( bool state_only )
 		uint32_t base_state;
 		uint16_t base_state2;
 		uint16_t obj_protocol_version = (uint16_t)NO_VAL;
-		if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+		if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 			safe_unpackstr_xmalloc (&comm_name, &name_len, buffer);
 			safe_unpackstr_xmalloc (&node_name, &name_len, buffer);
 			safe_unpackstr_xmalloc (&node_hostname,
@@ -383,7 +379,7 @@ extern int load_all_node_state ( bool state_only )
 				    protocol_version) != SLURM_SUCCESS)
 				goto unpack_error;
 			base_state = node_state & NODE_STATE_BASE;
-		} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
 			safe_unpackstr_xmalloc (&comm_name, &name_len, buffer);
 			safe_unpackstr_xmalloc (&node_name, &name_len, buffer);
 			safe_unpackstr_xmalloc (&node_hostname,
@@ -391,13 +387,17 @@ extern int load_all_node_state ( bool state_only )
 			safe_unpackstr_xmalloc (&reason,    &name_len, buffer);
 			safe_unpackstr_xmalloc (&features,  &name_len, buffer);
 			safe_unpackstr_xmalloc (&gres,      &name_len, buffer);
-			safe_unpack16 (&node_state2,  buffer);
+			safe_unpackstr_xmalloc (&cpu_spec_list,
+							    &name_len, buffer);
+			safe_unpack32 (&node_state,  buffer);
 			safe_unpack16 (&cpus,        buffer);
 			safe_unpack16 (&boards,     buffer);
 			safe_unpack16 (&sockets,     buffer);
 			safe_unpack16 (&cores,       buffer);
+			safe_unpack16 (&core_spec_cnt, buffer);
 			safe_unpack16 (&threads,     buffer);
 			safe_unpack32 (&real_memory, buffer);
+			safe_unpack32 (&mem_spec_limit, buffer);
 			safe_unpack32 (&tmp_disk,    buffer);
 			safe_unpack32 (&reason_uid,  buffer);
 			safe_unpack_time (&reason_time, buffer);
@@ -406,13 +406,8 @@ extern int load_all_node_state ( bool state_only )
 				    &gres_list, buffer, node_name,
 				    protocol_version) != SLURM_SUCCESS)
 				goto unpack_error;
-			base_state2 = node_state2 & NODE_STATE_BASE;
-			/* First decode the quantities as 16 bit
-			 * then assign to 32 bit.
-			 */
-			node_state = node_state2;
-			base_state = base_state2;
-		} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+			base_state = node_state & NODE_STATE_BASE;
+		} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 			safe_unpackstr_xmalloc (&comm_name, &name_len, buffer);
 			safe_unpackstr_xmalloc (&node_name, &name_len, buffer);
 			safe_unpackstr_xmalloc (&node_hostname,
@@ -430,6 +425,7 @@ extern int load_all_node_state ( bool state_only )
 			safe_unpack32 (&tmp_disk,    buffer);
 			safe_unpack32 (&reason_uid,  buffer);
 			safe_unpack_time (&reason_time, buffer);
+			safe_unpack16 (&obj_protocol_version, buffer);
 			if (gres_plugin_node_state_unpack(
 				    &gres_list, buffer, node_name,
 				    protocol_version) != SLURM_SUCCESS)
@@ -536,7 +532,7 @@ extern int load_all_node_state ( bool state_only )
 					node_ptr->cpu_spec_list =
 						cpu_spec_list;
 					cpu_spec_list = NULL;/* Nothing */
-							     /*to free */
+							     /* to free */
 					node_ptr->threads       = threads;
 					node_ptr->real_memory   = real_memory;
 					node_ptr->mem_spec_limit =
@@ -631,10 +627,7 @@ extern int load_all_node_state ( bool state_only )
 
 		xfree(features);
 		xfree(gres);
-		if (gres_list) {
-			list_destroy(gres_list);
-			gres_list = NULL;
-		}
+		FREE_NULL_LIST(gres_list);
 		xfree (comm_name);
 		xfree (node_hostname);
 		xfree (node_name);
@@ -656,10 +649,7 @@ unpack_error:
 	error_code = EFAULT;
 	xfree(features);
 	xfree(gres);
-	if (gres_list) {
-		list_destroy(gres_list);
-		gres_list = NULL;
-	}
+	FREE_NULL_LIST(gres_list);
 	xfree(comm_name);
 	xfree(node_hostname);
 	xfree(node_name);
@@ -854,8 +844,9 @@ extern void pack_one_node (char **buffer_ptr, int *buffer_size,
 				hidden = true;
 			else if (IS_NODE_FUTURE(node_ptr))
 				hidden = true;
-			else if (_is_cloud_hidden(node_ptr))
-				hidden = false;
+//			Don't hide the node if explicitly requested by name
+//			else if (_is_cloud_hidden(node_ptr))
+//				hidden = true;
 			else if ((node_ptr->name == NULL) ||
 				 (node_ptr->name[0] == '\0'))
 				hidden = true;
@@ -888,8 +879,8 @@ extern void pack_one_node (char **buffer_ptr, int *buffer_size,
  * IN/OUT buffer - buffer where data is placed, pointers automatically updated
  * IN protocol_version - slurm protocol version of client
  * IN show_flags -
- * NOTE: if you make any changes here be sure to make the corresponding
- *	changes to load_node_config in api/node_info.c
+ * NOTE: if you make any changes here be sure to make the corresponding changes
+ * 	to _unpack_node_info_members() in common/slurm_protocol_pack.c
  * NOTE: READ lock_slurmctld config before entry
  */
 static void _pack_node (struct node_record *dump_node_ptr, Buf buffer,
@@ -897,7 +888,7 @@ static void _pack_node (struct node_record *dump_node_ptr, Buf buffer,
 {
 	char *gres_drain = NULL, *gres_used = NULL;
 
-	if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		packstr (dump_node_ptr->name, buffer);
 		packstr (dump_node_ptr->node_hostname, buffer);
 		packstr (dump_node_ptr->comm_name, buffer);
@@ -928,11 +919,13 @@ static void _pack_node (struct node_record *dump_node_ptr, Buf buffer,
 #ifndef HAVE_BG
 		}
 #endif
+		pack32(dump_node_ptr->owner, buffer);
 		pack16(dump_node_ptr->core_spec_cnt, buffer);
 		pack32(dump_node_ptr->mem_spec_limit, buffer);
 		packstr(dump_node_ptr->cpu_spec_list, buffer);
 
 		pack32(dump_node_ptr->cpu_load, buffer);
+		pack32(dump_node_ptr->free_mem, buffer);
 		pack32(dump_node_ptr->config_ptr->weight, buffer);
 		pack32(dump_node_ptr->reason_uid, buffer);
 
@@ -967,16 +960,19 @@ static void _pack_node (struct node_record *dump_node_ptr, Buf buffer,
 		acct_gather_energy_pack(dump_node_ptr->energy, buffer,
 					protocol_version);
 		ext_sensors_data_pack(dump_node_ptr->ext_sensors, buffer,
-					protocol_version);
-	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+				      protocol_version);
+		power_mgmt_data_pack(dump_node_ptr->power, buffer,
+				     protocol_version);
+
+		packstr(dump_node_ptr->tres_fmt_str,buffer);
+	} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
 		packstr (dump_node_ptr->name, buffer);
 		packstr (dump_node_ptr->node_hostname, buffer);
 		packstr (dump_node_ptr->comm_name, buffer);
-		pack16  (dump_node_ptr->node_state, buffer);
+		pack32(dump_node_ptr->node_state, buffer);
 		packstr (dump_node_ptr->version, buffer);
 		/* On a bluegene system always use the regular node
-		* infomation not what is in the config_ptr.
-		*/
+		* information not what is in the config_ptr. */
 #ifndef HAVE_BG
 		if (slurmctld_conf.fast_schedule) {
 			/* Only data from config_record used for scheduling */
@@ -1000,6 +996,10 @@ static void _pack_node (struct node_record *dump_node_ptr, Buf buffer,
 #ifndef HAVE_BG
 		}
 #endif
+		pack16(dump_node_ptr->core_spec_cnt, buffer);
+		pack32(dump_node_ptr->mem_spec_limit, buffer);
+		packstr(dump_node_ptr->cpu_spec_list, buffer);
+
 		pack32(dump_node_ptr->cpu_load, buffer);
 		pack32(dump_node_ptr->config_ptr->weight, buffer);
 		pack32(dump_node_ptr->reason_uid, buffer);
@@ -1017,17 +1017,31 @@ static void _pack_node (struct node_record *dump_node_ptr, Buf buffer,
 			packstr(dump_node_ptr->gres, buffer);
 		else
 			packstr(dump_node_ptr->config_ptr->gres, buffer);
+
+		/* Gathering GRES details is slow, so don't gather them by default */
+		if (show_flags & SHOW_DETAIL) {
+			gres_drain =
+				gres_get_node_drain(dump_node_ptr->gres_list);
+			gres_used  =
+				gres_get_node_used(dump_node_ptr->gres_list);
+		}
+		packstr(gres_drain, buffer);
+		packstr(gres_used, buffer);
+		xfree(gres_drain);
+		xfree(gres_used);
+
 		packstr(dump_node_ptr->os, buffer);
 		packstr(dump_node_ptr->reason, buffer);
 		acct_gather_energy_pack(dump_node_ptr->energy, buffer,
 					protocol_version);
 		ext_sensors_data_pack(dump_node_ptr->ext_sensors, buffer,
 					protocol_version);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		packstr (dump_node_ptr->name, buffer);
 		packstr (dump_node_ptr->node_hostname, buffer);
 		packstr (dump_node_ptr->comm_name, buffer);
 		pack16  (dump_node_ptr->node_state, buffer);
+		packstr (dump_node_ptr->version, buffer);
 		/* On a bluegene system always use the regular node
 		* infomation not what is in the config_ptr.
 		*/
@@ -1409,20 +1423,22 @@ int update_node ( update_node_msg_t * update_node_msg )
 					info("power down request repeating "
 					     "for node %s", this_node_name);
 				} else {
-					if (IS_NODE_DOWN(node_ptr) &&
-					    IS_NODE_POWER_UP(node_ptr)) {
-						/* Abort power up request */
+					if (IS_NODE_DOWN(node_ptr)) {
+						/* Abort any power up request */
 						node_ptr->node_state &=
 							(~NODE_STATE_POWER_UP);
-#ifndef HAVE_FRONT_END
-						node_ptr->node_state |=
-							NODE_STATE_NO_RESPOND;
-#endif
 						node_ptr->node_state =
 							NODE_STATE_IDLE |
 							(node_ptr->node_state &
 							 NODE_STATE_FLAGS);
+					} else {
+						node_ptr->node_state &=
+							(~NODE_STATE_POWER_SAVE);
 					}
+#ifndef HAVE_FRONT_END
+					node_ptr->node_state |=
+						NODE_STATE_NO_RESPOND;
+#endif
 					node_ptr->last_idle = 0;
 					info("powering down node %s",
 					     this_node_name);
@@ -1512,18 +1528,6 @@ int update_node ( update_node_msg_t * update_node_msg )
 	return error_code;
 }
 
-/* variation of strcmp that accepts NULL pointers */
-static int _strcmp(char *str1, char *str2)
-{
-	if (!str1 && !str2)
-		return 0;
-	if (str1 && !str2)
-		return 1;
-	if (!str1 && str2)
-		return -1;
-	return strcmp(str1, str2);
-}
-
 /*
  * restore_node_features - Make node and config (from slurm.conf) fields
  *	consistent for Features, Gres and Weight
@@ -1551,7 +1555,7 @@ extern void restore_node_features(int recover)
 			}
 		}
 
-		if (_strcmp(node_ptr->config_ptr->feature, node_ptr->features)){
+		if (xstrcmp(node_ptr->config_ptr->feature, node_ptr->features)){
 			error("Node %s Features(%s) differ from slurm.conf",
 			      node_ptr->name, node_ptr->features);
 			if (recover == 2) {
@@ -2224,6 +2228,11 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg,
 		node_ptr->cpu_load_time = now;
 		last_node_update = now;
 	}
+	if (node_ptr->free_mem != reg_msg->free_mem) {
+		node_ptr->free_mem = reg_msg->free_mem;
+		node_ptr->free_mem_time = now;
+		last_node_update = now;
+	}
 
 	if (IS_NODE_NO_RESPOND(node_ptr)) {
 		if (IS_NODE_POWER_UP(node_ptr))
@@ -2368,6 +2377,8 @@ extern int validate_node_specs(slurm_node_registration_status_msg_t *reg_msg,
 			}
 			last_node_update = now;
 		}
+		if (IS_NODE_IDLE(node_ptr))
+			node_ptr->owner = NO_VAL;
 
 		select_g_update_node_config(node_inx);
 		select_g_update_node_state(node_ptr);
@@ -2709,6 +2720,8 @@ extern int validate_nodes_via_front_end(
 				      "with %u running jobs",
 				      node_ptr->name, reg_msg->job_count);
 			}
+			if (IS_NODE_IDLE(node_ptr))
+				node_ptr->owner = NO_VAL;
 
 			select_g_update_node_config(i);
 			select_g_update_node_state(node_ptr);
@@ -2807,11 +2820,21 @@ static void _node_did_resp(struct node_record *node_ptr)
 {
 	int node_inx;
 	uint32_t node_flags;
-	time_t now = time(NULL);
+	time_t boot_req_time, now = time(NULL);
 
 	node_inx = node_ptr - node_record_table_ptr;
 	/* Do not change last_response value (in the future) for nodes being
 	 *  booted so unexpected reboots are recognized */
+	if (IS_NODE_DOWN(node_ptr) &&
+	    !xstrcmp(node_ptr->reason, "Scheduled reboot")) {
+		boot_req_time = node_ptr->last_response -
+				slurm_get_resume_timeout();
+		if (node_ptr->boot_time < boot_req_time) {
+			debug("Still waiting for boot of node %s",
+			      node_ptr->name);
+			return;
+		}
+	}
 	if (node_ptr->last_response < now)
 		node_ptr->last_response = now;
 	if (IS_NODE_NO_RESPOND(node_ptr) || IS_NODE_POWER_UP(node_ptr)) {
@@ -2940,13 +2963,17 @@ void node_not_resp (char *name, time_t msg_time, slurm_msg_type_t resp_type)
 		      node_ptr->name);
 		return;
 	}
-	node_ptr->node_state |= NODE_STATE_NO_RESPOND;
+
+	if (!IS_NODE_POWER_SAVE(node_ptr)) {
+		node_ptr->node_state |= NODE_STATE_NO_RESPOND;
 #ifdef HAVE_FRONT_END
-	last_front_end_update = time(NULL);
+		last_front_end_update = time(NULL);
 #else
-	last_node_update = time(NULL);
-	bit_clear (avail_node_bitmap, (node_ptr - node_record_table_ptr));
+		last_node_update = time(NULL);
+		bit_clear (avail_node_bitmap, (node_ptr - node_record_table_ptr));
 #endif
+	}
+
 	return;
 }
 
@@ -3170,6 +3197,11 @@ extern void make_node_alloc(struct node_record *node_ptr,
 		(node_ptr->no_share_job_cnt)++;
 	}
 
+	if (job_ptr->details && (job_ptr->details->whole_node == 2)) {
+		node_ptr->owner_job_cnt++;
+		node_ptr->owner = job_ptr->user_id;
+	}
+
 	node_flags = node_ptr->node_state & NODE_STATE_FLAGS;
 	node_ptr->node_state = NODE_STATE_ALLOCATED | node_flags;
 	xfree(node_ptr->reason);
@@ -3258,6 +3290,7 @@ static void _make_node_down(struct node_record *node_ptr, time_t event_time)
 	node_flags = node_ptr->node_state & NODE_STATE_FLAGS;
 	node_flags &= (~NODE_STATE_COMPLETING);
 	node_ptr->node_state = NODE_STATE_DOWN | node_flags;
+	node_ptr->owner = NO_VAL;
 	bit_clear (avail_node_bitmap, inx);
 	bit_clear (cg_node_bitmap,    inx);
 	bit_set   (idle_node_bitmap,  inx);
@@ -3274,7 +3307,7 @@ static void _make_node_down(struct node_record *node_ptr, time_t event_time)
 /*
  * make_node_idle - flag specified node as having finished with a job
  * IN node_ptr - pointer to node reporting job completion
- * IN job_ptr - pointer to job that just completed
+ * IN job_ptr - pointer to job that just completed or NULL if not applicable
  */
 void make_node_idle(struct node_record *node_ptr,
 		    struct job_record *job_ptr)
@@ -3285,7 +3318,7 @@ void make_node_idle(struct node_record *node_ptr,
 	bitstr_t *node_bitmap = NULL;
 	char jbuf[JBUFSIZ];
 
-	if (job_ptr) { /* Specific job completed */
+	if (job_ptr) {
 		if (job_ptr->node_bitmap_cg)
 			node_bitmap = job_ptr->node_bitmap_cg;
 		else
@@ -3300,7 +3333,7 @@ void make_node_idle(struct node_record *node_ptr,
 		last_job_update = now;
 		bit_clear(node_bitmap, inx);
 
-		job_update_cpu_cnt(job_ptr, inx);
+		job_update_tres_cnt(job_ptr, inx);
 
 		if (job_ptr->node_cnt) {
 			/* Clean up the JOB_COMPLETING flag
@@ -3315,7 +3348,7 @@ void make_node_idle(struct node_record *node_ptr,
 				cleanup_completing(job_ptr);
 		} else {
 			error("%s: %s node_cnt underflow",
-			      __func__, jobid2str(job_ptr, jbuf));
+			      __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)));
 		}
 
 		if (IS_JOB_SUSPENDED(job_ptr)) {
@@ -3324,7 +3357,8 @@ void make_node_idle(struct node_record *node_ptr,
 				(node_ptr->sus_job_cnt)--;
 			else
 				error("%s: %s node %s sus_job_cnt underflow",
-				      __func__, jobid2str(job_ptr, jbuf),
+				      __func__, jobid2str(job_ptr, jbuf,
+							  sizeof(jbuf)),
 				      node_ptr->name);
 		} else if (IS_JOB_RUNNING(job_ptr)) {
 			/* Remove node from running job */
@@ -3332,15 +3366,21 @@ void make_node_idle(struct node_record *node_ptr,
 				(node_ptr->run_job_cnt)--;
 			else
 				error("%s: %s node %s run_job_cnt underflow",
-				      __func__, jobid2str(job_ptr, jbuf),
+				      __func__, jobid2str(job_ptr, jbuf,
+							  sizeof(jbuf)),
 				      node_ptr->name);
 		} else {
-			if (node_ptr->comp_job_cnt)
+			if (node_ptr->comp_job_cnt) {
 				(node_ptr->comp_job_cnt)--;
-			else
-				error("%s: %s node %s run_job_cnt underflow",
-				      __func__, jobid2str(job_ptr, jbuf),
+			} else if (IS_NODE_DOWN(node_ptr)) {
+				/* We were not expecting this response,
+				 * ignore it */
+			} else {
+				error("%s: %s node %s comp_job_cnt underflow",
+				      __func__, jobid2str(job_ptr, jbuf,
+							  sizeof(jbuf)),
 				      node_ptr->name);
+			}
 			if (node_ptr->comp_job_cnt > 0)
 				return;		/* More jobs completing */
 		}
@@ -3349,11 +3389,20 @@ void make_node_idle(struct node_record *node_ptr,
 	if (node_ptr->comp_job_cnt == 0) {
 		node_ptr->node_state &= (~NODE_STATE_COMPLETING);
 		bit_clear(cg_node_bitmap, inx);
+		if (IS_NODE_IDLE(node_ptr))
+			node_ptr->owner = NO_VAL;
+	}
+
+	if (job_ptr && job_ptr->details && (job_ptr->details->whole_node == 2)){
+		if (--node_ptr->owner_job_cnt == 0)
+			node_ptr->owner = NO_VAL;
 	}
+
 	node_flags = node_ptr->node_state & NODE_STATE_FLAGS;
 	if (IS_NODE_DOWN(node_ptr)) {
 		debug3("%s: %s node %s being left DOWN",
-		       __func__, jobid2str(job_ptr, jbuf), node_ptr->name);
+		       __func__, jobid2str(job_ptr, jbuf,
+					   sizeof(jbuf)), node_ptr->name);
 		return;
 	}
 	bit_set(up_node_bitmap, inx);
@@ -3369,7 +3418,8 @@ void make_node_idle(struct node_record *node_ptr,
 		node_ptr->node_state = NODE_STATE_IDLE | node_flags;
 		bit_set(idle_node_bitmap, inx);
 		debug3("%s: %s node %s is DRAINED",
-		       __func__, jobid2str(job_ptr, jbuf), node_ptr->name);
+		       __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)),
+		       node_ptr->name);
 		node_ptr->last_idle = now;
 		trigger_node_drained(node_ptr);
 		clusteracct_storage_g_node_down(acct_db_conn,
@@ -3496,6 +3546,26 @@ extern void reset_node_load(char *node_name, uint32_t cpu_load)
 		node_ptr->cpu_load_time = now;
 		last_node_update = now;
 	} else
-		error("is_node_resp unable to find node %s", node_name);
+		error("reset_node_load unable to find node %s", node_name);
+#endif
+}
+
+/* Reset a node's free memory value */
+extern void reset_node_free_mem(char *node_name, uint32_t free_mem)
+{
+#ifdef HAVE_FRONT_END
+	return;
+#else
+	struct node_record *node_ptr;
+
+	node_ptr = find_node_record(node_name);
+	if (node_ptr) {
+		time_t now = time(NULL);
+		node_ptr->free_mem = free_mem;
+		node_ptr->free_mem_time = now;
+		last_node_update = now;
+	} else
+		error("reset_node_free_mem unable to find node %s", node_name);
 #endif
 }
+
diff --git a/src/slurmctld/node_scheduler.c b/src/slurmctld/node_scheduler.c
index bd5930eb7..3ea00e6e9 100644
--- a/src/slurmctld/node_scheduler.c
+++ b/src/slurmctld/node_scheduler.c
@@ -4,7 +4,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
- *  Portions Copyright (C) 2010 SchedMD <http://www.schedmd.com>.
+ *  Portions Copyright (C) 2010-2015 SchedMD <http://www.schedmd.com>.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
@@ -63,18 +63,22 @@
 #include "src/common/hostlist.h"
 #include "src/common/list.h"
 #include "src/common/node_select.h"
+#include "src/common/power.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/slurm_priority.h"
+#include "src/common/slurm_topology.h"
 #include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
 
 #include "src/slurmctld/acct_policy.h"
 #include "src/slurmctld/agent.h"
+#include "src/slurmctld/burst_buffer.h"
 #include "src/slurmctld/front_end.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/licenses.h"
 #include "src/slurmctld/node_scheduler.h"
+#include "src/slurmctld/powercapping.h"
 #include "src/slurmctld/preempt.h"
 #include "src/slurmctld/proc_req.h"
 #include "src/slurmctld/reservation.h"
@@ -101,11 +105,16 @@ struct node_set {		/* set of nodes with same configuration */
 
 static int  _build_node_list(struct job_record *job_ptr,
 			     struct node_set **node_set_pptr,
-			     int *node_set_size, char **err_msg);
+			     int *node_set_size, char **err_msg,
+			     bool test_only);
 static int  _fill_in_gres_fields(struct job_record *job_ptr);
+static void _filter_by_node_owner(struct job_record *job_ptr,
+				  bitstr_t *usable_node_mask);
 static void _filter_nodes_in_set(struct node_set *node_set_ptr,
 				 struct job_details *detail_ptr,
 				 char **err_msg);
+
+static bool _first_array_task(struct job_record *job_ptr);
 static void _launch_prolog(struct job_record *job_ptr);
 static int  _match_feature(char *seek, struct node_set *node_set_ptr);
 static int _nodes_in_sets(bitstr_t *req_bitmap,
@@ -572,7 +581,7 @@ extern void deallocate_nodes(struct job_record *job_ptr, bool timeout,
 				build_cg_bitmap(job_ptr);
 			}
 			bit_clear(job_ptr->node_bitmap_cg, i);
-			job_update_cpu_cnt(job_ptr, i);
+			job_update_tres_cnt(job_ptr, i);
 			/* node_cnt indicates how many nodes we are waiting
 			 * to get epilog complete messages from, so do not
 			 * count down nodes. NOTE: The job's node_cnt will not
@@ -725,6 +734,51 @@ _resolve_shared_status(struct job_record *job_ptr, uint16_t part_max_share,
 	}
 }
 
+/* Remove nodes from consideration for allocation based upon "ownership" by
+ * other users
+ * job_ptr IN - Job to be scheduled
+ * usable_node_mask IN/OUT - Nodes available for use by this job's user
+ */
+static void _filter_by_node_owner(struct job_record *job_ptr,
+				  bitstr_t *usable_node_mask)
+{
+	ListIterator job_iterator;
+	struct job_record *job_ptr2;
+	struct node_record *node_ptr;
+	int i;
+
+	if ((job_ptr->details->whole_node == 0) &&
+	    (job_ptr->part_ptr->flags & PART_FLAG_EXCLUSIVE_USER))
+		job_ptr->details->whole_node = 2;
+
+	if (job_ptr->details->whole_node == 2) {
+		/* Need to remove all nodes allocated to any active job from
+		 * any other user */
+		job_iterator = list_iterator_create(job_list);
+		while ((job_ptr2 = (struct job_record *)
+				   list_next(job_iterator))) {
+			if (IS_JOB_PENDING(job_ptr2) ||
+			    IS_JOB_COMPLETED(job_ptr2) ||
+			    (job_ptr->user_id == job_ptr2->user_id) ||
+			    !job_ptr2->node_bitmap)
+				continue;
+			bit_not(job_ptr2->node_bitmap);
+			bit_and(usable_node_mask, job_ptr2->node_bitmap);
+			bit_not(job_ptr2->node_bitmap);
+		}
+		list_iterator_destroy(job_iterator);
+		return;
+	}
+
+	/* Need to filter out any nodes exclusively allocated to other users */
+	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
+		if ((node_ptr->owner != NO_VAL) &&
+		    (node_ptr->owner != job_ptr->user_id))
+			bit_clear(usable_node_mask, i);
+	}
+}
+
 /*
  * If the job has required feature counts, then accumulate those
  * required resources using multiple calls to _pick_best_nodes()
@@ -752,6 +806,8 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 	List preemptee_candidates = NULL;
 	bool has_xand = false;
 	bool resv_overlap = false;
+	uint32_t powercap;
+	int layout_power;
 
 	/* Mark nodes reserved for other jobs as off limit for this job.
 	 * If the job has a reservation, we've already limited the contents
@@ -796,6 +852,10 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 		FREE_NULL_BITMAP(resv_bitmap);
 	}
 
+	if (!save_avail_node_bitmap)
+		save_avail_node_bitmap = bit_copy(avail_node_bitmap);
+	_filter_by_node_owner(job_ptr, avail_node_bitmap);
+
 	/* save job and request state */
 	saved_min_nodes = min_nodes;
 	saved_req_nodes = req_nodes;
@@ -857,10 +917,7 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 			req_nodes = feat_ptr->count;
 			job_ptr->details->min_nodes = feat_ptr->count;
 			job_ptr->details->min_cpus = feat_ptr->count;
-			if (*preemptee_job_list) {
-				list_destroy(*preemptee_job_list);
-				*preemptee_job_list = NULL;
-			}
+			FREE_NULL_LIST(*preemptee_job_list);
 			error_code = _pick_best_nodes(tmp_node_set_ptr,
 					tmp_node_set_size, &feature_bitmap,
 					job_ptr, part_ptr, min_nodes,
@@ -940,6 +997,7 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 		job_ptr->details->min_cpus = saved_min_cpus;
 		job_ptr->details->min_nodes = saved_job_min_nodes;
 	}
+
 #if 0
 {
 	char *tmp_str = bitmap2node_name(job_ptr->details->req_node_bitmap);
@@ -951,10 +1009,7 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 #endif
 	xfree(tmp_node_set_ptr);
 	if (error_code == SLURM_SUCCESS) {
-		if (*preemptee_job_list) {
-			list_destroy(*preemptee_job_list);
-			*preemptee_job_list = NULL;
-		}
+		FREE_NULL_LIST(*preemptee_job_list);
 		error_code = _pick_best_nodes(node_set_ptr, node_set_size,
 				select_bitmap, job_ptr, part_ptr, min_nodes,
 				max_nodes, req_nodes, test_only,
@@ -969,8 +1024,165 @@ _get_req_features(struct node_set *node_set_ptr, int node_set_size,
 	xfree(tmp_str);
 }
 #endif
-	if (preemptee_candidates)
-		list_destroy(preemptee_candidates);
+
+	/* 
+	 * PowerCapping logic : now that we have the list of selected nodes
+	 * we need to ensure that using these nodes respects the amount of 
+	 * available power as returned by the capping logic.
+	 * If it is not the case, then ensure that the job stays pending 
+	 * by returning a relevant error code : 
+	 *  ESLURM_POWER_NOT_AVAIL : if the current capping is blocking
+	 *  ESLURM_POWER_RESERVED  : if the current capping and the power
+	 *                           reservations are blocking
+	 */
+	if (error_code != SLURM_SUCCESS) {
+		debug3("powercapping: checking job %u : skipped, not eligible",
+		       job_ptr->job_id);
+	} else if ((powercap = powercap_get_cluster_current_cap()) == 0) {
+		debug3("powercapping: checking job %u : skipped, capping "
+		       "disabled", job_ptr->job_id);
+	} else if ((layout_power = which_power_layout()) == 0) {
+		debug3("powercapping disabled %d", which_power_layout());
+	} else if (!power_layout_ready()){
+		debug3("powercapping:checking job %u : skipped, problems with"
+		       "layouts, capping disabled", job_ptr->job_id);
+	} else {
+		uint32_t min_watts, max_watts, job_cap, tmp_pcap_cpu_freq = 0;
+		uint32_t cur_max_watts, tmp_max_watts = 0;
+		uint32_t cpus_per_node, *tmp_max_watts_dvfs = NULL;
+		bitstr_t *tmp_bitmap;
+		int k = 1, *allowed_freqs;
+		float ratio = 0;
+
+		/*
+		 * get current powercapping logic state (min,cur,max)
+		 */
+		max_watts = powercap_get_cluster_max_watts();
+		min_watts = powercap_get_cluster_min_watts();
+		cur_max_watts = powercap_get_cluster_current_max_watts();
+		/* in case of INFINITE cap, set it to max watts as it
+		 * is done in the powercapping logic */
+		if (powercap == INFINITE)
+			powercap = max_watts;
+
+		/* build a temporary bitmap using idle_node_bitmap and
+		 * remove the selected bitmap from this bitmap.
+		 * Then compute the amount of power required for such a
+		 * configuration to check that it is allowed by the current
+		 * power cap */
+		tmp_bitmap = bit_copy(idle_node_bitmap);
+		bit_not(*select_bitmap);
+		bit_and(tmp_bitmap, *select_bitmap);
+		bit_not(*select_bitmap);
+		if (layout_power == 1)
+			tmp_max_watts = 
+				 powercap_get_node_bitmap_maxwatts(tmp_bitmap);
+		else if (layout_power == 2) {
+			allowed_freqs = 
+				 powercap_get_job_nodes_numfreq(*select_bitmap,
+					  job_ptr->details->cpu_freq_min,
+					  job_ptr->details->cpu_freq_max);
+			if (allowed_freqs[0] != 0) {
+				tmp_max_watts_dvfs =
+					xmalloc(sizeof(uint32_t) *
+						(allowed_freqs[0]+1));
+			}
+			cpus_per_node = job_ptr->details->min_cpus /
+					job_ptr->details->min_nodes;
+			tmp_max_watts =
+				powercap_get_node_bitmap_maxwatts_dvfs(
+					tmp_bitmap, *select_bitmap,
+					tmp_max_watts_dvfs, allowed_freqs,
+					cpus_per_node);
+		}			
+		bit_free(tmp_bitmap);
+
+		/* get job cap based on power reservation on the system,
+		 * if no reservation matches the job characteristics, the
+		 * powercap or the max_watts will be returned.
+		 * select the return code based on the impact of
+		 * reservations on the failure */
+		job_cap = powercap_get_job_cap(job_ptr, time(NULL));
+		
+		if ((layout_power == 1) ||
+		    ((layout_power == 2) && (allowed_freqs[0] == 0))) {
+			if (tmp_max_watts > job_cap) {
+				FREE_NULL_BITMAP(*select_bitmap);
+				if ((job_cap < powercap) &&
+			    		 (tmp_max_watts <= powercap))
+					error_code = ESLURM_POWER_RESERVED;
+				else
+					error_code = ESLURM_POWER_NOT_AVAIL;
+			}
+		} else if (layout_power == 2) {
+			if (((tmp_max_watts > job_cap) ||
+			    (job_cap < powercap) ||
+			    (powercap < max_watts)) && (tmp_max_watts_dvfs)) {
+		
+			/* Calculation of the CPU Frequency to set for the job:
+		 	 * The optimal CPU Frequency is the maximum allowed 
+		 	 * CPU Frequency that all idle nodes could run so that 
+		 	 * the total power consumption of the cluster is below 
+		 	 * the powercap value. Since the number of idle nodes 
+		 	 * may change in every schedule the optimal CPU 
+		 	 * Frequency may also change from one job to another.*/
+				k = powercap_get_job_optimal_cpufreq(job_cap, 
+							  allowed_freqs);
+				while ((tmp_max_watts_dvfs[k] > job_cap) && 
+				       (k < allowed_freqs[0] + 1)) {
+					k++;
+				}
+				if (k == allowed_freqs[0] + 1) {
+					if ((job_cap < powercap) &&
+					    (tmp_max_watts_dvfs[k] <= powercap)){
+						error_code =
+							ESLURM_POWER_RESERVED;
+					} else {
+						error_code =
+							ESLURM_POWER_NOT_AVAIL;
+					}
+				} else {
+					tmp_max_watts = tmp_max_watts_dvfs[k];
+					tmp_pcap_cpu_freq =
+						powercap_get_cpufreq(
+							*select_bitmap,
+							allowed_freqs[k]);
+				}
+	
+				job_ptr->details->cpu_freq_min = tmp_pcap_cpu_freq;
+				job_ptr->details->cpu_freq_max = tmp_pcap_cpu_freq;
+				job_ptr->details->cpu_freq_gov = 0x10;
+
+			/* Since we alter the DVFS of jobs we need to deal with
+			 * their time_limit to calculate the extra time needed 
+			 * for them to complete the execution without getting 
+			 * killed there should be a parameter to declare the 
+			 * effect of cpu frequency on execution time for the 
+			 * moment we use time_limit and time_min
+			 * This has to be done to allow backfilling */
+
+				ratio = (1 + (float)allowed_freqs[k] /
+					     (float)allowed_freqs[-1]);
+				if ((job_ptr->time_limit != INFINITE) &&
+				    (job_ptr->time_limit != NO_VAL))
+					job_ptr->time_limit = (ratio *
+						  job_ptr->time_limit);
+				if ((job_ptr->time_min != INFINITE) &&
+				    (job_ptr->time_min != NO_VAL))
+					job_ptr->time_min = (ratio *
+						  job_ptr->time_min);
+			}
+		}
+		xfree(tmp_max_watts_dvfs);
+
+		debug2("powercapping: checking job %u : min=%u cur=%u "
+		       "[new=%u] [resv_cap=%u] [cap=%u] max=%u : %s",
+		       job_ptr->job_id, min_watts, cur_max_watts,
+		       tmp_max_watts, job_cap, powercap, max_watts,
+		       slurm_strerror(error_code));
+	}
+
+	FREE_NULL_LIST(preemptee_candidates);
 
 	/* restore job's initial required node bitmap */
 	FREE_NULL_BITMAP(job_ptr->details->req_node_bitmap);
@@ -1114,7 +1326,8 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 		if (total_nodes > max_nodes) {	/* exceeds node limit */
 			return ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE;
 		}
-		if (job_ptr->details->core_spec != (uint16_t) NO_VAL) {
+		if ((job_ptr->details->core_spec != (uint16_t) NO_VAL) &&
+		    ((job_ptr->details->core_spec & CORE_SPEC_THREAD) == 0)) {
 			i = bit_ffs(job_ptr->details->req_node_bitmap);
 			if (i >= 0) {
 				node_ptr = node_record_table_ptr + i;
@@ -1270,8 +1483,14 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 				avail_bitmap = bit_copy(node_set_ptr[i].
 							my_bitmap);
 			}
-			avail_nodes = bit_set_count(avail_bitmap);
+
 			tried_sched = false;	/* need to test these nodes */
+			if ((switch_record_cnt > 1) &&
+			    ((i+1) < node_set_size)) {
+				/* Keep accumulating to optimize topology */
+				continue;
+			}
+
 			if ((shared || preempt_flag)	&&
 			    ((i+1) < node_set_size)	&&
 			    (node_set_ptr[i].weight ==
@@ -1281,6 +1500,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 				continue;
 			}
 
+			avail_nodes = bit_set_count(avail_bitmap);
 			if ((avail_nodes  < min_nodes)	||
 			    ((avail_nodes >= min_nodes)	&&
 			     (avail_nodes < req_nodes)	&&
@@ -1290,10 +1510,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 			/* NOTE: select_g_job_test() is destructive of
 			 * avail_bitmap, so save a backup copy */
 			backup_bitmap = bit_copy(avail_bitmap);
-			if (*preemptee_job_list) {
-				list_destroy(*preemptee_job_list);
-				*preemptee_job_list = NULL;
-			}
+			FREE_NULL_LIST(*preemptee_job_list);
 			if (job_ptr->details->req_node_bitmap == NULL)
 				bit_and(avail_bitmap, avail_node_bitmap);
 			/* Only preempt jobs when all possible nodes are being
@@ -1349,10 +1566,7 @@ _pick_best_nodes(struct node_set *node_set_ptr, int node_set_size,
 		    ((job_ptr->details->req_node_bitmap == NULL) ||
 		     bit_super_set(job_ptr->details->req_node_bitmap,
 				   avail_bitmap))) {
-			if (*preemptee_job_list) {
-				list_destroy(*preemptee_job_list);
-				*preemptee_job_list = NULL;
-			}
+			FREE_NULL_LIST(*preemptee_job_list);
 			pick_code = select_g_job_test(job_ptr, avail_bitmap,
 						      min_nodes, max_nodes,
 						      req_nodes,
@@ -1454,11 +1668,12 @@ static void _preempt_jobs(List preemptee_job_list, bool kill_pending,
 	ListIterator iter;
 	struct job_record *job_ptr;
 	uint16_t mode;
-	int job_cnt = 0, rc = SLURM_SUCCESS;
+	int job_cnt = 0, rc;
 	checkpoint_msg_t ckpt_msg;
 
 	iter = list_iterator_create(preemptee_job_list);
 	while ((job_ptr = (struct job_record *) list_next(iter))) {
+		rc = SLURM_SUCCESS;
 		mode = slurm_job_preempt_mode(job_ptr);
 		if (mode == PREEMPT_MODE_CANCEL) {
 			job_cnt++;
@@ -1505,9 +1720,10 @@ static void _preempt_jobs(List preemptee_job_list, bool kill_pending,
 			   (slurm_get_preempt_mode() & PREEMPT_MODE_GANG)) {
 			debug("preempted job %u suspended by gang scheduler",
 			      job_ptr->job_id);
-		} else {
-			error("Invalid preempt_mode: %u", mode);
-			rc = SLURM_ERROR;
+		} else if (mode == PREEMPT_MODE_OFF) {
+			error("%s: Invalid preempt_mode %u for job %u",
+			      __func__, mode, job_ptr->job_id);
+			continue;
 		}
 
 		if (rc != SLURM_SUCCESS) {
@@ -1517,12 +1733,13 @@ static void _preempt_jobs(List preemptee_job_list, bool kill_pending,
 				continue;
 
 			rc = job_signal(job_ptr->job_id, SIGKILL, 0, 0, true);
-			if (rc == SLURM_SUCCESS)
-				info("preempted job %u had to be killed",
-				     job_ptr->job_id);
-			else {
-				info("preempted job %u kill failure %s",
-				     job_ptr->job_id, slurm_strerror(rc));
+			if (rc == SLURM_SUCCESS) {
+				info("%s: preempted job %u had to be killed",
+				     __func__, job_ptr->job_id);
+			} else {
+				info("%s: preempted job %u kill failure %s",
+				     __func__, job_ptr->job_id,
+				     slurm_strerror(rc));
 			}
 		}
 	}
@@ -1532,6 +1749,29 @@ static void _preempt_jobs(List preemptee_job_list, bool kill_pending,
 		*error_code = ESLURM_NODES_BUSY;
 }
 
+/* Return true if this job record is 
+ * 1) not a job array OR
+ * 2) the first task of a job array to begin execution */
+static bool _first_array_task(struct job_record *job_ptr)
+{
+	struct job_record *meta_job_ptr;
+
+	if (job_ptr->array_task_id == NO_VAL)
+		return true;
+
+	meta_job_ptr = find_job_record(job_ptr->array_job_id);
+	if (!meta_job_ptr || !meta_job_ptr->array_recs) {
+		error("%s: Could not find meta job record for job %u",
+		      __func__, job_ptr->array_job_id);
+		return true;
+	}
+	if ((meta_job_ptr->array_recs->tot_run_tasks == 1) &&	/* This task */
+	    (meta_job_ptr->array_recs->tot_comp_tasks == 0))
+		return true;
+
+	return false;
+}
+
 /*
  * select_nodes - select and allocate nodes to a specific job
  * IN job_ptr - pointer to the job record
@@ -1568,8 +1808,9 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 	bool configuring = false;
 	List preemptee_job_list = NULL;
 	slurmdb_qos_rec_t *qos_ptr = NULL;
-	slurmdb_association_rec_t *assoc_ptr = NULL;
+	slurmdb_assoc_rec_t *assoc_ptr = NULL;
 	uint32_t selected_node_cnt = NO_VAL;
+	uint64_t tres_req_cnt[slurmctld_tres_cnt];
 
 	xassert(job_ptr);
 	xassert(job_ptr->magic == JOB_MAGIC);
@@ -1579,7 +1820,7 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 
 	part_ptr = job_ptr->part_ptr;
 	qos_ptr = (slurmdb_qos_rec_t *)job_ptr->qos_ptr;
-	assoc_ptr = (slurmdb_association_rec_t *)job_ptr->assoc_ptr;
+	assoc_ptr = (slurmdb_assoc_rec_t *)job_ptr->assoc_ptr;
 
 	/* identify partition */
 	if (part_ptr == NULL) {
@@ -1624,7 +1865,7 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 
 	/* build sets of usable nodes based upon their configuration */
 	error_code = _build_node_list(job_ptr, &node_set_ptr, &node_set_size,
-				      err_msg);
+				      err_msg, test_only);
 	if (error_code)
 		return error_code;
 
@@ -1646,7 +1887,7 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 	/*    job_ptr->details->max_nodes, part_ptr->max_nodes); */
 
 	/* On BlueGene systems don't adjust the min/max node limits
-	   here.  We are working on midplane values. */
+	 * here.  We are working on midplane values. */
 	if (qos_ptr && (qos_ptr->flags & QOS_FLAG_PART_MIN_NODE))
 		min_nodes = job_ptr->details->min_nodes;
 	else
@@ -1661,8 +1902,7 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 				part_ptr->max_nodes);
 
 	/* Don't call functions in MIN/MAX it will result in the
-	   function being called multiple times.
-	*/
+	 * function being called multiple times. */
 	acct_max_nodes = acct_policy_get_max_nodes(job_ptr, &wait_reason);
 	max_nodes = MIN(max_nodes, acct_max_nodes);
 
@@ -1678,7 +1918,8 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 	}
 
 	max_nodes = MIN(max_nodes, 500000);	/* prevent overflows */
-	if (!job_ptr->limit_set_max_nodes && job_ptr->details->max_nodes)
+	if (!job_ptr->limit_set.tres[TRES_ARRAY_NODE] &&
+	    job_ptr->details->max_nodes)
 		req_nodes = max_nodes;
 	else
 		req_nodes = min_nodes;
@@ -1727,13 +1968,24 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 		job_ptr->node_cnt_wag = selected_node_cnt;
 	}
 
+	memcpy(tres_req_cnt, job_ptr->tres_req_cnt, sizeof(tres_req_cnt));
+	tres_req_cnt[TRES_ARRAY_CPU] =
+		(uint64_t)(job_ptr->total_cpus ?
+			   job_ptr->total_cpus : job_ptr->details->min_cpus);
+	tres_req_cnt[TRES_ARRAY_MEM] = job_get_tres_mem(
+		job_ptr->details->pn_min_memory,
+		tres_req_cnt[TRES_ARRAY_CPU],
+		selected_node_cnt);
+	tres_req_cnt[TRES_ARRAY_NODE] = (uint64_t)selected_node_cnt;
+
+	gres_set_job_tres_cnt(job_ptr->gres_list,
+			      selected_node_cnt,
+			      tres_req_cnt,
+			      false);
+
 	if (!test_only && (error_code == SLURM_SUCCESS)
 	    && (selected_node_cnt != NO_VAL)
-	    && !acct_policy_job_runnable_post_select(
-		    job_ptr, selected_node_cnt,
-		    job_ptr->total_cpus ? job_ptr->total_cpus
-		    : job_ptr->details->min_cpus,
-		    job_ptr->details->pn_min_memory)) {
+	    && !acct_policy_job_runnable_post_select(job_ptr, tres_req_cnt)) {
 		error_code = ESLURM_ACCOUNTING_POLICY;
 		goto cleanup;
 	}
@@ -1794,10 +2046,20 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 		} else if ((job_ptr->state_reason == WAIT_BLOCK_MAX_ERR) ||
 			   (job_ptr->state_reason == WAIT_BLOCK_D_ACTION)) {
 			/* state_reason was already setup */
+		} else if ((job_ptr->state_reason == WAIT_HELD) &&
+			   (job_ptr->priority == 0)) {
+			/* Held by select plugin due to some failure */
 		} else {
-			job_ptr->state_reason = WAIT_RESOURCES;
+			if (error_code == ESLURM_POWER_NOT_AVAIL)
+				job_ptr->state_reason = WAIT_POWER_NOT_AVAIL;
+			else if (error_code == ESLURM_POWER_RESERVED)
+				job_ptr->state_reason = WAIT_POWER_RESERVED;
+			else
+				job_ptr->state_reason = WAIT_RESOURCES;
 			xfree(job_ptr->state_desc);
-			if (error_code == ESLURM_NODES_BUSY)
+			if ((error_code == ESLURM_NODES_BUSY) ||
+			    (error_code == ESLURM_POWER_NOT_AVAIL) ||
+			    (error_code == ESLURM_POWER_RESERVED))
 				slurm_sched_g_job_is_pending();
 		}
 		goto cleanup;
@@ -1817,8 +2079,8 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 	xfree(job_ptr->sched_nodes);
 	job_ptr->exit_code = 0;
 	gres_plugin_job_clear(job_ptr->gres_list);
-	step_list_purge(job_ptr);
-	job_ptr->step_list = list_create(NULL);
+	if (!job_ptr->step_list)
+		job_ptr->step_list = list_create(NULL);
 
 	job_ptr->node_bitmap = select_bitmap;
 
@@ -1834,11 +2096,25 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 			job_ptr->time_limit = part_ptr->default_time;
 		else
 			job_ptr->time_limit = part_ptr->max_time;
+		job_ptr->limit_set.time = 1;
 	}
 
 	job_end_time_reset(job_ptr);
 
 	job_array_post_sched(job_ptr);
+	if (bb_g_job_begin(job_ptr) != SLURM_SUCCESS) {
+		/* Leave job queued, something is hosed */
+		error_code = ESLURM_INVALID_BURST_BUFFER_REQUEST;
+		error("bb_g_job_begin(%u): %s", job_ptr->job_id,
+		      slurm_strerror(error_code));
+		job_ptr->start_time = 0;
+		job_ptr->time_last_active = 0;
+		job_ptr->end_time = 0;
+		job_ptr->node_bitmap = NULL;
+		job_ptr->priority = 0;
+		job_ptr->state_reason = WAIT_HELD;
+		goto cleanup;
+	}
 	if (select_g_job_begin(job_ptr) != SLURM_SUCCESS) {
 		/* Leave job queued, something is hosed */
 		error("select_g_job_begin(%u): %m", job_ptr->job_id);
@@ -1880,12 +2156,17 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 		error("select_g_select_nodeinfo_set(%u): %m", job_ptr->job_id);
 		/* not critical ... by now */
 	}
-	if (job_ptr->mail_type & MAIL_JOB_BEGIN)
+	if ((job_ptr->mail_type & MAIL_JOB_BEGIN) && _first_array_task(job_ptr))
 		mail_job_info(job_ptr, MAIL_JOB_BEGIN);
 
 	slurmctld_diag_stats.jobs_started++;
+
+	/* job_set_alloc_tres has to be done before acct_policy_job_begin */
+	job_set_alloc_tres(job_ptr, false);
 	acct_policy_job_begin(job_ptr);
 
+	job_claim_resv(job_ptr);
+
 	/* Update the job_record's gres and gres_alloc fields with
 	 * strings representing the amount of each GRES type requested
 	 *  and allocated. */
@@ -1900,25 +2181,26 @@ extern int select_nodes(struct job_record *job_ptr, bool test_only,
 	 * job if happening right away.  If the job has already
 	 * become eligible and registered in the db then the start
 	 * message. */
-	if (!with_slurmdbd || (with_slurmdbd && job_ptr->db_index))
-		jobacct_storage_g_job_start(acct_db_conn, job_ptr);
+	jobacct_storage_job_start_direct(acct_db_conn, job_ptr);
 
 	prolog_slurmctld(job_ptr);
 	reboot_job_nodes(job_ptr);
 	slurm_sched_g_newalloc(job_ptr);
+	power_g_job_start(job_ptr);
 
 	/* Request asynchronous launch of a prolog for a
 	 * non batch job. */
-	if (slurmctld_conf.prolog_flags & PROLOG_FLAG_ALLOC)
+	if ((slurmctld_conf.prolog_flags & PROLOG_FLAG_ALLOC) ||
+	    (slurmctld_conf.prolog_flags & PROLOG_FLAG_CONTAIN))
 		_launch_prolog(job_ptr);
 
       cleanup:
 	if (job_ptr->array_recs && job_ptr->array_recs->task_id_bitmap &&
-	    !IS_JOB_STARTED(job_ptr)) {
+	    !IS_JOB_STARTED(job_ptr) &&
+	    (bit_ffs(job_ptr->array_recs->task_id_bitmap) != -1)) {
 		job_ptr->array_task_id = NO_VAL;
 	}
-	if (preemptee_job_list)
-		list_destroy(preemptee_job_list);
+	FREE_NULL_LIST(preemptee_job_list);
 	if (select_node_bitmap)
 		*select_node_bitmap = select_bitmap;
 	else
@@ -1949,6 +2231,8 @@ static void _launch_prolog(struct job_record *job_ptr)
 {
 	prolog_launch_msg_t *prolog_msg_ptr;
 	agent_arg_t *agent_arg_ptr;
+	job_resources_t *job_resrcs_ptr;
+	slurm_cred_arg_t cred_arg;
 #ifndef HAVE_FRONT_END
 	int i;
 #endif
@@ -1966,7 +2250,8 @@ static void _launch_prolog(struct job_record *job_ptr)
 	prolog_msg_ptr = xmalloc(sizeof(prolog_launch_msg_t));
 
 	/* Locks: Write job */
-	if (!(slurmctld_conf.prolog_flags & PROLOG_FLAG_NOHOLD))
+	if ((slurmctld_conf.prolog_flags & PROLOG_FLAG_ALLOC) &&
+	    !(slurmctld_conf.prolog_flags & PROLOG_FLAG_NOHOLD))
 		job_ptr->state_reason = WAIT_PROLOG;
 
 	prolog_msg_ptr->job_id = job_ptr->job_id;
@@ -1982,6 +2267,35 @@ static void _launch_prolog(struct job_record *job_ptr)
 	prolog_msg_ptr->spank_job_env = xduparray(job_ptr->spank_job_env_size,
 						  job_ptr->spank_job_env);
 
+	xassert(job_ptr->job_resrcs);
+	job_resrcs_ptr = job_ptr->job_resrcs;
+	memset(&cred_arg, 0, sizeof(slurm_cred_arg_t));
+	cred_arg.jobid               = job_ptr->job_id;
+	cred_arg.stepid              = SLURM_EXTERN_CONT;
+	cred_arg.uid                 = job_ptr->user_id;
+	cred_arg.job_core_spec       = job_ptr->details->core_spec;
+	cred_arg.job_gres_list       = job_ptr->gres_list;
+	cred_arg.job_nhosts          = job_ptr->job_resrcs->nhosts;
+	cred_arg.job_constraints     = job_ptr->details->features;
+	cred_arg.job_mem_limit       = job_ptr->details->pn_min_memory;
+	cred_arg.step_mem_limit      = job_ptr->details->pn_min_memory;
+	cred_arg.cores_per_socket    = job_resrcs_ptr->cores_per_socket;
+	cred_arg.job_core_bitmap     = job_resrcs_ptr->core_bitmap;
+	cred_arg.step_core_bitmap    = job_resrcs_ptr->core_bitmap;
+	cred_arg.sockets_per_node    = job_resrcs_ptr->sockets_per_node;
+	cred_arg.sock_core_rep_count = job_resrcs_ptr->sock_core_rep_count;
+
+#ifdef HAVE_FRONT_END
+	xassert(job_ptr->batch_host);
+	cred_arg.job_hostlist    = job_ptr->batch_host;
+	cred_arg.step_hostlist   = job_ptr->batch_host;
+#else
+	cred_arg.job_hostlist    = job_ptr->job_resrcs->nodes;
+	cred_arg.step_hostlist   = job_ptr->job_resrcs->nodes;
+#endif
+	prolog_msg_ptr->cred = slurm_cred_create(slurmctld_config.cred_ctx,
+						 &cred_arg,
+						 SLURM_15_08_PROTOCOL_VERSION);
 	agent_arg_ptr = (agent_arg_t *) xmalloc(sizeof(agent_arg_t));
 	agent_arg_ptr->retry = 0;
 #ifdef HAVE_FRONT_END
@@ -2029,7 +2343,7 @@ static int _fill_in_gres_fields(struct job_record *job_ptr)
 	char *subtok, *sublast = NULL;
 	char *req_config  = job_ptr->gres;
 	char *tmp_str;
-	uint32_t ngres_req;
+	uint64_t ngres_req;
 	int      rv = SLURM_SUCCESS;
 
 	/* First build the GRES requested field. */
@@ -2064,11 +2378,11 @@ static int _fill_in_gres_fields(struct job_record *job_ptr)
 			 * GRES type but don't find a quantity for it,
 			 * we simply write ":0" for the quantity.
 			 */
-			if (ngres_req == NO_VAL)
+			if (ngres_req == NO_VAL64)
 				ngres_req = 0;
 
 			/* Append value to the gres string. */
-			snprintf(buf, sizeof(buf), "%s%s:%u",
+			snprintf(buf, sizeof(buf), "%s%s:%"PRIu64,
 				 prefix, subtok,
 				 ngres_req * job_ptr->node_cnt);
 
@@ -2078,7 +2392,7 @@ static int _fill_in_gres_fields(struct job_record *job_ptr)
 				prefix = ",";
 			if (slurmctld_conf.debug_flags & DEBUG_FLAG_GRES) {
 				debug("(%s:%d) job id:%u -- ngres_req:"
-				      "%u, gres_req substring = (%s)",
+				      "%"PRIu64", gres_req substring = (%s)",
 				      THIS_FILE, __LINE__,
 				      job_ptr->job_id, ngres_req, buf);
 			}
@@ -2320,11 +2634,12 @@ static int _no_reg_nodes(void)
  * OUT node_set_pptr - list of node sets which could be used for the job
  * OUT node_set_size - number of node_set entries
  * OUT err_msg - error message for job, caller must xfree
+ * IN  test_only - true if only testing if job can be started at some point
  * RET error code
  */
 static int _build_node_list(struct job_record *job_ptr,
 			    struct node_set **node_set_pptr,
-			    int *node_set_size, char **err_msg)
+			    int *node_set_size, char **err_msg, bool test_only)
 {
 	int adj_cpus, i, node_set_inx, power_cnt, rc;
 	struct node_set *node_set_ptr;
@@ -2376,6 +2691,7 @@ static int _build_node_list(struct job_record *job_ptr,
 	}
 	if ((job_ptr->details->min_nodes == 0) &&
 	    (job_ptr->details->max_nodes == 0)) {
+		FREE_NULL_BITMAP(usable_node_mask);
 		*node_set_pptr = NULL;
 		*node_set_size = 0;
 		return SLURM_SUCCESS;
@@ -2517,8 +2833,8 @@ static int _build_node_list(struct job_record *job_ptr,
 
 	if (node_set_inx == 0) {
 		rc = ESLURM_REQUESTED_NODE_CONFIG_UNAVAILABLE;
-		info("No nodes satisfy job %u requirements in partition %s",
-		     job_ptr->job_id, job_ptr->part_ptr->name);
+		info("%s: No nodes satisfy job %u requirements in partition %s",
+		     __func__, job_ptr->job_id, job_ptr->part_ptr->name);
 		xfree(node_set_ptr);
 		xfree(job_ptr->state_desc);
 		if (job_ptr->resv_name) {
@@ -2899,7 +3215,7 @@ extern void re_kill_job(struct job_record *job_ptr)
 				    (!bit_test(job_ptr->node_bitmap_cg, i)))
 					continue;
 				bit_clear(job_ptr->node_bitmap_cg, i);
-				job_update_cpu_cnt(job_ptr, i);
+				job_update_tres_cnt(job_ptr, i);
 				if (node_ptr->comp_job_cnt)
 					(node_ptr->comp_job_cnt)--;
 				if ((job_ptr->node_cnt > 0) &&
@@ -2929,7 +3245,7 @@ extern void re_kill_job(struct job_record *job_ptr)
 		} else if (IS_NODE_DOWN(node_ptr)) {
 			/* Consider job already completed */
 			bit_clear(job_ptr->node_bitmap_cg, i);
-			job_update_cpu_cnt(job_ptr, i);
+			job_update_tres_cnt(job_ptr, i);
 			if (node_ptr->comp_job_cnt)
 				(node_ptr->comp_job_cnt)--;
 			if ((job_ptr->node_cnt > 0) &&
diff --git a/src/slurmctld/partition_mgr.c b/src/slurmctld/partition_mgr.c
index ef5a931ff..648f5aa69 100644
--- a/src/slurmctld/partition_mgr.c
+++ b/src/slurmctld/partition_mgr.c
@@ -60,6 +60,7 @@
 #include "src/common/pack.h"
 #include "src/common/uid.h"
 #include "src/common/xstring.h"
+#include "src/common/assoc_mgr.h"
 
 #include "src/slurmctld/groups.h"
 #include "src/slurmctld/locks.h"
@@ -69,11 +70,12 @@
 #include "src/slurmctld/sched_plugin.h"
 #include "src/slurmctld/slurmctld.h"
 #include "src/slurmctld/state_save.h"
+#include "src/slurmctld/licenses.h"
+#include "src/slurmctld/burst_buffer.h"
 
 
-/* Change PART_STATE_VERSION value when changing the state save format */
+/* No need to change we always pack SLURM_PROTOCOL_VERSION */
 #define PART_STATE_VERSION        "PROTOCOL_VERSION"
-#define PART_2_6_STATE_VERSION    "VER004"	/* SLURM version 2.6 */
 
 /* Global variables */
 struct part_record default_part;	/* default configuration values */
@@ -83,7 +85,6 @@ struct part_record *default_part_loc = NULL; /* default partition location */
 time_t last_part_update;	/* time of last update to partition records */
 uint16_t part_max_priority = 0;         /* max priority in all partitions */
 
-static int    _build_part_bitmap(struct part_record *part_ptr);
 static int    _delete_part_record(char *name);
 static void   _dump_part_state(struct part_record *part_ptr,
 			       Buf buffer);
@@ -94,9 +95,62 @@ static int    _open_part_state_file(char **state_file);
 static int    _uid_list_size(uid_t * uid_list_ptr);
 static void   _unlink_free_nodes(bitstr_t *old_bitmap,
 			struct part_record *part_ptr);
+static uid_t *_remove_duplicate_uids(uid_t *);
+static int _uid_cmp(const void *, const void *);
+
+static void _calc_part_tres(struct part_record *part_ptr)
+{
+	int i, j;
+	struct node_record *node_ptr;
+	uint64_t *tres_cnt;
+
+	xfree(part_ptr->tres_cnt);
+	xfree(part_ptr->tres_fmt_str);
+	part_ptr->tres_cnt = xmalloc(sizeof(uint64_t) * slurmctld_tres_cnt);
+	tres_cnt = part_ptr->tres_cnt;
+
+	/* sum up nodes' tres in the partition. */
+	node_ptr = node_record_table_ptr;
+	for (i = 0; i < node_record_count; i++, node_ptr++) {
+		if (!bit_test(part_ptr->node_bitmap, i))
+			continue;
+		for (j = 0; j < slurmctld_tres_cnt; j++)
+			tres_cnt[j] += node_ptr->tres_cnt[j];
+	}
+
+	/* Just to be safe, lets do this after the node TRES ;) */
+	tres_cnt[TRES_ARRAY_NODE] = part_ptr->total_nodes;
+
+	/* grab the global tres and stick in partition for easy reference. */
+	for(i = 0; i < slurmctld_tres_cnt; i++) {
+		slurmdb_tres_rec_t *tres_rec = assoc_mgr_tres_array[i];
+
+		if (!strcasecmp(tres_rec->type, "bb") ||
+		    !strcasecmp(tres_rec->type, "license"))
+			tres_cnt[i] = tres_rec->count;
+	}
+
+	part_ptr->tres_fmt_str =
+		assoc_mgr_make_tres_str_from_array(part_ptr->tres_cnt, 0, true);
+}
+
+/*
+ * Calculate and populate the number of tres' for all partitions.
+ */
+extern void set_partition_tres()
+{
+	struct part_record * part_ptr;
+	ListIterator itr = list_iterator_create(part_list);
+
+	while ((part_ptr = (struct part_record *)list_next(itr))) {
+		xfree(part_ptr->tres_cnt);
+		_calc_part_tres(part_ptr);
+	}
+	list_iterator_destroy(itr);
+}
 
 /*
- * _build_part_bitmap - update the total_cpus, total_nodes, and node_bitmap
+ * build_part_bitmap - update the total_cpus, total_nodes, and node_bitmap
  *	for the specified partition, also reset the partition pointers in
  *	the node back to this partition.
  * IN part_ptr - pointer to the partition
@@ -105,7 +159,7 @@ static void   _unlink_free_nodes(bitstr_t *old_bitmap,
  * NOTE: this does not report nodes defined in more than one partition. this
  *	is checked only upon reading the configuration file, not on an update
  */
-static int _build_part_bitmap(struct part_record *part_ptr)
+extern int build_part_bitmap(struct part_record *part_ptr)
 {
 	char *this_node_name;
 	bitstr_t *old_bitmap;
@@ -114,6 +168,8 @@ static int _build_part_bitmap(struct part_record *part_ptr)
 
 	part_ptr->total_cpus = 0;
 	part_ptr->total_nodes = 0;
+	part_ptr->max_cpu_cnt = 0;
+	part_ptr->max_core_cnt = 0;
 
 	if (part_ptr->node_bitmap == NULL) {
 		part_ptr->node_bitmap = bit_alloc(node_record_count);
@@ -130,6 +186,12 @@ static int _build_part_bitmap(struct part_record *part_ptr)
 		return 0;
 	}
 
+	if (!strcmp(part_ptr->nodes, "ALL")) {
+		bit_nset(part_ptr->node_bitmap, 0, node_record_count - 1);
+		xfree(part_ptr->nodes);
+		part_ptr->nodes = bitmap2node_name(part_ptr->node_bitmap);
+		bit_nclear(part_ptr->node_bitmap, 0, node_record_count - 1);
+	}
 	if ((host_list = hostlist_create(part_ptr->nodes)) == NULL) {
 		FREE_NULL_BITMAP(old_bitmap);
 		error("hostlist_create error on %s, %m",
@@ -138,9 +200,9 @@ static int _build_part_bitmap(struct part_record *part_ptr)
 	}
 
 	while ((this_node_name = hostlist_shift(host_list))) {
-		node_ptr = find_node_record(this_node_name);
+		node_ptr = find_node_record_no_alias(this_node_name);
 		if (node_ptr == NULL) {
-			error("_build_part_bitmap: invalid node name %s",
+			error("build_part_bitmap: invalid node name %s",
 				this_node_name);
 			free(this_node_name);
 			FREE_NULL_BITMAP(old_bitmap);
@@ -148,10 +210,19 @@ static int _build_part_bitmap(struct part_record *part_ptr)
 			return ESLURM_INVALID_NODE_NAME;
 		}
 		part_ptr->total_nodes++;
-		if (slurmctld_conf.fast_schedule)
+		if (slurmctld_conf.fast_schedule) {
 			part_ptr->total_cpus += node_ptr->config_ptr->cpus;
-		else
+			part_ptr->max_cpu_cnt = MAX(part_ptr->max_cpu_cnt,
+					node_ptr->config_ptr->cpus);
+			part_ptr->max_core_cnt = MAX(part_ptr->max_core_cnt,
+					node_ptr->config_ptr->cores);
+		} else {
 			part_ptr->total_cpus += node_ptr->cpus;
+			part_ptr->max_cpu_cnt  = MAX(part_ptr->max_cpu_cnt,
+					node_ptr->cpus);
+			part_ptr->max_core_cnt = MAX(part_ptr->max_core_cnt,
+					node_ptr->cores);
+		}
 		node_ptr->part_cnt++;
 		xrealloc(node_ptr->part_pptr, (node_ptr->part_cnt *
 			sizeof(struct part_record *)));
@@ -254,7 +325,8 @@ struct part_record *create_part_record(void)
 
 	if (default_part.allow_qos) {
 		part_ptr->allow_qos = xstrdup(default_part.allow_qos);
-		qos_list_build(part_ptr->allow_qos, &part_ptr->allow_qos_bitstr);
+		qos_list_build(part_ptr->allow_qos,
+			       &part_ptr->allow_qos_bitstr);
 	} else
 		part_ptr->allow_qos = NULL;
 
@@ -271,6 +343,23 @@ struct part_record *create_part_record(void)
 	} else
 		part_ptr->deny_qos = NULL;
 
+	if (default_part.qos_char) {
+		slurmdb_qos_rec_t qos_rec;
+		xfree(part_ptr->qos_char);
+		part_ptr->qos_char = xstrdup(default_part.qos_char);
+
+		memset(&qos_rec, 0, sizeof(slurmdb_qos_rec_t));
+		qos_rec.name = part_ptr->qos_char;
+		if (assoc_mgr_fill_in_qos(
+			    acct_db_conn, &qos_rec, accounting_enforce,
+			    (slurmdb_qos_rec_t **)&part_ptr->qos_ptr, 0)
+		    != SLURM_SUCCESS) {
+			fatal("Partition %s has an invalid qos (%s), "
+			      "please check your configuration",
+			      part_ptr->name, qos_rec.name);
+		}
+	}
+
 	if (default_part.allow_alloc_nodes)
 		part_ptr->allow_alloc_nodes = xstrdup(default_part.
 						      allow_alloc_nodes);
@@ -435,6 +524,7 @@ static void _dump_part_state(struct part_record *part_ptr, Buf buffer)
 	packstr(part_ptr->allow_accounts, buffer);
 	packstr(part_ptr->allow_groups,  buffer);
 	packstr(part_ptr->allow_qos,     buffer);
+	packstr(part_ptr->qos_char,      buffer);
 	packstr(part_ptr->allow_alloc_nodes, buffer);
 	packstr(part_ptr->alternate,     buffer);
 	packstr(part_ptr->deny_accounts, buffer);
@@ -467,7 +557,7 @@ static int _open_part_state_file(char **state_file)
 	} else 	/* Success */
 		return state_fd;
 
-	error("NOTE: Trying backup state save file. Information may be lost!");
+	error("NOTE: Trying backup partition state save file. Information may be lost!");
 	xstrcat(*state_file, ".old");
 	state_fd = open(*state_file, O_RDONLY);
 	return state_fd;
@@ -483,8 +573,8 @@ int load_all_part_state(void)
 {
 	char *part_name = NULL, *nodes = NULL;
 	char *allow_accounts = NULL, *allow_groups = NULL, *allow_qos = NULL;
-	char *deny_accounts = NULL, *deny_qos = NULL;
-	char *state_file, *data = NULL;
+	char *deny_accounts = NULL, *deny_qos = NULL, *qos_char = NULL;
+	char *state_file = NULL, *data = NULL;
 	uint32_t max_time, default_time, max_nodes, min_nodes;
 	uint32_t max_cpus_per_node = INFINITE, grace_time = 0;
 	time_t time;
@@ -534,15 +624,10 @@ int load_all_part_state(void)
 
 	buffer = create_buf(data, data_size);
 
-	safe_unpackstr_xmalloc( &ver_str, &name_len, buffer);
+	safe_unpackstr_xmalloc(&ver_str, &name_len, buffer);
 	debug3("Version string in part_state header is %s", ver_str);
-	if (ver_str) {
-		if (!strcmp(ver_str, PART_STATE_VERSION)) {
-			safe_unpack16(&protocol_version, buffer);
-		} else if (!strcmp(ver_str, PART_2_6_STATE_VERSION)) {
-			protocol_version = SLURM_2_6_PROTOCOL_VERSION;
-		}
-	}
+	if (ver_str && !strcmp(ver_str, PART_STATE_VERSION))
+		safe_unpack16(&protocol_version, buffer);
 
 	if (protocol_version == (uint16_t)NO_VAL) {
 		error("**********************************************************");
@@ -556,7 +641,7 @@ int load_all_part_state(void)
 	safe_unpack_time(&time, buffer);
 
 	while (remaining_buf(buffer) > 0) {
-		if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 			safe_unpackstr_xmalloc(&part_name, &name_len, buffer);
 			safe_unpack32(&grace_time, buffer);
 			safe_unpack32(&max_time, buffer);
@@ -582,6 +667,8 @@ int load_all_part_state(void)
 					       &name_len, buffer);
 			safe_unpackstr_xmalloc(&allow_qos,
 					       &name_len, buffer);
+			safe_unpackstr_xmalloc(&qos_char,
+					       &name_len, buffer);
 			safe_unpackstr_xmalloc(&deny_accounts,
 					       &name_len, buffer);
 			safe_unpackstr_xmalloc(&deny_qos,
@@ -590,17 +677,18 @@ int load_all_part_state(void)
 					       &name_len, buffer);
 			safe_unpackstr_xmalloc(&alternate, &name_len, buffer);
 			safe_unpackstr_xmalloc(&nodes, &name_len, buffer);
-			if ((flags & PART_FLAG_DEFAULT_CLR) ||
-			    (flags & PART_FLAG_HIDDEN_CLR)  ||
-			    (flags & PART_FLAG_NO_ROOT_CLR) ||
+			if ((flags & PART_FLAG_DEFAULT_CLR)   ||
+			    (flags & PART_FLAG_EXC_USER_CLR)  ||
+			    (flags & PART_FLAG_HIDDEN_CLR)    ||
+			    (flags & PART_FLAG_NO_ROOT_CLR)   ||
 			    (flags & PART_FLAG_ROOT_ONLY_CLR) ||
-			    (flags & PART_FLAG_REQ_RESV_CLR) ||
+			    (flags & PART_FLAG_REQ_RESV_CLR)  ||
 			    (flags & PART_FLAG_LLN_CLR)) {
 				error("Invalid data for partition %s: flags=%u",
 				      part_name, flags);
 				error_code = EINVAL;
 			}
-		} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+		} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 			safe_unpackstr_xmalloc(&part_name, &name_len, buffer);
 			safe_unpack32(&grace_time, buffer);
 			safe_unpack32(&max_time, buffer);
@@ -620,17 +708,27 @@ int load_all_part_state(void)
 			safe_unpack16(&state_up, buffer);
 			safe_unpack16(&cr_type, buffer);
 
+			safe_unpackstr_xmalloc(&allow_accounts,
+					       &name_len, buffer);
 			safe_unpackstr_xmalloc(&allow_groups,
 					       &name_len, buffer);
+			safe_unpackstr_xmalloc(&allow_qos,
+					       &name_len, buffer);
+			safe_unpackstr_xmalloc(&deny_accounts,
+					       &name_len, buffer);
+			safe_unpackstr_xmalloc(&deny_qos,
+					       &name_len, buffer);
 			safe_unpackstr_xmalloc(&allow_alloc_nodes,
 					       &name_len, buffer);
 			safe_unpackstr_xmalloc(&alternate, &name_len, buffer);
 			safe_unpackstr_xmalloc(&nodes, &name_len, buffer);
-			if ((flags & PART_FLAG_DEFAULT_CLR) ||
-			    (flags & PART_FLAG_HIDDEN_CLR)  ||
-			    (flags & PART_FLAG_NO_ROOT_CLR) ||
+			if ((flags & PART_FLAG_DEFAULT_CLR)   ||
+			    (flags & PART_FLAG_EXC_USER_CLR)  ||
+			    (flags & PART_FLAG_HIDDEN_CLR)    ||
+			    (flags & PART_FLAG_NO_ROOT_CLR)   ||
 			    (flags & PART_FLAG_ROOT_ONLY_CLR) ||
-			    (flags & PART_FLAG_REQ_RESV_CLR)) {
+			    (flags & PART_FLAG_REQ_RESV_CLR)  ||
+			    (flags & PART_FLAG_LLN_CLR)) {
 				error("Invalid data for partition %s: flags=%u",
 				      part_name, flags);
 				error_code = EINVAL;
@@ -652,6 +750,7 @@ int load_all_part_state(void)
 			xfree(allow_accounts);
 			xfree(allow_groups);
 			xfree(allow_qos);
+			xfree(qos_char);
 			xfree(allow_alloc_nodes);
 			xfree(alternate);
 			xfree(deny_accounts);
@@ -703,6 +802,25 @@ int load_all_part_state(void)
 		xfree(part_ptr->allow_qos);
 		part_ptr->allow_qos      = allow_qos;
 		qos_list_build(part_ptr->allow_qos,&part_ptr->allow_qos_bitstr);
+
+		if (qos_char) {
+			slurmdb_qos_rec_t qos_rec;
+			xfree(part_ptr->qos_char);
+			part_ptr->qos_char = qos_char;
+
+			memset(&qos_rec, 0, sizeof(slurmdb_qos_rec_t));
+			qos_rec.name = part_ptr->qos_char;
+			if (assoc_mgr_fill_in_qos(
+				    acct_db_conn, &qos_rec, accounting_enforce,
+				    (slurmdb_qos_rec_t **)&part_ptr->qos_ptr, 0)
+			    != SLURM_SUCCESS) {
+				error("Partition %s has an invalid qos (%s), "
+				      "please check your configuration",
+				      part_ptr->name, qos_rec.name);
+				xfree(part_ptr->qos_char);
+			}
+		}
+
 		xfree(part_ptr->allow_alloc_nodes);
 		part_ptr->allow_alloc_nodes   = allow_alloc_nodes;
 		xfree(part_ptr->alternate);
@@ -768,10 +886,12 @@ extern List part_list_copy(List part_list_src)
 /*
  * get_part_list - find record for named partition(s)
  * IN name - partition name(s) in a comma separated list
+ * OUT err_part - The first invalid partition name.
  * RET List of pointers to the partitions or NULL if not found
  * NOTE: Caller must free the returned list
+ * NOTE: Caller must free err_part
  */
-extern List get_part_list(char *name)
+extern List get_part_list(char *name, char **err_part)
 {
 	struct part_record *part_ptr;
 	List job_part_list = NULL;
@@ -791,6 +911,10 @@ extern List get_part_list(char *name)
 			list_append(job_part_list, part_ptr);
 		} else {
 			FREE_NULL_LIST(job_part_list);
+			if (err_part) {
+				xfree(*err_part);
+				*err_part = xstrdup(token);
+			}
 			break;
 		}
 		token = strtok_r(NULL, ",", &last);
@@ -837,6 +961,8 @@ int init_part_conf(void)
 	accounts_list_free(&default_part.allow_account_array);
 	xfree(default_part.allow_groups);
 	xfree(default_part.allow_qos);
+	xfree(default_part.qos_char);
+	default_part.qos_ptr = NULL;
 	FREE_NULL_BITMAP(default_part.allow_qos_bitstr);
 	xfree(default_part.allow_uids);
 	xfree(default_part.allow_alloc_nodes);
@@ -892,6 +1018,8 @@ static void _list_delete_part(void *part_entry)
 	xfree(part_ptr->allow_uids);
 	xfree(part_ptr->allow_qos);
 	FREE_NULL_BITMAP(part_ptr->allow_qos_bitstr);
+	xfree(part_ptr->qos_char);
+	part_ptr->qos_ptr = NULL;
 	xfree(part_ptr->alternate);
 	xfree(part_ptr->deny_accounts);
 	accounts_list_free(&part_ptr->deny_account_array);
@@ -900,6 +1028,10 @@ static void _list_delete_part(void *part_entry)
 	xfree(part_ptr->name);
 	xfree(part_ptr->nodes);
 	FREE_NULL_BITMAP(part_ptr->node_bitmap);
+	xfree(part_ptr->billing_weights_str);
+	xfree(part_ptr->billing_weights);
+	xfree(part_ptr->tres_cnt);
+	xfree(part_ptr->tres_fmt_str);
 	xfree(part_entry);
 }
 
@@ -1032,7 +1164,7 @@ void pack_part(struct part_record *part_ptr, Buf buffer,
 {
 	uint32_t altered;
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		if (default_part_loc == part_ptr)
 			part_ptr->flags |= PART_FLAG_DEFAULT;
 		else
@@ -1063,12 +1195,15 @@ void pack_part(struct part_record *part_ptr, Buf buffer,
 		packstr(part_ptr->allow_groups, buffer);
 		packstr(part_ptr->allow_alloc_nodes, buffer);
 		packstr(part_ptr->allow_qos, buffer);
+		packstr(part_ptr->qos_char, buffer);
 		packstr(part_ptr->alternate, buffer);
 		packstr(part_ptr->deny_accounts, buffer);
 		packstr(part_ptr->deny_qos, buffer);
 		packstr(part_ptr->nodes, buffer);
 		pack_bit_fmt(part_ptr->node_bitmap, buffer);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+		packstr(part_ptr->billing_weights_str, buffer);
+		packstr(part_ptr->tres_fmt_str, buffer);
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		if (default_part_loc == part_ptr)
 			part_ptr->flags |= PART_FLAG_DEFAULT;
 		else
@@ -1095,9 +1230,13 @@ void pack_part(struct part_record *part_ptr, Buf buffer,
 		pack16(part_ptr->state_up, buffer);
 		pack16(part_ptr->cr_type, buffer);
 
+		packstr(part_ptr->allow_accounts, buffer);
 		packstr(part_ptr->allow_groups, buffer);
 		packstr(part_ptr->allow_alloc_nodes, buffer);
+		packstr(part_ptr->allow_qos, buffer);
 		packstr(part_ptr->alternate, buffer);
+		packstr(part_ptr->deny_accounts, buffer);
+		packstr(part_ptr->deny_qos, buffer);
 		packstr(part_ptr->nodes, buffer);
 		pack_bit_fmt(part_ptr->node_bitmap, buffer);
 	} else {
@@ -1237,6 +1376,16 @@ extern int update_part (update_part_msg_t * part_desc, bool create_flag)
 		part_ptr->flags &= (~PART_FLAG_NO_ROOT);
 	}
 
+	if (part_desc->flags & PART_FLAG_EXCLUSIVE_USER) {
+		info("update_part: setting exclusive_user for partition %s",
+		     part_desc->name);
+		part_ptr->flags |= PART_FLAG_EXCLUSIVE_USER;
+	} else if (part_desc->flags & PART_FLAG_EXC_USER_CLR) {
+		info("update_part: clearing exclusive_user for partition %s",
+		     part_desc->name);
+		part_ptr->flags &= (~PART_FLAG_EXCLUSIVE_USER);
+	}
+
 	if (part_desc->flags & PART_FLAG_DEFAULT) {
 		if (default_part_name == NULL) {
 			info("update_part: setting default partition to %s",
@@ -1386,6 +1535,25 @@ extern int update_part (update_part_msg_t * part_desc, bool create_flag)
 		qos_list_build(part_ptr->allow_qos,&part_ptr->allow_qos_bitstr);
 	}
 
+	if (part_desc->qos_char) {
+		slurmdb_qos_rec_t qos_rec, *backup_qos_ptr = part_ptr->qos_ptr;
+
+		memset(&qos_rec, 0, sizeof(slurmdb_qos_rec_t));
+		qos_rec.name = part_desc->qos_char;
+		if (assoc_mgr_fill_in_qos(
+			    acct_db_conn, &qos_rec, accounting_enforce,
+			    (slurmdb_qos_rec_t **)&part_ptr->qos_ptr, 0)
+		    != SLURM_SUCCESS) {
+			error("update_part: invalid qos (%s) given",
+			      qos_rec.name);
+			error_code = ESLURM_INVALID_QOS;
+			part_ptr->qos_ptr = backup_qos_ptr;
+		} else {
+			xfree(part_ptr->qos_char);
+			part_ptr->qos_char = xstrdup(part_desc->qos_char);
+		}
+	}
+
 	if (part_desc->allow_alloc_nodes != NULL) {
 		xfree(part_ptr->allow_alloc_nodes);
 		if ((part_desc->allow_alloc_nodes[0] == '\0') ||
@@ -1492,7 +1660,7 @@ extern int update_part (update_part_msg_t * part_desc, bool create_flag)
 			}
 		}
 
-		error_code = _build_part_bitmap(part_ptr);
+		error_code = build_part_bitmap(part_ptr);
 		if (error_code) {
 			xfree(part_ptr->nodes);
 			part_ptr->nodes = backup_node_list;
@@ -1643,9 +1811,68 @@ uid_t *_get_groups_members(char *group_names)
 	}
 	xfree(tmp_names);
 
+	group_uids = _remove_duplicate_uids(group_uids);
+
 	return group_uids;
 }
 
+/* remove_duplicate_uids()
+ */
+static uid_t *
+_remove_duplicate_uids(uid_t *u)
+{
+	int i;
+	int j;
+	int num;
+	uid_t *v;
+	uid_t cur;
+
+	if (!u)
+		return NULL;
+
+	num = 1;
+	for (i = 0; u[i]; i++)
+		++num;
+
+	v = xmalloc(num * sizeof(uid_t));
+	qsort(u, num, sizeof(uid_t), _uid_cmp);
+
+	j = 0;
+	cur = u[0];
+	for (i = 0; u[i]; i++) {
+		if (u[i] == cur)
+			continue;
+		v[j] = cur;
+		cur = u[i];
+		++j;
+	}
+	v[j] = cur;
+
+	xfree(u);
+	return v;
+}
+
/* _uid_cmp - qsort(3) comparator ordering uids in DECREASING order,
 * so that the terminating 0 of a uid array sorts to the end. */
static int
_uid_cmp(const void *x, const void *y)
{
	uid_t a = *(const uid_t *) x;
	uid_t b = *(const uid_t *) y;

	if (a == b)
		return 0;
	return (a > b) ? -1 : 1;
}
+
 /* _get_group_tlm - return the time of last modification for the GROUP_FILE */
 time_t _get_group_tlm(void)
 {
@@ -1677,10 +1904,7 @@ static int _uid_list_size(uid_t * uid_list_ptr)
 /* part_fini - free all memory associated with partition records */
 void part_fini (void)
 {
-	if (part_list) {
-		list_destroy(part_list);
-		part_list = NULL;
-	}
+	FREE_NULL_LIST(part_list);
 	xfree(default_part_name);
 	xfree(default_part.name);
 	default_part_loc = (struct part_record *) NULL;
diff --git a/src/slurmctld/ping_nodes.c b/src/slurmctld/ping_nodes.c
index 1b2d7c6f5..e437c7ef6 100644
--- a/src/slurmctld/ping_nodes.c
+++ b/src/slurmctld/ping_nodes.c
@@ -152,6 +152,7 @@ void ping_nodes (void)
 #else
 	struct node_record *node_ptr = NULL;
 	time_t old_cpu_load_time = now - slurmctld_conf.slurmd_timeout;
+	time_t old_free_mem_time = now - slurmctld_conf.slurmd_timeout;
 #endif
 
 	ping_agent_args = xmalloc (sizeof (agent_arg_t));
@@ -308,7 +309,7 @@ void ping_nodes (void)
 		 * counter and gets updated configuration information
 		 * once in a while). We limit these requests since they
 		 * can generate a flood of incoming RPCs. */
-		if (IS_NODE_UNKNOWN(node_ptr) || restart_flag ||
+		if (IS_NODE_UNKNOWN(node_ptr) || (node_ptr->boot_time == 0) ||
 		    ((i >= offset) && (i < (offset + max_reg_threads)))) {
 			if (reg_agent_args->protocol_version >
 			    node_ptr->protocol_version)
@@ -322,7 +323,8 @@ void ping_nodes (void)
 
 		if ((!IS_NODE_NO_RESPOND(node_ptr)) &&
 		    (node_ptr->last_response >= still_live_time) &&
-		    (node_ptr->cpu_load_time >= old_cpu_load_time))
+		    (node_ptr->cpu_load_time >= old_cpu_load_time) &&
+		    (node_ptr->free_mem_time >= old_free_mem_time))
 			continue;
 
 		/* Do not keep pinging down nodes since this can induce
diff --git a/src/slurmctld/powercapping.c b/src/slurmctld/powercapping.c
new file mode 100644
index 000000000..3b2fe93b1
--- /dev/null
+++ b/src/slurmctld/powercapping.c
@@ -0,0 +1,597 @@
+/*****************************************************************************\
+ *  powercapping.c - Definitions for power capping logic in the controller
+ *****************************************************************************
+ *  Copyright (C) 2013 CEA/DAM/DIF
+ *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
+ *
+ *  Copyright (C) 2014 Bull S.A.S.
+ *  Written by Yiannis Georgiou <yiannis.georgiou@bull.net>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+#include <stdlib.h>
+#include <string.h>
+
+#include "src/common/bitstring.h"
+#include "src/common/layouts_mgr.h"
+#include "src/common/macros.h"
+#include "src/common/node_conf.h"
+#include "src/common/power.h"
+#include "src/common/slurm_protocol_api.h"
+#include "src/common/xstring.h"
+#include "src/slurmctld/powercapping.h"
+#include "src/slurmctld/reservation.h"
+#include "src/slurmctld/slurmctld.h"
+
+
+#define L_NAME		"power"
+#define L_CLUSTER	"Cluster"
+#define L_SUM_MAX	"MaxSumWatts"
+#define L_SUM_IDLE	"IdleSumWatts"
+#define L_SUM_CUR	"CurrentSumPower"
+#define L_NODE_MAX	"MaxWatts"
+#define L_NODE_IDLE	"IdleWatts"
+#define L_NODE_DOWN	"DownWatts"
+#define L_NODE_SAVE	"PowerSaveWatts"
+#define L_NODE_CUR	"CurrentPower"
+#define L_NUM_FREQ	"NumFreqChoices"
+#define L_CUR_POWER	"CurrentCorePower"
+
/* Return true iff a cluster power cap is currently configured, i.e.
 * powercap_get_cluster_current_cap() reports a non-zero cap. */
static bool _powercap_enabled(void)
{
	return (powercap_get_cluster_current_cap() != 0);
}
+
+int _which_power_layout(char *layout)
+{
+	uint32_t max_watts;
+
+	return layouts_entity_pullget_kv(layout, L_CLUSTER, L_SUM_MAX,
+					 &max_watts, L_T_UINT32);
+
+}
+
+int which_power_layout(void)
+{
+	layout_t* layout;
+	
+	if (!_powercap_enabled())
+		return 0;
+
+	layout = layouts_get_layout("power");
+
+	if (layout == NULL)
+		return 0;
+	else if (strcmp(layout->name,"default") == 0)
+		return 1;
+	else if (strcmp(layout->name,"cpufreq") == 0)
+		return 2;
+	
+	return 0;
+}
+
+bool power_layout_ready(void)
+{
+	static time_t last_error_time = (time_t) 0;
+	time_t now = time(NULL);
+	struct node_record *node_ptr;
+	uint32_t data[2];
+	int i;
+
+	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
+		if (layouts_entity_get_mkv(L_NAME, node_ptr->name,
+		    "MaxWatts,IdleWatts", data, (sizeof(uint32_t) * 2),
+		    L_T_UINT32)) {
+			/* Limit error message frequency, once per minute */
+			if (difftime(now, last_error_time) < 60)
+				return false;
+			last_error_time = now;
+			error("%s: node %s is not in the layouts.d/power.conf file",
+			     __func__, node_ptr->name);
+			return false;
+		}
+	}
+	return true;
+}
+
+
+uint32_t powercap_get_cluster_max_watts(void)
+{
+	uint32_t max_watts;
+
+	if (!_powercap_enabled())
+		return 0;
+
+	if (!power_layout_ready())
+		return 0;
+
+	layouts_entity_pullget_kv(L_NAME, L_CLUSTER, L_SUM_MAX, &max_watts,
+				  L_T_UINT32);
+
+	return max_watts;
+}
+
+uint32_t powercap_get_cluster_min_watts(void)
+{
+	uint32_t min_watts = 0, tmp_watts, save_watts, down_watts;
+	struct node_record *node_ptr;
+	int i;
+
+	if (!_powercap_enabled())
+		return 0;
+	
+	if (!power_layout_ready())
+		return 0;
+
+	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
+		layouts_entity_pullget_kv(L_NAME, node_ptr->name, L_NODE_IDLE,
+					  &tmp_watts, L_T_UINT32);
+		layouts_entity_pullget_kv(L_NAME, node_ptr->name, L_NODE_DOWN,
+					  &down_watts, L_T_UINT32);
+		tmp_watts = MIN(tmp_watts, down_watts);
+		layouts_entity_pullget_kv(L_NAME, node_ptr->name, L_NODE_SAVE,
+					  &save_watts, L_T_UINT32);
+		tmp_watts = MIN(tmp_watts, save_watts);
+		min_watts += tmp_watts;
+	}
+
+	return min_watts;
+}
+
+uint32_t powercap_get_cluster_current_cap(void)
+{
+	char *end_ptr = NULL, *sched_params, *tmp_ptr;
+	uint32_t cap_watts = 0;
+
+	sched_params = slurm_get_power_parameters();
+	if (!sched_params)
+		return cap_watts;
+
+	if ((tmp_ptr = strstr(sched_params, "cap_watts=INFINITE"))) {
+		cap_watts = INFINITE;
+	} else if ((tmp_ptr = strstr(sched_params, "cap_watts=UNLIMITED"))) {
+		cap_watts = INFINITE;
+	} else if ((tmp_ptr = strstr(sched_params, "cap_watts="))) {
+		cap_watts = strtol(tmp_ptr + 10, &end_ptr, 10);
+		if ((end_ptr[0] == 'k') || (end_ptr[0] == 'K')) {
+			cap_watts *= 1000;
+		} else if ((end_ptr[0] == 'm') || (end_ptr[0] == 'M')) {
+			cap_watts *= 1000000;
+		}
+	}
+	xfree(sched_params);
+
+	return cap_watts;
+}
+
/* Remove the "cap_watts=..." element that tmp_ptr points at, up to and
 * including the next ',' separator, by shifting the remainder of the
 * string down over it.  With no following ',', the string is truncated
 * at tmp_ptr. */
static void _strip_cap_watts(char *tmp_ptr)
{
	char *rest = strchr(tmp_ptr, ',');

	if (!rest) {
		tmp_ptr[0] = '\0';
		return;
	}
	rest++;
	/* overlapping forward copy, including the NUL terminator */
	memmove(tmp_ptr, rest, strlen(rest) + 1);
}
+
+int powercap_set_cluster_cap(uint32_t new_cap)
+{
+	char *sched_params, *sep, *tmp_ptr;
+
+	sched_params = slurm_get_power_parameters();
+	if (sched_params) {
+		while ((tmp_ptr = strstr(sched_params, "cap_watts="))) {
+			_strip_cap_watts(tmp_ptr);
+		}
+	}
+	if (sched_params && sched_params[0])
+		sep = ",";
+	else
+		sep = "";
+	if (new_cap == INFINITE)
+		xstrfmtcat(sched_params, "%scap_watts=INFINITE", sep);
+	else
+		xstrfmtcat(sched_params, "%scap_watts=%u", sep, new_cap);
+	slurm_set_power_parameters(sched_params);
+	power_g_reconfig();
+	xfree(sched_params);
+
+	return 0;
+}
+
+uint32_t powercap_get_cluster_adjusted_max_watts(void)
+{
+	uint32_t adj_max_watts = 0,val;
+	struct node_record *node_ptr;
+	int i;
+
+	if (!_powercap_enabled())
+		return 0;
+	if (!power_layout_ready())
+		return 0;
+	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
+		if (bit_test(power_node_bitmap, i)) {
+			layouts_entity_pullget_kv(L_NAME, node_ptr->name,
+					L_NODE_SAVE, &val, L_T_UINT32);
+		} else if (!bit_test(up_node_bitmap, i)) {
+			layouts_entity_pullget_kv(L_NAME, node_ptr->name,
+					L_NODE_DOWN, &val, L_T_UINT32);
+		} else {
+			layouts_entity_pullget_kv(L_NAME, node_ptr->name,
+					L_NODE_MAX, &val, L_T_UINT32);
+		}
+		adj_max_watts += val;
+	}
+
+	return adj_max_watts;
+}
+
+uint32_t powercap_get_cluster_current_max_watts(void)
+{
+	uint32_t cur_max_watts = 0;
+
+	if (!_powercap_enabled())
+		return 0;
+	if (!power_layout_ready())
+		return 0;
+	
+	if (which_power_layout() == 1) {
+		cur_max_watts = powercap_get_node_bitmap_maxwatts(NULL);
+	} else {
+		cur_max_watts = powercap_get_node_bitmap_maxwatts_dvfs(
+					NULL, NULL, NULL, 0, 0);
+	}
+
+	return cur_max_watts;
+}
+
+uint32_t powercap_get_node_bitmap_maxwatts(bitstr_t *idle_bitmap)
+{
+	uint32_t max_watts = 0, val;
+	struct node_record *node_ptr;
+	int i;
+	bitstr_t *tmp_bitmap = NULL;
+
+	if (!_powercap_enabled())
+		return 0;
+	if (!power_layout_ready())
+		return 0;
+
+	/* if no input bitmap, consider the current idle nodes 
+	 * bitmap as the input bitmap tagging nodes to consider 
+	 * as idle while computing the max watts of the cluster */
+	if (idle_bitmap == NULL) {
+		tmp_bitmap = bit_copy(idle_node_bitmap);
+		idle_bitmap = tmp_bitmap;
+	}
+
+	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
+		/* non reserved node, evaluate the different cases */
+		if (bit_test(idle_bitmap, i)) {
+			 /* idle nodes, 2 cases : power save or not */
+			if (bit_test(power_node_bitmap, i)) {
+				layouts_entity_pullget_kv(L_NAME,
+						node_ptr->name, L_NODE_SAVE,
+						&val, L_T_UINT32);
+			} else {
+				layouts_entity_pullget_kv(L_NAME,
+						node_ptr->name, L_NODE_IDLE,
+						&val, L_T_UINT32);
+			}
+		} else {
+			/* non idle nodes, 2 cases : down or not */
+			if (!bit_test(up_node_bitmap, i)) {
+				layouts_entity_pullget_kv(L_NAME, 
+						node_ptr->name, L_NODE_DOWN,
+						&val, L_T_UINT32);
+			} else {
+				layouts_entity_pullget_kv(L_NAME,
+						node_ptr->name, L_NODE_MAX,
+						&val, L_T_UINT32);
+			}
+		}
+		max_watts += val;	
+	}
+
+	if (tmp_bitmap)
+		bit_free(tmp_bitmap);
+
+	return max_watts;
+}
+
+uint32_t powercap_get_job_cap(struct job_record *job_ptr, time_t when)
+{
+	uint32_t powercap = 0, resv_watts;
+
+	powercap = powercap_get_cluster_current_cap();
+	if (powercap == INFINITE)
+		powercap = powercap_get_cluster_max_watts();
+	if (powercap == 0)
+		return 0; /* should not happened */
+
+	/* get the amount of watts reserved for the job */
+	resv_watts = job_test_watts_resv(job_ptr, when);
+
+	/* avoid underflow of the cap value, return at least 0 */
+	if (resv_watts > powercap)
+		resv_watts = powercap;
+
+	return (powercap - resv_watts);
+}
+
+uint32_t powercap_get_cpufreq(bitstr_t *select_bitmap, int k)
+{
+	int i;
+	struct node_record *node_ptr;
+	char ename[128];
+	uint32_t cpufreq = 0;
+
+	if (!_powercap_enabled())
+		return cpufreq;
+
+	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
+	     i++, node_ptr++) {
+		if (bit_test(select_bitmap, i)) {
+			sprintf(ename, "Cpufreq%d", k);
+			layouts_entity_pullget_kv(L_NAME, node_ptr->name,
+						  ename, &cpufreq, L_T_UINT32);
+		}
+		break;
+	}
+
+	return cpufreq;
+}
+
+int powercap_get_job_optimal_cpufreq(uint32_t powercap, int *allowed_freqs)
+{
+	uint32_t cur_max_watts = 0, *tmp_max_watts_dvfs = NULL;
+	int k = 1;
+	bitstr_t *tmp_bitmap = NULL;
+
+	if (!_powercap_enabled())
+		return 0;
+
+	tmp_max_watts_dvfs = xmalloc(sizeof(uint32_t) * (allowed_freqs[0]+1));
+	tmp_bitmap = bit_copy(idle_node_bitmap);
+	bit_not(tmp_bitmap);
+
+	cur_max_watts = powercap_get_node_bitmap_maxwatts_dvfs(tmp_bitmap,
+			  idle_node_bitmap,tmp_max_watts_dvfs,allowed_freqs,0);
+
+	if (cur_max_watts > powercap) {
+		while (tmp_max_watts_dvfs[k] > powercap &&
+		      k < allowed_freqs[0] + 1) {
+			k++;
+		}
+		if (k == allowed_freqs[0] + 1)
+			k--;
+	} else
+		k = 1;
+
+	return k;
+}
+
/*
 * powercap_get_job_nodes_numfreq - build the list of frequency indexes
 *	(layout keys "Cpufreq<p>") usable by a job given its --cpu-freq
 *	range, based on the first node set in select_bitmap.
 * IN select_bitmap - nodes selected for the job
 * IN cpu_freq_min/cpu_freq_max - requested range; NO_VAL in both means
 *	"no explicit request"; CPU_FREQ_RANGE_FLAG marks symbolic
 *	values (Low, High, ...) which select the whole node range
 * RET xmalloc'd int array with [0] = number of allowed frequencies and
 *	[1..n] = frequency indexes (highest first); NULL when power
 *	capping is disabled.  Caller must xfree() the result.
 */
int* powercap_get_job_nodes_numfreq(bitstr_t *select_bitmap, 
				    uint32_t cpu_freq_min,
				    uint32_t cpu_freq_max)
{
	uint16_t num_freq = 0;
	int i, p, *allowed_freqs = NULL, new_num_freq = 0;
	struct node_record *node_ptr;
	char ename[128];
	uint32_t cpufreq;

	if (!_powercap_enabled())
		return 0;
	/* no explicit frequency request: return an empty (default) list */
	if ((cpu_freq_min == NO_VAL) && (cpu_freq_max == NO_VAL)) {
		allowed_freqs = xmalloc(sizeof(int) * 2);
		/* allowed_freqs[0] = 0; Default value */
		return allowed_freqs;
	}

	/* only the first node set in select_bitmap is examined */
	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
	     i++, node_ptr++) {
		if (bit_test(select_bitmap, i)) {
			layouts_entity_pullget_kv(L_NAME, node_ptr->name,
					L_NUM_FREQ, &num_freq, L_T_UINT16);
			allowed_freqs = xmalloc(sizeof(int)*((int)num_freq+2));
			/* NOTE(review): this writes one element BEFORE the
			 * start of the xmalloc'd buffer (index -1) — an
			 * out-of-bounds write / heap corruption.  It looks
			 * like the total frequency count was meant to be
			 * stashed alongside the array; confirm the intended
			 * allocation/indexing scheme and fix (callers may
			 * read [-1], so the fix must cover them too). */
			allowed_freqs[-1] = (int) num_freq;
			for (p = num_freq; p > 0; p--) {
				sprintf(ename, "Cpufreq%d", p);
				layouts_entity_pullget_kv(L_NAME,
					  	  node_ptr->name, ename,
						  &cpufreq, L_T_UINT32);

		/* In case a job is submitted with flags Low,High, etc on
		 * --cpu-freq parameter then we consider the whole range
		 * of available frequencies on nodes */
				if (((cpu_freq_min <= cpufreq) &&
				    (cpufreq <= cpu_freq_max)) ||
				    ((cpu_freq_min & CPU_FREQ_RANGE_FLAG) ||
				    (cpu_freq_max & CPU_FREQ_RANGE_FLAG))) {
					new_num_freq++;
					allowed_freqs[new_num_freq] = p;
				}
			}
			break;
		}
	}

	if (allowed_freqs) {
		allowed_freqs[0] = new_num_freq;
	} else {
		/* no selected node found: return an empty list */
		allowed_freqs = xmalloc(sizeof(int) * 2);
		/* allowed_freqs[0] = 0; Default value */
	}
	return allowed_freqs;
}
+
/*
 * powercap_get_node_bitmap_maxwatts_dvfs - compute the cluster's max
 *	wattage with DVFS awareness.
 * IN idle_bitmap - nodes to account as idle (PowerSaveWatts/IdleWatts)
 * IN select_bitmap - nodes to account as allocated to the job; their
 *	consumption is projected from per-core watt data
 * OUT max_watts_dvfs - when non-NULL, per-frequency projections:
 *	max_watts_dvfs[p] for each allowed_freqs[p], p in 1..allowed_freqs[0]
 * IN allowed_freqs - frequency index list ([0] = count); only read when
 *	max_watts_dvfs is non-NULL (and by the dvfs loop below)
 * IN num_cpus - cpus used on each selected node; 0 means "whole node"
 * RET summed watts, or 0 when power capping is disabled
 * NOTE: when both bitmaps are NULL the current idle_node_bitmap is used
 *	for both.
 */
uint32_t powercap_get_node_bitmap_maxwatts_dvfs(bitstr_t *idle_bitmap,
			  bitstr_t *select_bitmap, uint32_t *max_watts_dvfs,
			  int* allowed_freqs, uint32_t num_cpus)
{
	uint32_t max_watts = 0, tmp_max_watts = 0, val = 0;
	uint32_t *tmp_max_watts_dvfs = NULL;
	struct node_record *node_ptr;
	int i, p;
	char ename[128], keyname[128];
	bitstr_t *tmp_bitmap = NULL;
	/* data: IdleWatts,MaxWatts,CoresCount,LastCore,CurrentPower */
	uint32_t data[5], core_data[4];

	if (!_powercap_enabled())
		return 0;

	if (max_watts_dvfs != NULL) {
		tmp_max_watts_dvfs = 
			  xmalloc(sizeof(uint32_t)*(allowed_freqs[0]+1));
	}

	/* if no input bitmap, consider the current idle nodes 
	 * bitmap as the input bitmap tagging nodes to consider 
	 * as idle while computing the max watts of the cluster */
	if (idle_bitmap == NULL && select_bitmap == NULL) {
		tmp_bitmap = bit_copy(idle_node_bitmap);
		idle_bitmap = tmp_bitmap;
		select_bitmap = tmp_bitmap;
	}
	
	for (i = 0, node_ptr = node_record_table_ptr; i < node_record_count;
	     i++, node_ptr++) {
		if (bit_test(idle_bitmap, i)) {
			/* idle nodes, 2 cases : power save or not */
			if (bit_test(power_node_bitmap, i)) {
				layouts_entity_pullget_kv(L_NAME,
						  node_ptr->name, L_NODE_SAVE,
						  &val, L_T_UINT32);
			} else {
				layouts_entity_pullget_kv(L_NAME,
						  node_ptr->name, L_NODE_IDLE,
						  &val, L_T_UINT32);
			}
		
		} else if (bit_test(select_bitmap, i)) {
			/* node selected for the job: project consumption
			 * from per-core data instead of accumulating into
			 * val (aggregated into tmp_max_watts below) */
			layouts_entity_get_mkv(L_NAME, node_ptr->name,
				"IdleWatts,MaxWatts,CoresCount,LastCore,CurrentPower",
				data, (sizeof(uint32_t) * 5), L_T_UINT32);

			/* tmp_max_watts = IdleWatts - cpus*IdleCoreWatts
			 * + cpus*MaxCoreWatts */
			sprintf(ename, "virtualcore%u", data[3]);
			if (num_cpus == 0)
				num_cpus = data[2];
			layouts_entity_get_mkv(L_NAME, ename,
					       "IdleCoreWatts,MaxCoreWatts",
					       core_data,
					       (sizeof(uint32_t) * 2),
					       L_T_UINT32);
			/* base watts: either IdleWatts (no current reading)
			 * or CurrentPower.  NOTE(review): data[4] is
			 * uint32_t, so "data[4] > 0" is the exact complement
			 * of "data[4] == 0" and the final else-if branch is
			 * unreachable — confirm whether CurrentPower was
			 * meant to be signed here. */
			if (data[4] == 0) {
				tmp_max_watts += data[0] -
					  num_cpus*core_data[0] +
					  num_cpus*core_data[1];
			} else if (data[4] > 0) {
				tmp_max_watts += data[4] -
					  num_cpus*core_data[0] +
					  num_cpus*core_data[1];
			} else if (num_cpus == data[2])
				tmp_max_watts += data[1];

			if (!tmp_max_watts_dvfs)
				goto skip_dvfs;
			/* per-frequency projection for each allowed freq */
			for (p = 1; p < (allowed_freqs[0] + 1); p++) {
				sprintf(keyname, 
					"IdleCoreWatts,MaxCoreWatts,"
					"Cpufreq%dWatts,CurrentCorePower",
					allowed_freqs[p]);
				layouts_entity_get_mkv(L_NAME, ename, keyname,
					  core_data, (sizeof(uint32_t) * 4),
					  L_T_UINT32);
				if (num_cpus == data[2]) {
					/* whole node used at this freq */
					tmp_max_watts_dvfs[p] +=
						  num_cpus*core_data[2];
				} else {
					if (data[4] == 0) {
						tmp_max_watts_dvfs[p] +=
						 	data[0] -
							num_cpus*core_data[0] +
							num_cpus*core_data[2];
					} else {
						tmp_max_watts_dvfs[p] +=
							data[4] -
							num_cpus*core_data[0] +
							num_cpus*core_data[2];
					}
				}
			}
  skip_dvfs:		;
		} else {
			/* non-idle nodes, 2 cases : down or not */
			if (!bit_test(up_node_bitmap, i)) {
				layouts_entity_pullget_kv(L_NAME,
						  node_ptr->name, L_NODE_DOWN,
						  &val, L_T_UINT32);
			} else {
				layouts_entity_pullget_kv(L_NAME,
						  node_ptr->name, L_NODE_CUR,
						  &val, L_T_UINT32);
			}
		}
		/* val covers the idle/down/current cases only; reset it so
		 * a selected node does not re-add the previous value */
		max_watts += val;
		val = 0;
	}
	if (max_watts_dvfs) {	
		for (p = 1; p < allowed_freqs[0] + 1; p++) {
			max_watts_dvfs[p] = max_watts + tmp_max_watts_dvfs[p];
		}
		xfree(tmp_max_watts_dvfs);
	}
	max_watts += tmp_max_watts;

	if (tmp_bitmap)
		bit_free(tmp_bitmap);

	return max_watts;
}
diff --git a/src/slurmctld/powercapping.h b/src/slurmctld/powercapping.h
new file mode 100644
index 000000000..8e0edede2
--- /dev/null
+++ b/src/slurmctld/powercapping.h
@@ -0,0 +1,178 @@
+/*****************************************************************************\
+ *  powercapping.h - Definitions for power capping logic in the controller
+ *****************************************************************************
+ *  Copyright (C) 2013 CEA/DAM/DIF
+ *  Written by Matthieu Hautreux <matthieu.hautreux@cea.fr>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _POWERCAPPING_H
+#define _POWERCAPPING_H
+
+#include <stdint.h>
+#include <time.h>
+#include "src/slurmctld/slurmctld.h"
+
+/**
+ * powercap_get_cluster_max_watts
+ * return the max power consumption of the cluster
+ * RET uint32_t - the max consumption in watts
+ */
+uint32_t powercap_get_cluster_max_watts(void);
+
+/**
+ * powercap_get_cluster_min_watts
+ * return the min power consumption of the cluster
+ * RET uint32_t - the min consumption in watts
+ */
+uint32_t powercap_get_cluster_min_watts(void);
+
+/**
+ * powercap_get_cluster_current_cap
+ * return the current powercap value
+ * RET uint32_t - powercap
+ */
+uint32_t powercap_get_cluster_current_cap(void);
+
+/**
+ * powercap_set_cluster_cap
+ * set a new powercap value
+ * IN uint32_t - new_cap
+ * RET int - 0 or error code
+ */
+int powercap_set_cluster_cap(uint32_t new_cap);
+
+/**
+ * powercap_get_cluster_adjusted_max_watts
+ * return max power consumption of the cluster,
+ * taking into consideration the nodes which are POWERED DOWN
+ * RET uint32_t - the max consumption in watts
+ */
+uint32_t powercap_get_cluster_adjusted_max_watts(void);
+
+/**
+ * powercap_get_cluster_current_max_watts
+ * return current max power consumption of the cluster,
+ * taking into consideration the nodes which are POWERED DOWN
+ * and the nodes which are idle
+ * RET uint32_t - the max consumption in watts
+ */
+uint32_t powercap_get_cluster_current_max_watts(void);
+
+/**
+ * powercap_get_node_bitmap_maxwatts
+ * return current max consumption value of the cluster,
+ * taking into consideration the nodes which are POWERED DOWN
+ * and the nodes which are idle using the input bitmap to identify
+ * them.
+ * A NULL argument means the controller's idle_node_bitmap is used instead.
+ * IN bitstr_t* idle_bitmap
+ * RET uint32_t - the max consumption in watts
+ */
+uint32_t powercap_get_node_bitmap_maxwatts(bitstr_t* select_bitmap);
+
+/**
+ * powercap_get_job_cap
+ * return the cap value of a job taking into account the current cap
+ * as well as the power reservations defined on the interval
+ *
+ * IN struct job_record* job_ptr
+ * IN time_t when
+ * RET uint32_t - the cap the job is restricted to
+ */
+uint32_t powercap_get_job_cap(struct job_record *job_ptr, time_t when);
+
+/**
+ * power_layout_ready
+ * check if the layout has at least the minimum available attributes
+ * per node declared and possible to be retrieved
+ *
+ * RET bool - whether the layout is ready for usage
+ */
+bool power_layout_ready(void);
+
+/**
+ * which_power_layout
+ * return which power layout is activated to be used for powercapping
+ *
+ * RET int - 0 both or none, 1 power layout, 2 power_cpufreq layout
+ */
+int which_power_layout(void);
+
+/**
+ * powercap_get_job_nodes_numfreq
+ * return the number of allowed frequencies as long as in which positions
+ * they are in the layouts
+ * IN bitstr_t* select_bitmap related to the nodes that the job could allocate
+ * IN uint32_t cpu_freq_min for the job as given in the command
+ * IN uint32_t cpu_freq_max for the job as given in the command
+ * RET int* - an array of allowed frequency positions 
+ *            and in 0 the number of total allowed frequencies
+ */
+int* powercap_get_job_nodes_numfreq(bitstr_t *select_bitmap,
+			uint32_t cpu_freq_min, uint32_t cpu_freq_max);
+
+/**
+ * powercap_get_node_bitmap_maxwatts_dvfs
+ * similar to powercap_get_node_bitmap_maxwatts, with the difference that
+ * it also fills the max_watts_dvfs array with the possible max watts values
+ * in case the cores get different allowed cpu frequencies
+ * IN bitstr_t* idle_bitmap 
+ * IN bitstr_t* select_bitmap
+ * IN/OUT uint32_t *max_watts_dvfs for the job as given in the command
+ * IN int* allowed_freqs for the job as given in the command
+ * IN uint32_t num_cpus per job per node
+ * RET uint32_t - the max consumption in watts
+ */
+uint32_t powercap_get_node_bitmap_maxwatts_dvfs(bitstr_t *idle_bitmap,
+			bitstr_t *select_bitmap, uint32_t *max_watts_dvfs,
+			int* allowed_freqs, uint32_t num_cpus);
+/**
+ * powercap_get_job_optimal_cpufreq
+ * return the position upon the allowed_freqs array that gives us the optimal
+ * cpu frequency for the job to be run based on the power budget available
+ * and the usage of the already executing jobs
+ * IN uint32_t powercap 
+ * IN int* allowed_freqs
+ * RET int - the position on the allowed_freqs array for the optimal cpufreq
+ */
+int powercap_get_job_optimal_cpufreq(uint32_t powercap, int* allowed_freqs);
+
+/**
+ * powercap_get_cpufreq
+ * return the cpu frequency related to a particular position on the layouts
+ * IN bitstr_t* select_bitmap
+ * IN int k
+ * RET uint32_t - the cpu frequency
+ */
+uint32_t powercap_get_cpufreq(bitstr_t *select_bitmap, int k);
+
+#endif /* !_POWERCAPPING_H */
diff --git a/src/slurmctld/proc_req.c b/src/slurmctld/proc_req.c
index 92e3697b1..c045b9beb 100644
--- a/src/slurmctld/proc_req.c
+++ b/src/slurmctld/proc_req.c
@@ -60,28 +60,31 @@
 #include "src/common/forward.h"
 #include "src/common/gres.h"
 #include "src/common/hostlist.h"
+#include "src/common/layouts_mgr.h"
 #include "src/common/log.h"
 #include "src/common/macros.h"
 #include "src/common/node_select.h"
 #include "src/common/pack.h"
 #include "src/common/read_config.h"
+#include "src/common/slurm_acct_gather.h"
 #include "src/common/slurm_auth.h"
 #include "src/common/slurm_cred.h"
+#include "src/common/slurm_ext_sensors.h"
 #include "src/common/slurm_priority.h"
 #include "src/common/slurm_protocol_api.h"
+#include "src/common/slurm_protocol_interface.h"
 #include "src/common/slurm_topology.h"
 #include "src/common/switch.h"
 #include "src/common/xstring.h"
-#include "src/common/slurm_ext_sensors.h"
-#include "src/common/slurm_acct_gather.h"
-#include "src/common/slurm_protocol_interface.h"
 
 #include "src/slurmctld/agent.h"
+#include "src/slurmctld/burst_buffer.h"
 #include "src/slurmctld/front_end.h"
 #include "src/slurmctld/gang.h"
 #include "src/slurmctld/job_scheduler.h"
 #include "src/slurmctld/licenses.h"
 #include "src/slurmctld/locks.h"
+#include "src/slurmctld/powercapping.h"
 #include "src/slurmctld/proc_req.h"
 #include "src/slurmctld/read_config.h"
 #include "src/slurmctld/reservation.h"
@@ -109,7 +112,7 @@ static pthread_cond_t throttle_cond = PTHREAD_COND_INITIALIZER;
 static void         _fill_ctld_conf(slurm_ctl_conf_t * build_ptr);
 static void         _kill_job_on_msg_fail(uint32_t job_id);
 static int          _is_prolog_finished(uint32_t job_id);
-static int 	    _launch_batch_step(job_desc_msg_t *job_desc_msg,
+static int	    _launch_batch_step(job_desc_msg_t *job_desc_msg,
 				       uid_t uid, uint32_t *step_id,
 				       uint16_t protocol_version);
 static int          _make_step_cred(struct step_record *step_rec,
@@ -122,12 +125,15 @@ inline static void  _slurm_rpc_accounting_first_reg(slurm_msg_t *msg);
 inline static void  _slurm_rpc_accounting_register_ctld(slurm_msg_t *msg);
 inline static void  _slurm_rpc_accounting_update_msg(slurm_msg_t *msg);
 inline static void  _slurm_rpc_allocate_resources(slurm_msg_t * msg);
+inline static void  _slurm_rpc_block_info(slurm_msg_t * msg);
+inline static void  _slurm_rpc_burst_buffer_info(slurm_msg_t * msg);
 inline static void  _slurm_rpc_checkpoint(slurm_msg_t * msg);
 inline static void  _slurm_rpc_checkpoint_comp(slurm_msg_t * msg);
 inline static void  _slurm_rpc_checkpoint_task_comp(slurm_msg_t * msg);
 inline static void  _slurm_rpc_delete_partition(slurm_msg_t * msg);
 inline static void  _slurm_rpc_complete_job_allocation(slurm_msg_t * msg);
-inline static void  _slurm_rpc_complete_batch_script(slurm_msg_t * msg);
+inline static void  _slurm_rpc_complete_batch_script(slurm_msg_t * msg,
+						     bool locked);
 inline static void  _slurm_rpc_complete_prolog(slurm_msg_t * msg);
 inline static void  _slurm_rpc_dump_conf(slurm_msg_t * msg);
 inline static void  _slurm_rpc_dump_front_end(slurm_msg_t * msg);
@@ -138,12 +144,16 @@ inline static void  _slurm_rpc_dump_licenses(slurm_msg_t * msg);
 inline static void  _slurm_rpc_dump_nodes(slurm_msg_t * msg);
 inline static void  _slurm_rpc_dump_node_single(slurm_msg_t * msg);
 inline static void  _slurm_rpc_dump_partitions(slurm_msg_t * msg);
+inline static void  _slurm_rpc_dump_sicp(slurm_msg_t * msg);
 inline static void  _slurm_rpc_dump_spank(slurm_msg_t * msg);
 inline static void  _slurm_rpc_dump_stats(slurm_msg_t * msg);
 inline static void  _slurm_rpc_end_time(slurm_msg_t * msg);
-inline static void  _slurm_rpc_epilog_complete(slurm_msg_t * msg);
+inline static void  _slurm_rpc_epilog_complete(slurm_msg_t * msg,
+					       bool *run_scheduler,
+					       bool running_composite);
 inline static void  _slurm_rpc_get_shares(slurm_msg_t *msg);
 inline static void  _slurm_rpc_get_topo(slurm_msg_t * msg);
+inline static void  _slurm_rpc_get_powercap(slurm_msg_t * msg);
 inline static void  _slurm_rpc_get_priority_factors(slurm_msg_t *msg);
 inline static void  _slurm_rpc_job_notify(slurm_msg_t * msg);
 inline static void  _slurm_rpc_job_ready(slurm_msg_t * msg);
@@ -152,10 +162,11 @@ inline static void  _slurm_rpc_job_step_kill(slurm_msg_t * msg);
 inline static void  _slurm_rpc_job_step_create(slurm_msg_t * msg);
 inline static void  _slurm_rpc_job_step_get_info(slurm_msg_t * msg);
 inline static void  _slurm_rpc_job_will_run(slurm_msg_t * msg);
-inline static void  _slurm_rpc_node_registration(slurm_msg_t * msg);
-inline static void  _slurm_rpc_block_info(slurm_msg_t * msg);
 inline static void  _slurm_rpc_job_alloc_info(slurm_msg_t * msg);
 inline static void  _slurm_rpc_job_alloc_info_lite(slurm_msg_t * msg);
+inline static void  _slurm_rpc_kill_job2(slurm_msg_t *msg);
+inline static void  _slurm_rpc_node_registration(slurm_msg_t *msg,
+						 bool running_composite);
 inline static void  _slurm_rpc_ping(slurm_msg_t * msg);
 inline static void  _slurm_rpc_reboot_nodes(slurm_msg_t * msg);
 inline static void  _slurm_rpc_reconfigure_controller(slurm_msg_t * msg);
@@ -163,6 +174,7 @@ inline static void  _slurm_rpc_resv_create(slurm_msg_t * msg);
 inline static void  _slurm_rpc_resv_update(slurm_msg_t * msg);
 inline static void  _slurm_rpc_resv_delete(slurm_msg_t * msg);
 inline static void  _slurm_rpc_resv_show(slurm_msg_t * msg);
+inline static void  _slurm_rpc_layout_show(slurm_msg_t * msg);
 inline static void  _slurm_rpc_requeue(slurm_msg_t * msg);
 inline static void  _slurm_rpc_takeover(slurm_msg_t * msg);
 inline static void  _slurm_rpc_set_debug_flags(slurm_msg_t *msg);
@@ -171,7 +183,7 @@ inline static void  _slurm_rpc_set_schedlog_level(slurm_msg_t *msg);
 inline static void  _slurm_rpc_shutdown_controller(slurm_msg_t * msg);
 inline static void  _slurm_rpc_shutdown_controller_immediate(slurm_msg_t *
 							     msg);
-inline static void  _slurm_rpc_step_complete(slurm_msg_t * msg);
+inline static void  _slurm_rpc_step_complete(slurm_msg_t * msg, bool locked);
 inline static void  _slurm_rpc_step_layout(slurm_msg_t * msg);
 inline static void  _slurm_rpc_step_update(slurm_msg_t * msg);
 inline static void  _slurm_rpc_submit_batch_job(slurm_msg_t * msg);
@@ -183,12 +195,20 @@ inline static void  _slurm_rpc_trigger_pull(slurm_msg_t * msg);
 inline static void  _slurm_rpc_update_front_end(slurm_msg_t * msg);
 inline static void  _slurm_rpc_update_job(slurm_msg_t * msg);
 inline static void  _slurm_rpc_update_node(slurm_msg_t * msg);
+inline static void  _slurm_rpc_update_layout(slurm_msg_t * msg);
 inline static void  _slurm_rpc_update_partition(slurm_msg_t * msg);
+inline static void  _slurm_rpc_update_powercap(slurm_msg_t * msg);
 inline static void  _slurm_rpc_update_block(slurm_msg_t * msg);
-inline static void  _slurm_rpc_kill_job2(slurm_msg_t *msg);
-
 inline static void  _update_cred_key(void);
 
+static void  _slurm_rpc_composite_msg(slurm_msg_t *msg);
+static void  _slurm_rpc_comp_msg_list(composite_msg_t * comp_msg,
+				      bool *run_scheduler,
+				      List msg_list_in,
+				      struct timeval *start_tv,
+				      int timeout);
+static void  _slurm_rpc_assoc_mgr_info(slurm_msg_t * msg);
+
 extern diag_stats_t slurmctld_diag_stats;
 
 /*
@@ -202,13 +222,13 @@ void slurmctld_req(slurm_msg_t *msg, connection_arg_t *arg)
 	uint32_t rpc_uid;
 
 	/* Just to validate the cred */
-	rpc_uid = (uint32_t) g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	rpc_uid = (uint32_t) g_slurm_auth_get_uid(msg->auth_cred,
+						  slurm_get_auth_info());
 	if (g_slurm_auth_errno(msg->auth_cred) != SLURM_SUCCESS) {
 		error("Bad authentication: %s",
 		      g_slurm_auth_errstr(g_slurm_auth_errno(msg->auth_cred)));
 		return;
 	}
-
 	START_TIMER;
 	slurm_mutex_lock(&rpc_mutex);
 	if (rpc_type_size == 0) {
@@ -248,7 +268,7 @@ void slurmctld_req(slurm_msg_t *msg, connection_arg_t *arg)
 		char inetbuf[64];
 
 		p = rpc_num2string(msg->msg_type);
-		_slurm_print_slurm_addr(&arg->cli_addr,
+		slurm_print_slurm_addr(&arg->cli_addr,
 					inetbuf,
 					sizeof(inetbuf));
 		info("%s: received opcode %s from %s", __func__, p, inetbuf);
@@ -304,7 +324,8 @@ void slurmctld_req(slurm_msg_t *msg, connection_arg_t *arg)
 		slurm_free_part_info_request_msg(msg->data);
 		break;
 	case MESSAGE_EPILOG_COMPLETE:
-		_slurm_rpc_epilog_complete(msg);
+		i = 0;
+		_slurm_rpc_epilog_complete(msg, (bool *)&i, 0);
 		slurm_free_epilog_complete_msg(msg->data);
 		break;
 	case REQUEST_CANCEL_JOB_STEP:
@@ -321,7 +342,7 @@ void slurmctld_req(slurm_msg_t *msg, connection_arg_t *arg)
 		break;
 	case REQUEST_COMPLETE_BATCH_JOB:
 	case REQUEST_COMPLETE_BATCH_SCRIPT:
-		_slurm_rpc_complete_batch_script(msg);
+		_slurm_rpc_complete_batch_script(msg, 0);
 		slurm_free_complete_batch_script_msg(msg->data);
 		break;
 	case REQUEST_JOB_STEP_CREATE:
@@ -337,7 +358,7 @@ void slurmctld_req(slurm_msg_t *msg, connection_arg_t *arg)
 		slurm_free_job_desc_msg(msg->data);
 		break;
 	case MESSAGE_NODE_REGISTRATION_STATUS:
-		_slurm_rpc_node_registration(msg);
+		_slurm_rpc_node_registration(msg, 0);
 		slurm_free_node_registration_status_msg(msg->data);
 		break;
 	case REQUEST_JOB_ALLOCATION_INFO:
@@ -350,7 +371,7 @@ void slurmctld_req(slurm_msg_t *msg, connection_arg_t *arg)
 		break;
 	case REQUEST_JOB_SBCAST_CRED:
 		_slurm_rpc_job_sbcast_cred(msg);
-		slurm_free_job_alloc_info_msg(msg->data);
+		slurm_free_step_alloc_info_msg(msg->data);
 		break;
 	case REQUEST_PING:
 		_slurm_rpc_ping(msg);
@@ -392,11 +413,19 @@ void slurmctld_req(slurm_msg_t *msg, connection_arg_t *arg)
 		_slurm_rpc_update_node(msg);
 		slurm_free_update_node_msg(msg->data);
 		break;
+	case REQUEST_UPDATE_LAYOUT:
+		_slurm_rpc_update_layout(msg);
+		slurm_free_update_layout_msg(msg->data);
+		break;
 	case REQUEST_CREATE_PARTITION:
 	case REQUEST_UPDATE_PARTITION:
 		_slurm_rpc_update_partition(msg);
 		slurm_free_update_part_msg(msg->data);
 		break;
+	case REQUEST_UPDATE_POWERCAP:
+		_slurm_rpc_update_powercap(msg);
+		slurm_free_powercap_info_msg(msg->data);
+		break;
 	case REQUEST_DELETE_PARTITION:
 		_slurm_rpc_delete_partition(msg);
 		slurm_free_delete_part_msg(msg->data);
@@ -421,6 +450,10 @@ void slurmctld_req(slurm_msg_t *msg, connection_arg_t *arg)
 		_slurm_rpc_resv_show(msg);
 		slurm_free_resv_info_request_msg(msg->data);
 		break;
+	case REQUEST_LAYOUT_INFO:
+		_slurm_rpc_layout_show(msg);
+		slurm_free_layout_info_request_msg(msg->data);
+		break;
 	case REQUEST_NODE_REGISTRATION_STATUS:
 		error("slurmctld is talking with itself. "
 		      "SlurmctldPort == SlurmdPort");
@@ -454,8 +487,12 @@ void slurmctld_req(slurm_msg_t *msg, connection_arg_t *arg)
 		_slurm_rpc_block_info(msg);
 		slurm_free_block_info_request_msg(msg->data);
 		break;
+	case REQUEST_BURST_BUFFER_INFO:
+		_slurm_rpc_burst_buffer_info(msg);
+		/* No body to free */
+		break;
 	case REQUEST_STEP_COMPLETE:
-		_slurm_rpc_step_complete(msg);
+		_slurm_rpc_step_complete(msg, 0);
 		slurm_free_step_complete_msg(msg->data);
 		break;
 	case REQUEST_STEP_LAYOUT:
@@ -514,6 +551,10 @@ void slurmctld_req(slurm_msg_t *msg, connection_arg_t *arg)
 		_slurm_rpc_get_topo(msg);
 		/* No body to free */
 		break;
+	case REQUEST_POWERCAP_INFO:
+		_slurm_rpc_get_powercap(msg);
+		/* No body to free */
+		break;
 	case REQUEST_SPANK_ENVIRONMENT:
 		_slurm_rpc_dump_spank(msg);
 		slurm_free_spank_env_request_msg(msg->data);
@@ -534,6 +575,18 @@ void slurmctld_req(slurm_msg_t *msg, connection_arg_t *arg)
 		_slurm_rpc_kill_job2(msg);
 		slurm_free_job_step_kill_msg(msg->data);
 		break;
+	case MESSAGE_COMPOSITE:
+		_slurm_rpc_composite_msg(msg);
+		slurm_free_composite_msg(msg->data);
+		break;
+	case REQUEST_ASSOC_MGR_INFO:
+		_slurm_rpc_assoc_mgr_info(msg);
+		slurm_free_assoc_mgr_info_request_msg(msg->data);
+		break;
+	case REQUEST_SICP_INFO:
+		_slurm_rpc_dump_sicp(msg);
+		/* No body to free */
+		break;
 	default:
 		error("invalid RPC msg_type=%u", msg->msg_type);
 		slurm_send_rc_msg(msg, EINVAL);
@@ -565,7 +618,18 @@ static void _throttle_start(int *active_rpc_cnt)
 			(*active_rpc_cnt)++;
 			break;
 		}
+#if 1
+		pthread_cond_wait(&throttle_cond, &throttle_mutex);
+#else
+		/* While an RPC is being throttled due to a running RPC of the
+		 * same type, do not count that thread against the daemon's
+		 * thread limit. In extreme environments, this logic can result
+		 * in the slurmctld spawning so many pthreads that it exhausts
+		 * system resources and fails. */
+		server_thread_decr();
 		pthread_cond_wait(&throttle_cond, &throttle_mutex);
+		server_thread_incr();
+#endif
 	}
 	slurm_mutex_unlock(&throttle_mutex);
 	if (LOTS_OF_AGENTS)
@@ -603,6 +667,8 @@ static void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->accounting_storage_loc =
 		xstrdup(conf->accounting_storage_loc);
 	conf_ptr->accounting_storage_port = conf->accounting_storage_port;
+	conf_ptr->accounting_storage_tres =
+		xstrdup(conf->accounting_storage_tres);
 	conf_ptr->accounting_storage_type =
 		xstrdup(conf->accounting_storage_type);
 	conf_ptr->accounting_storage_user =
@@ -627,6 +693,7 @@ static void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->backup_controller   = xstrdup(conf->backup_controller);
 	conf_ptr->batch_start_timeout = conf->batch_start_timeout;
 	conf_ptr->boot_time           = slurmctld_config.boot_time;
+	conf_ptr->bb_type             = xstrdup(conf->bb_type);
 
 	conf_ptr->checkpoint_type     = xstrdup(conf->checkpoint_type);
 	conf_ptr->chos_loc            = xstrdup(conf->chos_loc);
@@ -636,13 +703,14 @@ static void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->control_machine     = xstrdup(conf->control_machine);
 	conf_ptr->core_spec_plugin    = xstrdup(conf->core_spec_plugin);
 	conf_ptr->cpu_freq_def        = conf->cpu_freq_def;
+	conf_ptr->cpu_freq_govs       = conf->cpu_freq_govs;
 	conf_ptr->crypto_type         = xstrdup(conf->crypto_type);
 
 	conf_ptr->def_mem_per_cpu     = conf->def_mem_per_cpu;
 	conf_ptr->debug_flags         = conf->debug_flags;
 	conf_ptr->disable_root_jobs   = conf->disable_root_jobs;
-	conf_ptr->dynalloc_port       = conf->dynalloc_port;
 
+	conf_ptr->eio_timeout         = conf->eio_timeout;
 	conf_ptr->enforce_part_limits = conf->enforce_part_limits;
 	conf_ptr->epilog              = xstrdup(conf->epilog);
 	conf_ptr->epilog_msg_time     = conf->epilog_msg_time;
@@ -691,6 +759,7 @@ static void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->kill_wait           = conf->kill_wait;
 	conf_ptr->kill_on_bad_exit    = conf->kill_on_bad_exit;
 
+	conf_ptr->launch_params       = xstrdup(conf->launch_params);
 	conf_ptr->launch_type         = xstrdup(conf->launch_type);
 	conf_ptr->layouts             = xstrdup(conf->layouts);
 	conf_ptr->licenses            = xstrdup(conf->licenses);
@@ -708,6 +777,7 @@ static void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->min_job_age         = conf->min_job_age;
 	conf_ptr->mpi_default         = xstrdup(conf->mpi_default);
 	conf_ptr->mpi_params          = xstrdup(conf->mpi_params);
+	conf_ptr->msg_aggr_params     = xstrdup(conf->msg_aggr_params);
 	conf_ptr->msg_timeout         = conf->msg_timeout;
 
 	conf_ptr->next_job_id         = get_next_job_id();
@@ -717,6 +787,8 @@ static void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 
 	conf_ptr->plugindir           = xstrdup(conf->plugindir);
 	conf_ptr->plugstack           = xstrdup(conf->plugstack);
+	conf_ptr->power_parameters    = xstrdup(conf->power_parameters);
+	conf_ptr->power_plugin        = xstrdup(conf->power_plugin);
 
 	conf_ptr->preempt_mode        = conf->preempt_mode;
 	conf_ptr->preempt_type        = xstrdup(conf->preempt_type);
@@ -733,10 +805,12 @@ static void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->priority_weight_js  = conf->priority_weight_js;
 	conf_ptr->priority_weight_part= conf->priority_weight_part;
 	conf_ptr->priority_weight_qos = conf->priority_weight_qos;
+	conf_ptr->priority_weight_tres = xstrdup(conf->priority_weight_tres);
 
 	conf_ptr->private_data        = conf->private_data;
 	conf_ptr->proctrack_type      = xstrdup(conf->proctrack_type);
 	conf_ptr->prolog              = xstrdup(conf->prolog);
+	conf_ptr->prolog_epilog_timeout = conf->prolog_epilog_timeout;
 	conf_ptr->prolog_slurmctld    = xstrdup(conf->prolog_slurmctld);
 	conf_ptr->prolog_flags        = conf->prolog_flags;
 	conf_ptr->propagate_prio_process =
@@ -818,6 +892,7 @@ static void _fill_ctld_conf(slurm_ctl_conf_t * conf_ptr)
 	conf_ptr->task_plugin         = xstrdup(conf->task_plugin);
 	conf_ptr->task_plugin_param   = conf->task_plugin_param;
 	conf_ptr->tmp_fs              = xstrdup(conf->tmp_fs);
+	conf_ptr->topology_param      = xstrdup(conf->topology_param);
 	conf_ptr->topology_plugin     = xstrdup(conf->topology_plugin);
 	conf_ptr->track_wckey         = conf->track_wckey;
 	conf_ptr->tree_width          = conf->tree_width;
@@ -913,6 +988,7 @@ static int _make_step_cred(struct step_record *step_ptr,
 	cred_arg.stepid   = step_ptr->step_id;
 	cred_arg.uid      = job_ptr->user_id;
 
+	cred_arg.job_constraints = job_ptr->details->features;
 	cred_arg.job_core_bitmap = job_resrcs_ptr->core_bitmap;
 	cred_arg.job_core_spec   = job_ptr->details->core_spec;
 	cred_arg.job_hostlist    = job_resrcs_ptr->nodes;
@@ -950,7 +1026,7 @@ static int _make_step_cred(struct step_record *step_ptr,
 static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 {
 	static int active_rpc_cnt = 0;
-	int error_code = SLURM_SUCCESS;
+	int i, error_code = SLURM_SUCCESS;
 	slurm_msg_t response_msg;
 	DEF_TIMERS;
 	job_desc_msg_t *job_desc_msg = (job_desc_msg_t *) msg->data;
@@ -961,7 +1037,7 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 	/* Locks: Read config, write job, write node, read partition */
 	slurmctld_lock_t job_write_lock = {
 		READ_LOCK, WRITE_LOCK, WRITE_LOCK, READ_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	int immediate = job_desc_msg->immediate;
 	bool do_unlock = false;
 	bool job_waiting = false;
@@ -971,6 +1047,11 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 	char *err_msg = NULL;
 
 	START_TIMER;
+
+	/* Zero out the record as not all fields may be set.
+	 */
+	memset(&alloc_msg, 0, sizeof(resource_allocation_response_msg_t));
+
 	if ((uid != job_desc_msg->user_id) && (!validate_slurm_user(uid))) {
 		error_code = ESLURM_USER_ID_MISSING;
 		error("Security violation, RESOURCE_ALLOCATE from uid=%d",
@@ -1031,6 +1112,8 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 
 	/* return result */
 	if ((error_code == ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE) ||
+	    (error_code == ESLURM_POWER_NOT_AVAIL) ||
+	    (error_code == ESLURM_POWER_RESERVED) ||
 	    (error_code == ESLURM_RESERVATION_NOT_USABLE) ||
 	    (error_code == ESLURM_QOS_THRES) ||
 	    (error_code == ESLURM_NODE_NOT_AVAIL) ||
@@ -1062,10 +1145,15 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 			       job_ptr->job_resrcs->cpu_array_value,
 			       (sizeof(uint16_t) * job_ptr->job_resrcs->
 				cpu_array_cnt));
-		} else {
-			alloc_msg.num_cpu_groups = 0;
-			alloc_msg.cpu_count_reps = NULL;
-			alloc_msg.cpus_per_node  = NULL;
+		}
+		if (job_ptr->details->env_cnt) {
+			alloc_msg.env_size = job_ptr->details->env_cnt;
+			alloc_msg.environment = xmalloc(sizeof(char *) *
+							alloc_msg.env_size);
+			for (i = 0; i < alloc_msg.env_size; i++) {
+				alloc_msg.environment[i] =
+					xstrdup(job_ptr->details->env_sup[i]);
+			}
 		}
 		alloc_msg.error_code     = error_code;
 		alloc_msg.job_id         = job_ptr->job_id;
@@ -1081,8 +1169,27 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 		} else {
 			alloc_msg.pn_min_memory = 0;
 		}
-		unlock_slurmctld(job_write_lock);
-		_throttle_fini(&active_rpc_cnt);
+		if (job_ptr->account)
+			alloc_msg.account = xstrdup(job_ptr->account);
+		if (job_ptr->qos_ptr) {
+			slurmdb_qos_rec_t *qos;
+
+			qos = (slurmdb_qos_rec_t *)job_ptr->qos_ptr;
+			if (strcmp(qos->description, "Normal QOS default") == 0)
+				alloc_msg.qos = xstrdup("normal");
+			else
+				alloc_msg.qos = xstrdup(qos->description);
+		}
+		if (job_ptr->resv_name)
+			alloc_msg.resv_name = xstrdup(job_ptr->resv_name);
+
+		/* This check really isn't needed, but just doing it
+		 * to be more complete.
+		 */
+		if (do_unlock) {
+			unlock_slurmctld(job_write_lock);
+			_throttle_fini(&active_rpc_cnt);
+		}
 
 		slurm_msg_t_init(&response_msg);
 		response_msg.flags = msg->flags;
@@ -1092,12 +1199,9 @@ static void _slurm_rpc_allocate_resources(slurm_msg_t * msg)
 
 		if (slurm_send_node_msg(msg->conn_fd, &response_msg) < 0)
 			_kill_job_on_msg_fail(job_ptr->job_id);
-		xfree(alloc_msg.cpu_count_reps);
-		xfree(alloc_msg.cpus_per_node);
-		xfree(alloc_msg.node_list);
-		xfree(alloc_msg.partition);
-		xfree(alloc_msg.alias_list);
-		select_g_select_jobinfo_free(alloc_msg.select_jobinfo);
+
+		slurm_free_resource_allocation_response_msg_members(&alloc_msg);
+
 		schedule_job_save();	/* has own locks */
 		schedule_node_save();	/* has own locks */
 	} else {	/* allocate error */
@@ -1125,7 +1229,7 @@ static void _slurm_rpc_dump_conf(slurm_msg_t * msg)
 	/* Locks: Read config, partition*/
 	slurmctld_lock_t config_read_lock = {
 		READ_LOCK, NO_LOCK, NO_LOCK, READ_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_BUILD_INFO from uid=%d",
@@ -1165,10 +1269,10 @@ static void _slurm_rpc_dump_jobs(slurm_msg_t * msg)
 	slurm_msg_t response_msg;
 	job_info_request_msg_t *job_info_request_msg =
 		(job_info_request_msg_t *) msg->data;
-	/* Locks: Read config job, write node (for hiding) */
+	/* Locks: Read config job, write partition (for hiding) */
 	slurmctld_lock_t job_read_lock = {
 		READ_LOCK, READ_LOCK, NO_LOCK, WRITE_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug3("Processing RPC: REQUEST_JOB_INFO from uid=%d", uid);
@@ -1181,7 +1285,8 @@ static void _slurm_rpc_dump_jobs(slurm_msg_t * msg)
 	} else {
 		pack_all_jobs(&dump, &dump_size,
 			      job_info_request_msg->show_flags,
-			      g_slurm_auth_get_uid(msg->auth_cred, NULL),
+			      g_slurm_auth_get_uid(msg->auth_cred,
+						   slurm_get_auth_info()),
 			      NO_VAL, msg->protocol_version);
 		unlock_slurmctld(job_read_lock);
 		END_TIMER2("_slurm_rpc_dump_jobs");
@@ -1204,6 +1309,41 @@ static void _slurm_rpc_dump_jobs(slurm_msg_t * msg)
 	}
 }
 
+/* _slurm_rpc_dump_sicp - process RPC for SICP job state information */
+static void _slurm_rpc_dump_sicp(slurm_msg_t * msg)
+{
+	DEF_TIMERS;
+	char *dump;
+	int dump_size;
+	slurm_msg_t response_msg;
+	/* Locks: Read config job, write partition (for hiding) */
+	slurmctld_lock_t job_read_lock = {
+		READ_LOCK, READ_LOCK, NO_LOCK, WRITE_LOCK };
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+
+	START_TIMER;
+	debug2("Processing RPC: REQUEST_SICP_INFO from uid=%d", uid);
+	lock_slurmctld(job_read_lock);
+	pack_all_sicp(&dump, &dump_size,
+		      g_slurm_auth_get_uid(msg->auth_cred, NULL),
+		      msg->protocol_version);
+	unlock_slurmctld(job_read_lock);
+	END_TIMER2("_slurm_rpc_dump_sicp");
+
+	/* init response_msg structure */
+	slurm_msg_t_init(&response_msg);
+	response_msg.flags = msg->flags;
+	response_msg.protocol_version = msg->protocol_version;
+	response_msg.address = msg->address;
+	response_msg.msg_type = RESPONSE_SICP_INFO;
+	response_msg.data = dump;
+	response_msg.data_size = dump_size;
+
+	/* send message */
+	slurm_send_node_msg(msg->conn_fd, &response_msg);
+	xfree(dump);
+}
+
 /* _slurm_rpc_dump_jobs - process RPC for job state information */
 static void _slurm_rpc_dump_jobs_user(slurm_msg_t * msg)
 {
@@ -1216,14 +1356,15 @@ static void _slurm_rpc_dump_jobs_user(slurm_msg_t * msg)
 	/* Locks: Read config job, write node (for hiding) */
 	slurmctld_lock_t job_read_lock = {
 		READ_LOCK, READ_LOCK, NO_LOCK, WRITE_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug3("Processing RPC: REQUEST_JOB_USER_INFO from uid=%d", uid);
 	lock_slurmctld(job_read_lock);
 	pack_all_jobs(&dump, &dump_size,
 		      job_info_request_msg->show_flags,
-		      g_slurm_auth_get_uid(msg->auth_cred, NULL),
+		      g_slurm_auth_get_uid(msg->auth_cred,
+					   slurm_get_auth_info()),
 		      job_info_request_msg->user_id, msg->protocol_version);
 	unlock_slurmctld(job_read_lock);
 	END_TIMER2("_slurm_rpc_dump_job_user");
@@ -1256,7 +1397,7 @@ static void _slurm_rpc_dump_job_single(slurm_msg_t * msg)
 	/* Locks: Read config, job, and node info */
 	slurmctld_lock_t job_read_lock = {
 		READ_LOCK, READ_LOCK, NO_LOCK, READ_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug3("Processing RPC: REQUEST_JOB_INFO_SINGLE from uid=%d", uid);
@@ -1264,7 +1405,8 @@ static void _slurm_rpc_dump_job_single(slurm_msg_t * msg)
 
 	rc = pack_one_job(&dump, &dump_size, job_id_msg->job_id,
 			  job_id_msg->show_flags,
-			  g_slurm_auth_get_uid(msg->auth_cred, NULL),
+			  g_slurm_auth_get_uid(msg->auth_cred,
+					       slurm_get_auth_info()),
 			  msg->protocol_version);
 	unlock_slurmctld(job_read_lock);
 	END_TIMER2("_slurm_rpc_dump_job_single");
@@ -1295,14 +1437,14 @@ static void  _slurm_rpc_get_shares(slurm_msg_t *msg)
 	shares_response_msg_t resp_msg;
 	slurm_msg_t response_msg;
 
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_SHARE_INFO from uid=%d", uid);
-	resp_msg.assoc_shares_list = assoc_mgr_get_shares(acct_db_conn,
-							  uid,
-							  req_msg->acct_list,
-							  req_msg->user_list);
+
+	memset(&resp_msg, 0, sizeof(shares_response_msg_t));
+	assoc_mgr_get_shares(acct_db_conn, uid, req_msg, &resp_msg);
+
 	slurm_msg_t_init(&response_msg);
 	response_msg.flags = msg->flags;
 	response_msg.protocol_version = msg->protocol_version;
@@ -1310,8 +1452,8 @@ static void  _slurm_rpc_get_shares(slurm_msg_t *msg)
 	response_msg.msg_type = RESPONSE_SHARE_INFO;
 	response_msg.data     = &resp_msg;
 	slurm_send_node_msg(msg->conn_fd, &response_msg);
-	if (resp_msg.assoc_shares_list)
-		list_destroy(resp_msg.assoc_shares_list);
+	FREE_NULL_LIST(resp_msg.assoc_shares_list);
+	/* don't free the resp_msg.tres_names */
 	END_TIMER2("_slurm_rpc_get_share");
 	debug2("_slurm_rpc_get_shares %s", TIME_STR);
 }
@@ -1324,7 +1466,7 @@ static void  _slurm_rpc_get_priority_factors(slurm_msg_t *msg)
 	priority_factors_response_msg_t resp_msg;
 	slurm_msg_t response_msg;
 
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_PRIORITY_FACTORS from uid=%d", uid);
@@ -1337,8 +1479,7 @@ static void  _slurm_rpc_get_priority_factors(slurm_msg_t *msg)
 	response_msg.msg_type = RESPONSE_PRIORITY_FACTORS;
 	response_msg.data     = &resp_msg;
 	slurm_send_node_msg(msg->conn_fd, &response_msg);
-	if (resp_msg.priority_factors_list)
-		list_destroy(resp_msg.priority_factors_list);
+	FREE_NULL_LIST(resp_msg.priority_factors_list);
 	END_TIMER2("_slurm_rpc_get_priority_factors");
 	debug2("_slurm_rpc_get_priority_factors %s", TIME_STR);
 }
@@ -1355,7 +1496,7 @@ static void _slurm_rpc_end_time(slurm_msg_t * msg)
 	/* Locks: Read job */
 	slurmctld_lock_t job_read_lock = {
 		NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_JOB_END_TIME from uid=%d", uid);
@@ -1391,7 +1532,7 @@ static void _slurm_rpc_dump_front_end(slurm_msg_t * msg)
 	/* Locks: Read config, read node */
 	slurmctld_lock_t node_read_lock = {
 		READ_LOCK, NO_LOCK, NO_LOCK, READ_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug3("Processing RPC: REQUEST_FRONT_END_INFO from uid=%d", uid);
@@ -1437,7 +1578,7 @@ static void _slurm_rpc_dump_nodes(slurm_msg_t * msg)
 	 * select plugins) */
 	slurmctld_lock_t node_write_lock = {
 		READ_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug3("Processing RPC: REQUEST_NODE_INFO from uid=%d", uid);
@@ -1494,7 +1635,7 @@ static void _slurm_rpc_dump_node_single(slurm_msg_t * msg)
 	/* Locks: Read config, read node */
 	slurmctld_lock_t node_read_lock = {
 		READ_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug3("Processing RPC: REQUEST_NODE_INFO_SINGLE from uid=%d", uid);
@@ -1549,7 +1690,7 @@ static void _slurm_rpc_dump_partitions(slurm_msg_t * msg)
 	/* Locks: Read configuration and partition */
 	slurmctld_lock_t part_read_lock = {
 		READ_LOCK, NO_LOCK, NO_LOCK, READ_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_PARTITION_INFO uid=%d", uid);
@@ -1591,7 +1732,9 @@ static void _slurm_rpc_dump_partitions(slurm_msg_t * msg)
 
 /* _slurm_rpc_epilog_complete - process RPC noting the completion of
  * the epilog denoting the completion of a job it its entirety */
-static void  _slurm_rpc_epilog_complete(slurm_msg_t * msg)
+static void  _slurm_rpc_epilog_complete(slurm_msg_t *msg,
+					bool *run_scheduler,
+					bool running_composite)
 {
 	static int active_rpc_cnt = 0;
 	static time_t config_update = 0;
@@ -1600,10 +1743,9 @@ static void  _slurm_rpc_epilog_complete(slurm_msg_t * msg)
 	/* Locks: Read configuration, write job, write node */
 	slurmctld_lock_t job_write_lock = {
 		READ_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	epilog_complete_msg_t *epilog_msg =
 		(epilog_complete_msg_t *) msg->data;
-	bool run_scheduler = false;
 	struct job_record  *job_ptr;
 	char jbuf[JBUFSIZ];
 
@@ -1615,35 +1757,51 @@ static void  _slurm_rpc_epilog_complete(slurm_msg_t * msg)
 		return;
 	}
 
-	job_ptr = find_job_record(epilog_msg->job_id);
-	if (config_update != slurmctld_conf.last_update) {
-		char *sched_params = slurm_get_sched_params();
-		defer_sched = (sched_params && strstr(sched_params,"defer"));
-		xfree(sched_params);
-		config_update = slurmctld_conf.last_update;
+	/* Only throttle on non-composite messages; for composite
+	 * messages the lock should already be held by the caller. */
+	if (!running_composite) {
+		if (config_update != slurmctld_conf.last_update) {
+			char *sched_params = slurm_get_sched_params();
+			defer_sched = (sched_params &&
+				       strstr(sched_params, "defer"));
+			xfree(sched_params);
+			config_update = slurmctld_conf.last_update;
+		}
+
+		_throttle_start(&active_rpc_cnt);
+		lock_slurmctld(job_write_lock);
 	}
 
-	_throttle_start(&active_rpc_cnt);
-	lock_slurmctld(job_write_lock);
+	if (slurmctld_conf.debug_flags & DEBUG_FLAG_ROUTE)
+		info("_slurm_rpc_epilog_complete: "
+		     "node_name = %s, job_id = %u", epilog_msg->node_name,
+		     epilog_msg->job_id);
+
 	if (job_epilog_complete(epilog_msg->job_id, epilog_msg->node_name,
 				epilog_msg->return_code))
-		run_scheduler = true;
-	unlock_slurmctld(job_write_lock);
-	_throttle_fini(&active_rpc_cnt);
+		*run_scheduler = true;
+
+	job_ptr = find_job_record(epilog_msg->job_id);
+
+	if (!running_composite) {
+		unlock_slurmctld(job_write_lock);
+		_throttle_fini(&active_rpc_cnt);
+	}
+
 	END_TIMER2("_slurm_rpc_epilog_complete");
 
 	if (epilog_msg->return_code)
 		error("%s: epilog error %s Node=%s Err=%s %s",
-		      __func__, jobid2str(job_ptr, jbuf),
+		      __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)),
 		      epilog_msg->node_name,
 		      slurm_strerror(epilog_msg->return_code), TIME_STR);
 	else
 		debug2("%s: %s Node=%s %s",
-		       __func__, jobid2str(job_ptr, jbuf),
+		       __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)),
 		       epilog_msg->node_name, TIME_STR);
 
 	/* Functions below provide their own locking */
-	if (run_scheduler) {
+	if (!running_composite && *run_scheduler) {
 		/*
 		 * In defer mode, avoid triggering the scheduler logic
 		 * for every epilog complete message.
@@ -1673,7 +1831,7 @@ static void _slurm_rpc_job_step_kill(slurm_msg_t * msg)
 	/* Locks: Read config, write job, write node */
 	slurmctld_lock_t job_write_lock = {
 		READ_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	struct job_record *job_ptr;
 
 	START_TIMER;
@@ -1788,7 +1946,7 @@ static void _slurm_rpc_complete_job_allocation(slurm_msg_t * msg)
 	slurmctld_lock_t job_write_lock = {
 		NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK
 	};
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	struct job_record *job_ptr;
 	char jbuf[JBUFSIZ];
 
@@ -1814,13 +1972,14 @@ static void _slurm_rpc_complete_job_allocation(slurm_msg_t * msg)
 	/* return result */
 	if (error_code) {
 		info("%s: %s error %s ",
-		     __func__, jobid2str(job_ptr, jbuf),
+		     __func__, jobid2str(job_ptr, jbuf, sizeof(jbuf)),
 		     slurm_strerror(error_code));
 		slurm_send_rc_msg(msg, error_code);
 	} else {
 		debug2("%s: %s %s", __func__,
-		       jobid2str(job_ptr, jbuf),
+		       jobid2str(job_ptr, jbuf, sizeof(jbuf)),
 		       TIME_STR);
+		slurmctld_diag_stats.jobs_completed++;
 		slurm_send_rc_msg(msg, SLURM_SUCCESS);
 		(void) schedule_job_save();	/* Has own locking */
 		(void) schedule_node_save();	/* Has own locking */
@@ -1844,8 +2003,8 @@ static void _slurm_rpc_complete_prolog(slurm_msg_t * msg)
 
 	/* init */
 	START_TIMER;
-	debug2("Processing RPC: REQUEST_COMPLETE_PROLOG from "
-	       "JobId=%u", comp_msg->job_id);
+	debug2("Processing RPC: REQUEST_COMPLETE_PROLOG from JobId=%u",
+	       comp_msg->job_id);
 
 	lock_slurmctld(job_write_lock);
 	error_code = prolog_complete(comp_msg->job_id, comp_msg->prolog_rc);
@@ -1867,7 +2026,7 @@ static void _slurm_rpc_complete_prolog(slurm_msg_t * msg)
 
 /* _slurm_rpc_complete_batch - process RPC from slurmstepd to note the
  *	completion of a batch script */
-static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
+static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg, bool locked)
 {
 	static int active_rpc_cnt = 0;
 	int error_code = SLURM_SUCCESS, i;
@@ -1878,7 +2037,7 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 	slurmctld_lock_t job_write_lock = {
 		NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK
 	};
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	bool job_requeue = false;
 	bool dump_job = false, dump_node = false, run_sched = false;
 	struct job_record *job_ptr = NULL;
@@ -1902,8 +2061,11 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 		return;
 	}
 
-	_throttle_start(&active_rpc_cnt);
-	lock_slurmctld(job_write_lock);
+	if (!locked) {
+		_throttle_start(&active_rpc_cnt);
+		lock_slurmctld(job_write_lock);
+	}
+
 	job_ptr = find_job_record(comp_msg->job_id);
 
 	if (job_ptr && job_ptr->batch_host && comp_msg->node_name &&
@@ -1917,8 +2079,10 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 		      "Was the job requeued due to node failure?",
 		      comp_msg->job_id,
 		      comp_msg->node_name, job_ptr->batch_host);
-		unlock_slurmctld(job_write_lock);
-		_throttle_fini(&active_rpc_cnt);
+		if (!locked) {
+			unlock_slurmctld(job_write_lock);
+			_throttle_fini(&active_rpc_cnt);
+		}
 		slurm_send_rc_msg(msg, error_code);
 		return;
 	}
@@ -2041,9 +2205,10 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 	i = job_complete(comp_msg->job_id, uid, job_requeue, false,
 			 comp_msg->job_rc);
 	error_code = MAX(error_code, i);
-	unlock_slurmctld(job_write_lock);
-	_throttle_fini(&active_rpc_cnt);
-
+	if (!locked) {
+		unlock_slurmctld(job_write_lock);
+		_throttle_fini(&active_rpc_cnt);
+	}
 #ifdef HAVE_BG
 	if (block_desc.bg_block_id) {
 		block_desc.reason = slurm_strerror(comp_msg->slurm_rc);
@@ -2069,7 +2234,7 @@ static void _slurm_rpc_complete_batch_script(slurm_msg_t * msg)
 		       comp_msg->job_id, TIME_STR);
 		slurmctld_diag_stats.jobs_completed++;
 		dump_job = true;
-		if (replace_batch_job(msg, job_ptr))
+		if (replace_batch_job(msg, job_ptr, locked))
 			run_sched = true;
 	}
 
@@ -2097,7 +2262,7 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 	/* Locks: Write jobs, read nodes */
 	slurmctld_lock_t job_write_lock = {
 		NO_LOCK, WRITE_LOCK, READ_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	if (slurmctld_conf.debug_flags & DEBUG_FLAG_STEPS)
@@ -2126,11 +2291,12 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 
 	_throttle_start(&active_rpc_cnt);
 	lock_slurmctld(job_write_lock);
-	error_code = step_create(req_step_msg, &step_rec, false);
+	error_code = step_create(req_step_msg, &step_rec, false,
+				 msg->protocol_version);
 
 	if (error_code == SLURM_SUCCESS) {
 		error_code = _make_step_cred(step_rec, &slurm_cred,
-					     msg->protocol_version);
+					     step_rec->start_protocol_ver);
 		ext_sensors_g_get_stepstartdata(step_rec);
 	}
 	END_TIMER2("_slurm_rpc_job_step_create");
@@ -2173,6 +2339,7 @@ static void _slurm_rpc_job_step_create(slurm_msg_t * msg)
 		}
 #endif
 		job_step_resp.cred           = slurm_cred;
+		job_step_resp.use_protocol_ver = step_rec->start_protocol_ver;
 		job_step_resp.select_jobinfo = step_rec->select_jobinfo;
 		job_step_resp.switch_job     = step_rec->switch_job;
 
@@ -2203,7 +2370,7 @@ static void _slurm_rpc_job_step_get_info(slurm_msg_t * msg)
 	/* Locks: Read config, job, write partition (for filtering) */
 	slurmctld_lock_t job_read_lock = {
 		READ_LOCK, READ_LOCK, NO_LOCK, WRITE_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	if (slurmctld_conf.debug_flags & DEBUG_FLAG_STEPS)
@@ -2293,7 +2460,7 @@ static void _slurm_rpc_job_will_run(slurm_msg_t * msg)
 	/* Locks: Write job, read node, read partition */
 	slurmctld_lock_t job_write_lock = {
 		NO_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	uint16_t port;	/* dummy value */
 	slurm_addr_t resp_addr;
 	will_run_response_msg_t *resp = NULL;
@@ -2375,7 +2542,8 @@ static void _slurm_rpc_job_will_run(slurm_msg_t * msg)
 
 /* _slurm_rpc_node_registration - process RPC to determine if a node's
  *	actual configuration satisfies the configured specification */
-static void _slurm_rpc_node_registration(slurm_msg_t * msg)
+static void _slurm_rpc_node_registration(slurm_msg_t * msg,
+					 bool running_composite)
 {
 	/* init */
 	DEF_TIMERS;
@@ -2386,7 +2554,7 @@ static void _slurm_rpc_node_registration(slurm_msg_t * msg)
 	/* Locks: Read config, write job, write node */
 	slurmctld_lock_t job_write_lock = {
 		READ_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: MESSAGE_NODE_REGISTRATION_STATUS from uid=%d",
@@ -2414,7 +2582,8 @@ static void _slurm_rpc_node_registration(slurm_msg_t * msg)
 			      "set DebugFlags=NO_CONF_HASH in your slurm.conf.",
 			      node_reg_stat_msg->node_name);
 		}
-		lock_slurmctld(job_write_lock);
+		if (!running_composite)
+			lock_slurmctld(job_write_lock);
 #ifdef HAVE_FRONT_END		/* Operates only on front-end */
 		error_code = validate_nodes_via_front_end(node_reg_stat_msg,
 							  msg->protocol_version,
@@ -2425,7 +2594,8 @@ static void _slurm_rpc_node_registration(slurm_msg_t * msg)
 						 msg->protocol_version,
 						 &newly_up);
 #endif
-		unlock_slurmctld(job_write_lock);
+		if (!running_composite)
+			unlock_slurmctld(job_write_lock);
 		END_TIMER2("_slurm_rpc_node_registration");
 		if (newly_up) {
 			queue_job_scheduler();
@@ -2458,22 +2628,19 @@ static void _slurm_rpc_job_alloc_info(slurm_msg_t * msg)
 	/* Locks: Read config, job, read node */
 	slurmctld_lock_t job_read_lock = {
 		READ_LOCK, READ_LOCK, READ_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
-	bool do_unlock = false;
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_JOB_ALLOCATION_INFO from uid=%d", uid);
 
 	/* do RPC call */
-	do_unlock = true;
 	lock_slurmctld(job_read_lock);
 	error_code = job_alloc_info(uid, job_info_msg->job_id, &job_ptr);
 	END_TIMER2("_slurm_rpc_job_alloc_info");
 
 	/* return result */
 	if (error_code || (job_ptr == NULL) || (job_ptr->job_resrcs == NULL)) {
-		if (do_unlock)
-			unlock_slurmctld(job_read_lock);
+		unlock_slurmctld(job_read_lock);
 		debug2("_slurm_rpc_job_alloc_info: JobId=%u, uid=%u: %s",
 		       job_info_msg->job_id, uid,
 		       slurm_strerror(error_code));
@@ -2538,23 +2705,20 @@ static void _slurm_rpc_job_alloc_info_lite(slurm_msg_t * msg)
 	/* Locks: Read config, job, read node */
 	slurmctld_lock_t job_read_lock = {
 		READ_LOCK, READ_LOCK, READ_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
-	bool do_unlock = false;
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_JOB_ALLOCATION_INFO_LITE from uid=%d",
 	       uid);
 
 	/* do RPC call */
-	do_unlock = true;
 	lock_slurmctld(job_read_lock);
 	error_code = job_alloc_info(uid, job_info_msg->job_id, &job_ptr);
 	END_TIMER2("_slurm_rpc_job_alloc_info_lite");
 
 	/* return result */
 	if (error_code || (job_ptr == NULL) || (job_ptr->job_resrcs == NULL)) {
-		if (do_unlock)
-			unlock_slurmctld(job_read_lock);
+		unlock_slurmctld(job_read_lock);
 		debug2("_slurm_rpc_job_alloc_info_lite: JobId=%u, uid=%u: %s",
 		       job_info_msg->job_id, uid, slurm_strerror(error_code));
 		slurm_send_rc_msg(msg, error_code);
@@ -2562,6 +2726,9 @@ static void _slurm_rpc_job_alloc_info_lite(slurm_msg_t * msg)
 		debug("_slurm_rpc_job_alloc_info_lite JobId=%u NodeList=%s %s",
 		      job_info_msg->job_id, job_ptr->nodes, TIME_STR);
 
+		bzero(&job_info_resp_msg,
+		      sizeof(resource_allocation_response_msg_t));
+
 		/* send job_ID and node_name_ptr */
 		if (bit_equal(job_ptr->node_bitmap,
 			      job_ptr->job_resrcs->node_bitmap)) {
@@ -2608,14 +2775,34 @@ static void _slurm_rpc_job_alloc_info_lite(slurm_msg_t * msg)
 			}
 			job_info_resp_msg.num_cpu_groups = j + 1;
 		}
+		job_info_resp_msg.account        = xstrdup(job_ptr->account);
 		job_info_resp_msg.alias_list     = xstrdup(job_ptr->alias_list);
 		job_info_resp_msg.error_code     = error_code;
 		job_info_resp_msg.job_id         = job_info_msg->job_id;
 		job_info_resp_msg.node_cnt       = job_ptr->node_cnt;
 		job_info_resp_msg.node_list      = xstrdup(job_ptr->nodes);
 		job_info_resp_msg.partition      = xstrdup(job_ptr->partition);
+		if (job_ptr->qos_ptr) {
+			slurmdb_qos_rec_t *qos;
+			qos = (slurmdb_qos_rec_t *)job_ptr->qos_ptr;
+			if (strcmp(qos->description, "Normal QOS default") == 0)
+				job_info_resp_msg.qos = xstrdup("normal");
+			else
+				job_info_resp_msg.qos=xstrdup(qos->description);
+		}
+		job_info_resp_msg.resv_name      = xstrdup(job_ptr->resv_name);
 		job_info_resp_msg.select_jobinfo =
 			select_g_select_jobinfo_copy(job_ptr->select_jobinfo);
+		if (job_ptr->details->env_cnt) {
+			job_info_resp_msg.env_size = job_ptr->details->env_cnt;
+			job_info_resp_msg.environment =
+				xmalloc(sizeof(char *) *
+				        job_info_resp_msg.env_size);
+			for (i = 0; i < job_info_resp_msg.env_size; i++) {
+				job_info_resp_msg.environment[i] =
+					xstrdup(job_ptr->details->env_sup[i]);
+			}
+		}
 		unlock_slurmctld(job_read_lock);
 
 		slurm_msg_t_init(&response_msg);
@@ -2626,12 +2813,8 @@ static void _slurm_rpc_job_alloc_info_lite(slurm_msg_t * msg)
 
 		slurm_send_node_msg(msg->conn_fd, &response_msg);
 
-		xfree(job_info_resp_msg.cpu_count_reps);
-		xfree(job_info_resp_msg.cpus_per_node);
-		xfree(job_info_resp_msg.alias_list);
-		xfree(job_info_resp_msg.node_list);
-		xfree(job_info_resp_msg.partition);
-		select_g_select_jobinfo_free(job_info_resp_msg.select_jobinfo);
+		slurm_free_resource_allocation_response_msg_members(
+			&job_info_resp_msg);
 	}
 }
 
@@ -2639,18 +2822,29 @@ static void _slurm_rpc_job_alloc_info_lite(slurm_msg_t * msg)
  *	plus sbcast credential */
 static void _slurm_rpc_job_sbcast_cred(slurm_msg_t * msg)
 {
+#ifdef HAVE_FRONT_END
+	slurm_send_rc_msg(msg, ESLURM_NOT_SUPPORTED);
+#else
 	int error_code = SLURM_SUCCESS;
 	slurm_msg_t response_msg;
-	struct job_record *job_ptr;
+	struct job_record *job_ptr = NULL;
+	struct step_record *step_ptr;
+	char *node_list = NULL;
+	struct node_record *node_ptr;
+	slurm_addr_t *node_addr = NULL;
+	hostlist_t host_list = NULL;
+	char *this_node_name;
+	int node_inx = 0;
+	uint32_t node_cnt;
 	DEF_TIMERS;
-	job_alloc_info_msg_t *job_info_msg =
-		(job_alloc_info_msg_t *) msg->data;
+	step_alloc_info_msg_t *job_info_msg =
+		(step_alloc_info_msg_t *) msg->data;
 	job_sbcast_cred_msg_t job_info_resp_msg;
 	sbcast_cred_t *sbcast_cred;
 	/* Locks: Read config, job, read node */
 	slurmctld_lock_t job_read_lock = {
 		READ_LOCK, READ_LOCK, READ_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_JOB_SBCAST_CRED from uid=%d", uid);
@@ -2658,6 +2852,45 @@ static void _slurm_rpc_job_sbcast_cred(slurm_msg_t * msg)
 	/* do RPC call */
 	lock_slurmctld(job_read_lock);
 	error_code = job_alloc_info(uid, job_info_msg->job_id, &job_ptr);
+	if (job_ptr && (job_info_msg->step_id != NO_VAL)) {
+		step_ptr = find_step_record(job_ptr, job_info_msg->step_id);
+		if (!step_ptr) {
+			job_ptr = NULL;
+			error_code = ESLURM_INVALID_JOB_ID;
+		} else if (step_ptr->step_layout &&
+			   (step_ptr->step_layout->node_cnt !=
+			    job_ptr->node_cnt)) {
+			node_cnt  = step_ptr->step_layout->node_cnt;
+			node_list = step_ptr->step_layout->node_list;
+			if ((host_list = hostlist_create(node_list)) == NULL) {
+				fatal("hostlist_create error for %s: %m",
+				      node_list);
+				return;	/* Avoid CLANG false positive */
+			}
+			node_addr = xmalloc(sizeof(slurm_addr_t) * node_cnt);
+			while ((this_node_name = hostlist_shift(host_list))) {
+				if ((node_ptr = find_node_record(this_node_name))) {
+					memcpy(&node_addr[node_inx++],
+					       &node_ptr->slurm_addr,
+					       sizeof(slurm_addr_t));
+				} else {
+					error("Invalid node %s in Step=%u.%u",
+					      this_node_name, job_ptr->job_id,
+					      step_ptr->step_id);
+				}
+				free(this_node_name);
+			}
+			hostlist_destroy(host_list);
+		}
+	}
+	if (job_ptr && !node_addr) {
+		node_addr = job_ptr->node_addr;
+		node_cnt  = job_ptr->node_cnt;
+		node_list = job_ptr->nodes;
+		node_addr = xmalloc(sizeof(slurm_addr_t) * node_cnt);
+		memcpy(node_addr, job_ptr->node_addr,
+		       (sizeof(slurm_addr_t) * node_cnt));
+	}
 	END_TIMER2("_slurm_rpc_job_alloc_info");
 
 	/* return result */
@@ -2669,24 +2902,27 @@ static void _slurm_rpc_job_sbcast_cred(slurm_msg_t * msg)
 		slurm_send_rc_msg(msg, error_code);
 	} else if ((sbcast_cred =
 		    create_sbcast_cred(slurmctld_config.cred_ctx,
-				       job_ptr->job_id,
-				       job_ptr->nodes,
+				       job_ptr->job_id, node_list,
 				       job_ptr->end_time)) == NULL){
 		unlock_slurmctld(job_read_lock);
 		error("_slurm_rpc_job_sbcast_cred JobId=%u cred create error",
 		      job_info_msg->job_id);
 		slurm_send_rc_msg(msg, SLURM_ERROR);
 	} else {
-		info("_slurm_rpc_job_sbcast_cred JobId=%u NodeList=%s %s",
-		     job_info_msg->job_id, job_ptr->nodes, TIME_STR);
+		if (job_ptr && (job_info_msg->step_id != NO_VAL)) {
+			info("_slurm_rpc_job_sbcast_cred Job=%u NodeList=%s %s",
+			     job_info_msg->job_id, node_list, TIME_STR);
+		} else {
+			info("_slurm_rpc_job_sbcast_cred Step=%u.%u "
+			     "NodeList=%s %s",
+			     job_info_msg->job_id, job_info_msg->step_id,
+			     node_list, TIME_STR);
+		}
 
 		job_info_resp_msg.job_id         = job_ptr->job_id;
-		job_info_resp_msg.node_addr      =
-			xmalloc(sizeof(slurm_addr_t) * job_ptr->node_cnt);
-		memcpy(job_info_resp_msg.node_addr, job_ptr->node_addr,
-		       (sizeof(slurm_addr_t) * job_ptr->node_cnt));
-		job_info_resp_msg.node_cnt       = job_ptr->node_cnt;
-		job_info_resp_msg.node_list      = xstrdup(job_ptr->nodes);
+		job_info_resp_msg.node_addr      = node_addr;
+		job_info_resp_msg.node_cnt       = node_cnt;
+		job_info_resp_msg.node_list      = xstrdup(node_list);
 		job_info_resp_msg.sbcast_cred    = sbcast_cred;
 		unlock_slurmctld(job_read_lock);
 
@@ -2697,10 +2933,13 @@ static void _slurm_rpc_job_sbcast_cred(slurm_msg_t * msg)
 		response_msg.data        = &job_info_resp_msg;
 
 		slurm_send_node_msg(msg->conn_fd, &response_msg);
-		xfree(job_info_resp_msg.node_addr);
+		/* job_info_resp_msg.node_addr aliases node_addr,
+		 * which is xfree()'d below */
 		xfree(job_info_resp_msg.node_list);
 		delete_sbcast_cred(sbcast_cred);
 	}
+	xfree(node_addr);
+#endif
 }
 
 /* _slurm_rpc_ping - process ping RPC */
@@ -2727,7 +2966,7 @@ static void _slurm_rpc_reconfigure_controller(slurm_msg_t * msg)
 	/* Locks: Write configuration, job, node and partition */
 	slurmctld_lock_t config_write_lock = {
 		WRITE_LOCK, WRITE_LOCK, WRITE_LOCK, WRITE_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	info("Processing RPC: REQUEST_RECONFIGURE from uid=%d", uid);
@@ -2750,7 +2989,7 @@ static void _slurm_rpc_reconfigure_controller(slurm_msg_t * msg)
 			msg_to_slurmd(REQUEST_RECONFIGURE);
 		}
 		in_progress = false;
-		slurm_sched_g_partition_change();	/* notify sched plugin */
+		slurm_sched_g_partition_change();      /* notify sched plugin */
 		unlock_slurmctld(config_write_lock);
 		assoc_mgr_set_missing_uids();
 		start_power_mgr(&slurmctld_config.thread_id_power);
@@ -2778,7 +3017,7 @@ static void _slurm_rpc_reconfigure_controller(slurm_msg_t * msg)
 static void _slurm_rpc_takeover(slurm_msg_t * msg)
 {
 	int error_code = SLURM_SUCCESS;
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	/* We could authenticate here, if desired */
 	if (!validate_super_user(uid)) {
@@ -2801,7 +3040,7 @@ static void _slurm_rpc_shutdown_controller(slurm_msg_t * msg)
 	int error_code = SLURM_SUCCESS, i;
 	uint16_t options = 0;
 	shutdown_msg_t *shutdown_msg = (shutdown_msg_t *) msg->data;
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	/* Locks: Read node */
 	slurmctld_lock_t node_read_lock = {
 		NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
@@ -2871,7 +3110,7 @@ static void _slurm_rpc_shutdown_controller(slurm_msg_t * msg)
 static void _slurm_rpc_shutdown_controller_immediate(slurm_msg_t * msg)
 {
 	int error_code = SLURM_SUCCESS;
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	if (!validate_super_user(uid)) {
 		error("Security violation, SHUTDOWN_IMMEDIATE RPC from uid=%d",
@@ -2889,7 +3128,7 @@ static void _slurm_rpc_shutdown_controller_immediate(slurm_msg_t * msg)
  *      completion of a job step on at least some nodes.
  *	If the job step is complete, it may
  *	represent the termination of an entire job */
-static void _slurm_rpc_step_complete(slurm_msg_t *msg)
+static void _slurm_rpc_step_complete(slurm_msg_t *msg, bool locked)
 {
 	static int active_rpc_cnt = 0;
 	int error_code = SLURM_SUCCESS, rc, rem;
@@ -2899,7 +3138,7 @@ static void _slurm_rpc_step_complete(slurm_msg_t *msg)
 	/* Locks: Write job, write node */
 	slurmctld_lock_t job_write_lock = {
 		NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	bool dump_job = false, dump_node = false;
 
 	/* init */
@@ -2910,14 +3149,19 @@ static void _slurm_rpc_step_complete(slurm_msg_t *msg)
 		     req->job_id, req->job_step_id, req->range_first,
 		     req->range_last, req->step_rc, uid);
 
-	_throttle_start(&active_rpc_cnt);
-	lock_slurmctld(job_write_lock);
+	if (!locked) {
+		_throttle_start(&active_rpc_cnt);
+		lock_slurmctld(job_write_lock);
+	}
+
 	rc = step_partial_comp(req, uid, &rem, &step_rc);
 
 	if (rc || rem) {	/* some error or not totally done */
 		/* Note: Error printed within step_partial_comp */
-		unlock_slurmctld(job_write_lock);
-		_throttle_fini(&active_rpc_cnt);
+		if (!locked) {
+			unlock_slurmctld(job_write_lock);
+			_throttle_fini(&active_rpc_cnt);
+		}
 		slurm_send_rc_msg(msg, rc);
 		if (!rc)	/* partition completion */
 			schedule_job_save();	/* Has own locking */
@@ -2928,8 +3172,10 @@ static void _slurm_rpc_step_complete(slurm_msg_t *msg)
 		/* FIXME: test for error, possibly cause batch job requeue */
 		error_code = job_complete(req->job_id, uid, false,
 					  false, step_rc);
-		unlock_slurmctld(job_write_lock);
-		_throttle_fini(&active_rpc_cnt);
+		if (!locked) {
+			unlock_slurmctld(job_write_lock);
+			_throttle_fini(&active_rpc_cnt);
+		}
 		END_TIMER2("_slurm_rpc_step_complete");
 
 		/* return result */
@@ -2948,8 +3194,10 @@ static void _slurm_rpc_step_complete(slurm_msg_t *msg)
 	} else {
 		error_code = job_step_complete(req->job_id, req->job_step_id,
 					       uid, false, step_rc);
-		unlock_slurmctld(job_write_lock);
-		_throttle_fini(&active_rpc_cnt);
+		if (!locked) {
+			unlock_slurmctld(job_write_lock);
+			_throttle_fini(&active_rpc_cnt);
+		}
 		END_TIMER2("_slurm_rpc_step_complete");
 
 		/* return result */
@@ -2986,7 +3234,7 @@ static void _slurm_rpc_step_layout(slurm_msg_t *msg)
 	/* Locks: Read config job, write node */
 	slurmctld_lock_t job_read_lock = {
 		READ_LOCK, READ_LOCK, READ_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	struct job_record *job_ptr = NULL;
 	struct step_record *step_ptr = NULL;
 
@@ -3048,7 +3296,7 @@ static void _slurm_rpc_step_update(slurm_msg_t *msg)
 	/* Locks: Write job */
 	slurmctld_lock_t job_write_lock = {
 		NO_LOCK, WRITE_LOCK, NO_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	int rc;
 
 	START_TIMER;
@@ -3080,7 +3328,7 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg)
 	/* Locks: Write job, read node, read partition */
 	slurmctld_lock_t job_write_lock = {
 		NO_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	char *err_msg = NULL;
 
 	START_TIMER;
@@ -3152,7 +3400,7 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg)
 				error("Security violation, uid=%d attempting "
 				      "to execute a step within job %u owned "
 				      "by user %u",
-		 		      uid, job_ptr->job_id,
+				      uid, job_ptr->job_id,
 				      job_ptr->user_id);
 				slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
 				unlock_slurmctld(job_write_lock);
@@ -3214,7 +3462,9 @@ static void _slurm_rpc_submit_batch_job(slurm_msg_t * msg)
 	    (error_code != ESLURM_NODE_NOT_AVAIL) &&
 	    (error_code != ESLURM_QOS_THRES) &&
 	    (error_code != ESLURM_RESERVATION_NOT_USABLE) &&
-	    (error_code != ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE)) {
+	    (error_code != ESLURM_REQUESTED_PART_CONFIG_UNAVAILABLE) &&
+	    (error_code != ESLURM_POWER_NOT_AVAIL) &&
+	    (error_code != ESLURM_POWER_RESERVED)) {
 		info("_slurm_rpc_submit_batch_job: %s",
 		     slurm_strerror(error_code));
 		if (err_msg)
@@ -3253,7 +3503,7 @@ static void _slurm_rpc_update_job(slurm_msg_t * msg)
 	/* Locks: Write job, read node, read partition */
 	slurmctld_lock_t job_write_lock = {
 		NO_LOCK, WRITE_LOCK, READ_LOCK, READ_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_UPDATE_JOB from uid=%d", uid);
@@ -3321,7 +3571,7 @@ extern int slurm_drain_nodes(char *node_list, char *reason, uint32_t reason_uid)
  * NOTE: This is utilzed by plugins and not via RPC and it sets its
  *      own locks.
  */
-extern int slurm_fail_job(uint32_t job_id, uint16_t job_state)
+extern int slurm_fail_job(uint32_t job_id, uint32_t job_state)
 {
 	int error_code;
 	DEF_TIMERS;
@@ -3351,7 +3601,7 @@ static void _slurm_rpc_update_front_end(slurm_msg_t * msg)
 	/* Locks: write node */
 	slurmctld_lock_t node_write_lock = {
 		NO_LOCK, NO_LOCK, WRITE_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_UPDATE_FRONT_END from uid=%d", uid);
@@ -3395,7 +3645,7 @@ static void _slurm_rpc_update_node(slurm_msg_t * msg)
 	/* Locks: Write job and write node */
 	slurmctld_lock_t node_write_lock = {
 		NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_UPDATE_NODE from uid=%d", uid);
@@ -3430,6 +3680,52 @@ static void _slurm_rpc_update_node(slurm_msg_t * msg)
 	trigger_reconfig();
 }
 
+/*
+ * _slurm_rpc_update_layout - process RPC to update the configuration of a
+ *	layout (e.g. params of entities)
+ */
+static void _slurm_rpc_update_layout(slurm_msg_t * msg)
+{
+	int error_code = SLURM_SUCCESS;
+	Buf buffer;
+	DEF_TIMERS;
+	update_layout_msg_t *msg_ptr = (update_layout_msg_t *) msg->data;
+	int shrink_size;
+
+	/* NOTE: no slurmctld locks are taken in this handler */
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
+
+	START_TIMER;
+	debug2("Processing RPC: REQUEST_UPDATE_LAYOUT from uid=%d", uid);
+	if (!validate_super_user(uid)) {
+		error_code = ESLURM_USER_ID_MISSING;
+		error("Security violation, UPDATE_LAYOUT RPC from uid=%d", uid);
+	}
+
+	if (error_code == SLURM_SUCCESS) {
+		/* do RPC call */
+		buffer = init_buf(BUF_SIZE);
+		packstr(msg_ptr->arg, buffer);
+		shrink_size = (int)get_buf_offset(buffer) - size_buf(buffer);
+		set_buf_offset(buffer, 0);
+		grow_buf(buffer, shrink_size);	/* Shrink actually */
+		error_code = layouts_update_layout(msg_ptr->layout, buffer);
+		free_buf(buffer);
+		END_TIMER2("_slurm_rpc_update_layout");
+	}
+
+	/* return result */
+	if (error_code) {
+		info("_slurm_rpc_update_layout for %s: %s",
+		     msg_ptr->layout, slurm_strerror(error_code));
+		slurm_send_rc_msg(msg, error_code);
+	} else {
+		debug2("_slurm_rpc_update_layout complete for %s %s",
+		       msg_ptr->layout, TIME_STR);
+		slurm_send_rc_msg(msg, SLURM_SUCCESS);
+	}
+}
+
 /* _slurm_rpc_update_partition - process RPC to update the configuration
  *	of a partition (e.g. UP/DOWN) */
 static void _slurm_rpc_update_partition(slurm_msg_t * msg)
@@ -3441,7 +3737,7 @@ static void _slurm_rpc_update_partition(slurm_msg_t * msg)
 	 * NOTE: job write lock due to gang scheduler support */
 	slurmctld_lock_t part_write_lock = {
 		READ_LOCK, WRITE_LOCK, READ_LOCK, WRITE_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_UPDATE_PARTITION from uid=%d", uid);
@@ -3480,6 +3776,74 @@ static void _slurm_rpc_update_partition(slurm_msg_t * msg)
 	}
 }
 
+/* _slurm_rpc_update_powercap - process RPC to update the powercap */
+static void _slurm_rpc_update_powercap(slurm_msg_t * msg)
+{
+	int error_code = SLURM_SUCCESS;
+	DEF_TIMERS;
+	bool valid_cap = false;
+	uint32_t min, max, orig_cap;
+	update_powercap_msg_t *ptr = (update_powercap_msg_t *) msg->data;
+
+	/* Locks: write configuration, read node */
+	slurmctld_lock_t config_write_lock = {
+		WRITE_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
+
+	START_TIMER;
+	debug2("Processing RPC: REQUEST_UPDATE_POWERCAP from uid=%d", uid);
+	if (!validate_super_user(uid)) {
+		error_code = ESLURM_USER_ID_MISSING;
+		error("Security violation, UPDATE_POWERCAP RPC from uid=%d",
+		      uid);
+	}
+
+	if (error_code == SLURM_SUCCESS) {
+		/* do RPC call */
+		lock_slurmctld(config_write_lock);
+		if (ptr->power_cap == 0 ||
+		    ptr->power_cap == INFINITE) {
+			valid_cap = true;
+		} else if (!power_layout_ready()) {
+			/* Not using layouts/power framework */
+			valid_cap = true;
+		} else {
+			/* we need to set a cap if
+			 * the current value is 0 in order to
+			 * enable the capping system and get
+			 * the min and max values */
+			orig_cap = powercap_get_cluster_current_cap();
+			powercap_set_cluster_cap(INFINITE);
+			min = powercap_get_cluster_min_watts();
+			max = powercap_get_cluster_max_watts();
+			if (min <= ptr->power_cap && max >= ptr->power_cap)
+				valid_cap = true;
+			else
+				powercap_set_cluster_cap(orig_cap);
+		}
+		if (valid_cap)
+			powercap_set_cluster_cap(ptr->power_cap);
+		else
+			error_code = ESLURM_INVALID_POWERCAP;
+		unlock_slurmctld(config_write_lock);
+		END_TIMER2("_slurm_rpc_update_powercap");
+	}
+
+	/* return result */
+	if (error_code) {
+		info("_slurm_rpc_update_powercap: %s",
+		     slurm_strerror(error_code));
+		slurm_send_rc_msg(msg, error_code);
+	} else {
+		debug2("_slurm_rpc_update_powercap complete %s", TIME_STR);
+		slurm_send_rc_msg(msg, SLURM_SUCCESS);
+
+		/* NOTE: These functions provide their own locks */
+		schedule(0);
+		save_all_state();
+	}
+}
+
 /* _slurm_rpc_delete_partition - process RPC to delete a partition */
 static void _slurm_rpc_delete_partition(slurm_msg_t * msg)
 {
@@ -3490,7 +3854,7 @@ static void _slurm_rpc_delete_partition(slurm_msg_t * msg)
 	/* Locks: write job, read node, write partition */
 	slurmctld_lock_t part_write_lock = {
 		NO_LOCK, WRITE_LOCK, READ_LOCK, WRITE_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_DELETE_PARTITION from uid=%d", uid);
@@ -3533,7 +3897,7 @@ static void _slurm_rpc_resv_create(slurm_msg_t * msg)
 	/* Locks: write node, read partition */
 	slurmctld_lock_t node_write_lock = {
 		NO_LOCK, NO_LOCK, WRITE_LOCK, READ_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_CREATE_RESERVATION from uid=%d", uid);
@@ -3590,7 +3954,7 @@ static void _slurm_rpc_resv_update(slurm_msg_t * msg)
 	/* Locks: write node, read partition */
 	slurmctld_lock_t node_write_lock = {
 		NO_LOCK, NO_LOCK, WRITE_LOCK, READ_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_UPDATE_RESERVATION from uid=%d", uid);
@@ -3633,13 +3997,17 @@ static void _slurm_rpc_resv_delete(slurm_msg_t * msg)
 	/* Locks: read job, write node */
 	slurmctld_lock_t node_write_lock = {
 		NO_LOCK, READ_LOCK, WRITE_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
-	debug2("Processing RPC: REQUEST_DELETE_RESERVTION from uid=%d", uid);
+	debug2("Processing RPC: REQUEST_DELETE_RESERVATION from uid=%d", uid);
 	if (!validate_operator(uid)) {
 		error_code = ESLURM_USER_ID_MISSING;
-		error("Security violation, DELETE_RESERVTION RPC from uid=%d",
+		error("Security violation, DELETE_RESERVATION RPC from uid=%d",
+		      uid);
+	} else if (!resv_desc_ptr->name) {
+		error_code = ESLURM_RESERVATION_INVALID;
+		error("Invalid DELETE_RESERVATION RPC from uid=%d, name is null",
 		      uid);
 	}
 
@@ -3674,7 +4042,7 @@ static void _slurm_rpc_resv_show(slurm_msg_t * msg)
 	/* Locks: read node */
 	slurmctld_lock_t node_read_lock = {
 		NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	slurm_msg_t response_msg;
 	char *dump;
 	int dump_size;
@@ -3705,6 +4073,60 @@ static void _slurm_rpc_resv_show(slurm_msg_t * msg)
 	}
 }
 
+/* _slurm_rpc_layout_show - process RPC to dump layout info */
+static void _slurm_rpc_layout_show(slurm_msg_t * msg)
+{
+	layout_info_request_msg_t *layout_req_msg = (layout_info_request_msg_t *)
+		msg->data;
+	DEF_TIMERS;
+	slurm_msg_t response_msg;
+	char *dump;
+	int dump_size;
+	static int high_buffer_size = (1024 * 1024);
+	Buf buffer = init_buf(high_buffer_size);
+
+	START_TIMER;
+	debug2("Processing RPC: REQUEST_LAYOUT_INFO");
+	if (layout_req_msg->layout_type == NULL) {
+		dump = slurm_get_layouts();
+		pack32((uint32_t) 2, buffer);	/* 1 record + trailing \n */
+		packstr(dump, buffer);
+		packstr("\n", buffer); /* to be consistent with
+					* layouts internal prints */
+		xfree(dump);
+	} else {
+		if ( layouts_pack_layout(layout_req_msg->layout_type,
+					 layout_req_msg->entities,
+					 layout_req_msg->type,
+					 layout_req_msg->no_relation,
+					 buffer) != SLURM_SUCCESS) {
+			debug2("%s: unable to get layout[%s]",
+			       __func__, layout_req_msg->layout_type);
+			slurm_send_rc_msg(msg, SLURM_NO_CHANGE_IN_DATA);
+			free_buf(buffer);
+			return;
+		}
+	}
+
+	dump_size = get_buf_offset(buffer);
+	high_buffer_size = MAX(high_buffer_size, dump_size);
+	dump = xfer_buf_data(buffer);
+	END_TIMER2("_slurm_rpc_layout_show");
+
+	/* init response_msg structure */
+	slurm_msg_t_init(&response_msg);
+	response_msg.flags = msg->flags;
+	response_msg.protocol_version = msg->protocol_version;
+	response_msg.address = msg->address;
+	response_msg.msg_type = RESPONSE_LAYOUT_INFO;
+	response_msg.data = dump;
+	response_msg.data_size = dump_size;
+
+	/* send message */
+	slurm_send_node_msg(msg->conn_fd, &response_msg);
+	xfree(dump);
+}
+
 /* _slurm_rpc_update_block - process RPC to update the configuration
  *	of a block (e.g. FREE/ERROR/DELETE) */
 static void _slurm_rpc_update_block(slurm_msg_t * msg)
@@ -3712,7 +4134,7 @@ static void _slurm_rpc_update_block(slurm_msg_t * msg)
 	int error_code = SLURM_SUCCESS;
 	DEF_TIMERS;
 	update_block_msg_t *block_desc_ptr = (update_block_msg_t *) msg->data;
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	char *name = NULL;
 	START_TIMER;
 
@@ -3825,7 +4247,7 @@ static void  _slurm_rpc_block_info(slurm_msg_t * msg)
 	slurmctld_lock_t config_read_lock = {
 		READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
 	DEF_TIMERS;
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_BLOCK_INFO from uid=%d", uid);
@@ -3865,6 +4287,46 @@ static void  _slurm_rpc_block_info(slurm_msg_t * msg)
 	}
 }
 
+/* get node select info plugin */
+static void  _slurm_rpc_burst_buffer_info(slurm_msg_t * msg)
+{
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
+	void *resp_buffer = NULL;
+	int resp_buffer_size = 0;
+	int error_code = SLURM_SUCCESS;
+	Buf buffer;
+	DEF_TIMERS;
+
+	START_TIMER;
+	debug2("Processing RPC: REQUEST_BURST_BUFFER_INFO from uid=%d", uid);
+
+	buffer = init_buf(BUF_SIZE);
+	if (validate_super_user(uid))
+		uid = 0;
+	error_code = bb_g_state_pack(uid, buffer, msg->protocol_version);
+	END_TIMER2(__func__);
+
+	if (error_code) {
+		debug("_slurm_rpc_burst_buffer_info: %s",
+		       slurm_strerror(error_code));
+		slurm_send_rc_msg(msg, error_code);
+	} else {
+		slurm_msg_t response_msg;
+
+		resp_buffer_size = get_buf_offset(buffer);
+		resp_buffer = xfer_buf_data(buffer);
+		slurm_msg_t_init(&response_msg);
+		response_msg.flags = msg->flags;
+		response_msg.protocol_version = msg->protocol_version;
+		response_msg.address = msg->address;
+		response_msg.msg_type = RESPONSE_BURST_BUFFER_INFO;
+		response_msg.data = resp_buffer;
+		response_msg.data_size = resp_buffer_size;
+		slurm_send_node_msg(msg->conn_fd, &response_msg);
+		xfree(resp_buffer);
+	}
+}
+
 /* Reset the job credential key based upon configuration parameters.
  * NOTE: READ lock_slurmctld config before entry */
 static void _update_cred_key(void)
@@ -3881,7 +4343,7 @@ inline static void _slurm_rpc_suspend(slurm_msg_t * msg)
 	/* Locks: write job and node */
 	slurmctld_lock_t job_write_lock = {
 		NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	char *op;
 
 	START_TIMER;
@@ -3930,7 +4392,7 @@ inline static void _slurm_rpc_requeue(slurm_msg_t * msg)
 	/* Locks: write job and node */
 	slurmctld_lock_t job_write_lock = {
 		NO_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 
@@ -3971,7 +4433,7 @@ inline static void  _slurm_rpc_checkpoint(slurm_msg_t * msg)
 	/* Locks: write job lock, read node lock */
 	slurmctld_lock_t job_write_lock = {
 		NO_LOCK, WRITE_LOCK, READ_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	char *op;
 
 	START_TIMER;
@@ -4055,7 +4517,7 @@ inline static void  _slurm_rpc_checkpoint_comp(slurm_msg_t * msg)
 	/* Locks: read job */
 	slurmctld_lock_t job_read_lock = {
 		NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("Processing RPC: REQUEST_CHECKPOINT_COMP from uid=%d", uid);
@@ -4085,7 +4547,7 @@ inline static void  _slurm_rpc_checkpoint_task_comp(slurm_msg_t * msg)
 	/* Locks: read job */
 	slurmctld_lock_t job_read_lock = {
 		NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	ckpt_ptr = (checkpoint_task_comp_msg_t *) msg->data;
 	START_TIMER;
@@ -4197,7 +4659,8 @@ static int _launch_batch_step(job_desc_msg_t *job_desc_msg, uid_t uid,
 	req_step_msg.task_dist = SLURM_DIST_CYCLIC;
 	req_step_msg.name = job_desc_msg->name;
 
-	error_code = step_create(&req_step_msg, &step_rec, true);
+	error_code = step_create(&req_step_msg, &step_rec, true,
+				 protocol_version);
 	xfree(req_step_msg.node_list);	/* may be set by step_create */
 
 	if (error_code != SLURM_SUCCESS)
@@ -4344,7 +4807,7 @@ static int _launch_batch_step(job_desc_msg_t *job_desc_msg, uid_t uid,
 inline static void  _slurm_rpc_trigger_clear(slurm_msg_t * msg)
 {
 	int rc;
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	trigger_info_msg_t * trigger_ptr = (trigger_info_msg_t *) msg->data;
 	DEF_TIMERS;
 
@@ -4359,7 +4822,7 @@ inline static void  _slurm_rpc_trigger_clear(slurm_msg_t * msg)
 
 inline static void  _slurm_rpc_trigger_get(slurm_msg_t * msg)
 {
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	trigger_info_msg_t *resp_data;
 	trigger_info_msg_t * trigger_ptr = (trigger_info_msg_t *) msg->data;
 	slurm_msg_t response_msg;
@@ -4384,8 +4847,8 @@ inline static void  _slurm_rpc_trigger_get(slurm_msg_t * msg)
 inline static void  _slurm_rpc_trigger_set(slurm_msg_t * msg)
 {
 	int rc;
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
-	gid_t gid = g_slurm_auth_get_gid(msg->auth_cred, NULL);;
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
+	gid_t gid = g_slurm_auth_get_gid(msg->auth_cred, slurm_get_auth_info());
 	trigger_info_msg_t * trigger_ptr = (trigger_info_msg_t *) msg->data;
 	DEF_TIMERS;
 
@@ -4401,7 +4864,7 @@ inline static void  _slurm_rpc_trigger_set(slurm_msg_t * msg)
 inline static void  _slurm_rpc_trigger_pull(slurm_msg_t * msg)
 {
 	int rc;
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	trigger_info_msg_t * trigger_ptr = (trigger_info_msg_t *) msg->data;
 	DEF_TIMERS;
 
@@ -4413,7 +4876,8 @@ inline static void  _slurm_rpc_trigger_pull(slurm_msg_t * msg)
 	      (unsigned int) uid);
 	if (!validate_slurm_user(uid)) {
 		rc = ESLURM_USER_ID_MISSING;
-		error("Security violation, REQUEST_TRIGGER_PULL RPC from uid=%d",
+		error("Security violation, REQUEST_TRIGGER_PULL RPC "
+		      "from uid=%d",
 		      uid);
 	} else
 		rc = trigger_pull(trigger_ptr);
@@ -4463,13 +4927,44 @@ inline static void  _slurm_rpc_get_topo(slurm_msg_t * msg)
 	slurm_free_topo_info_msg(topo_resp_msg);
 }
 
+inline static void  _slurm_rpc_get_powercap(slurm_msg_t * msg)
+{
+	powercap_info_msg_t *powercap_resp_msg, *ptr;
+	slurm_msg_t response_msg;
+	/* Locks: read config lock */
+	slurmctld_lock_t config_read_lock = {
+		READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	DEF_TIMERS;
+
+	START_TIMER;
+	lock_slurmctld(config_read_lock);
+	powercap_resp_msg = xmalloc(sizeof(powercap_info_msg_t));
+	ptr = powercap_resp_msg;
+	ptr->power_cap = powercap_get_cluster_current_cap();
+	ptr->min_watts = powercap_get_cluster_min_watts();
+	ptr->cur_max_watts = powercap_get_cluster_current_max_watts();
+	ptr->adj_max_watts = powercap_get_cluster_adjusted_max_watts();
+	ptr->max_watts = powercap_get_cluster_max_watts();
+	unlock_slurmctld(config_read_lock);
+	END_TIMER2("_slurm_rpc_get_powercap");
+
+	slurm_msg_t_init(&response_msg);
+	response_msg.flags = msg->flags;
+	response_msg.protocol_version = msg->protocol_version;
+	response_msg.address  = msg->address;
+	response_msg.msg_type = RESPONSE_POWERCAP_INFO;
+	response_msg.data     = powercap_resp_msg;
+	slurm_send_node_msg(msg->conn_fd, &response_msg);
+	slurm_free_powercap_info_msg(powercap_resp_msg);
+}
+
 inline static void  _slurm_rpc_job_notify(slurm_msg_t * msg)
 {
 	int error_code;
 	/* Locks: read job */
 	slurmctld_lock_t job_read_lock = {
 		NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	job_notify_msg_t * notify_msg = (job_notify_msg_t *) msg->data;
 	struct job_record *job_ptr;
 	DEF_TIMERS;
@@ -4498,7 +4993,7 @@ inline static void  _slurm_rpc_job_notify(slurm_msg_t * msg)
 
 inline static void  _slurm_rpc_set_debug_flags(slurm_msg_t *msg)
 {
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	slurmctld_lock_t config_write_lock =
 		{ WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
 	set_debug_flags_msg_t *request_msg =
@@ -4541,7 +5036,7 @@ inline static void  _slurm_rpc_set_debug_flags(slurm_msg_t *msg)
 inline static void  _slurm_rpc_set_debug_level(slurm_msg_t *msg)
 {
 	int debug_level, old_debug_level;
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	slurmctld_lock_t config_write_lock =
 		{ WRITE_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
 	set_debug_level_msg_t *request_msg =
@@ -4597,7 +5092,7 @@ inline static void  _slurm_rpc_set_debug_level(slurm_msg_t *msg)
 inline static void  _slurm_rpc_set_schedlog_level(slurm_msg_t *msg)
 {
 	int schedlog_level, old_schedlog_level;
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	slurmctld_lock_t config_read_lock =
 		{ READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
 	set_debug_level_msg_t *request_msg =
@@ -4632,7 +5127,7 @@ inline static void  _slurm_rpc_set_schedlog_level(slurm_msg_t *msg)
 	lock_slurmctld (config_read_lock);
 	log_opts.logfile_level = schedlog_level;
 	sched_log_alter(log_opts, LOG_DAEMON, slurmctld_conf.sched_logfile);
- 	unlock_slurmctld (config_read_lock);
+	unlock_slurmctld (config_read_lock);
 
 	conf = slurm_conf_lock();
 	old_schedlog_level = conf->sched_log_level;
@@ -4648,9 +5143,10 @@ inline static void  _slurm_rpc_set_schedlog_level(slurm_msg_t *msg)
 inline static void  _slurm_rpc_accounting_update_msg(slurm_msg_t *msg)
 {
 	int rc = SLURM_SUCCESS;
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	accounting_update_msg_t *update_ptr =
 		(accounting_update_msg_t *) msg->data;
+	bool sent_rc = false;
 	DEF_TIMERS;
 
 	START_TIMER;
@@ -4671,20 +5167,41 @@ inline static void  _slurm_rpc_accounting_update_msg(slurm_msg_t *msg)
 
 	slurm_send_rc_msg(msg, rc);
 
-	if (update_ptr->update_list && list_count(update_ptr->update_list))
-		rc = assoc_mgr_update(update_ptr->update_list);
+	if (update_ptr->update_list && list_count(update_ptr->update_list)) {
+		slurmdb_update_object_t *object =
+			list_peek(update_ptr->update_list);
+		if (object->type != SLURMDB_ADD_TRES) {
+			/* If not specific message types, send message back
+			 * to the caller immediately letting him know we got it.
+			 * In most cases there is no need to wait since the end
+			 * result would be the same if we wait or not
+			 * since the update has already happened in
+			 * the database.
+			 */
+			slurm_send_rc_msg(msg, rc);
+			sent_rc = true;
+		}
+		rc = assoc_mgr_update(update_ptr->update_list, 0);
+	}
 
 	END_TIMER2("_slurm_rpc_accounting_update_msg");
 
-	if (rc != SLURM_SUCCESS)
-		error("assoc_mgr_update gave error: %s", slurm_strerror(rc));
+	if (sent_rc) {
+		if (rc != SLURM_SUCCESS)
+			error("assoc_mgr_update gave error: %s",
+			      slurm_strerror(rc));
+	} else {
+		info("sending after");
+		slurm_send_rc_msg(msg, rc);
+	}
+
 }
 
 /* _slurm_rpc_reboot_nodes - process RPC to schedule nodes reboot */
 inline static void _slurm_rpc_reboot_nodes(slurm_msg_t * msg)
 {
 	int rc;
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 #ifndef HAVE_FRONT_END
 	int i;
 	struct node_record *node_ptr;
@@ -4748,7 +5265,7 @@ inline static void _slurm_rpc_reboot_nodes(slurm_msg_t * msg)
 
 inline static void  _slurm_rpc_accounting_first_reg(slurm_msg_t *msg)
 {
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	time_t event_time = time(NULL);
 
 	DEF_TIMERS;
@@ -4768,7 +5285,7 @@ inline static void  _slurm_rpc_accounting_first_reg(slurm_msg_t *msg)
 
 inline static void  _slurm_rpc_accounting_register_ctld(slurm_msg_t *msg)
 {
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	DEF_TIMERS;
 
@@ -4795,7 +5312,7 @@ inline static void _slurm_rpc_dump_spank(slurm_msg_t * msg)
 	/* Locks: read job */
 	slurmctld_lock_t job_read_lock = {
 		NO_LOCK, READ_LOCK, NO_LOCK, NO_LOCK };
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	slurm_msg_t response_msg;
 	DEF_TIMERS;
 
@@ -4907,7 +5424,7 @@ inline static void _slurm_rpc_dump_stats(slurm_msg_t * msg)
 	int dump_size;
 	stats_info_request_msg_t *request_msg;
 	slurm_msg_t response_msg;
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	request_msg = (stats_info_request_msg_t *)msg->data;
 
@@ -4958,7 +5475,7 @@ _slurm_rpc_dump_licenses(slurm_msg_t * msg)
 	int dump_size;
 	slurm_msg_t response_msg;
 	license_info_request_msg_t  *lic_req_msg;
-	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug2("%s: Processing RPC: REQUEST_LICENSE_INFO uid=%d",
@@ -5026,8 +5543,8 @@ _slurm_rpc_kill_job2(slurm_msg_t *msg)
 	uid_t uid;
 	int cc;
 
-	kill = 	(job_step_kill_msg_t *)msg->data;
-	uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	kill =	(job_step_kill_msg_t *)msg->data;
+	uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	START_TIMER;
 	debug("%s: REQUEST_KILL_JOB job %s uid %d",
@@ -5048,6 +5565,8 @@ _slurm_rpc_kill_job2(slurm_msg_t *msg)
 		error("%s: job_str_signal() job %s sig %d returned %s",
 		      __func__, kill->sjob_id,
 		      kill->signal, slurm_strerror(cc));
+	} else {
+		slurmctld_diag_stats.jobs_canceled++;
 	}
 
 	slurm_send_rc_msg(msg, cc);
@@ -5055,3 +5574,267 @@ _slurm_rpc_kill_job2(slurm_msg_t *msg)
 	unlock_slurmctld(lock);
 	END_TIMER2("_slurm_rpc_kill_job2");
 }
+
+/* Return the number of micro-seconds between now and argument "tv" */
+static int _delta_tv(struct timeval *tv)
+{
+	struct timeval now = {0, 0};
+	int delta_t;
+
+	if (gettimeofday(&now, NULL))
+		return 1;		/* Some error */
+
+	delta_t  = (now.tv_sec - tv->tv_sec) * 1000000;
+	delta_t += (now.tv_usec - tv->tv_usec);
+	return delta_t;
+}
+
+/* The batch messages when made for the comp_msg need to be freed
+ * differently than the normal free, so do that here.
+ */
+static void _slurmctld_free_comp_msg_list(void *x)
+{
+	slurm_msg_t *msg = (slurm_msg_t*)x;
+	if (msg) {
+		if (msg->msg_type == REQUEST_BATCH_JOB_LAUNCH) {
+			slurmctld_free_batch_job_launch_msg(msg->data);
+			msg->data = NULL;
+		}
+
+		slurm_free_comp_msg_list(msg);
+	}
+}
+
+
+static void  _slurm_rpc_composite_msg(slurm_msg_t *msg)
+{
+	static time_t config_update = 0;
+	static bool defer_sched = false;
+	static int sched_timeout = 0;
+	static int active_rpc_cnt = 0;
+	struct timeval start_tv;
+	bool run_scheduler = false;
+	composite_msg_t *comp_msg, comp_resp_msg;
+	/* Locks: Read configuration, write job, write node */
+	slurmctld_lock_t job_write_lock = {
+		READ_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
+
+	memset(&comp_resp_msg, 0, sizeof(composite_msg_t));
+	comp_resp_msg.msg_list = list_create(_slurmctld_free_comp_msg_list);
+
+	comp_msg = (composite_msg_t *) msg->data;
+
+	if (slurmctld_conf.debug_flags & DEBUG_FLAG_ROUTE)
+		info("Processing RPC: MESSAGE_COMPOSITE msg with %d messages",
+		     comp_msg->msg_list ? list_count(comp_msg->msg_list) : 0);
+
+	if (config_update != slurmctld_conf.last_update) {
+		char *sched_params = slurm_get_sched_params();
+		int time_limit;
+		char *tmp_ptr;
+
+		defer_sched = (sched_params && strstr(sched_params, "defer"));
+
+		time_limit = slurm_get_msg_timeout() / 2;
+		if (sched_params &&
+		    (tmp_ptr = strstr(sched_params, "max_sched_time="))) {
+			sched_timeout = atoi(tmp_ptr + 15);
+			if ((sched_timeout <= 0) ||
+			    (sched_timeout > time_limit)) {
+				error("Invalid max_sched_time: %d",
+				      sched_timeout);
+				sched_timeout = 0;
+			}
+		}
+
+		if (sched_timeout == 0) {
+			sched_timeout = MAX(time_limit, 1);
+			sched_timeout = MIN(sched_timeout, 2);
+			sched_timeout *= 1000000;
+		}
+		xfree(sched_params);
+		config_update = slurmctld_conf.last_update;
+	}
+
+	_throttle_start(&active_rpc_cnt);
+	lock_slurmctld(job_write_lock);
+	gettimeofday(&start_tv, NULL);
+	_slurm_rpc_comp_msg_list(comp_msg, &run_scheduler,
+				 comp_resp_msg.msg_list, &start_tv,
+				 sched_timeout);
+	unlock_slurmctld(job_write_lock);
+	_throttle_fini(&active_rpc_cnt);
+
+	if (list_count(comp_resp_msg.msg_list)) {
+		slurm_msg_t resp_msg;
+		slurm_msg_t_init(&resp_msg);
+		resp_msg.flags    = msg->flags;
+		resp_msg.protocol_version = msg->protocol_version;
+		memcpy(&resp_msg.address, &comp_msg->sender,
+		       sizeof(slurm_addr_t));
+		resp_msg.msg_type = RESPONSE_MESSAGE_COMPOSITE;
+		resp_msg.data     = &comp_resp_msg;
+		slurm_send_only_node_msg(&resp_msg);
+	}
+	FREE_NULL_LIST(comp_resp_msg.msg_list);
+
+	/* Functions below provide their own locking */
+	if (run_scheduler) {
+		/*
+		 * In defer mode, avoid triggering the scheduler logic
+		 * for every epilog complete message.
+		 * As one epilog message is sent from every node of each
+		 * job at termination, the number of simultaneous schedule
+		 * calls can be very high for large machine or large number
+		 * of managed jobs.
+		 */
+		if (!LOTS_OF_AGENTS && !defer_sched)
+			(void) schedule(0);	/* Has own locking */
+		schedule_node_save();		/* Has own locking */
+		schedule_job_save();		/* Has own locking */
+	}
+}
+
+static void  _slurm_rpc_comp_msg_list(composite_msg_t * comp_msg,
+				      bool *run_scheduler,
+				      List msg_list_in,
+				      struct timeval *start_tv,
+				      int timeout)
+{
+	ListIterator itr;
+	slurm_msg_t *next_msg;
+	composite_msg_t *ncomp_msg;
+	composite_msg_t *comp_resp_msg;
+	/* Locks: Read configuration, write job, write node */
+	slurmctld_lock_t job_write_lock = {
+		READ_LOCK, WRITE_LOCK, WRITE_LOCK, NO_LOCK };
+	DEF_TIMERS;
+
+	START_TIMER;
+
+	itr = list_iterator_create(comp_msg->msg_list);
+	while ((next_msg = list_next(itr))) {
+		if (_delta_tv(start_tv) >= timeout) {
+			END_TIMER;
+			if (slurmctld_conf.debug_flags & DEBUG_FLAG_ROUTE)
+				info("composite message processing "
+				     "yielding locks after running for %s",
+				     TIME_STR);
+			unlock_slurmctld(job_write_lock);
+			usleep(10);
+			lock_slurmctld(job_write_lock);
+			gettimeofday(start_tv, NULL);
+			START_TIMER;
+		}
+		/* The ret_list is used by slurm_send_rc_msg to house
+		   replys going back to the nodes.
+		*/
+		FREE_NULL_LIST(next_msg->ret_list);
+		next_msg->ret_list = msg_list_in;
+		switch (next_msg->msg_type) {
+		case MESSAGE_COMPOSITE:
+			comp_resp_msg = xmalloc(sizeof(composite_msg_t));
+			comp_resp_msg->msg_list =
+				list_create(_slurmctld_free_comp_msg_list);
+
+			ncomp_msg = (composite_msg_t *) next_msg->data;
+			if (slurmctld_conf.debug_flags & DEBUG_FLAG_ROUTE)
+				info("Processing embedded MESSAGE_COMPOSITE "
+				     "msg with %d direct "
+				     "messages", ncomp_msg->msg_list ?
+				     list_count(ncomp_msg->msg_list) : 0);
+			_slurm_rpc_comp_msg_list(ncomp_msg, run_scheduler,
+						 comp_resp_msg->msg_list,
+						 start_tv, timeout);
+			if (list_count(comp_resp_msg->msg_list)) {
+				slurm_msg_t *resp_msg =
+					xmalloc_nz(sizeof(slurm_msg_t));
+				slurm_msg_t_init(resp_msg);
+				resp_msg->msg_index = next_msg->msg_index;
+				resp_msg->flags = next_msg->flags;
+				resp_msg->protocol_version =
+					next_msg->protocol_version;
+				resp_msg->msg_type = RESPONSE_MESSAGE_COMPOSITE;
+				/* You can't just set the
+				 * resp_msg->address here, it won't
+				 * make it to where it needs to be
+				 * used, set the sender and let it be
+				 * handled on the tree node.
+				 */
+				memcpy(&comp_resp_msg->sender,
+				       &ncomp_msg->sender,
+				       sizeof(slurm_addr_t));
+
+				resp_msg->data = comp_resp_msg;
+
+				list_append(msg_list_in, resp_msg);
+			} else
+				slurm_free_composite_msg(comp_resp_msg);
+			break;
+		case REQUEST_COMPLETE_BATCH_SCRIPT:
+		case REQUEST_COMPLETE_BATCH_JOB:
+			_slurm_rpc_complete_batch_script(next_msg, 1);
+			break;
+		case REQUEST_STEP_COMPLETE:
+			_slurm_rpc_step_complete(next_msg, 1);
+			break;
+		case MESSAGE_EPILOG_COMPLETE:
+			_slurm_rpc_epilog_complete(next_msg, run_scheduler, 1);
+			break;
+		case MESSAGE_NODE_REGISTRATION_STATUS:
+			_slurm_rpc_node_registration(next_msg, 1);
+			break;
+		default:
+			error("_slurm_rpc_comp_msg_list: invalid msg type");
+			break;
+		}
+		next_msg->ret_list = NULL;
+	}
+	list_iterator_destroy(itr);
+	END_TIMER;
+	/* NOTE: RPC has no response */
+}
+
+/* _slurm_rpc_assoc_mgr_info()
+ *
+ * Pack the assoc_mgr lists and return it back to the caller.
+ */
+static void _slurm_rpc_assoc_mgr_info(slurm_msg_t * msg)
+{
+	DEF_TIMERS;
+	char *dump = NULL;
+	int dump_size = 0;
+	slurm_msg_t response_msg;
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
+
+	START_TIMER;
+	debug2("%s: Processing RPC: REQUEST_ASSOC_MGR_INFO uid=%d",
+	       __func__, uid);
+
+	/* do RPC call */
+	/* Security is handled in the assoc_mgr */
+	assoc_mgr_info_get_pack_msg(&dump, &dump_size,
+				     (assoc_mgr_info_request_msg_t *)msg->data,
+				     uid, acct_db_conn,
+				     msg->protocol_version);
+
+	END_TIMER2("_slurm_rpc_assoc_mgr_info");
+	debug2("%s: size=%d %s", __func__, dump_size, TIME_STR);
+
+	/* init response_msg structure
+	 */
+	slurm_msg_t_init(&response_msg);
+
+	response_msg.flags = msg->flags;
+	response_msg.protocol_version = msg->protocol_version;
+	response_msg.address = msg->address;
+	response_msg.msg_type = RESPONSE_ASSOC_MGR_INFO;
+	response_msg.data = dump;
+	response_msg.data_size = dump_size;
+	/* send message
+	 */
+	slurm_send_node_msg(msg->conn_fd, &response_msg);
+	xfree(dump);
+	/* Ciao!
+	 */
+}
diff --git a/src/slurmctld/proc_req.h b/src/slurmctld/proc_req.h
index 02319f4c0..1a3ecc687 100644
--- a/src/slurmctld/proc_req.h
+++ b/src/slurmctld/proc_req.h
@@ -82,7 +82,7 @@ extern int slurm_drain_nodes(char *node_list, char *reason,
  * NOTE: This is utilzed by plugins and not via RPC and it sets its
  *	own locks.
  */
-extern int slurm_fail_job(uint32_t job_id, uint16_t job_state);
+extern int slurm_fail_job(uint32_t job_id, uint32_t job_state);
 
 /* Copy an array of type char **, xmalloc() the array and xstrdup() the
  * strings in the array */
diff --git a/src/slurmctld/read_config.c b/src/slurmctld/read_config.c
index ffcbe985c..9ab77a4eb 100644
--- a/src/slurmctld/read_config.c
+++ b/src/slurmctld/read_config.c
@@ -59,6 +59,7 @@
 #include <unistd.h>
 
 #include "src/common/assoc_mgr.h"
+#include "src/common/cpu_frequency.h"
 #include "src/common/gres.h"
 #include "src/common/hostlist.h"
 #include "src/common/layouts_mgr.h"
@@ -66,16 +67,18 @@
 #include "src/common/macros.h"
 #include "src/common/node_select.h"
 #include "src/common/parse_spec.h"
+#include "src/common/power.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_jobcomp.h"
 #include "src/common/slurm_topology.h"
 #include "src/common/slurm_rlimits_info.h"
 #include "src/common/slurm_route.h"
+#include "src/common/strnatcmp.h"
 #include "src/common/switch.h"
 #include "src/common/xstring.h"
-#include "src/common/strnatcmp.h"
 
 #include "src/slurmctld/acct_policy.h"
+#include "src/slurmctld/burst_buffer.h"
 #include "src/slurmctld/front_end.h"
 #include "src/slurmctld/gang.h"
 #include "src/slurmctld/job_scheduler.h"
@@ -103,9 +106,10 @@ static int  _init_all_slurm_conf(void);
 static int  _preserve_select_type_param(slurm_ctl_conf_t * ctl_conf_ptr,
 					uint16_t old_select_type_p);
 static int  _preserve_plugins(slurm_ctl_conf_t * ctl_conf_ptr,
-				char *old_auth_type, char *old_checkpoint_type,
-				char *old_crypto_type, char *old_sched_type,
-				char *old_select_type, char *old_switch_type);
+			      char *old_auth_type, char *old_checkpoint_type,
+			      char *old_crypto_type, char *old_sched_type,
+			      char *old_select_type, char *old_switch_type,
+			      char *old_bb_type);
 static void _purge_old_node_state(struct node_record *old_node_table_ptr,
 				int old_node_record_count);
 static void _purge_old_part_state(List old_part_list, char *old_def_part_name);
@@ -116,7 +120,6 @@ static int  _restore_node_state(int recover,
 static int  _restore_part_state(List old_part_list, char *old_def_part_name,
 				uint16_t flags);
 static void _stat_slurm_dirs(void);
-static int  _strcmp(const char *s1, const char *s2);
 static int  _sync_nodes_to_comp_job(void);
 static int  _sync_nodes_to_jobs(void);
 static int  _sync_nodes_to_active_job(struct job_record *job_ptr);
@@ -131,6 +134,7 @@ static int _compare_hostnames(struct node_record *old_node_table,
 							  struct node_record *node_table,
 							  int node_count);
 
+
 /* Verify that Slurm directories are secure, not world writable */
 static void _stat_slurm_dirs(void)
 {
@@ -254,42 +258,14 @@ static void _reorder_nodes_by_rank(void)
 static void _build_bitmaps_pre_select(void)
 {
 	struct part_record   *part_ptr;
-	struct node_record   *node_ptr;
 	ListIterator part_iterator;
-	int i;
 
 	/* scan partition table and identify nodes in each */
 	part_iterator = list_iterator_create(part_list);
 	while ((part_ptr = (struct part_record *) list_next(part_iterator))) {
-		FREE_NULL_BITMAP(part_ptr->node_bitmap);
-
-		if ((part_ptr->nodes == NULL) || (part_ptr->nodes[0] == '\0')) {
-			/* Partitions need a bitmap, even if empty */
-			part_ptr->node_bitmap = bit_alloc(node_record_count);
-			continue;
-		}
-
-		if (node_name2bitmap(part_ptr->nodes, false,
-				     &part_ptr->node_bitmap)) {
+		if (build_part_bitmap(part_ptr) == ESLURM_INVALID_NODE_NAME)
 			fatal("Invalid node names in partition %s",
-			      part_ptr->name);
-		}
-
-		for (i=0; i<node_record_count; i++) {
-			if (bit_test(part_ptr->node_bitmap, i) == 0)
-				continue;
-			node_ptr = &node_record_table_ptr[i];
-			part_ptr->total_nodes++;
-			if (slurmctld_conf.fast_schedule)
-				part_ptr->total_cpus +=
-					node_ptr->config_ptr->cpus;
-			else
-				part_ptr->total_cpus += node_ptr->cpus;
-			node_ptr->part_cnt++;
-			xrealloc(node_ptr->part_pptr, (node_ptr->part_cnt *
-				sizeof(struct part_record *)));
-			node_ptr->part_pptr[node_ptr->part_cnt-1] = part_ptr;
-		}
+					part_ptr->name);
 	}
 	list_iterator_destroy(part_iterator);
 	return;
@@ -584,8 +560,8 @@ extern void qos_list_build(char *qos, bitstr_t **qos_bits)
 	slurmdb_qos_rec_t qos_rec, *qos_ptr = NULL;
 	bitstr_t *tmp_qos_bitstr;
 	int rc;
-	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK,
-				   READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, READ_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 	if (!qos) {
 		FREE_NULL_BITMAP(*qos_bits);
@@ -654,7 +630,7 @@ static int _build_single_partitionline_info(slurm_conf_partition_t *part)
 
 	if (part->default_flag) {
 		if (default_part_name &&
-		    strcmp(default_part_name, part->name)) {
+		    xstrcmp(default_part_name, part->name)) {
 			info("_parse_part_spec: changing default partition "
 			     "from %s to %s", default_part_name, part->name);
 			default_part_loc->flags &= (~PART_FLAG_DEFAULT);
@@ -676,7 +652,6 @@ static int _build_single_partitionline_info(slurm_conf_partition_t *part)
 	} else {
 		part_ptr->flags &= (~PART_FLAG_NO_ROOT);
 	}
-
 	if (part_ptr->flags & PART_FLAG_NO_ROOT)
 		debug2("partition %s does not allow root jobs", part_ptr->name);
 
@@ -687,6 +662,8 @@ static int _build_single_partitionline_info(slurm_conf_partition_t *part)
 		part->default_time = NO_VAL;
 	}
 
+	if (part->exclusive_user)
+		part_ptr->flags |= PART_FLAG_EXCLUSIVE_USER;
 	if (part->hidden_flag)
 		part_ptr->flags |= PART_FLAG_HIDDEN;
 	if (part->root_only_flag)
@@ -707,10 +684,22 @@ static int _build_single_partitionline_info(slurm_conf_partition_t *part)
 	part_ptr->min_nodes_orig = part->min_nodes;
 	part_ptr->preempt_mode   = part->preempt_mode;
 	part_ptr->priority       = part->priority;
+	part_ptr->qos_char       = xstrdup(part->qos_char);
 	part_ptr->state_up       = part->state_up;
 	part_ptr->grace_time     = part->grace_time;
 	part_ptr->cr_type        = part->cr_type;
 
+	if (part->billing_weights_str) {
+		xfree(part_ptr->billing_weights_str);
+		xfree(part_ptr->billing_weights);
+		part_ptr->billing_weights_str =
+			xstrdup(part->billing_weights_str);
+		part_ptr->billing_weights =
+			slurm_get_tres_weight_array(
+					part_ptr->billing_weights_str,
+					slurmctld_tres_cnt);
+	}
+
 	if (part->allow_accounts) {
 		xfree(part_ptr->allow_accounts);
 		part_ptr->allow_accounts = xstrdup(part->allow_accounts);
@@ -742,6 +731,23 @@ static int _build_single_partitionline_info(slurm_conf_partition_t *part)
 		qos_list_build(part_ptr->deny_qos, &part_ptr->deny_qos_bitstr);
 	}
 
+	if (part->qos_char) {
+		slurmdb_qos_rec_t qos_rec;
+		xfree(part_ptr->qos_char);
+		part_ptr->qos_char = xstrdup(part->qos_char);
+
+		memset(&qos_rec, 0, sizeof(slurmdb_qos_rec_t));
+		qos_rec.name = part_ptr->qos_char;
+		if (assoc_mgr_fill_in_qos(
+			    acct_db_conn, &qos_rec, accounting_enforce,
+			    (slurmdb_qos_rec_t **)&part_ptr->qos_ptr, 0)
+		    != SLURM_SUCCESS) {
+			fatal("Partition %s has an invalid qos (%s), "
+			      "please check your configuration",
+			      part_ptr->name, qos_rec.name);
+		}
+	}
+
  	if (part->allow_alloc_nodes) {
  		if (part_ptr->allow_alloc_nodes) {
  			int cnt_tot, cnt_uniq;
@@ -866,9 +872,10 @@ int read_slurm_conf(int recover, bool reconfig)
 	List old_part_list = NULL;
 	char *old_def_part_name = NULL;
 	char *old_auth_type       = xstrdup(slurmctld_conf.authtype);
-	uint16_t old_preempt_mode = slurmctld_conf.preempt_mode;
+	char *old_bb_type         = xstrdup(slurmctld_conf.bb_type);
 	char *old_checkpoint_type = xstrdup(slurmctld_conf.checkpoint_type);
 	char *old_crypto_type     = xstrdup(slurmctld_conf.crypto_type);
+	uint16_t old_preempt_mode = slurmctld_conf.preempt_mode;
 	char *old_preempt_type    = xstrdup(slurmctld_conf.preempt_type);
 	char *old_sched_type      = xstrdup(slurmctld_conf.schedtype);
 	char *old_select_type     = xstrdup(slurmctld_conf.select_type);
@@ -916,7 +923,7 @@ int read_slurm_conf(int recover, bool reconfig)
 		return error_code;
 	}
 
-	if (slurm_layouts_init() != SLURM_SUCCESS)
+	if (layouts_init() != SLURM_SUCCESS)
 		fatal("Failed to initialize the layouts framework");
 
 	if (slurm_topo_init() != SLURM_SUCCESS)
@@ -934,9 +941,18 @@ int read_slurm_conf(int recover, bool reconfig)
 	}
 	_handle_all_downnodes();
 	_build_all_partitionline_info();
-	if (!reconfig)
+	if (!reconfig) {
 		restore_front_end_state(recover);
 
+		/* currently load/dump_state_lite has to run before
+		 * load_all_job_state. */
+
+		/* load old config */
+		load_config_state_lite();
+
+		/* store new config */
+		dump_config_state_lite();
+	}
 	update_logging();
 	g_slurm_jobcomp_init(slurmctld_conf.job_comp_loc);
 	if (slurm_sched_init() != SLURM_SUCCESS)
@@ -976,6 +992,9 @@ int read_slurm_conf(int recover, bool reconfig)
 	rehash_node();
 	slurm_topo_build_config();
 	route_g_reconfigure();
+	if (reconfig)
+		power_g_reconfig();
+	cpu_freq_reconfig();
 
 	rehash_jobs();
 	set_slurmd_addr();
@@ -987,7 +1006,7 @@ int read_slurm_conf(int recover, bool reconfig)
 	 * Only load it at init time, not during reconfiguration stages.
 	 * It requires a full restart to switch to a new configuration for now.
 	 */
-	if (!reconfig && (slurm_layouts_load_config() != SLURM_SUCCESS))
+	if (!reconfig && (layouts_load_config(recover) != SLURM_SUCCESS))
 		fatal("Failed to load the layouts framework configuration");
 
 	if (reconfig) {		/* Preserve state from memory */
@@ -1064,7 +1083,6 @@ int read_slurm_conf(int recover, bool reconfig)
 
 	/* NOTE: Run restore_node_features before _restore_job_dependencies */
 	restore_node_features(recover);
-	_restore_job_dependencies();
 #ifdef 	HAVE_ELAN
 	_validate_node_proc_count();
 #endif
@@ -1081,6 +1099,9 @@ int read_slurm_conf(int recover, bool reconfig)
 		}
 	}
 
+	/* NOTE: Run load_all_resv_state() before _restore_job_dependencies */
+	_restore_job_dependencies();
+
 	/* sort config_list by weight for scheduling */
 	list_sort(config_list, &list_compare_config);
 
@@ -1088,10 +1109,10 @@ int read_slurm_conf(int recover, bool reconfig)
 	rc = _preserve_plugins(&slurmctld_conf,
 			       old_auth_type, old_checkpoint_type,
 			       old_crypto_type, old_sched_type,
-			       old_select_type, old_switch_type);
+			       old_select_type, old_switch_type, old_bb_type);
 	error_code = MAX(error_code, rc);	/* not fatal */
 
-	if (strcmp(old_preempt_type, slurmctld_conf.preempt_type)) {
+	if (xstrcmp(old_preempt_type, slurmctld_conf.preempt_type)) {
 		info("Changing PreemptType from %s to %s",
 		     old_preempt_type, slurmctld_conf.preempt_type);
 		(void) slurm_preempt_fini();
@@ -1109,6 +1130,11 @@ int read_slurm_conf(int recover, bool reconfig)
 	error_code = MAX(error_code, rc);	/* not fatal */
 	rc = _preserve_select_type_param(&slurmctld_conf, old_select_type_p);
 	error_code = MAX(error_code, rc);	/* not fatal */
+	if (reconfig)
+		rc =  bb_g_reconfig();
+	else
+		rc = bb_g_load_state(true);
+	error_code = MAX(error_code, rc);	/* not fatal */
 
 	/* Restore job accounting info if file missing or corrupted,
 	 * an extremely rare situation */
@@ -1118,10 +1144,6 @@ int read_slurm_conf(int recover, bool reconfig)
 	/* Sync select plugin with synchronized job/node/part data */
 	select_g_reconfigure();
 
-	if (slurmctld_conf.priority_flags & PRIORITY_FLAGS_TICKET_BASED)
-		info("TICKET_BASED fairshare is deprecated. Please consider "
-		     "using the \"FAIR_TREE\" algorithm.");
-
 	slurmctld_conf.last_update = time(NULL);
 	END_TIMER2("read_slurm_conf");
 	return error_code;
@@ -1218,6 +1240,7 @@ static int _restore_node_state(int recover,
 		}
 
 		node_ptr->last_response = old_node_ptr->last_response;
+		node_ptr->protocol_version = old_node_ptr->protocol_version;
 
 		/* make sure we get the old state from the select
 		 * plugin, just swap it out to avoid possible memory leak */
@@ -1255,8 +1278,7 @@ static int _restore_node_state(int recover,
 
 		node_ptr->sus_job_cnt   = old_node_ptr->sus_job_cnt;
 
-		if (node_ptr->gres_list)
-			list_destroy(node_ptr->gres_list);
+		FREE_NULL_LIST(node_ptr->gres_list);
 		node_ptr->gres_list = old_node_ptr->gres_list;
 		old_node_ptr->gres_list = NULL;
 
@@ -1338,18 +1360,6 @@ static void _purge_old_node_state(struct node_record *old_node_table_ptr,
 	xfree(old_node_table_ptr);
 }
 
-/* Variant of strcmp that will accept NULL string pointers */
-static int  _strcmp(const char *s1, const char *s2)
-{
-	if ((s1 != NULL) && (s2 == NULL))
-		return 1;
-	if ((s1 == NULL) && (s2 == NULL))
-		return 0;
-	if ((s1 == NULL) && (s2 != NULL))
-		return -1;
-	return strcmp(s1, s2);
-}
-
 /* Restore partition information from saved records */
 static int  _restore_part_state(List old_part_list, char *old_def_part_name,
 				uint16_t flags)
@@ -1379,7 +1389,7 @@ static int  _restore_part_state(List old_part_list, char *old_def_part_name,
 			}
 			/* Current partition found in slurm.conf,
 			 * report differences from slurm.conf configuration */
-			if (_strcmp(part_ptr->allow_accounts,
+			if (xstrcmp(part_ptr->allow_accounts,
 				    old_part_ptr->allow_accounts)) {
 				error("Partition %s AllowAccounts differs from "
 				      "slurm.conf", part_ptr->name);
@@ -1387,7 +1397,7 @@ static int  _restore_part_state(List old_part_list, char *old_def_part_name,
 				part_ptr->allow_accounts =
 					xstrdup(old_part_ptr->allow_accounts);
 			}
-			if (_strcmp(part_ptr->allow_groups,
+			if (xstrcmp(part_ptr->allow_groups,
 				    old_part_ptr->allow_groups)) {
 				error("Partition %s AllowGroups differs from "
 				      "slurm.conf", part_ptr->name);
@@ -1397,7 +1407,7 @@ static int  _restore_part_state(List old_part_list, char *old_def_part_name,
 				accounts_list_build(part_ptr->allow_accounts,
 						&part_ptr->allow_account_array);
 			}
-			if (_strcmp(part_ptr->allow_qos,
+			if (xstrcmp(part_ptr->allow_qos,
 				    old_part_ptr->allow_qos)) {
 				error("Partition %s AllowQos differs from "
 				      "slurm.conf", part_ptr->name);
@@ -1407,7 +1417,18 @@ static int  _restore_part_state(List old_part_list, char *old_def_part_name,
 				qos_list_build(part_ptr->allow_qos,
 					       &part_ptr->allow_qos_bitstr);
 			}
-			if (_strcmp(part_ptr->deny_accounts,
+
+			if (xstrcmp(part_ptr->qos_char,
+				    old_part_ptr->qos_char)) {
+				error("Partition %s QOS differs from "
+				      "slurm.conf", part_ptr->name);
+				xfree(part_ptr->qos_char);
+				part_ptr->qos_char = xstrdup(
+					old_part_ptr->qos_char);
+				part_ptr->qos_ptr = old_part_ptr->qos_ptr;
+			}
+
+			if (xstrcmp(part_ptr->deny_accounts,
 				    old_part_ptr->deny_accounts)) {
 				error("Partition %s DenyAccounts differs from "
 				      "slurm.conf", part_ptr->name);
@@ -1417,7 +1438,7 @@ static int  _restore_part_state(List old_part_list, char *old_def_part_name,
 				accounts_list_build(part_ptr->deny_accounts,
 						&part_ptr->deny_account_array);
 			}
-			if (_strcmp(part_ptr->deny_qos,
+			if (xstrcmp(part_ptr->deny_qos,
 				    old_part_ptr->deny_qos)) {
 				error("Partition %s DenyQos differs from "
 				      "slurm.conf", part_ptr->name);
@@ -1427,7 +1448,7 @@ static int  _restore_part_state(List old_part_list, char *old_def_part_name,
 				qos_list_build(part_ptr->deny_qos,
 					       &part_ptr->deny_qos_bitstr);
 			}
-			if (_strcmp(part_ptr->allow_alloc_nodes,
+			if (xstrcmp(part_ptr->allow_alloc_nodes,
 				    old_part_ptr->allow_alloc_nodes)) {
 				error("Partition %s AllowNodes differs from "
 				      "slurm.conf", part_ptr->name);
@@ -1461,6 +1482,19 @@ static int  _restore_part_state(List old_part_list, char *old_def_part_name,
 				else
 					part_ptr->flags &= (~PART_FLAG_NO_ROOT);
 			}
+			if ((part_ptr->flags & PART_FLAG_EXCLUSIVE_USER) !=
+			    (old_part_ptr->flags & PART_FLAG_EXCLUSIVE_USER)) {
+				error("Partition %s ExclusiveUser differs "
+				      "from slurm.conf", part_ptr->name);
+				if (old_part_ptr->flags &
+				    PART_FLAG_EXCLUSIVE_USER) {
+					part_ptr->flags |=
+						PART_FLAG_EXCLUSIVE_USER;
+				} else {
+					part_ptr->flags &=
+						(~PART_FLAG_EXCLUSIVE_USER);
+				}
+			}
 			if ((part_ptr->flags & PART_FLAG_ROOT_ONLY) !=
 			    (old_part_ptr->flags & PART_FLAG_ROOT_ONLY)) {
 				error("Partition %s RootOnly differs from "
@@ -1525,7 +1559,7 @@ static int  _restore_part_state(List old_part_list, char *old_def_part_name,
 				part_ptr->min_nodes_orig = old_part_ptr->
 							   min_nodes_orig;
 			}
-			if (_strcmp(part_ptr->nodes, old_part_ptr->nodes)) {
+			if (xstrcmp(part_ptr->nodes, old_part_ptr->nodes)) {
 				error("Partition %s Nodes differs from "
 				      "slurm.conf", part_ptr->name);
 				xfree(part_ptr->nodes);
@@ -1571,6 +1605,9 @@ static int  _restore_part_state(List old_part_list, char *old_def_part_name,
 						      allow_qos);
 			qos_list_build(part_ptr->allow_qos,
 				       &part_ptr->allow_qos_bitstr);
+			part_ptr->qos_char = xstrdup(old_part_ptr->
+						     qos_char);
+			part_ptr->qos_ptr = old_part_ptr->qos_ptr;
 			part_ptr->default_time = old_part_ptr->default_time;
 			part_ptr->deny_accounts = xstrdup(old_part_ptr->
 							  deny_accounts);
@@ -1599,7 +1636,7 @@ static int  _restore_part_state(List old_part_list, char *old_def_part_name,
 
 	if (old_def_part_name &&
 	    ((default_part_name == NULL) ||
-	     strcmp(old_def_part_name, default_part_name))) {
+	     xstrcmp(old_def_part_name, default_part_name))) {
 		part_ptr = find_part_record(old_def_part_name);
 		if (part_ptr) {
 			error("Default partition reset to %s",
@@ -1620,7 +1657,7 @@ static void _purge_old_part_state(List old_part_list, char *old_def_part_name)
 
 	if (!old_part_list)
 		return;
-	list_destroy(old_part_list);
+	FREE_NULL_LIST(old_part_list);
 }
 
 /*
@@ -1678,65 +1715,66 @@ static int _update_preempt(uint16_t old_preempt_mode)
 static int  _preserve_plugins(slurm_ctl_conf_t * ctl_conf_ptr,
 		char *old_auth_type, char *old_checkpoint_type,
 		char *old_crypto_type, char *old_sched_type,
-		char *old_select_type, char *old_switch_type)
+		char *old_select_type, char *old_switch_type,
+		char *old_bb_type)
 {
 	int rc = SLURM_SUCCESS;
 
-	if (old_auth_type) {
-		if (strcmp(old_auth_type, ctl_conf_ptr->authtype)) {
-			xfree(ctl_conf_ptr->authtype);
-			ctl_conf_ptr->authtype = old_auth_type;
-			rc =  ESLURM_INVALID_AUTHTYPE_CHANGE;
-		} else	/* free duplicate value */
-			xfree(old_auth_type);
+	if (xstrcmp(old_auth_type, ctl_conf_ptr->authtype)) {
+		xfree(ctl_conf_ptr->authtype);
+		ctl_conf_ptr->authtype = old_auth_type;
+		rc =  ESLURM_INVALID_AUTHTYPE_CHANGE;
+	} else {	/* free duplicate value */
+		xfree(old_auth_type);
 	}
 
-	if (old_checkpoint_type) {
-		if (strcmp(old_checkpoint_type,
-				ctl_conf_ptr->checkpoint_type)) {
-			xfree(ctl_conf_ptr->checkpoint_type);
-			ctl_conf_ptr->checkpoint_type = old_checkpoint_type;
-			rc =  ESLURM_INVALID_CHECKPOINT_TYPE_CHANGE;
-		} else  /* free duplicate value */
-			xfree(old_checkpoint_type);
+	if (xstrcmp(old_bb_type, ctl_conf_ptr->bb_type)) {
+		xfree(ctl_conf_ptr->bb_type);
+		ctl_conf_ptr->bb_type = old_bb_type;
+		rc =  ESLURM_INVALID_BURST_BUFFER_CHANGE;
+	} else {	/* free duplicate value */
+		xfree(old_bb_type);
 	}
 
-	if (old_crypto_type) {
-		if (strcmp(old_crypto_type,
-				ctl_conf_ptr->crypto_type)) {
-			xfree(ctl_conf_ptr->crypto_type);
-			ctl_conf_ptr->crypto_type = old_crypto_type;
-			rc = ESLURM_INVALID_CRYPTO_TYPE_CHANGE;
-		} else
-			xfree(old_crypto_type);
+	if (xstrcmp(old_checkpoint_type, ctl_conf_ptr->checkpoint_type)) {
+		xfree(ctl_conf_ptr->checkpoint_type);
+		ctl_conf_ptr->checkpoint_type = old_checkpoint_type;
+		rc =  ESLURM_INVALID_CHECKPOINT_TYPE_CHANGE;
+	} else {	/* free duplicate value */
+		xfree(old_checkpoint_type);
 	}
 
-	if (old_sched_type) {
-		if (strcmp(old_sched_type, ctl_conf_ptr->schedtype)) {
-			xfree(ctl_conf_ptr->schedtype);
-			ctl_conf_ptr->schedtype = old_sched_type;
-			rc =  ESLURM_INVALID_SCHEDTYPE_CHANGE;
-		} else	/* free duplicate value */
-			xfree(old_sched_type);
+	if (xstrcmp(old_crypto_type, ctl_conf_ptr->crypto_type)) {
+		xfree(ctl_conf_ptr->crypto_type);
+		ctl_conf_ptr->crypto_type = old_crypto_type;
+		rc = ESLURM_INVALID_CRYPTO_TYPE_CHANGE;
+	} else {	/* free duplicate value */
+		xfree(old_crypto_type);
 	}
 
+	if (xstrcmp(old_sched_type, ctl_conf_ptr->schedtype)) {
+		xfree(ctl_conf_ptr->schedtype);
+		ctl_conf_ptr->schedtype = old_sched_type;
+		rc =  ESLURM_INVALID_SCHEDTYPE_CHANGE;
+	} else {	/* free duplicate value */
+		xfree(old_sched_type);
+	}
 
-	if (old_select_type) {
-		if (strcmp(old_select_type, ctl_conf_ptr->select_type)) {
-			xfree(ctl_conf_ptr->select_type);
-			ctl_conf_ptr->select_type = old_select_type;
-			rc =  ESLURM_INVALID_SELECTTYPE_CHANGE;
-		} else	/* free duplicate value */
-			xfree(old_select_type);
+
+	if (xstrcmp(old_select_type, ctl_conf_ptr->select_type)) {
+		xfree(ctl_conf_ptr->select_type);
+		ctl_conf_ptr->select_type = old_select_type;
+		rc =  ESLURM_INVALID_SELECTTYPE_CHANGE;
+	} else {	/* free duplicate value */
+		xfree(old_select_type);
 	}
 
-	if (old_switch_type) {
-		if (strcmp(old_switch_type, ctl_conf_ptr->switch_type)) {
-			xfree(ctl_conf_ptr->switch_type);
-			ctl_conf_ptr->switch_type = old_switch_type;
-			rc = ESLURM_INVALID_SWITCHTYPE_CHANGE;
-		} else	/* free duplicate value */
-			xfree(old_switch_type);
+	if (xstrcmp(old_switch_type, ctl_conf_ptr->switch_type)) {
+		xfree(ctl_conf_ptr->switch_type);
+		ctl_conf_ptr->switch_type = old_switch_type;
+		rc = ESLURM_INVALID_SWITCHTYPE_CHANGE;
+	} else {	/* free duplicate value */
+		xfree(old_switch_type);
 	}
 
 	if (ctl_conf_ptr->backup_controller == NULL)
@@ -1806,13 +1844,20 @@ static int _sync_nodes_to_comp_job(void)
 			   plugin and this happens before it is
 			   normally set up so do it now.
 			*/
-			if (!cluster_cpus)
-				set_cluster_cpus();
+			set_cluster_tres(false);
 
 			info("%s: Job %u in completing state",
 			     __func__, job_ptr->job_id);
 			if (!job_ptr->node_bitmap_cg)
 				build_cg_bitmap(job_ptr);
+
+			/* deallocate_nodes will remove this job from
+			 * the system before it was added, so add it
+			 * now
+			 */
+			if (accounting_enforce & ACCOUNTING_ENFORCE_LIMITS)
+				acct_policy_job_begin(job_ptr);
+
 			deallocate_nodes(job_ptr, false, false, false);
 			/* The job in completing state at slurmctld restart or
 			 * reconfiguration, do not log completion again.
@@ -1844,6 +1889,12 @@ static int _sync_nodes_to_active_job(struct job_record *job_ptr)
 		} else if (bit_test(job_ptr->node_bitmap, i) == 0)
 			continue;
 
+		if (job_ptr->details &&
+		    (job_ptr->details->whole_node == 2)) {
+			node_ptr->owner_job_cnt++;
+			node_ptr->owner = job_ptr->user_id;
+		}
+
 		node_flags = node_ptr->node_state & NODE_STATE_FLAGS;
 
 		node_ptr->run_job_cnt++; /* NOTE:
@@ -1967,6 +2018,9 @@ static int _restore_job_dependencies(void)
 	char *new_depend;
 	bool valid = true;
 	List license_list;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+	assoc_mgr_lock(&locks);
 
 	assoc_mgr_clear_used_info();
 	job_iterator = list_iterator_create(job_list);
@@ -1986,11 +2040,14 @@ static int _restore_job_dependencies(void)
 			if (!IS_JOB_FINISHED(job_ptr))
 				acct_policy_add_job_submit(job_ptr);
 			if (IS_JOB_RUNNING(job_ptr) ||
-			    IS_JOB_SUSPENDED(job_ptr))
+			    IS_JOB_SUSPENDED(job_ptr)) {
 				acct_policy_job_begin(job_ptr);
+				job_claim_resv(job_ptr);
+			}
 		}
 
-		license_list = license_validate(job_ptr->licenses, &valid);
+		license_list = license_validate(job_ptr->licenses,
+						job_ptr->tres_req_cnt, &valid);
 		FREE_NULL_LIST(job_ptr->license_list);
 		if (valid)
 			job_ptr->license_list = license_list;
@@ -2011,6 +2068,9 @@ static int _restore_job_dependencies(void)
 		xfree(new_depend);
 	}
 	list_iterator_destroy(job_iterator);
+
+	assoc_mgr_unlock(&locks);
+
 	return error_code;
 }
 
@@ -2031,7 +2091,10 @@ static void _acct_restore_active_jobs(void)
 		if (IS_JOB_SUSPENDED(job_ptr))
 			jobacct_storage_g_job_suspend(acct_db_conn, job_ptr);
 		if (IS_JOB_SUSPENDED(job_ptr) || IS_JOB_RUNNING(job_ptr)) {
-			jobacct_storage_g_job_start(acct_db_conn, job_ptr);
+			if (!with_slurmdbd)
+				jobacct_storage_g_job_start(acct_db_conn, job_ptr);
+			else if (job_ptr->db_index != NO_VAL)
+				job_ptr->db_index = 0;
 			step_iterator = list_iterator_create(
 				job_ptr->step_list);
 			while ((step_ptr = (struct step_record *)
@@ -2085,7 +2148,7 @@ _compare_hostnames(struct node_record *old_node_table,
 	hostset_ranged_string(set, set_size, ranged);
 
 	cc = 0;
-	if (strcmp(old_ranged, ranged) != 0) {
+	if (xstrcmp(old_ranged, ranged) != 0) {
 		error("%s: node names changed before reconfiguration. "
 		      "You have to restart slurmctld.", __func__);
 		cc = -1;
@@ -2213,24 +2276,24 @@ extern int load_config_state_lite(void)
 
 	safe_unpack16(&ver, buffer);
 	debug3("Version in last_conf_lite header is %u", ver);
-	if (ver > SLURM_PROTOCOL_VERSION) {
+	if (ver > SLURM_PROTOCOL_VERSION || ver < SLURM_MIN_PROTOCOL_VERSION) {
 		error("***********************************************");
 		error("Can not recover last_conf_lite, incompatible version, "
-		      "got %u <= %u", ver, SLURM_PROTOCOL_VERSION);
+		      "(%u not between %d and %d)",
+		      ver, SLURM_MIN_PROTOCOL_VERSION, SLURM_PROTOCOL_VERSION);
 		error("***********************************************");
 		free_buf(buffer);
 		return EFAULT;
+	} else {
+		safe_unpack_time(&buf_time, buffer);
+		safe_unpackstr_xmalloc(&last_accounting_storage_type,
+				       &uint32_tmp, buffer);
 	}
-
-	safe_unpack_time(&buf_time, buffer);
-	safe_unpackstr_xmalloc(&last_accounting_storage_type,
-			       &uint32_tmp, buffer);
-
 	xassert(slurmctld_conf.accounting_storage_type);
 
 	if (last_accounting_storage_type
-	    && !strcmp(last_accounting_storage_type,
-		       slurmctld_conf.accounting_storage_type))
+	    && !xstrcmp(last_accounting_storage_type,
+		        slurmctld_conf.accounting_storage_type))
 		slurmctld_init_db = 0;
 	xfree(last_accounting_storage_type);
 
diff --git a/src/slurmctld/reservation.c b/src/slurmctld/reservation.c
index 5c50fe420..85a10d0ef 100644
--- a/src/slurmctld/reservation.c
+++ b/src/slurmctld/reservation.c
@@ -2,7 +2,7 @@
  *  reservation.c - resource reservation management
  *****************************************************************************
  *  Copyright (C) 2009-2010 Lawrence Livermore National Security.
- *  Copyright (C) 2012-2014 SchedMD LLC
+ *  Copyright (C) 2012-2015 SchedMD LLC <http://www.schedmd.com>
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -67,11 +67,13 @@
 #include "src/common/pack.h"
 #include "src/common/parse_time.h"
 #include "src/common/slurm_accounting_storage.h"
+#include "src/common/slurm_time.h"
 #include "src/common/uid.h"
 #include "src/common/xassert.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xstring.h"
 
+#include "src/slurmctld/burst_buffer.h"
 #include "src/slurmctld/licenses.h"
 #include "src/slurmctld/locks.h"
 #include "src/slurmctld/reservation.h"
@@ -86,11 +88,8 @@
  * considering a reservation time specification being invalid */
 #define MAX_RESV_DELAY	600
 
-/* Change RESV_STATE_VERSION value when changing the state save format
- * Add logic to permit reading of the previous version's state in order
- * to avoid losing reservations between releases major SLURM updates. */
+/* No need to change this; we always pack SLURM_PROTOCOL_VERSION */
 #define RESV_STATE_VERSION          "PROTOCOL_VERSION"
-#define RESV_2_6_STATE_VERSION      "VER004"	/* SLURM version 2.6 */
 
 typedef struct resv_thread_args {
 	char *script;
@@ -108,6 +107,34 @@ uint32_t  cnodes_per_mp = 0;
 uint32_t  cpus_per_mp = 0;
 #endif
 
+/*
+ * The two structs below are used to build a timeline ("planning") of a
+ * constraint's evolution over time, taking temporal overlaps between
+ * intervals into account.
+ */
+typedef struct constraint_planning {
+	List slot_list;
+} constraint_planning_t;
+
+typedef struct constraint_slot {
+	time_t start;
+	time_t end;
+	uint32_t value;
+} constraint_slot_t;
+/*
+ * The functions operating on these structs are declared below.
+ */
+static void _free_slot(void *x);
+static void _init_constraint_planning(constraint_planning_t* sched);
+static void _print_constraint_planning(constraint_planning_t* sched);
+static void _free_constraint_planning(constraint_planning_t* sched);
+static void _update_constraint_planning(constraint_planning_t* sched,
+					uint32_t value, time_t start,
+					time_t end);
+static uint32_t _max_constraint_planning(constraint_planning_t* sched,
+					 time_t *start, time_t *end);
+
+
 static void _advance_resv_time(slurmctld_resv_t *resv_ptr);
 static void _advance_time(time_t *res_time, int day_cnt);
 static int  _build_account_list(char *accounts, int *account_cnt,
@@ -144,6 +171,7 @@ static int  _post_resv_create(slurmctld_resv_t *resv_ptr);
 static int  _post_resv_delete(slurmctld_resv_t *resv_ptr);
 static int  _post_resv_update(slurmctld_resv_t *resv_ptr,
 			      slurmctld_resv_t *old_resv_ptr);
+static void _rebuild_core_bitmap(slurmctld_resv_t *resv_ptr);
 static int  _resize_resv(slurmctld_resv_t *resv_ptr, uint32_t node_cnt);
 static void _restore_resv(slurmctld_resv_t *dest_resv,
 			  slurmctld_resv_t *src_resv);
@@ -155,7 +183,8 @@ static int  _select_nodes(resv_desc_msg_t *resv_desc_ptr,
 			  struct part_record **part_ptr,
 			  bitstr_t **resv_bitmap, bitstr_t **core_bitmap);
 static int  _set_assoc_list(slurmctld_resv_t *resv_ptr);
-static void _set_cpu_cnt(slurmctld_resv_t *resv_ptr);
+static void _set_tres_cnt(slurmctld_resv_t *resv_ptr,
+			  slurmctld_resv_t *old_resv_ptr);
 static void _set_nodes_flags(slurmctld_resv_t *resv_ptr, time_t now,
 			     uint32_t flags);
 static int  _update_account_list(slurmctld_resv_t *resv_ptr,
@@ -174,10 +203,10 @@ static void _advance_time(time_t *res_time, int day_cnt)
 	time_t save_time = *res_time;
 	struct tm time_tm;
 
-	localtime_r(res_time, &time_tm);
+	slurm_localtime_r(res_time, &time_tm);
 	time_tm.tm_isdst = -1;
 	time_tm.tm_mday += day_cnt;
-	*res_time = mktime(&time_tm);
+	*res_time = slurm_mktime(&time_tm);
 	if (*res_time == (time_t)(-1)) {
 		error("Could not compute reservation time %lu",
 		      (long unsigned int) save_time);
@@ -230,11 +259,12 @@ static slurmctld_resv_t *_copy_resv(slurmctld_resv_t *resv_orig_ptr)
 	xassert(resv_orig_ptr->magic == RESV_MAGIC);
 	resv_copy_ptr = xmalloc(sizeof(slurmctld_resv_t));
 	resv_copy_ptr->accounts = xstrdup(resv_orig_ptr->accounts);
+	resv_copy_ptr->burst_buffer = xstrdup(resv_orig_ptr->burst_buffer);
 	resv_copy_ptr->account_cnt = resv_orig_ptr->account_cnt;
 	resv_copy_ptr->account_list = xmalloc(sizeof(char *) *
 					      resv_orig_ptr->account_cnt);
 	resv_copy_ptr->account_not = resv_orig_ptr->account_not;
-	for (i=0; i<resv_copy_ptr->account_cnt; i++) {
+	for (i = 0; i < resv_copy_ptr->account_cnt; i++) {
 		resv_copy_ptr->account_list[i] =
 				xstrdup(resv_orig_ptr->account_list[i]);
 	}
@@ -243,7 +273,7 @@ static slurmctld_resv_t *_copy_resv(slurmctld_resv_t *resv_orig_ptr)
 		resv_copy_ptr->core_bitmap = bit_copy(resv_orig_ptr->
 						      core_bitmap);
 	}
-	resv_copy_ptr->cpu_cnt = resv_orig_ptr->cpu_cnt;
+	resv_copy_ptr->core_cnt = resv_orig_ptr->core_cnt;
 	resv_copy_ptr->duration = resv_orig_ptr->duration;
 	resv_copy_ptr->end_time = resv_orig_ptr->end_time;
 	resv_copy_ptr->features = xstrdup(resv_orig_ptr->features);
@@ -257,21 +287,27 @@ static slurmctld_resv_t *_copy_resv(slurmctld_resv_t *resv_orig_ptr)
 	resv_copy_ptr->magic = resv_orig_ptr->magic;
 	resv_copy_ptr->flags_set_node = resv_orig_ptr->flags_set_node;
 	resv_copy_ptr->name = xstrdup(resv_orig_ptr->name);
-	resv_copy_ptr->node_bitmap = bit_copy(resv_orig_ptr->node_bitmap);
+	if (resv_orig_ptr->node_bitmap) {
+		resv_copy_ptr->node_bitmap =
+			bit_copy(resv_orig_ptr->node_bitmap);
+	}
 	resv_copy_ptr->node_cnt = resv_orig_ptr->node_cnt;
 	resv_copy_ptr->node_list = xstrdup(resv_orig_ptr->node_list);
 	resv_copy_ptr->partition = xstrdup(resv_orig_ptr->partition);
 	resv_copy_ptr->part_ptr = resv_orig_ptr->part_ptr;
 	resv_copy_ptr->resv_id = resv_orig_ptr->resv_id;
+	resv_copy_ptr->resv_watts = resv_orig_ptr->resv_watts;
 	resv_copy_ptr->start_time = resv_orig_ptr->start_time;
 	resv_copy_ptr->start_time_first = resv_orig_ptr->start_time_first;
 	resv_copy_ptr->start_time_prev = resv_orig_ptr->start_time_prev;
+	resv_copy_ptr->tres_str = xstrdup(resv_orig_ptr->tres_str);
+	resv_copy_ptr->tres_fmt_str = xstrdup(resv_orig_ptr->tres_fmt_str);
 	resv_copy_ptr->users = xstrdup(resv_orig_ptr->users);
 	resv_copy_ptr->user_cnt = resv_orig_ptr->user_cnt;
 	resv_copy_ptr->user_list = xmalloc(sizeof(uid_t) *
 					   resv_orig_ptr->user_cnt);
 	resv_copy_ptr->user_not = resv_orig_ptr->user_not;
-	for (i=0; i<resv_copy_ptr->user_cnt; i++)
+	for (i = 0; i < resv_copy_ptr->user_cnt; i++)
 		resv_copy_ptr->user_list[i] = resv_orig_ptr->user_list[i];
 
 	return resv_copy_ptr;
@@ -304,11 +340,15 @@ static void _restore_resv(slurmctld_resv_t *dest_resv,
 	dest_resv->assoc_list = src_resv->assoc_list;
 	src_resv->assoc_list = NULL;
 
+	xfree(dest_resv->burst_buffer);
+	dest_resv->burst_buffer = src_resv->burst_buffer;
+	src_resv->burst_buffer = NULL;
+
 	FREE_NULL_BITMAP(dest_resv->core_bitmap);
 	dest_resv->core_bitmap = src_resv->core_bitmap;
 	src_resv->core_bitmap = NULL;
 
-	dest_resv->cpu_cnt = src_resv->cpu_cnt;
+	dest_resv->core_cnt = src_resv->core_cnt;
 	dest_resv->duration = src_resv->duration;
 	dest_resv->end_time = src_resv->end_time;
 
@@ -325,8 +365,7 @@ static void _restore_resv(slurmctld_resv_t *dest_resv,
 	dest_resv->licenses = src_resv->licenses;
 	src_resv->licenses = NULL;
 
-	if (dest_resv->license_list)
-		list_destroy(dest_resv->license_list);
+	FREE_NULL_LIST(dest_resv->license_list);
 	dest_resv->license_list = src_resv->license_list;
 	src_resv->license_list = NULL;
 
@@ -353,10 +392,19 @@ static void _restore_resv(slurmctld_resv_t *dest_resv,
 
 	dest_resv->part_ptr = src_resv->part_ptr;
 	dest_resv->resv_id = src_resv->resv_id;
+	dest_resv->resv_watts = src_resv->resv_watts;
 	dest_resv->start_time = src_resv->start_time;
 	dest_resv->start_time_first = src_resv->start_time_first;
 	dest_resv->start_time_prev = src_resv->start_time_prev;
 
+	xfree(dest_resv->tres_str);
+	dest_resv->tres_str = src_resv->tres_str;
+	src_resv->tres_str = NULL;
+
+	xfree(dest_resv->tres_fmt_str);
+	dest_resv->tres_fmt_str = src_resv->tres_fmt_str;
+	src_resv->tres_fmt_str = NULL;
+
 	xfree(dest_resv->users);
 	dest_resv->users = src_resv->users;
 	src_resv->users = NULL;
@@ -380,15 +428,17 @@ static void _del_resv_rec(void *x)
 			xfree(resv_ptr->account_list[i]);
 		xfree(resv_ptr->account_list);
 		xfree(resv_ptr->assoc_list);
+		xfree(resv_ptr->burst_buffer);
 		FREE_NULL_BITMAP(resv_ptr->core_bitmap);
 		xfree(resv_ptr->features);
-		if (resv_ptr->license_list)
-			list_destroy(resv_ptr->license_list);
+		FREE_NULL_LIST(resv_ptr->license_list);
 		xfree(resv_ptr->licenses);
 		xfree(resv_ptr->name);
 		FREE_NULL_BITMAP(resv_ptr->node_bitmap);
 		xfree(resv_ptr->node_list);
 		xfree(resv_ptr->partition);
+		xfree(resv_ptr->tres_str);
+		xfree(resv_ptr->tres_fmt_str);
 		xfree(resv_ptr->users);
 		xfree(resv_ptr->user_list);
 		xfree(resv_ptr);
@@ -424,6 +474,7 @@ static void _dump_resv_req(resv_desc_msg_t *resv_ptr, char *mode)
 {
 
 	char start_str[32] = "-1", end_str[32] = "-1", *flag_str = NULL;
+	char watts_str[32] = "n/a";
 	char *node_cnt_str = NULL;
 	int duration, i;
 
@@ -438,6 +489,10 @@ static void _dump_resv_req(resv_desc_msg_t *resv_ptr, char *mode)
 		slurm_make_time_str(&resv_ptr->end_time,
 				    end_str,  sizeof(end_str));
 	}
+	if (resv_ptr->resv_watts != NO_VAL) {
+		snprintf(watts_str, sizeof(watts_str), "%u",
+			 resv_ptr->resv_watts);
+	}
 	if (resv_ptr->flags != NO_VAL)
 		flag_str = reservation_flags_string(resv_ptr->flags);
 
@@ -460,11 +515,13 @@ static void _dump_resv_req(resv_desc_msg_t *resv_ptr, char *mode)
 
 	info("%s: Name=%s StartTime=%s EndTime=%s Duration=%d "
 	     "Flags=%s NodeCnt=%s NodeList=%s Features=%s "
-	     "PartitionName=%s Users=%s Accounts=%s Licenses=%s",
+	     "PartitionName=%s Users=%s Accounts=%s Licenses=%s BurstBuffer=%s"
+	     "Watts=%s",
 	     mode, resv_ptr->name, start_str, end_str, duration,
 	     flag_str, node_cnt_str, resv_ptr->node_list,
 	     resv_ptr->features, resv_ptr->partition,
-	     resv_ptr->users, resv_ptr->accounts, resv_ptr->licenses);
+	     resv_ptr->users, resv_ptr->accounts, resv_ptr->licenses,
+	     resv_ptr->burst_buffer, watts_str);
 
 	xfree(flag_str);
 	xfree(node_cnt_str);
@@ -512,12 +569,12 @@ static void _generate_resv_name(resv_desc_msg_t *resv_ptr)
 /* Validate an account name */
 static bool _is_account_valid(char *account)
 {
-	slurmdb_association_rec_t assoc_rec, *assoc_ptr;
+	slurmdb_assoc_rec_t assoc_rec, *assoc_ptr;
 
 	if (!(accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS))
 		return true;	/* don't worry about account validity */
 
-	memset(&assoc_rec, 0, sizeof(slurmdb_association_rec_t));
+	memset(&assoc_rec, 0, sizeof(slurmdb_assoc_rec_t));
 	assoc_rec.uid       = NO_VAL;
 	assoc_rec.acct      = account;
 
@@ -533,10 +590,10 @@ static bool _is_account_valid(char *account)
  * associations must be set before calling this function and while
  * handling it after a return.
  */
-static int _append_assoc_list(List assoc_list, slurmdb_association_rec_t *assoc)
+static int _append_assoc_list(List assoc_list, slurmdb_assoc_rec_t *assoc)
 {
 	int rc = ESLURM_INVALID_ACCOUNT;
-	slurmdb_association_rec_t *assoc_ptr = NULL;
+	slurmdb_assoc_rec_t *assoc_ptr = NULL;
 	if (assoc_mgr_fill_in_assoc(
 		    acct_db_conn, assoc,
 		    accounting_enforce,
@@ -564,9 +621,9 @@ static int _set_assoc_list(slurmctld_resv_t *resv_ptr)
 {
 	int rc = SLURM_SUCCESS, i = 0, j = 0;
 	List assoc_list_allow = NULL, assoc_list_deny = NULL, assoc_list;
-	slurmdb_association_rec_t assoc, *assoc_ptr = NULL;
-	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK };
+	slurmdb_assoc_rec_t assoc, *assoc_ptr = NULL;
+	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 
 	/* no need to do this if we can't ;) */
@@ -576,7 +633,7 @@ static int _set_assoc_list(slurmctld_resv_t *resv_ptr)
 	assoc_list_allow = list_create(NULL);
 	assoc_list_deny  = list_create(NULL);
 
-	memset(&assoc, 0, sizeof(slurmdb_association_rec_t));
+	memset(&assoc, 0, sizeof(slurmdb_assoc_rec_t));
 	xfree(resv_ptr->assoc_list);
 
 	assoc_mgr_lock(&locks);
@@ -587,7 +644,7 @@ static int _set_assoc_list(slurmctld_resv_t *resv_ptr)
 			for (i=0; i < resv_ptr->user_cnt; i++) {
 				for (j=0; j < resv_ptr->account_cnt; j++) {
 					memset(&assoc, 0,
-					       sizeof(slurmdb_association_rec_t));
+					       sizeof(slurmdb_assoc_rec_t));
 					assoc.acct = resv_ptr->account_list[j];
 					assoc.uid  = resv_ptr->user_list[i];
 					rc = _append_assoc_list(
@@ -603,7 +660,7 @@ static int _set_assoc_list(slurmctld_resv_t *resv_ptr)
 				assoc_list = assoc_list_allow;
 			for (i=0; i < resv_ptr->user_cnt; i++) {
 				memset(&assoc, 0,
-				       sizeof(slurmdb_association_rec_t));
+				       sizeof(slurmdb_assoc_rec_t));
 				assoc.uid = resv_ptr->user_list[i];
 				rc = assoc_mgr_get_user_assocs(
 					    acct_db_conn, &assoc,
@@ -622,7 +679,7 @@ static int _set_assoc_list(slurmctld_resv_t *resv_ptr)
 				assoc_list = assoc_list_allow;
 			for (j=0; j < resv_ptr->account_cnt; j++) {
 				memset(&assoc, 0,
-				       sizeof(slurmdb_association_rec_t));
+				       sizeof(slurmdb_assoc_rec_t));
 				assoc.acct = resv_ptr->account_list[j];
 				assoc.uid  = (uint32_t)NO_VAL;
 				rc = _append_assoc_list(assoc_list, &assoc);
@@ -636,7 +693,7 @@ static int _set_assoc_list(slurmctld_resv_t *resv_ptr)
 		else
 			assoc_list = assoc_list_allow;
 		for (i=0; i < resv_ptr->user_cnt; i++) {
-			memset(&assoc, 0, sizeof(slurmdb_association_rec_t));
+			memset(&assoc, 0, sizeof(slurmdb_assoc_rec_t));
 			assoc.uid = resv_ptr->user_list[i];
 			rc = assoc_mgr_get_user_assocs(
 				    acct_db_conn, &assoc,
@@ -654,7 +711,7 @@ static int _set_assoc_list(slurmctld_resv_t *resv_ptr)
 		else
 			assoc_list = assoc_list_allow;
 		for (i=0; i < resv_ptr->account_cnt; i++) {
-			memset(&assoc, 0, sizeof(slurmdb_association_rec_t));
+			memset(&assoc, 0, sizeof(slurmdb_assoc_rec_t));
 			assoc.acct = resv_ptr->account_list[i];
 			assoc.uid  = (uint32_t)NO_VAL;
 			if ((rc = _append_assoc_list(assoc_list, &assoc))
@@ -698,8 +755,8 @@ static int _set_assoc_list(slurmctld_resv_t *resv_ptr)
 	debug("assoc_list:%s", resv_ptr->assoc_list);
 
 end_it:
-	list_destroy(assoc_list_allow);
-	list_destroy(assoc_list_deny);
+	FREE_NULL_LIST(assoc_list_allow);
+	FREE_NULL_LIST(assoc_list_deny);
 	assoc_mgr_unlock(&locks);
 
 	return rc;
@@ -718,16 +775,8 @@ static int _post_resv_create(slurmctld_resv_t *resv_ptr)
 	memset(&resv, 0, sizeof(slurmdb_reservation_rec_t));
 	resv.assocs = resv_ptr->assoc_list;
 	resv.cluster = slurmctld_cluster_name;
-	resv.cpus = resv_ptr->cpu_cnt;
-#ifdef HAVE_BG
-	/* Since on a bluegene we track cnodes instead of cpus do the
-	   adjustment since accounting is expecting cpus here.
-	*/
-	if (!cpu_mult)
-		(void)select_g_alter_node_cnt(
-			SELECT_GET_NODE_CPU_CNT, &cpu_mult);
-	resv.cpus *= cpu_mult;
-#endif
+	resv.tres_str = resv_ptr->tres_str;
+
 	resv.flags = resv_ptr->flags;
 	resv.id = resv_ptr->resv_id;
 	resv.name = resv_ptr->name;
@@ -738,6 +787,7 @@ static int _post_resv_create(slurmctld_resv_t *resv_ptr)
 	}
 	resv.time_end = resv_ptr->end_time;
 	resv.time_start = resv_ptr->start_time;
+	resv.tres_str = resv_ptr->tres_str;
 
 	rc = acct_storage_g_add_reservation(acct_db_conn, &resv);
 
@@ -749,6 +799,7 @@ static int _post_resv_delete(slurmctld_resv_t *resv_ptr)
 {
 	int rc = SLURM_SUCCESS;
 	slurmdb_reservation_rec_t resv;
+	time_t now = time(NULL);
 
 	if (resv_ptr->flags & RESERVE_FLAG_TIME_FLOAT)
 		return rc;
@@ -757,11 +808,13 @@ static int _post_resv_delete(slurmctld_resv_t *resv_ptr)
 	resv.cluster = slurmctld_cluster_name;
 	resv.id = resv_ptr->resv_id;
 	resv.name = resv_ptr->name;
+	resv.time_end = now;
 	resv.time_start = resv_ptr->start_time;
 	/* This is just a time stamp here to delete if the reservation
 	 * hasn't started yet so we don't get trash records in the
 	 * database if said database isn't up right now */
-	resv.time_start_prev = time(NULL);
+	resv.time_start_prev = now;
+	resv.tres_str = resv_ptr->tres_str;
 	rc = acct_storage_g_remove_reservation(acct_db_conn, &resv);
 
 	return rc;
@@ -774,6 +827,9 @@ static int _post_resv_update(slurmctld_resv_t *resv_ptr,
 	int rc = SLURM_SUCCESS;
 	slurmdb_reservation_rec_t resv;
 	char temp_bit[BUF_SIZE];
+	time_t now = time(NULL);
+
+	xassert(old_resv_ptr);
 
 	if (resv_ptr->flags & RESERVE_FLAG_TIME_FLOAT)
 		return rc;
@@ -783,72 +839,42 @@ static int _post_resv_update(slurmctld_resv_t *resv_ptr,
 	resv.id = resv_ptr->resv_id;
 	resv.time_end = resv_ptr->end_time;
 
-	if (!old_resv_ptr) {
-		resv.assocs = resv_ptr->assoc_list;
-		resv.cpus = resv_ptr->cpu_cnt;
-#ifdef HAVE_BG
-		/* Since on a bluegene we track cnodes instead of cpus
-		 * do the adjustment since accounting is expecting
-		 * cpus here.
-		 */
-		if (!cpu_mult)
-			(void)select_g_alter_node_cnt(
-				SELECT_GET_NODE_CPU_CNT, &cpu_mult);
-		resv.cpus *= cpu_mult;
-#endif
-		resv.flags = resv_ptr->flags;
-		resv.nodes = resv_ptr->node_list;
-	} else {
-		time_t now = time(NULL);
-
-		if (old_resv_ptr->assoc_list && resv_ptr->assoc_list) {
-			if (strcmp(old_resv_ptr->assoc_list,
-				  resv_ptr->assoc_list))
-				resv.assocs = resv_ptr->assoc_list;
-		} else if (resv_ptr->assoc_list)
+	if (old_resv_ptr->assoc_list && resv_ptr->assoc_list) {
+		if (strcmp(old_resv_ptr->assoc_list,
+			   resv_ptr->assoc_list))
 			resv.assocs = resv_ptr->assoc_list;
+	} else if (resv_ptr->assoc_list)
+		resv.assocs = resv_ptr->assoc_list;
 
-		if (old_resv_ptr->cpu_cnt != resv_ptr->cpu_cnt) {
-			resv.cpus = resv_ptr->cpu_cnt;
-#ifdef HAVE_BG
-			/* Since on a bluegene we track cnodes instead
-			 * of cpus do the adjustment since accounting
-			 * is expecting cpus here.
-			 */
-			if (!cpu_mult)
-				(void)select_g_alter_node_cnt(
-					SELECT_GET_NODE_CPU_CNT, &cpu_mult);
-			resv.cpus *= cpu_mult;
-#endif
-		} else
-			resv.cpus = (uint32_t)NO_VAL;
+	if (xstrcmp(old_resv_ptr->tres_str, resv_ptr->tres_str))
+		resv.tres_str = resv_ptr->tres_str;
 
-		if (old_resv_ptr->flags != resv_ptr->flags)
-			resv.flags = resv_ptr->flags;
-		else
-			resv.flags = NO_VAL;
+	if (old_resv_ptr->flags != resv_ptr->flags)
+		resv.flags = resv_ptr->flags;
+	else
+		resv.flags = NO_VAL;
 
-		if (old_resv_ptr->node_list && resv_ptr->node_list) {
-			if (strcmp(old_resv_ptr->node_list,
-				  resv_ptr->node_list))
-				resv.nodes = resv_ptr->node_list;
-		} else if (resv_ptr->node_list)
+	if (old_resv_ptr->node_list && resv_ptr->node_list) {
+		if (strcmp(old_resv_ptr->node_list,
+			   resv_ptr->node_list))
 			resv.nodes = resv_ptr->node_list;
+	} else if (resv_ptr->node_list)
+		resv.nodes = resv_ptr->node_list;
 
-		/* Here if the reservation has started already we need
-		 * to mark a new start time for it if certain
-		 * variables are needed in accounting.  Right now if
-		 * the assocs, nodes, flags or cpu count changes we need a
-		 * new start time of now. */
-		if ((resv_ptr->start_time < now)
-		     && (resv.assocs
-			 || resv.nodes
-			 || (resv.flags != NO_VAL)
-			 || (resv.cpus != NO_VAL))) {
-			resv_ptr->start_time_prev = resv_ptr->start_time;
-			resv_ptr->start_time = now;
-		}
+	/* Here if the reservation has started already we need
+	 * to mark a new start time for it if certain
+	 * variables are needed in accounting.  Right now if
+	 * the assocs, nodes, flags or cpu count changes we need a
+	 * new start time of now. */
+	if ((resv_ptr->start_time < now)
+	    && (resv.assocs
+		|| resv.nodes
+		|| (resv.flags != NO_VAL)
+		|| resv.tres_str)) {
+		resv_ptr->start_time_prev = resv_ptr->start_time;
+		resv_ptr->start_time = now;
 	}
+
 	/* now set the (maybe new) start_times */
 	resv.time_start = resv_ptr->start_time;
 	resv.time_start_prev = resv_ptr->start_time_prev;
@@ -1364,9 +1390,10 @@ static void _pack_resv(slurmctld_resv_t *resv_ptr, Buf buffer,
 		end_relative = resv_ptr->end_time;
 	}
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		packstr(resv_ptr->accounts,	buffer);
-		pack32(resv_ptr->cpu_cnt,	buffer);
+		packstr(resv_ptr->burst_buffer,	buffer);
+		pack32(resv_ptr->core_cnt,	buffer);
 		pack_time(end_relative,		buffer);
 		packstr(resv_ptr->features,	buffer);
 		pack32(resv_ptr->flags,		buffer);
@@ -1375,7 +1402,9 @@ static void _pack_resv(slurmctld_resv_t *resv_ptr, Buf buffer,
 		pack32(resv_ptr->node_cnt,	buffer);
 		packstr(resv_ptr->node_list,	buffer);
 		packstr(resv_ptr->partition,	buffer);
+		pack32(resv_ptr->resv_watts,    buffer);
 		pack_time(start_relative,	buffer);
+		packstr(resv_ptr->tres_fmt_str,	buffer);
 		packstr(resv_ptr->users,	buffer);
 
 		if (internal) {
@@ -1392,23 +1421,22 @@ static void _pack_resv(slurmctld_resv_t *resv_ptr, Buf buffer,
 			pack_time(resv_ptr->start_time_prev,	buffer);
 			pack_time(resv_ptr->start_time,	buffer);
 			pack8(resv_ptr->user_not,	buffer);
+			packstr(resv_ptr->tres_str,	buffer);
 		} else {
 			pack_bit_fmt(resv_ptr->node_bitmap, buffer);
 		}
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		uint16_t flags;
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		packstr(resv_ptr->accounts,	buffer);
-		pack32(resv_ptr->cpu_cnt,	buffer);
-		pack_time(resv_ptr->end_time,	buffer);
+		pack32(resv_ptr->core_cnt,	buffer);
+		pack_time(end_relative,		buffer);
 		packstr(resv_ptr->features,	buffer);
-		flags = resv_ptr->flags;
-		pack16(flags,			buffer);
+		pack32(resv_ptr->flags,		buffer);
 		packstr(resv_ptr->licenses,	buffer);
 		packstr(resv_ptr->name,		buffer);
 		pack32(resv_ptr->node_cnt,	buffer);
 		packstr(resv_ptr->node_list,	buffer);
 		packstr(resv_ptr->partition,	buffer);
-		pack_time(resv_ptr->start_time_first,	buffer);
+		pack_time(start_relative,	buffer);
 		packstr(resv_ptr->users,	buffer);
 
 		if (internal) {
@@ -1440,10 +1468,12 @@ slurmctld_resv_t *_load_reservation_state(Buf buffer,
 
 	resv_ptr = xmalloc(sizeof(slurmctld_resv_t));
 	xassert(resv_ptr->magic = RESV_MAGIC);	/* Sets value */
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&resv_ptr->accounts,
 				       &uint32_tmp,	buffer);
-		safe_unpack32(&resv_ptr->cpu_cnt,	buffer);
+		safe_unpackstr_xmalloc(&resv_ptr->burst_buffer,
+				       &uint32_tmp,	buffer);
+		safe_unpack32(&resv_ptr->core_cnt,	buffer);
 		safe_unpack_time(&resv_ptr->end_time,	buffer);
 		safe_unpackstr_xmalloc(&resv_ptr->features,
 				       &uint32_tmp, 	buffer);
@@ -1457,7 +1487,10 @@ slurmctld_resv_t *_load_reservation_state(Buf buffer,
 				       &uint32_tmp,	buffer);
 		safe_unpackstr_xmalloc(&resv_ptr->partition,
 				       &uint32_tmp, 	buffer);
+		safe_unpack32(&resv_ptr->resv_watts,    buffer);
 		safe_unpack_time(&resv_ptr->start_time_first,	buffer);
+		safe_unpackstr_xmalloc(&resv_ptr->tres_fmt_str,
+				       &uint32_tmp, 	buffer);
 		safe_unpackstr_xmalloc(&resv_ptr->users, &uint32_tmp, buffer);
 
 		/* Fields saved for internal use only (save state) */
@@ -1485,16 +1518,20 @@ slurmctld_resv_t *_load_reservation_state(Buf buffer,
 		safe_unpack_time(&resv_ptr->start_time_prev, buffer);
 		safe_unpack_time(&resv_ptr->start_time, buffer);
 		safe_unpack8((uint8_t *)&resv_ptr->user_not,	buffer);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
-		uint16_t flags;
+		safe_unpackstr_xmalloc(&resv_ptr->tres_str,
+				       &uint32_tmp, 	buffer);
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
 		safe_unpackstr_xmalloc(&resv_ptr->accounts,
 				       &uint32_tmp,	buffer);
-		safe_unpack32(&resv_ptr->cpu_cnt,	buffer);
+		safe_unpack32(&resv_ptr->core_cnt,	buffer);
+		resv_ptr->tres_str = xstrdup_printf(
+			"%d=%u", TRES_CPU, resv_ptr->core_cnt);
+		resv_ptr->tres_fmt_str = xstrdup_printf(
+			"cpu=%u", resv_ptr->core_cnt);
 		safe_unpack_time(&resv_ptr->end_time,	buffer);
 		safe_unpackstr_xmalloc(&resv_ptr->features,
 				       &uint32_tmp, 	buffer);
-		flags = resv_ptr->flags;
-		safe_unpack16(&flags,			buffer);
+		safe_unpack32(&resv_ptr->flags,		buffer);
 		safe_unpackstr_xmalloc(&resv_ptr->licenses,
 				       &uint32_tmp, 	buffer);
 		safe_unpackstr_xmalloc(&resv_ptr->name,	&uint32_tmp, buffer);
@@ -1533,6 +1570,7 @@ slurmctld_resv_t *_load_reservation_state(Buf buffer,
 		safe_unpack_time(&resv_ptr->start_time, buffer);
 		safe_unpack8((uint8_t *)&resv_ptr->user_not,	buffer);
 	}
+
 	return resv_ptr;
 
 unpack_error:
@@ -1633,39 +1671,150 @@ static bool _resv_overlap(time_t start_time, time_t end_time,
 	return rc;
 }
 
-/* Set a reservation's CPU count. Requires that the reservation's
- *	node_bitmap be set. */
-static void _set_cpu_cnt(slurmctld_resv_t *resv_ptr)
+/* Set a reservation's TRES count. Requires that the reservation's
+ *	node_bitmap be set.
+ * This needs to be done after all other setup is done.
+ */
+static void _set_tres_cnt(slurmctld_resv_t *resv_ptr,
+			  slurmctld_resv_t *old_resv_ptr)
 {
 	int i;
-	uint32_t cpu_cnt = 0;
+	uint64_t cpu_cnt = 0;
 	struct node_record *node_ptr = node_record_table_ptr;
+	char start_time[32], end_time[32];
+	char *name1, *name2, *val1, *val2;
+	assoc_mgr_lock_t locks = { NO_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
 
-	if (!resv_ptr->node_bitmap)
-		return;
-
+	if (resv_ptr->full_nodes && resv_ptr->node_bitmap) {
+		resv_ptr->core_cnt = 0;
 #ifdef HAVE_BG
-	if (!cnodes_per_mp)
-		select_g_alter_node_cnt(SELECT_GET_NODE_SCALING,
-					&cnodes_per_mp);
+		if (!cnodes_per_mp)
+			select_g_alter_node_cnt(SELECT_GET_NODE_SCALING,
+						&cnodes_per_mp);
 #endif
 
-	for (i=0; i<node_record_count; i++, node_ptr++) {
-		if (!bit_test(resv_ptr->node_bitmap, i))
-			continue;
+		for (i=0; i<node_record_count; i++, node_ptr++) {
+			if (!bit_test(resv_ptr->node_bitmap, i))
+				continue;
 #ifdef HAVE_BG
-		if (cnodes_per_mp)
-			cpu_cnt += cnodes_per_mp;
-		else
-			cpu_cnt += node_ptr->sockets;
+			if (cnodes_per_mp)
+				resv_ptr->core_cnt += cnodes_per_mp;
+			else
+				resv_ptr->core_cnt += node_ptr->sockets;
 #else
-		if (slurmctld_conf.fast_schedule)
-			cpu_cnt += node_ptr->config_ptr->cores;
-		else
-			cpu_cnt += node_ptr->cores;
+			if (slurmctld_conf.fast_schedule) {
+				resv_ptr->core_cnt +=
+					(node_ptr->config_ptr->cores *
+					 node_ptr->config_ptr->sockets);
+				cpu_cnt += node_ptr->config_ptr->cpus;
+			} else {
+				resv_ptr->core_cnt += (node_ptr->cores *
+						       node_ptr->sockets);
+				cpu_cnt += node_ptr->cpus;
+			}
 #endif
+		}
+	} else if (resv_ptr->core_bitmap) {
+		/* This doesn't work on bluegene systems so don't
+		 * worry about it.
+		 */
+		resv_ptr->core_cnt =
+			bit_set_count(resv_ptr->core_bitmap);
+
+		if (resv_ptr->node_bitmap) {
+			for (i = 0; i < node_record_count; i++, node_ptr++) {
+				int offset, core;
+				uint32_t cores, threads;
+				if (!bit_test(resv_ptr->node_bitmap, i))
+					continue;
+
+				if (slurmctld_conf.fast_schedule) {
+					cores = (node_ptr->config_ptr->cores *
+						 node_ptr->config_ptr->sockets);
+					threads = node_ptr->config_ptr->threads;
+				} else {
+					cores = (node_ptr->cores *
+						 node_ptr->sockets);
+					threads = node_ptr->threads;
+				}
+				offset = cr_get_coremap_offset(i);
+
+				for (core=0; core < cores; core++) {
+					if (!bit_test(resv_ptr->core_bitmap,
+						     core + offset))
+						continue;
+					cpu_cnt += threads;
+				}
+				/* info("cpu_cnt is now %"PRIu64" after %s", */
+				/*      cpu_cnt, node_ptr->name); */
+			}
+		} else
+			  cpu_cnt = resv_ptr->core_cnt;
 	}
-	resv_ptr->cpu_cnt = cpu_cnt;
+
+#ifdef HAVE_BG
+	/* Since on a bluegene we track cnodes instead of cpus do the
+	   adjustment since accounting is expecting cpus here.
+	*/
+	if (!cpu_mult)
+		(void)select_g_alter_node_cnt(
+			SELECT_GET_NODE_CPU_CNT, &cpu_mult);
+
+	cpu_cnt = resv_ptr->core_cnt * cpu_mult;
+#endif
+
+	xfree(resv_ptr->tres_str);
+	if (cpu_cnt)
+		xstrfmtcat(resv_ptr->tres_str, "%s%u=%"PRIu64,
+			   resv_ptr->tres_str ? "," : "",
+			   TRES_CPU, cpu_cnt);
+
+	if ((name1 = licenses_2_tres_str(resv_ptr->license_list))) {
+		xstrfmtcat(resv_ptr->tres_str, "%s%s",
+			   resv_ptr->tres_str ? "," : "",
+			   name1);
+		xfree(name1);
+	}
+
+	if ((name1 = bb_g_xlate_bb_2_tres_str(resv_ptr->burst_buffer))) {
+		xstrfmtcat(resv_ptr->tres_str, "%s%s",
+			   resv_ptr->tres_str ? "," : "",
+			   name1);
+		xfree(name1);
+	}
+
+	xfree(resv_ptr->tres_fmt_str);
+	assoc_mgr_lock(&locks);
+	resv_ptr->tres_fmt_str = slurmdb_make_tres_string_from_simple(
+		resv_ptr->tres_str, assoc_mgr_tres_list);
+	assoc_mgr_unlock(&locks);
+
+	slurm_make_time_str(&resv_ptr->start_time, start_time,
+			    sizeof(start_time));
+	slurm_make_time_str(&resv_ptr->end_time, end_time, sizeof(end_time));
+	if (resv_ptr->accounts) {
+		name1 = " accounts=";
+		val1  = resv_ptr->accounts;
+	} else
+		name1 = val1 = "";
+	if (resv_ptr->users) {
+		name2 = " users=";
+		val2  = resv_ptr->users;
+	} else
+		name2 = val2 = "";
+
+	info("sched: %s reservation=%s%s%s%s%s nodes=%s cores=%u "
+	     "licenses=%s tres=%s watts=%u start=%s end=%s",
+	     old_resv_ptr ? "Updated" : "Created",
+	     resv_ptr->name, name1, val1, name2, val2,
+	     resv_ptr->node_list, resv_ptr->core_cnt, resv_ptr->licenses,
+	     resv_ptr->tres_str, resv_ptr->resv_watts,
+	     start_time, end_time);
+	if (old_resv_ptr)
+		_post_resv_update(resv_ptr, old_resv_ptr);
+	else
+		_post_resv_create(resv_ptr);
 }
 
 /*
@@ -1679,7 +1828,7 @@ static List _license_validate2(resv_desc_msg_t *resv_desc_ptr, bool *valid)
 	slurmctld_resv_t *resv_ptr;
 	char *merged_licenses;
 
-	license_list = license_validate(resv_desc_ptr->licenses, valid);
+	license_list = license_validate(resv_desc_ptr->licenses, NULL, valid);
 	if (!valid || (resv_desc_ptr->licenses == NULL))
 		return license_list;
 
@@ -1697,7 +1846,7 @@ static List _license_validate2(resv_desc_msg_t *resv_desc_ptr, bool *valid)
 		xstrcat(merged_licenses, resv_ptr->licenses);
 	}
 	list_iterator_destroy(iter);
-	merged_list = license_validate(merged_licenses, valid);
+	merged_list = license_validate(merged_licenses, NULL, valid);
 	xfree(merged_licenses);
 	FREE_NULL_LIST(merged_list);
 	return license_list;
@@ -1715,9 +1864,7 @@ extern int create_resv(resv_desc_msg_t *resv_desc_ptr)
 	int account_cnt = 0, user_cnt = 0;
 	char **account_list = NULL;
 	uid_t *user_list = NULL;
-	char start_time[32], end_time[32];
 	List license_list = (List) NULL;
-	char *name1, *name2, *val1, *val2;
 	uint32_t total_node_cnt = 0;
 	bool account_not = false, user_not = false;
 
@@ -1754,17 +1901,32 @@ extern int create_resv(resv_desc_msg_t *resv_desc_ptr)
 	if (resv_desc_ptr->flags == NO_VAL)
 		resv_desc_ptr->flags = 0;
 	else {
+#ifdef HAVE_BG
+		resv_desc_ptr->flags &= (~RESERVE_FLAG_REPLACE);
+#endif
 		resv_desc_ptr->flags &= RESERVE_FLAG_MAINT    |
 					RESERVE_FLAG_OVERLAP  |
 					RESERVE_FLAG_IGN_JOBS |
 					RESERVE_FLAG_DAILY    |
 					RESERVE_FLAG_WEEKLY   |
-					RESERVE_FLAG_LIC_ONLY |
 					RESERVE_FLAG_STATIC   |
+					RESERVE_FLAG_ANY_NODES   |
 					RESERVE_FLAG_PART_NODES  |
 					RESERVE_FLAG_FIRST_CORES |
-					RESERVE_FLAG_TIME_FLOAT;
+					RESERVE_FLAG_TIME_FLOAT  |
+					RESERVE_FLAG_REPLACE;
 	}
+	if (resv_desc_ptr->flags & RESERVE_FLAG_REPLACE) {
+		if (resv_desc_ptr->node_list) {
+			rc = ESLURM_INVALID_NODE_NAME;
+			goto bad_parse;
+		}
+		if (resv_desc_ptr->core_cnt) {
+			rc = ESLURM_INVALID_CPU_COUNT;
+			goto bad_parse;
+		}
+	}
+
 	if (resv_desc_ptr->partition) {
 		part_ptr = find_part_record(resv_desc_ptr->partition);
 		if (!part_ptr) {
@@ -1989,7 +2151,7 @@ extern int create_resv(resv_desc_msg_t *resv_desc_ptr)
 			if (rc != SLURM_SUCCESS)
 				goto bad_parse;
 		}
-	} else if (!(resv_desc_ptr->flags & RESERVE_FLAG_LIC_ONLY)) {
+	} else if (!(resv_desc_ptr->flags & RESERVE_FLAG_ANY_NODES)) {
 		if ((!resv_desc_ptr->node_cnt || !resv_desc_ptr->node_cnt[0]) &&
 		    !resv_desc_ptr->core_cnt) {
 			info("Reservation request lacks node specification");
@@ -2044,6 +2206,8 @@ extern int create_resv(resv_desc_msg_t *resv_desc_ptr)
 	resv_ptr->account_cnt	= account_cnt;
 	resv_ptr->account_list	= account_list;
 	resv_ptr->account_not	= account_not;
+	resv_ptr->burst_buffer	= resv_desc_ptr->burst_buffer;
+	resv_desc_ptr->burst_buffer = NULL;	/* Nothing left to free */
 	resv_ptr->duration      = resv_desc_ptr->duration;
 	resv_ptr->end_time	= resv_desc_ptr->end_time;
 	resv_ptr->features	= resv_desc_ptr->features;
@@ -2062,6 +2226,7 @@ extern int create_resv(resv_desc_msg_t *resv_desc_ptr)
 	resv_ptr->partition	= resv_desc_ptr->partition;
 	resv_desc_ptr->partition = NULL;	/* Nothing left to free */
 	resv_ptr->part_ptr	= part_ptr;
+	resv_ptr->resv_watts	= resv_desc_ptr->resv_watts;
 	resv_ptr->start_time	= resv_desc_ptr->start_time;
 	resv_ptr->start_time_first = resv_ptr->start_time;
 	resv_ptr->start_time_prev = resv_ptr->start_time;
@@ -2076,13 +2241,11 @@ extern int create_resv(resv_desc_msg_t *resv_desc_ptr)
 #if _DEBUG
 		info("reservation using full nodes");
 #endif
-		_set_cpu_cnt(resv_ptr);
 		resv_ptr->full_nodes = 1;
 	} else {
-		resv_ptr->cpu_cnt = bit_set_count(resv_ptr->core_bitmap);
 #if _DEBUG
-		info("reservation using partial nodes: core count %u",
-		     resv_ptr->cpu_cnt);
+		info("reservation using partial nodes: core count %"PRIu64,
+		     cpu_cnt);
 #endif
 		resv_ptr->full_nodes = 0;
 	}
@@ -2090,27 +2253,10 @@ extern int create_resv(resv_desc_msg_t *resv_desc_ptr)
 	if ((rc = _set_assoc_list(resv_ptr)) != SLURM_SUCCESS)
 		goto bad_parse;
 
-	slurm_make_time_str(&resv_ptr->start_time, start_time,
-			    sizeof(start_time));
-	slurm_make_time_str(&resv_ptr->end_time, end_time, sizeof(end_time));
-	if (resv_ptr->accounts) {
-		name1 = " accounts=";
-		val1  = resv_ptr->accounts;
-	} else
-		name1 = val1 = "";
-	if (resv_ptr->users) {
-		name2 = " users=";
-		val2  = resv_ptr->users;
-	} else
-		name2 = val2 = "";
-	info("sched: Created reservation %s%s%s%s%s nodes=%s start=%s end=%s",
-	     resv_ptr->name, name1, val1, name2, val2,
-	     resv_ptr->node_list, start_time, end_time);
 	if (resv_ptr->flags & RESERVE_FLAG_TIME_FLOAT)
 		resv_ptr->start_time -= now;
 
-	/* This needs to be done after all other setup is done. */
-	_post_resv_create(resv_ptr);
+	_set_tres_cnt(resv_ptr, NULL);
 
 	list_append(resv_list, resv_ptr);
 	last_resv_update = now;
@@ -2122,8 +2268,7 @@ extern int create_resv(resv_desc_msg_t *resv_desc_ptr)
 	for (i = 0; i < account_cnt; i++)
 		xfree(account_list[i]);
 	xfree(account_list);
-	if (license_list)
-		list_destroy(license_list);
+	FREE_NULL_LIST(license_list);
 	FREE_NULL_BITMAP(node_bitmap);
 	FREE_NULL_BITMAP(core_bitmap);
 	xfree(user_list);
@@ -2133,10 +2278,7 @@ extern int create_resv(resv_desc_msg_t *resv_desc_ptr)
 /* Purge all reservation data structures */
 extern void resv_fini(void)
 {
-	if (resv_list) {
-		list_destroy(resv_list);
-		resv_list = (List) NULL;
-	}
+	FREE_NULL_LIST(resv_list);
 }
 
 /* Update an exiting resource reservation */
@@ -2145,8 +2287,6 @@ extern int update_resv(resv_desc_msg_t *resv_desc_ptr)
 	time_t now = time(NULL);
 	slurmctld_resv_t *resv_backup, *resv_ptr;
 	int error_code = SLURM_SUCCESS, i, rc;
-	char start_time[32], end_time[32];
-	char *name1, *name2, *val1, *val2;
 
 	if (!resv_list)
 		resv_list = list_create(_del_resv_rec);
@@ -2194,14 +2334,24 @@ extern int update_resv(resv_desc_msg_t *resv_desc_ptr)
 			resv_ptr->flags |= RESERVE_FLAG_WEEKLY;
 		if (resv_desc_ptr->flags & RESERVE_FLAG_NO_WEEKLY)
 			resv_ptr->flags &= (~RESERVE_FLAG_WEEKLY);
-		if (resv_desc_ptr->flags & RESERVE_FLAG_LIC_ONLY)
-			resv_ptr->flags |= RESERVE_FLAG_LIC_ONLY;
-		if (resv_desc_ptr->flags & RESERVE_FLAG_NO_LIC_ONLY)
-			resv_ptr->flags &= (~RESERVE_FLAG_LIC_ONLY);
+		if (resv_desc_ptr->flags & RESERVE_FLAG_ANY_NODES)
+			resv_ptr->flags |= RESERVE_FLAG_ANY_NODES;
+		if (resv_desc_ptr->flags & RESERVE_FLAG_NO_ANY_NODES)
+			resv_ptr->flags &= (~RESERVE_FLAG_ANY_NODES);
 		if (resv_desc_ptr->flags & RESERVE_FLAG_STATIC)
 			resv_ptr->flags |= RESERVE_FLAG_STATIC;
 		if (resv_desc_ptr->flags & RESERVE_FLAG_NO_STATIC)
 			resv_ptr->flags &= (~RESERVE_FLAG_STATIC);
+#ifndef HAVE_BG
+		if (resv_desc_ptr->flags & RESERVE_FLAG_REPLACE) {
+			if ((resv_ptr->flags & RESERVE_FLAG_SPEC_NODES) ||
+			    (resv_ptr->full_nodes == 0)) {
+				error_code = ESLURM_NOT_SUPPORTED;
+				goto update_failure;
+			}
+			resv_ptr->flags |= RESERVE_FLAG_REPLACE;
+		}
+#endif
 		if (resv_desc_ptr->flags & RESERVE_FLAG_PART_NODES) {
 			if ((resv_ptr->partition == NULL) &&
 			    (resv_desc_ptr->partition == NULL)) {
@@ -2245,6 +2395,8 @@ extern int update_resv(resv_desc_msg_t *resv_desc_ptr)
 		resv_desc_ptr->partition = NULL; /* Nothing left to free */
 		resv_ptr->part_ptr	= part_ptr;
 	}
+	if (resv_desc_ptr->resv_watts != NO_VAL)
+		resv_ptr->resv_watts = resv_desc_ptr->resv_watts;
 	if (resv_desc_ptr->accounts) {
 		rc = _update_account_list(resv_ptr, resv_desc_ptr->accounts);
 		if (rc) {
@@ -2252,6 +2404,13 @@ extern int update_resv(resv_desc_msg_t *resv_desc_ptr)
 			goto update_failure;
 		}
 	}
+	if (resv_desc_ptr->burst_buffer) {
+		xfree(resv_ptr->burst_buffer);
+		if (resv_desc_ptr->burst_buffer[0] != '\0') {
+			resv_ptr->burst_buffer = resv_desc_ptr->burst_buffer;
+			resv_desc_ptr->burst_buffer = NULL;
+		}
+	}
 	if (resv_desc_ptr->licenses && (resv_desc_ptr->licenses[0] == '\0')) {
 		if (((resv_desc_ptr->node_cnt != NULL)  &&
 		     (resv_desc_ptr->node_cnt[0] == 0)) ||
@@ -2264,8 +2423,7 @@ extern int update_resv(resv_desc_msg_t *resv_desc_ptr)
 		}
 		xfree(resv_desc_ptr->licenses);	/* clear licenses */
 		xfree(resv_ptr->licenses);
-		if (resv_ptr->license_list)
-			list_destroy(resv_ptr->license_list);
+		FREE_NULL_LIST(resv_ptr->license_list);
 	}
 
 	if (resv_desc_ptr->licenses) {
@@ -2281,8 +2439,7 @@ extern int update_resv(resv_desc_msg_t *resv_desc_ptr)
 		xfree(resv_ptr->licenses);
 		resv_ptr->licenses	= resv_desc_ptr->licenses;
 		resv_desc_ptr->licenses = NULL; /* Nothing left to free */
-		if (resv_ptr->license_list)
-			list_destroy(resv_ptr->license_list);
+		FREE_NULL_LIST(resv_ptr->license_list);
 		resv_ptr->license_list  = license_list;
 	}
 	if (resv_desc_ptr->features && (resv_desc_ptr->features[0] == '\0')) {
@@ -2451,31 +2608,13 @@ extern int update_resv(resv_desc_msg_t *resv_desc_ptr)
 		error_code = ESLURM_NODES_BUSY;
 		goto update_failure;
 	}
-	_set_cpu_cnt(resv_ptr);
 
 	/* This needs to be after checks for both account and user changes */
 	if ((error_code = _set_assoc_list(resv_ptr)) != SLURM_SUCCESS)
 		goto update_failure;
 
-	slurm_make_time_str(&resv_ptr->start_time, start_time,
-			    sizeof(start_time));
-	slurm_make_time_str(&resv_ptr->end_time, end_time, sizeof(end_time));
-	if (resv_ptr->accounts) {
-		name1 = " accounts=";
-		val1  = resv_ptr->accounts;
-	} else
-		name1 = val1 = "";
-	if (resv_ptr->users) {
-		name2 = " users=";
-		val2  = resv_ptr->users;
-	} else
-		name2 = val2 = "";
-	info("sched: Updated reservation=%s%s%s%s%s nodes=%s licenses=%s "
-	     "start=%s end=%s",
-	     resv_ptr->name, name1, val1, name2, val2,
-	     resv_ptr->node_list, resv_ptr->licenses, start_time, end_time);
+	_set_tres_cnt(resv_ptr, resv_backup);
 
-	_post_resv_update(resv_ptr, resv_backup);
 	_del_resv_rec(resv_backup);
 	(void) set_node_maint_mode(true);
 	last_resv_update = now;
@@ -2535,7 +2674,7 @@ static bool _match_user_assoc(char *assoc_str, List assoc_list, bool deny)
 {
 	ListIterator itr;
 	bool found = 0;
-	slurmdb_association_rec_t *assoc;
+	slurmdb_assoc_rec_t *assoc;
 	char tmp_char[1000];
 
 	if (!assoc_str || !assoc_list || !list_count(assoc_list))
@@ -2626,9 +2765,8 @@ extern void show_resv(char **buffer_ptr, int *buffer_size, uid_t uid,
 	time_t now = time(NULL);
 	List assoc_list = NULL;
 	bool check_permissions = false;
-	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK,
-				   NO_LOCK, NO_LOCK };
+	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   NO_LOCK, NO_LOCK, NO_LOCK };
 
 	DEF_TIMERS;
 
@@ -2649,11 +2787,11 @@ extern void show_resv(char **buffer_ptr, int *buffer_size, uid_t uid,
 	/* Create this list once since it will not change durning this call. */
 	if ((slurmctld_conf.private_data & PRIVATE_DATA_RESERVATIONS)
 	    && !validate_operator(uid)) {
-		slurmdb_association_rec_t assoc;
+		slurmdb_assoc_rec_t assoc;
 
 		check_permissions = true;
 
-		memset(&assoc, 0, sizeof(slurmdb_association_rec_t));
+		memset(&assoc, 0, sizeof(slurmdb_assoc_rec_t));
 		assoc.uid = uid;
 
 		assoc_list = list_create(NULL);
@@ -2819,6 +2957,93 @@ extern int dump_all_resv_state(void)
 	return 0;
 }
 
+/* Unfortunately the reservation's core_bitmap is a global bitmap and the nodes
+ * in the system have changed in terms of their node count or nodes have been
+ * added or removed. Make best effort to rebuild the reservation's core_bitmap
+ * on the limited information currently available. The specific cores might
+ * change, but this logic at least leaves their count constant and uses the
+ * same nodes. */
+static void _rebuild_core_bitmap(slurmctld_resv_t *resv_ptr)
+{
+	int i_first, i_last, i, j, k, core_offset, node_offset;
+	uint32_t core_cnt, core_inx, node_inx;
+	ListIterator job_iterator;
+	struct job_record  *job_ptr;
+
+	info("Core_bitmap for reservation %s no longer valid, cores addded or removed, rebuilding",
+	     resv_ptr->name);
+
+	core_cnt = bit_set_count(resv_ptr->core_bitmap);      /* Cores needed */
+	bit_free(resv_ptr->core_bitmap);
+	resv_ptr->core_bitmap =
+		bit_alloc(cr_get_coremap_offset(node_record_count));
+
+	/* Try to use any cores in use by jobs running in this reservation */
+	job_iterator = list_iterator_create(job_list);
+	while ((job_ptr = (struct job_record *) list_next(job_iterator))) {
+		if (!IS_JOB_RUNNING(job_ptr)      ||
+		    (job_ptr->node_bitmap == NULL)||
+		    (job_ptr->job_resrcs == NULL) ||
+		    (job_ptr->resv_name == NULL)  ||
+		    strcmp(job_ptr->resv_name, resv_ptr->name))
+			continue;
+		/* This job is currently running in this reservation */
+		i_first = bit_ffs(job_ptr->node_bitmap);
+		if (i_first >= 0)
+			i_last = bit_fls(job_ptr->node_bitmap);
+		else
+			i_last = i_first - 1;
+		node_offset = -1;
+		for (node_inx = i_first;
+		     ((core_cnt > 0) && (node_inx <= i_last)); node_inx++) {
+			if (!bit_test(job_ptr->node_bitmap, node_inx))
+				continue;
+			node_offset++;
+			core_offset = get_job_resources_offset(
+						job_ptr->job_resrcs,
+						node_offset, 0, 0);
+			if (core_offset < 0)
+				break;
+			j = cr_get_coremap_offset(node_inx);
+			k = cr_get_coremap_offset(node_inx + 1);
+			k -= j;	/* core count on this node */
+			for (i = 0; i < k; i++) {
+				if (!bit_test(job_ptr->job_resrcs->core_bitmap,
+					      core_offset + i))
+					continue;
+				bit_set(resv_ptr->core_bitmap, j + i);
+				if (--core_cnt == 0)
+					break;
+			}
+		}
+	}
+	list_iterator_destroy(job_iterator);
+
+	/* Use any other available cores, evenly distributing across nodes */
+	i_first = bit_ffs(resv_ptr->node_bitmap);
+	if (i_first >= 0)
+		i_last = bit_fls(resv_ptr->node_bitmap);
+	else
+		i_last = i_first - 1;
+	for (core_inx = 0; ((core_cnt > 0) && (core_inx <= core_cnt));
+	     core_inx++) {
+		for (node_inx = i_first; node_inx <= i_last; node_inx++) {
+			if (!bit_test(resv_ptr->node_bitmap, node_inx))
+				continue;
+			j = cr_get_coremap_offset(node_inx);
+			k = cr_get_coremap_offset(node_inx + 1);
+			j += core_inx;	/* Core offset on this node */
+			if (j >= k)
+				continue;
+			if (bit_test(resv_ptr->core_bitmap, j))
+				continue;	/* Already set by job */
+			bit_set(resv_ptr->core_bitmap, j);
+			if (--core_cnt == 0)
+				break;
+		}	
+	}
+}
+
 /* Validate one reservation record, return true if good */
 static bool _validate_one_reservation(slurmctld_resv_t *resv_ptr)
 {
@@ -2858,10 +3083,9 @@ static bool _validate_one_reservation(slurmctld_resv_t *resv_ptr)
 	}
 	if (resv_ptr->licenses) {
 		bool valid = true;
-		if (resv_ptr->license_list)
-			list_destroy(resv_ptr->license_list);
+		FREE_NULL_LIST(resv_ptr->license_list);
 		resv_ptr->license_list = license_validate(resv_ptr->licenses,
-							  &valid);
+							  NULL, &valid);
 		if (!valid) {
 			error("Reservation %s has invalid licenses (%s)",
 			      resv_ptr->name, resv_ptr->licenses);
@@ -2885,13 +3109,19 @@ static bool _validate_one_reservation(slurmctld_resv_t *resv_ptr)
 	}
 	if ((resv_ptr->flags & RESERVE_FLAG_PART_NODES) &&
 	    resv_ptr->part_ptr && resv_ptr->part_ptr->node_bitmap) {
+		slurmctld_resv_t old_resv_ptr;
+		memset(&old_resv_ptr, 0, sizeof(slurmctld_resv_t));
+
 		xfree(resv_ptr->node_list);
 		resv_ptr->node_list = xstrdup(resv_ptr->part_ptr->nodes);
 		FREE_NULL_BITMAP(resv_ptr->node_bitmap);
 		resv_ptr->node_bitmap = bit_copy(resv_ptr->part_ptr->
 						 node_bitmap);
 		resv_ptr->node_cnt = bit_set_count(resv_ptr->node_bitmap);
-		_set_cpu_cnt(resv_ptr);
+		old_resv_ptr.tres_str = resv_ptr->tres_str;
+		resv_ptr->tres_str = NULL;
+		_set_tres_cnt(resv_ptr, &old_resv_ptr);
+		xfree(old_resv_ptr.tres_str);
 		last_resv_update = time(NULL);
 	} else if (resv_ptr->node_list) {	/* Change bitmap last */
 		bitstr_t *node_bitmap;
@@ -2927,6 +3157,13 @@ static bool _validate_one_reservation(slurmctld_resv_t *resv_ptr)
 		FREE_NULL_BITMAP(resv_ptr->node_bitmap);
 		resv_ptr->node_bitmap = node_bitmap;
 	}
+
+	if (!resv_ptr->full_nodes && resv_ptr->core_bitmap &&
+	    (bit_size(resv_ptr->core_bitmap) !=
+	     cr_get_coremap_offset(node_record_count))) {
+		_rebuild_core_bitmap(resv_ptr);
+	}
+
 	return true;
 }
 
@@ -2986,8 +3223,80 @@ static void _validate_all_reservations(void)
 }
 
 /*
- * Validate that the reserved nodes are not DOWN or DRAINED and
- *	select different nodes as needed.
+ * Replace DOWN, DRAIN or ALLOCATED nodes for reservations with "replace" flag
+ */
+static void _resv_node_replace(slurmctld_resv_t *resv_ptr)
+{
+	bitstr_t *preserve_bitmap = NULL;
+	bitstr_t *core_bitmap = NULL, *new_bitmap = NULL, *tmp_bitmap = NULL;
+	resv_desc_msg_t resv_desc;
+	int i, add_nodes, new_nodes, preserve_nodes, busy_nodes_needed;
+	bool log_it = true;
+
+	/* Identify nodes which can be preserved in this reservation */
+	preserve_bitmap = bit_copy(resv_ptr->node_bitmap);
+	bit_and(preserve_bitmap, avail_node_bitmap);
+	bit_and(preserve_bitmap, idle_node_bitmap);
+	preserve_nodes = bit_set_count(preserve_bitmap);
+
+	/* Try to get replacement nodes, first from idle pool then re-use
+	 * busy nodes in the current reservation as needed */
+	add_nodes = resv_ptr->node_cnt - preserve_nodes;
+	while (add_nodes) {
+		memset(&resv_desc, 0, sizeof(resv_desc_msg_t));
+		resv_desc.start_time  = resv_ptr->start_time;
+		resv_desc.end_time    = resv_ptr->end_time;
+		resv_desc.features    = resv_ptr->features;
+		resv_desc.node_cnt    = xmalloc(sizeof(uint32_t) * 2);
+		resv_desc.node_cnt[0] = add_nodes;
+		i = _select_nodes(&resv_desc, &resv_ptr->part_ptr, &new_bitmap,
+				  &core_bitmap);
+		xfree(resv_desc.node_cnt);
+		xfree(resv_desc.node_list);
+		xfree(resv_desc.partition);
+		if (i == SLURM_SUCCESS) {
+			new_nodes = bit_set_count(new_bitmap);
+			busy_nodes_needed = resv_ptr->node_cnt - new_nodes
+					    - preserve_nodes;
+			if (busy_nodes_needed > 0) {
+				bit_not(preserve_bitmap);
+				bit_and(resv_ptr->node_bitmap, preserve_bitmap);
+				bit_not(preserve_bitmap);
+				tmp_bitmap = bit_pick_cnt(resv_ptr->node_bitmap,
+							  busy_nodes_needed);
+				bit_and(resv_ptr->node_bitmap, tmp_bitmap);
+				FREE_NULL_BITMAP(tmp_bitmap);
+				bit_or(resv_ptr->node_bitmap, preserve_bitmap);
+			} else {
+				bit_and(resv_ptr->node_bitmap, preserve_bitmap);
+			}
+			bit_or(resv_ptr->node_bitmap, new_bitmap);
+			FREE_NULL_BITMAP(new_bitmap);
+			FREE_NULL_BITMAP(resv_ptr->core_bitmap);
+			resv_ptr->core_bitmap = core_bitmap;	/* is NULL */
+			xfree(resv_ptr->node_list);
+			resv_ptr->node_list = bitmap2node_name(resv_ptr->
+							       node_bitmap);
+			info("modified reservation %s with replacement "
+				"nodes, new nodes: %s",
+				resv_ptr->name, resv_ptr->node_list);
+			break;
+		}
+		add_nodes /= 2;	/* Try to get idle nodes as possible */
+		if (log_it) {
+			info("unable to replace all allocated nodes in "
+			     "reservation %s at this time", resv_ptr->name);
+			log_it = false;
+		}
+	}
+	FREE_NULL_BITMAP(preserve_bitmap);
+	last_resv_update = time(NULL);
+	schedule_resv_save();
+}
+
+/*
+ * Replace DOWN or DRAINED in an advanced reservation, also replaces nodes
+ * in use for reservations with the "replace" flag.
  */
 static void _validate_node_choice(slurmctld_resv_t *resv_ptr)
 {
@@ -2996,17 +3305,24 @@ static void _validate_node_choice(slurmctld_resv_t *resv_ptr)
 	int i;
 	resv_desc_msg_t resv_desc;
 
-	if (resv_ptr->flags & RESERVE_FLAG_SPEC_NODES ||
-	    resv_ptr->flags & RESERVE_FLAG_STATIC)
+	if ((resv_ptr->node_bitmap == NULL) ||
+	    (resv_ptr->flags & RESERVE_FLAG_SPEC_NODES) ||
+	    (resv_ptr->flags & RESERVE_FLAG_STATIC))
+		return;
+
+	if (resv_ptr->flags & RESERVE_FLAG_REPLACE) {
+		_resv_node_replace(resv_ptr);
 		return;
+	}
 
 	i = bit_overlap(resv_ptr->node_bitmap, avail_node_bitmap);
-	if (i == resv_ptr->node_cnt)
+	if (i == resv_ptr->node_cnt) {
 		return;
+	}
 
 	/* Reservation includes DOWN, DRAINED/DRAINING, FAILING or
 	 * NO_RESPOND nodes. Generate new request using _select_nodes()
-	 * in attempt to replace this nodes */
+	 * in attempt to replace these nodes */
 	memset(&resv_desc, 0, sizeof(resv_desc_msg_t));
 	resv_desc.start_time = resv_ptr->start_time;
 	resv_desc.end_time   = resv_ptr->end_time;
@@ -3136,12 +3452,9 @@ extern int load_all_resv_state(int recover)
 
 	safe_unpackstr_xmalloc( &ver_str, &uint32_tmp, buffer);
 	debug3("Version string in resv_state header is %s", ver_str);
-	if (ver_str) {
-		if (!strcmp(ver_str, RESV_STATE_VERSION))
-			safe_unpack16(&protocol_version, buffer);
-		else if (!strcmp(ver_str, RESV_2_6_STATE_VERSION))
-			protocol_version = SLURM_2_6_PROTOCOL_VERSION;
-	}
+	if (ver_str && !strcmp(ver_str, RESV_STATE_VERSION))
+		safe_unpack16(&protocol_version, buffer);
+
 	if (protocol_version == (uint16_t) NO_VAL) {
 		error("************************************************************");
 		error("Can not recover reservation state, data version incompatible");
@@ -3647,7 +3960,7 @@ static bitstr_t *_pick_idle_node_cnt(bitstr_t *avail_bitmap,
 	} else if ((node_cnt == 0) &&
 		   ((resv_desc_ptr->core_cnt == NULL) ||
 		    (resv_desc_ptr->core_cnt[0] == 0)) &&
-		   (resv_desc_ptr->flags & RESERVE_FLAG_LIC_ONLY)) {
+		   (resv_desc_ptr->flags & RESERVE_FLAG_ANY_NODES)) {
 		return bit_alloc(bit_size(avail_bitmap));
 	}
 
@@ -3756,7 +4069,7 @@ static int _valid_job_access_resv(struct job_record *job_ptr,
 	/* Determine if we have access */
 	if (accounting_enforce & ACCOUNTING_ENFORCE_ASSOCS) {
 		char tmp_char[30];
-		slurmdb_association_rec_t *assoc;
+		slurmdb_assoc_rec_t *assoc;
 		if (!resv_ptr->assoc_list) {
 			error("Reservation %s has no association list. "
 			      "Checking user/account lists",
@@ -3765,16 +4078,16 @@ static int _valid_job_access_resv(struct job_record *job_ptr,
 		}
 
 		if (!job_ptr->assoc_ptr) {
-			slurmdb_association_rec_t assoc_rec;
+			slurmdb_assoc_rec_t assoc_rec;
 			/* This should never be called, but just to be
 			 * safe we will try to fill it in. */
 			memset(&assoc_rec, 0,
-			       sizeof(slurmdb_association_rec_t));
+			       sizeof(slurmdb_assoc_rec_t));
 			assoc_rec.id = job_ptr->assoc_id;
 			if (assoc_mgr_fill_in_assoc(
 				    acct_db_conn, &assoc_rec,
 				    accounting_enforce,
-				    (slurmdb_association_rec_t **)
+				    (slurmdb_assoc_rec_t **)
 				    &job_ptr->assoc_ptr, false))
 				goto end_it;
 		}
@@ -3886,7 +4199,7 @@ extern int job_test_resv_now(struct job_record *job_ptr)
 		return ESLURM_RESERVATION_INVALID;
 	}
 	if ((resv_ptr->node_cnt == 0) &&
-	    !(resv_ptr->flags & RESERVE_FLAG_LIC_ONLY)) {
+	    !(resv_ptr->flags & RESERVE_FLAG_ANY_NODES)) {
 		/* empty reservation treated like it will start later */
 		return ESLURM_INVALID_TIME_VALUE;
 	}
@@ -3894,6 +4207,30 @@ extern int job_test_resv_now(struct job_record *job_ptr)
 	return SLURM_SUCCESS;
 }
 
+/*
+ * Note that a job is starting execution. If that job is associated with a
+ * reservation having the "Replace" flag, then remove that job's nodes from
+ * the reservation. Additional nodes will be added to the reservation from
+ * those currently available.
+ */
+extern void job_claim_resv(struct job_record *job_ptr)
+{
+	slurmctld_resv_t *resv_ptr;
+
+	if (job_ptr->resv_name == NULL)
+		return;
+
+	resv_ptr = (slurmctld_resv_t *) list_find_first (resv_list,
+			_find_resv_name, job_ptr->resv_name);
+	if (!resv_ptr ||
+	    !(resv_ptr->flags & RESERVE_FLAG_REPLACE) ||
+	    (resv_ptr->flags & RESERVE_FLAG_SPEC_NODES) ||
+	    (resv_ptr->flags & RESERVE_FLAG_STATIC))
+		return;
+
+	_resv_node_replace(resv_ptr);
+}
+
 /* Adjust a job's time_limit and end_time as needed to avoid using
  *	reserved resources. Don't go below job's time_min value. */
 extern void job_time_adj_resv(struct job_record *job_ptr)
@@ -3975,6 +4312,148 @@ static uint32_t _get_job_duration(struct job_record *job_ptr)
 	return duration;
 }
 
+static void _add_bb_resv(burst_buffer_info_msg_t **bb_resv, char *plugin,
+			 char *type, uint64_t cnt)
+{
+	burst_buffer_info_t *bb_array;
+	burst_buffer_gres_t *gres_ptr;
+	int i;
+
+	if (*bb_resv == NULL)
+		*bb_resv = xmalloc(sizeof(burst_buffer_info_msg_t));
+
+	for (i = 0, bb_array = (*bb_resv)->burst_buffer_array;
+	     i < (*bb_resv)->record_count; i++) {
+		if (!xstrcmp(plugin, bb_array->name))
+			break;
+	}
+	if (i >= (*bb_resv)->record_count) {
+		(*bb_resv)->record_count++;
+		(*bb_resv)->burst_buffer_array = xrealloc(
+			(*bb_resv)->burst_buffer_array,
+			sizeof(burst_buffer_info_t) * (*bb_resv)->record_count);
+		bb_array = (*bb_resv)->burst_buffer_array +
+			   (*bb_resv)->record_count - 1;
+		bb_array->name = xstrdup(plugin);
+	}
+
+	if (type == NULL) {
+		bb_array->used_space += cnt;
+		return;
+	}
+
+	for (i = 0, gres_ptr = bb_array->gres_ptr; i < bb_array->gres_cnt; i++){
+		if ((gres_ptr->name == NULL) || !strcmp(type, gres_ptr->name))
+			break;
+	}
+	if (i >= bb_array->gres_cnt) {
+		bb_array->gres_cnt++;
+		bb_array->gres_ptr = xrealloc(bb_array->gres_ptr,
+					      sizeof(burst_buffer_gres_t) *
+					      bb_array->gres_cnt);
+		gres_ptr = bb_array->gres_ptr + bb_array->gres_cnt - 1;
+		gres_ptr->name = xstrdup(type);
+	}
+	gres_ptr->used_cnt += cnt;
+}
+
+static void _update_bb_resv(burst_buffer_info_msg_t **bb_resv, char *bb_spec)
+{
+	uint64_t cnt;
+	char *end_ptr = NULL, *end_ptr2 = NULL;
+	char *sep, *tmp_spec, *tok, *plugin, *type;
+
+	if ((bb_spec == NULL) || (bb_spec[0] == '\0'))
+		return;
+
+	tmp_spec = xstrdup(bb_spec);
+	tok = strtok_r(tmp_spec, ",", &end_ptr);
+	while (tok) {
+		if (!strncmp(tok, "cray:", 5)) {
+			plugin = "cray";
+			tok += 5;
+		} else if (!strncmp(tok, "generic:", 8)) {
+			plugin = "generic";
+			tok += 8;
+		} else
+			plugin = NULL;
+
+		sep = strchr(tok, ':');
+		if (sep) {
+			type = tok;
+			sep[0] = '\0';
+			tok = sep + 1;
+		} else {
+			type = NULL;
+		}
+
+		cnt = strtol(tok, &end_ptr2, 10);
+		if ((end_ptr2[0] == 'n') || (end_ptr2[0] == 'N')) {
+			type = "nodes";	/* Cray node spec format */
+		} else if ((end_ptr2[0] == 'k') || (end_ptr2[0] == 'K')) {
+			cnt *= ((uint64_t) 1024);
+		} else if ((end_ptr2[0] == 'm') || (end_ptr2[0] == 'M')) {
+			cnt *= ((uint64_t) 1024 * 1024);
+		} else if ((end_ptr2[0] == 'g') || (end_ptr2[0] == 'G')) {
+			cnt *= ((uint64_t) 1024 * 1024 * 1024);
+		} else if ((end_ptr2[0] == 't') || (end_ptr2[0] == 'T')) {
+			cnt *= ((uint64_t) 1024 * 1024 * 1024 * 1024);
+		} else if ((end_ptr2[0] == 'p') || (end_ptr2[0] == 'P')) {
+			cnt *= ((uint64_t) 1024 * 1024 * 1024 * 1024 * 1024);
+		} else {	/* Default GB */
+			cnt *= ((uint64_t) 1024 * 1024 * 1024);
+		}
+
+		if (cnt)
+			_add_bb_resv(bb_resv, plugin, type, cnt);
+		tok = strtok_r(NULL, ",", &end_ptr);
+	}
+	xfree(tmp_spec);
+}
+
+/*
+ * Determine how many burst buffer resources the specified job is prevented
+ *	from using due to reservations
+ *
+ * IN job_ptr   - job to test
+ * IN when      - when the job is expected to start
+ * RET burst buffer reservation structure, call
+ *	 slurm_free_burst_buffer_info_msg() to free
+ */
+extern burst_buffer_info_msg_t *job_test_bb_resv(struct job_record *job_ptr,
+						 time_t when)
+{
+	slurmctld_resv_t * resv_ptr;
+	time_t job_start_time, job_end_time, now = time(NULL);
+	burst_buffer_info_msg_t *bb_resv = NULL;
+	ListIterator iter;
+
+	if ((job_ptr->burst_buffer == NULL) ||
+	    (job_ptr->burst_buffer[0] == '\0'))
+		return bb_resv;
+
+	job_start_time = when;
+	job_end_time   = when + _get_job_duration(job_ptr);
+	iter = list_iterator_create(resv_list);
+	while ((resv_ptr = (slurmctld_resv_t *) list_next(iter))) {
+		if (resv_ptr->end_time <= now)
+			_advance_resv_time(resv_ptr);
+		if ((resv_ptr->start_time >= job_end_time) ||
+		    (resv_ptr->end_time   <= job_start_time))
+			continue;	/* reservation at different time */
+		if ((resv_ptr->burst_buffer == NULL) ||
+		    (resv_ptr->burst_buffer[0] == '\0'))
+			continue;	/* reservation has no burst buffers */
+		if (!xstrcmp(job_ptr->resv_name, resv_ptr->name))
+			continue;	/* job can use this reservation */
+
+		_update_bb_resv(&bb_resv, resv_ptr->burst_buffer);
+	}
+	list_iterator_destroy(iter);
+
+	return bb_resv;
+}
+
 /*
  * Determine how many licenses of the give type the specified job is
  *	prevented from using due to reservations
@@ -4015,6 +4494,228 @@ extern int job_test_lic_resv(struct job_record *job_ptr, char *lic_name,
 	return resv_cnt;
 }
 
+
+static void _free_slot(void *x)
+{
+	xfree(x);
+}
+
+static void _init_constraint_planning(constraint_planning_t* sched)
+{
+	sched->slot_list = list_create(_free_slot);
+}
+
+static void _free_constraint_planning(constraint_planning_t* sched)
+{
+	FREE_NULL_LIST(sched->slot_list);
+}
+
+/*
+ * update the list of slots with the new time delimited constraint
+ * the new slot may have to be :
+ * - inserted
+ * - added to a previously added slot, if it corresponds to the same
+ *   period
+ * - shrinked if it overlaps temporarily a previously slot
+ *   (in that case it will result in a new slot insertion and an
+ *    already defined slot update with the addition of the value)
+ * - splitted in two chunks if it is nested in a previously slot
+ *   (in that case it will result in the insertion of new head,
+ *    the update of the previously defined slot, and the iteration
+ *    of the logic with a new slot reduced to the remaining time)
+ */
+static void _update_constraint_planning(constraint_planning_t* sched,
+					uint32_t value,
+					time_t start, time_t end)
+{
+	ListIterator iter;
+	constraint_slot_t *cur_slot, *cstr_slot, *tmp_slot;
+	bool done = false;
+
+	/* create the constraint slot to add */
+	cstr_slot = xmalloc(sizeof(constraint_slot_t));
+	cstr_slot->value = value;
+	cstr_slot->start = start;
+	cstr_slot->end = end;
+
+	/* iterate on the current slot list to identify 
+	 * the modifications and do them live */
+	iter = list_iterator_create(sched->slot_list);
+	while ((cur_slot = (constraint_slot_t *) list_next(iter))) {
+		/* cur_slot is posterior or contiguous, insert cstr,
+		 * mark the state as done and break */
+		if (cstr_slot->end <= cur_slot->start) {
+			list_insert(iter,cstr_slot);
+			done = true;
+			break;
+		}
+		/* cur_slot has the same time period, update it,
+		 * mark the state as done and break */
+		if (cstr_slot->start == cur_slot->start &&
+		    cstr_slot->end == cur_slot->end) {
+			cur_slot->value += cstr_slot->value;
+			xfree(cstr_slot);
+			done = true;
+			break;
+		}
+		/* cur_slot is anterior or contiguous, continue */
+		if (cur_slot->end <= cstr_slot->start)
+			continue;
+		/* new slot starts after this one */
+		if (cur_slot->start <= cstr_slot->start) {
+			/* we may need up to 2 insertions and one update */
+			if (cur_slot->start < cstr_slot->start) {
+				tmp_slot = xmalloc(sizeof(constraint_slot_t));
+				tmp_slot->value = cur_slot->value;
+				tmp_slot->start = cur_slot->start;
+				tmp_slot->end = cstr_slot->start;
+				list_insert(iter,tmp_slot);
+				cur_slot->start = tmp_slot->end;
+			}
+			if (cstr_slot->end < cur_slot->end) {
+				cstr_slot->value += cur_slot->value;
+				list_insert(iter,cstr_slot);
+				cur_slot->start = cstr_slot->end;
+			} else if (cstr_slot->end > cur_slot->end) {
+				cur_slot->value += cstr_slot->value;
+				cstr_slot->start = cur_slot->end;
+				continue;
+			} else {
+				cur_slot->value += cstr_slot->value;
+				xfree(cstr_slot);
+			}
+			done = true;
+			break;
+		} else {
+			/* new slot starts before, and we know that it is
+			 * not contiguous (previously checked) */
+			tmp_slot = xmalloc(sizeof(constraint_slot_t));
+			tmp_slot->value = cstr_slot->value;
+			tmp_slot->start = cstr_slot->start;
+			tmp_slot->end = cur_slot->start;
+			list_insert(iter,tmp_slot);
+			if (cstr_slot->end == cur_slot-> end) {
+				cur_slot->value += cstr_slot->value;
+				xfree(cstr_slot);
+				done = true;
+				break;
+			} else if (cstr_slot->end < cur_slot-> end) {
+				cstr_slot->start = cur_slot->start;
+				cstr_slot->value += cur_slot->value;
+				list_insert(iter,cstr_slot);
+				cur_slot->start = cstr_slot->end;
+				done = true;
+				break;
+			} else {
+				cur_slot->value += cstr_slot->value;
+				cstr_slot->start = cur_slot->end;
+				continue;
+			}
+		}
+	}
+	list_iterator_destroy(iter);
+
+	/* we might still need to add the [updated] constrain slot */
+	if (!done)
+		list_append(sched->slot_list, cstr_slot);
+}
+
+static uint32_t _max_constraint_planning(constraint_planning_t* sched,
+					 time_t *start, time_t *end)
+{
+	ListIterator iter;
+	constraint_slot_t *cur_slot;
+	uint32_t max = 0;
+
+	iter = list_iterator_create(sched->slot_list);
+	while ((cur_slot = (constraint_slot_t *) list_next(iter))) {
+		if (cur_slot->value > max) {
+			max = cur_slot->value;
+			*start = cur_slot->start;
+			*end = cur_slot->end;
+		}
+	}
+	list_iterator_destroy(iter);
+
+	return max;
+}
+
+static void _print_constraint_planning(constraint_planning_t* sched)
+{
+	ListIterator iter;
+	constraint_slot_t *cur_slot;
+	char start_str[32] = "-1", end_str[32] = "-1";
+	uint32_t i = 0;
+
+	iter = list_iterator_create(sched->slot_list);
+	while ((cur_slot = (constraint_slot_t *) list_next(iter))) {
+		slurm_make_time_str(&cur_slot->start,
+				    start_str, sizeof(start_str));
+		slurm_make_time_str(&cur_slot->end,
+				    end_str, sizeof(end_str));
+		debug2("constraint_planning: slot[%u]: %s to %s count=%u",
+		       i, start_str, end_str, cur_slot->value);
+		i++;
+	}
+	list_iterator_destroy(iter);
+}
+
+/*
+ * Determine how many watts the specified job is prevented from using 
+ * due to reservations
+ *
+ * IN job_ptr   - job to test
+ * IN when      - when the job is expected to start
+ * RET amount of watts the job is prevented from using
+ */
+extern uint32_t job_test_watts_resv(struct job_record *job_ptr, time_t when)
+{
+	slurmctld_resv_t * resv_ptr;
+	time_t job_start_time, job_end_time, now = time(NULL);
+	ListIterator iter;
+	constraint_planning_t wsched;
+	time_t start, end;
+	char start_str[32] = "-1", end_str[32] = "-1";
+	uint32_t resv_cnt = 0;
+
+	_init_constraint_planning(&wsched);
+
+	job_start_time = when;
+	job_end_time   = when + _get_job_duration(job_ptr);
+	iter = list_iterator_create(resv_list);
+	while ((resv_ptr = (slurmctld_resv_t *) list_next(iter))) {
+		if (resv_ptr->end_time <= now)
+			_advance_resv_time(resv_ptr);
+		if (resv_ptr->resv_watts == NO_VAL ||
+		    resv_ptr->resv_watts == 0)
+			continue;       /* not a power reservation */
+		if ((resv_ptr->start_time >= job_end_time) ||
+		    (resv_ptr->end_time   <= job_start_time))
+			continue;	/* reservation at different time */
+
+		if (job_ptr->resv_name &&
+		    (strcmp(job_ptr->resv_name, resv_ptr->name) == 0))
+			continue;	/* job can use this reservation */
+
+		_update_constraint_planning(&wsched, resv_ptr->resv_watts,
+					    resv_ptr->start_time,
+					    resv_ptr->end_time);
+	}
+	list_iterator_destroy(iter);
+
+	resv_cnt = _max_constraint_planning(&wsched, &start, &end);
+	if (slurm_get_debug_flags() & DEBUG_FLAG_RESERVATION) {
+		_print_constraint_planning(&wsched);
+		slurm_make_time_str(&start, start_str, sizeof(start_str));
+		slurm_make_time_str(&end, end_str, sizeof(end_str));
+		debug2("reservation: max reserved watts=%u (%s to %s)",
+		       resv_cnt, start_str, end_str);
+	}
+	_free_constraint_planning(&wsched);
+
+	return resv_cnt;
+}
+
 /*
  * Determine which nodes a job can use based upon reservations
  * IN job_ptr      - job to test
@@ -4062,7 +4763,7 @@ extern int job_test_resv(struct job_record *job_ptr, time_t *when,
 			return ESLURM_INVALID_TIME_VALUE;
 		}
 		if ((resv_ptr->node_cnt == 0) &&
-		    (!(resv_ptr->flags & RESERVE_FLAG_LIC_ONLY))) {
+		    (!(resv_ptr->flags & RESERVE_FLAG_ANY_NODES))) {
 			/* empty reservation treated like it will start later */
 			*when = now + 600;
 			return ESLURM_INVALID_TIME_VALUE;
@@ -4078,12 +4779,12 @@ extern int job_test_resv(struct job_record *job_ptr, time_t *when,
 			return ESLURM_RESERVATION_INVALID;
 		}
 		if (job_ptr->details->req_node_bitmap &&
-		    (!(resv_ptr->flags & RESERVE_FLAG_LIC_ONLY)) &&
+		    (!(resv_ptr->flags & RESERVE_FLAG_ANY_NODES)) &&
 		    !bit_super_set(job_ptr->details->req_node_bitmap,
 				   resv_ptr->node_bitmap)) {
 			return ESLURM_RESERVATION_INVALID;
 		}
-		if (resv_ptr->flags & RESERVE_FLAG_LIC_ONLY) {
+		if (resv_ptr->flags & RESERVE_FLAG_ANY_NODES) {
 			*node_bitmap = bit_alloc(node_record_count);
 			bit_nset(*node_bitmap, 0, (node_record_count - 1));
 		} else
@@ -4165,7 +4866,7 @@ extern int job_test_resv(struct job_record *job_ptr, time_t *when,
 				else if (resv_ptr->duration &&
 					 (resv_ptr->duration != NO_VAL)) {
 					end_relative = start_relative +
-						       resv_ptr->duration * 60;
+						resv_ptr->duration * 60;
 				} else {
 					end_relative = resv_ptr->end_time;
 					if (start_relative > end_relative)
@@ -4185,8 +4886,8 @@ extern int job_test_resv(struct job_record *job_ptr, time_t *when,
 			if (job_ptr->details->req_node_bitmap &&
 			    bit_overlap(job_ptr->details->req_node_bitmap,
 					resv_ptr->node_bitmap) &&
-			    ((resv_ptr->cpu_cnt == 0) ||
-			     (job_ptr->details->whole_node))) {
+			    (!resv_ptr->tres_str ||
+			     job_ptr->details->whole_node == 1)) {
 				*when = resv_ptr->end_time;
 				rc = ESLURM_NODES_BUSY;
 				break;
@@ -4202,7 +4903,7 @@ extern int job_test_resv(struct job_record *job_ptr, time_t *when,
 			}
 
 			if ((resv_ptr->full_nodes) ||
-			    (job_ptr->details->whole_node)) {
+			    (job_ptr->details->whole_node == 1)) {
 #if _DEBUG
 				info("reservation %s uses full nodes or job %u "
 				     "will not share nodes",
@@ -4382,11 +5083,13 @@ static void *_fork_script(void *x)
 	char *argv[3], *envp[1];
 	int status, wait_rc;
 	pid_t cpid;
+	uint16_t tm;
 
 	argv[0] = args->script;
 	argv[1] = args->resv_name;
 	argv[2] = NULL;
 	envp[0] = NULL;
+
 	if ((cpid = fork()) < 0) {
 		error("_fork_script fork error: %m");
 		goto fini;
@@ -4401,8 +5104,9 @@ static void *_fork_script(void *x)
 		exit(127);
 	}
 
+	tm = slurm_get_prolog_timeout();
 	while (1) {
-		wait_rc = waitpid(cpid, &status, 0);
+		wait_rc = waitpid_timeout(__func__, cpid, &status, tm);
 		if (wait_rc < 0) {
 			if (errno == EINTR)
 				continue;
@@ -4652,6 +5356,9 @@ extern void update_part_nodes_in_resv(struct part_record *part_ptr)
 		if ((resv_ptr->flags & RESERVE_FLAG_PART_NODES) &&
 		    (resv_ptr->partition != NULL) &&
 		    (strcmp(resv_ptr->partition, part_ptr->name) == 0)) {
+			slurmctld_resv_t old_resv_ptr;
+			memset(&old_resv_ptr, 0, sizeof(slurmctld_resv_t));
+
 			parti_ptr = find_part_record(resv_ptr->partition);
 			FREE_NULL_BITMAP(resv_ptr->node_bitmap);
 			resv_ptr->node_bitmap = bit_copy(parti_ptr->
@@ -4660,7 +5367,10 @@ extern void update_part_nodes_in_resv(struct part_record *part_ptr)
 							   node_bitmap);
 			xfree(resv_ptr->node_list);
 			resv_ptr->node_list = xstrdup(parti_ptr->nodes);
-			_set_cpu_cnt(resv_ptr);
+			old_resv_ptr.tres_str = resv_ptr->tres_str;
+			resv_ptr->tres_str = NULL;
+			_set_tres_cnt(resv_ptr, &old_resv_ptr);
+			xfree(old_resv_ptr.tres_str);
 			last_resv_update = time(NULL);
 		}
 	}
@@ -4674,14 +5384,16 @@ static void _set_nodes_flags(slurmctld_resv_t *resv_ptr, time_t now,
 	struct node_record *node_ptr;
 
 	if (!resv_ptr->node_bitmap) {
-		error("%s: reservation %s lacks a bitmap",
-		      __func__, resv_ptr->name);
+		if ((resv_ptr->flags & RESERVE_FLAG_ANY_NODES) == 0) {
+			error("%s: reservation %s lacks a bitmap",
+			      __func__, resv_ptr->name);
+		}
 		return;
 	}
 
 	i_first = bit_ffs(resv_ptr->node_bitmap);
 	if (i_first < 0) {
-		if ((resv_ptr->flags & RESERVE_FLAG_LIC_ONLY) == 0) {
+		if ((resv_ptr->flags & RESERVE_FLAG_ANY_NODES) == 0) {
 			error("%s: reservation %s includes no nodes",
 			      __func__, resv_ptr->name);
 		}
diff --git a/src/slurmctld/reservation.h b/src/slurmctld/reservation.h
index 616b4f947..a7e5877e3 100644
--- a/src/slurmctld/reservation.h
+++ b/src/slurmctld/reservation.h
@@ -111,6 +111,18 @@ extern int load_all_resv_state(int recover);
  */
 extern int validate_job_resv(struct job_record *job_ptr);
 
+/*
+ * Determine how many burst buffer resources the specified job is prevented
+ *	from using due to reservations
+ *
+ * IN job_ptr   - job to test
+ * IN when      - when the job is expected to start
+ * RET burst buffer reservation structure, call
+ *	 slurm_free_burst_buffer_info_msg() to free
+ */
+extern burst_buffer_info_msg_t *job_test_bb_resv(struct job_record *job_ptr,
+						 time_t when);
+
 /*
  * Determine how many licenses of the give type the specified job is
  *	prevented from using due to reservations
@@ -123,6 +135,22 @@ extern int validate_job_resv(struct job_record *job_ptr);
 extern int job_test_lic_resv(struct job_record *job_ptr, char *lic_name,
 			     time_t when);
 
+/*
+ * Determine how many watts the specified job is prevented from using
+ * due to reservations
+ *
+ * TODO: this code, replicated from job_test_lic_resv, does not appear to
+ * be protected against consecutive reservations whose reserved watts
+ * (or license counts) cannot simply be added together. Thus, if the job
+ * overlaps multiple non-overlapping reservations, it may be prevented
+ * from using more watts (or licenses) than necessary.
+ *
+ * IN job_ptr   - job to test
+ * IN when      - when the job is expected to start
+ * RET amount of watts the job is prevented from using
+ */
+extern uint32_t job_test_watts_resv(struct job_record *job_ptr, time_t when);
+
 /*
  * Determine which nodes a job can use based upon reservations
  *
@@ -144,6 +172,14 @@ extern int job_test_resv(struct job_record *job_ptr, time_t *when,
 			 bool move_time, bitstr_t **node_bitmap,
 			 bitstr_t **exc_core_bitmap, bool *resv_overlap);
 
+/*
+ * Note that a job is starting execution. If that job is associated with a
+ * reservation having the "Refresh" flag, then remove that job's nodes from
+ * the reservation. Additional nodes will be added to the reservation from
+ * those currently available.
+ */
+extern void job_claim_resv(struct job_record *job_ptr);
+
 /*
  * Determine the time of the first reservation to end after some time.
  * return zero of no reservation ends after that time.
diff --git a/src/slurmctld/sicp.c b/src/slurmctld/sicp.c
new file mode 100644
index 000000000..a2d031de9
--- /dev/null
+++ b/src/slurmctld/sicp.c
@@ -0,0 +1,480 @@
+/*****************************************************************************\
+ *  sicp.c - Inter-cluster job management functions
+ *****************************************************************************
+ *  Copyright (C) 2015 SchedMD LLC (http://www.schedmd.com).
+ *  Written by Morris Jette
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifdef HAVE_CONFIG_H
+#  include "config.h"
+#endif
+
+#include <string.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "slurm/slurm.h"
+#include "slurm/slurm_errno.h"
+
+#include "src/common/fd.h"
+#include "src/common/macros.h"
+#include "src/common/xassert.h"
+#include "src/common/xmalloc.h"
+#include "src/common/xstring.h"
+#include "src/slurmctld/locks.h"
+#include "src/slurmctld/sicp.h"
+#include "src/slurmctld/slurmctld.h"
+#include "src/slurmctld/state_save.h"
+
+#define JOB_HASH_INX(_job_id)	(_job_id % hash_table_size)
+#define JOB_ARRAY_HASH_INX(_job_id, _task_id) \
+	((_job_id + _task_id) % hash_table_size)
+
+static int		hash_table_size = 1000;
+static sicp_job_t **	sicp_hash = NULL;
+static List		sicp_job_list = NULL;
+
+static int		sicp_interval = 600;
+static bool		sicp_stop = false;
+static pthread_t	sicp_thread = 0;
+static pthread_mutex_t	sicp_lock = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t	sicp_cond = PTHREAD_COND_INITIALIZER;
+static pthread_mutex_t	thread_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static void		_add_job_hash(sicp_job_t *sicp_ptr);
+static void		_dump_sicp_state(void);
+static sicp_job_t *	_find_sicp(uint32_t job_id);
+static void		_list_delete_sicp(void *sicp_entry);
+static int		_list_find_sicp_old(void *sicp_entry, void *key);
+static void		_load_sicp_state(void);
+static void		_log_sicp_recs(void);
+static void		_my_sleep(int add_secs);
+
+/* _add_job_hash - add a sicp job to hash table */
+static void _add_job_hash(sicp_job_t *sicp_ptr)
+{
+	int inx;
+
+	inx = JOB_HASH_INX(sicp_ptr->job_id);
+	sicp_ptr->sicp_next = sicp_hash[inx];
+	sicp_hash[inx] = sicp_ptr;
+}
+
+static sicp_job_t *_find_sicp(uint32_t job_id)
+{
+	sicp_job_t *sicp_ptr;
+
+	sicp_ptr = sicp_hash[JOB_HASH_INX(job_id)];
+	while (sicp_ptr) {
+		if (sicp_ptr->job_id == job_id)
+			break;
+		sicp_ptr = sicp_ptr->sicp_next;
+	}
+	return sicp_ptr;
+}
+
+static void _list_delete_sicp(void *sicp_entry)
+{
+	sicp_job_t *sicp_ptr = (sicp_job_t *) sicp_entry;
+	sicp_job_t **sicp_pptr, *tmp_ptr;
+
+	/* Remove the record from sicp hash table */
+	sicp_pptr = &sicp_hash[JOB_HASH_INX(sicp_ptr->job_id)];
+	while ((sicp_pptr != NULL) && (*sicp_pptr != NULL) &&
+	       ((tmp_ptr = *sicp_pptr) != (sicp_job_t *) sicp_entry)) {
+		sicp_pptr = &tmp_ptr->sicp_next;
+	}
+	if (sicp_pptr == NULL)
+		error("sicp hash error");
+	else
+		*sicp_pptr = sicp_ptr->sicp_next;
+	xfree(sicp_ptr);
+}
+
+static void _my_sleep(int add_secs)
+{
+	struct timespec ts = {0, 0};
+	struct timeval  tv = {0, 0};
+
+	if (gettimeofday(&tv, NULL)) {		/* Some error */
+		sleep(1);
+		return;
+	}
+
+	ts.tv_sec  = tv.tv_sec + add_secs;
+	ts.tv_nsec = tv.tv_usec * 1000;
+	pthread_mutex_lock(&sicp_lock);
+	if (!sicp_stop)
+		pthread_cond_timedwait(&sicp_cond, &sicp_lock, &ts);
+	pthread_mutex_unlock(&sicp_lock);
+}
+
+static int _list_find_sicp_old(void *sicp_entry, void *key)
+{
+	sicp_job_t *sicp_ptr = (sicp_job_t *)sicp_entry;
+	time_t old;
+
+//FIXME: Do not purge if we lack current information from this cluster
+	if (!(IS_JOB_FINISHED(sicp_ptr)))
+		return 0;	/* Job still active */
+
+	old = time(NULL) - (24 * 60 * 60);	/* One day */
+	if (sicp_ptr->update_time > old)
+		return 0;	/* Job still active */
+
+	return 1;
+}
+
+/* Log all SICP job records */
+static void _log_sicp_recs(void)
+{
+	ListIterator sicp_iterator;
+	sicp_job_t *sicp_ptr;
+
+	sicp_iterator = list_iterator_create(sicp_job_list);
+	while ((sicp_ptr = (sicp_job_t *) list_next(sicp_iterator))) {
+		info("SICP: Job_ID:%u State:%s", sicp_ptr->job_id,
+		     job_state_string(sicp_ptr->job_state));
+	}
+	list_iterator_destroy(sicp_iterator);
+}
+
+static void _load_sicp_other_cluster(void)
+{
+int cluster_cnt = 0;
+	sicp_info_msg_t * sicp_buffer_ptr = NULL;
+	sicp_info_t *remote_sicp_ptr = NULL;
+	sicp_job_t *sicp_ptr;
+	int i, j, error_code;
+	time_t now;
+
+	for (i = 0; i < cluster_cnt; i++) {
+//FIXME: Issue RPC to load table from every _other_ cluster
+//This is just loading from the current cluster for testing purposes
+		error_code = slurm_load_sicp(&sicp_buffer_ptr);
+		if (error_code) {
+			error("slurm_load_sicp(HOSTNAME) error: %s",
+			      slurm_strerror(error_code));
+			continue;
+		}
+
+		pthread_mutex_lock(&sicp_lock);
+		now = time(NULL);
+		for (j = 0, remote_sicp_ptr = sicp_buffer_ptr->sicp_array;
+		     j < sicp_buffer_ptr->record_count;
+		     j++, remote_sicp_ptr++) {
+			sicp_ptr = _find_sicp(remote_sicp_ptr->job_id);
+			if (!sicp_ptr) {
+				sicp_ptr = xmalloc(sizeof(sicp_job_t));
+				sicp_ptr->job_id = remote_sicp_ptr->job_id;
+				sicp_ptr->job_state = remote_sicp_ptr->job_state;
+				list_append(sicp_job_list, sicp_ptr);
+				_add_job_hash(sicp_ptr);
+			}
+			sicp_ptr->update_time = now;
+		}
+		pthread_mutex_unlock(&sicp_lock);
+		slurm_free_sicp_msg(sicp_buffer_ptr);
+	}
+}
+
+extern void *_sicp_agent(void *args)
+{
+	static time_t last_sicp_time = 0;
+	time_t now;
+	double wait_time;
+
+	while (!sicp_stop) {
+		_my_sleep(1);
+		if (sicp_stop)
+			break;
+
+		now = time(NULL);
+		wait_time = difftime(now, last_sicp_time);
+		if (wait_time < sicp_interval)
+			continue;
+		last_sicp_time = now;
+
+		_load_sicp_other_cluster();
+
+		pthread_mutex_lock(&sicp_lock);
+		list_delete_all(sicp_job_list, &_list_find_sicp_old, "");
+		if (slurm_get_debug_flags() & DEBUG_FLAG_SICP)
+			_log_sicp_recs();
+		pthread_mutex_unlock(&sicp_lock);
+
+		_dump_sicp_state();	/* Has own locking */
+	}
+	return NULL;
+}
+
+static void _dump_sicp_state(void)
+{
+	char *old_file, *new_file, *reg_file;
+	ListIterator sicp_iterator;
+	sicp_job_t *sicp_ptr;
+	Buf buffer;
+	time_t now = time(NULL);
+	int error_code = SLURM_SUCCESS, len, log_fd;
+
+	pthread_mutex_lock(&sicp_lock);
+	len = list_count(sicp_job_list) * 4 + 128;
+	buffer = init_buf(len);
+
+	packstr("PROTOCOL_VERSION", buffer);
+	pack16(SLURM_PROTOCOL_VERSION, buffer);
+	pack_time(now, buffer);
+
+	sicp_iterator = list_iterator_create(sicp_job_list);
+	while ((sicp_ptr = (sicp_job_t *) list_next(sicp_iterator))) {
+		pack32(sicp_ptr->job_id, buffer);
+		pack16(sicp_ptr->job_state, buffer);
+	}
+	list_iterator_destroy(sicp_iterator);
+	pthread_mutex_unlock(&sicp_lock);
+
+	old_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(old_file, "/sicp_state.old");
+	reg_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(reg_file, "/sicp_state");
+	new_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(new_file, "/sicp_state.new");
+
+	lock_state_files();
+	log_fd = creat(new_file, 0600);
+	if (log_fd < 0) {
+		error("Can't save state, create file %s error %m",
+		      new_file);
+		error_code = errno;
+	} else {
+		int pos = 0, nwrite, amount, rc;
+		char *data;
+
+		fd_set_close_on_exec(log_fd);
+		nwrite = get_buf_offset(buffer);
+		data = (char *)get_buf_data(buffer);
+		while (nwrite > 0) {
+			amount = write(log_fd, &data[pos], nwrite);
+			if ((amount < 0) && (errno != EINTR)) {
+				error("Error writing file %s, %m", new_file);
+				error_code = errno;
+				break;
+			}
+			nwrite -= amount;
+			pos    += amount;
+		}
+
+		rc = fsync_and_close(log_fd, "sicp");
+		if (rc && !error_code)
+			error_code = rc;
+	}
+	if (error_code) {
+		(void) unlink(new_file);
+	} else {			/* file shuffle */
+		(void) unlink(old_file);
+		if (link(reg_file, old_file))
+			debug4("unable to create link for %s -> %s: %m",
+			       reg_file, old_file);
+		(void) unlink(reg_file);
+		if (link(new_file, reg_file))
+			debug4("unable to create link for %s -> %s: %m",
+			       new_file, reg_file);
+		(void) unlink(new_file);
+	}
+	xfree(old_file);
+	xfree(reg_file);
+	xfree(new_file);
+	unlock_state_files();
+
+	free_buf(buffer);
+}
+
+static void _load_sicp_state(void)
+{
+	int data_allocated, data_read = 0;
+	uint32_t data_size = 0;
+	int state_fd, sicp_cnt = 0;
+	char *data = NULL, *state_file;
+	struct stat stat_buf;
+	Buf buffer;
+	char *ver_str = NULL;
+	uint32_t ver_str_len;
+	uint16_t protocol_version = (uint16_t)NO_VAL;
+	uint32_t job_id = 0;
+	uint32_t job_state = 0;
+	sicp_job_t *sicp_ptr;
+	time_t buf_time, now;
+
+	/* read the file */
+	lock_state_files();
+	state_file = xstrdup(slurmctld_conf.state_save_location);
+	xstrcat(state_file, "/sicp_state");
+	state_fd = open(state_file, O_RDONLY);
+	if (state_fd < 0) {
+		error("Could not open job state file %s: %m", state_file);
+		unlock_state_files();
+		xfree(state_file);
+		return;
+	} else if (fstat(state_fd, &stat_buf) < 0) {
+		error("Could not stat job state file %s: %m", state_file);
+		unlock_state_files();
+		(void) close(state_fd);
+		xfree(state_file);
+		return;
+	} else if (stat_buf.st_size < 10) {
+		error("Job state file %s too small", state_file);
+		unlock_state_files();
+		(void) close(state_fd);
+		xfree(state_file);
+		return;
+	}
+
+	data_allocated = BUF_SIZE;
+	data = xmalloc(data_allocated);
+	while (1) {
+		data_read = read(state_fd, &data[data_size], BUF_SIZE);
+		if (data_read < 0) {
+			if (errno == EINTR)
+				continue;
+			else {
+				error("Read error on %s: %m", state_file);
+				break;
+			}
+		} else if (data_read == 0)	/* eof */
+			break;
+		data_size      += data_read;
+		data_allocated += data_read;
+		xrealloc(data, data_allocated);
+	}
+	close(state_fd);
+	xfree(state_file);
+	unlock_state_files();
+
+	buffer = create_buf(data, data_size);
+	safe_unpackstr_xmalloc(&ver_str, &ver_str_len, buffer);
+	debug3("Version string in sicp_state header is %s", ver_str);
+	if (ver_str && !strcmp(ver_str, "PROTOCOL_VERSION"))
+		safe_unpack16(&protocol_version, buffer);
+	xfree(ver_str);
+
+	if (protocol_version == (uint16_t)NO_VAL) {
+		error("************************************************");
+		error("Can not recover SICP state, incompatible version");
+		error("************************************************");
+		xfree(ver_str);
+		free_buf(buffer);
+		return;
+	}
+	safe_unpack_time(&buf_time, buffer);
+
+	now = time(NULL);
+	while (remaining_buf(buffer) > 0) {
+		safe_unpack32(&job_id,    buffer);
+		safe_unpack32(&job_state, buffer);
+		sicp_ptr = xmalloc(sizeof(sicp_job_t));
+		sicp_ptr->job_id      = job_id;
+		sicp_ptr->job_state   = job_state;
+		sicp_ptr->update_time = now;
+		list_append(sicp_job_list, sicp_ptr);
+		_add_job_hash(sicp_ptr);
+		sicp_cnt++;
+	}
+
+	free_buf(buffer);
+	info("Recovered information about %d sicp jobs", sicp_cnt);
+	if (slurm_get_debug_flags() & DEBUG_FLAG_SICP)
+		_log_sicp_recs();
+	return;
+
+unpack_error:
+	error("Incomplete sicp data checkpoint file");
+	info("Recovered information about %d sicp jobs", sicp_cnt);
+	free_buf(buffer);
+	return;
+}
+
+/* Start a thread to poll other clusters for inter-cluster job status */
+extern void sicp_init(void)
+{
+	pthread_attr_t attr;
+
+	pthread_mutex_lock(&thread_lock);
+	if (sicp_thread) {
+		error("%s: sicp thread already running", __func__);
+		pthread_mutex_unlock(&thread_lock);
+	}
+
+	pthread_mutex_lock(&sicp_lock);
+	sicp_stop = false;
+	sicp_hash = xmalloc(sizeof(sicp_job_t) * hash_table_size);
+	sicp_job_list = list_create(_list_delete_sicp);
+	_load_sicp_state();
+	pthread_mutex_unlock(&sicp_lock);
+	slurm_attr_init(&attr);
+	/* Since we do a join on thread later, don't make it detached */
+	if (pthread_create(&sicp_thread, &attr, _sicp_agent, NULL))
+		error("Unable to start power thread: %m");
+	slurm_attr_destroy(&attr);
+	pthread_mutex_unlock(&thread_lock);
+}
+
+/* Shutdown the inter-cluster job status thread */
+extern void sicp_fini(void)
+{
+	pthread_mutex_lock(&thread_lock);
+	pthread_mutex_lock(&sicp_lock);
+	sicp_stop = true;
+	pthread_cond_signal(&sicp_cond);
+	pthread_mutex_unlock(&sicp_lock);
+
+	pthread_join(sicp_thread, NULL);
+	sicp_thread = 0;
+	FREE_NULL_LIST(sicp_job_list);
+	xfree(sicp_hash);
+	pthread_mutex_unlock(&thread_lock);
+}
+
+/* For a given inter-cluster job ID, return its state (if found) or NO_VAL */
+extern uint32_t sicp_get_state(uint32_t job_id)
+{
+	sicp_job_t *sicp_ptr;
+	uint32_t job_state = NO_VAL;
+
+	pthread_mutex_lock(&sicp_lock);
+	sicp_ptr = _find_sicp(job_id);
+	if (sicp_ptr)
+		job_state = sicp_ptr->job_state;
+	pthread_mutex_unlock(&sicp_lock);
+
+	return job_state;
+}
diff --git a/src/slurmctld/sicp.h b/src/slurmctld/sicp.h
new file mode 100644
index 000000000..76a79ae72
--- /dev/null
+++ b/src/slurmctld/sicp.h
@@ -0,0 +1,62 @@
+/*****************************************************************************\
+ *  sicp.h - Inter-cluster job management functions
+ *****************************************************************************
+ *  Copyright (C) SchedMD LLC (http://www.schedmd.com).
+ *  Written by Morris Jette
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
+ *  so. If you do not wish to do so, delete this exception statement from your
+ *  version.  If you delete this exception statement from all source files in
+ *  the program, then also delete it here.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#ifndef _HAVE_SICP_H
+#define _HAVE_SICP_H
+
+#include <sys/types.h>
+#include <time.h>
+
+#include "src/common/list.h"
+#include "src/slurmctld/slurmctld.h"
+
+typedef struct sicp_job {
+	uint32_t job_id;		/* Global job ID */
+	uint32_t job_state;		/* state of the job */
+	struct sicp_job *sicp_next;	/* link for hash table */
+	time_t   update_time;		/* Time job last seen */
+} sicp_job_t;
+
+/* For a given inter-cluster job ID, return its state (if found) or NO_VAL */
+extern uint32_t sicp_get_state(uint32_t job_id);
+
+/* Start a thread to poll other clusters for inter-cluster job status */
+extern void sicp_init(void);
+
+/* Shutdown the inter-cluster job status thread */
+extern void sicp_fini(void);
+
+#endif /* !_HAVE_SICP_H */
diff --git a/src/slurmctld/slurmctld.h b/src/slurmctld/slurmctld.h
index aefc4bd12..ba8f41b2f 100644
--- a/src/slurmctld/slurmctld.h
+++ b/src/slurmctld/slurmctld.h
@@ -3,7 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
- *  Portions Copyright (C) 2010 SchedMD <http://www.schedmd.com>.
+ *  Portions Copyright (C) 2010-2014 SchedMD <http://www.schedmd.com>.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette1@llnl.gov> et. al.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -85,12 +85,8 @@
 /*****************************************************************************\
  *  GENERAL CONFIGURATION parameters and data structures
 \*****************************************************************************/
-/* Maximum index for a job array. The minimum index will always be 0. */
-#ifndef MAX_JOB_ARRAY_VALUE
-#define MAX_JOB_ARRAY_VALUE 1000
-#endif
-
 /* Maximum parallel threads to service incoming RPCs.
+ * Also maximum parallel threads to service outgoing RPCs (separate counter).
  * Since some systems schedule pthread on a First-In-Last-Out basis,
  * increasing this value is strongly discouraged. */
 #ifndef MAX_SERVER_THREADS
@@ -155,6 +151,7 @@ typedef struct slurmctld_config {
 	time_t	boot_time;
 	time_t	shutdown_time;
 	int	server_thread_count;
+	bool scheduling_disabled;
 
 	slurm_cred_ctx_t cred_ctx;
 #ifdef WITH_PTHREADS
@@ -197,7 +194,7 @@ typedef struct diag_stats {
 	uint32_t bf_cycle_counter;
 	uint32_t bf_cycle_last;
 	uint32_t bf_cycle_max;
-	uint32_t bf_cycle_sum;
+	uint64_t bf_cycle_sum;
 	uint32_t bf_last_depth;
 	uint32_t bf_last_depth_try;
 	uint32_t bf_depth_sum;
@@ -208,6 +205,18 @@ typedef struct diag_stats {
 	uint32_t bf_active;
 } diag_stats_t;
 
+/* This is used to point out constants that exist in the
+ * curr_tres_array in tres_info_t  This should be the same order as
+ * the tres_trypes_t enum that is defined in src/common/slurmdb_defs.h
+ */
+enum {
+	TRES_ARRAY_CPU = 0,
+	TRES_ARRAY_MEM,
+	TRES_ARRAY_ENEGRY,
+	TRES_ARRAY_NODE,
+	TRES_ARRAY_TOTAL_CNT
+};
+
 extern time_t	last_proc_req_start;
 extern diag_stats_t slurmctld_diag_stats;
 extern slurmctld_config_t slurmctld_config;
@@ -217,12 +226,12 @@ extern void *acct_db_conn;
 extern int   accounting_enforce;
 extern int   association_based_accounting;
 extern uint32_t   cluster_cpus;
-extern int   with_slurmdbd;
 extern bool  load_2_4_state;
 extern int   batch_sched_delay;
 extern int   sched_interval;
 extern bool  slurmctld_init_db;
 extern int   slurmctld_primary;
+extern int   slurmctld_tres_cnt;
 
 /* Buffer size use to print the jobid2str()
  * jobid, taskid and state.
@@ -321,6 +330,8 @@ struct part_record {
 	bitstr_t *allow_qos_bitstr; /* (DON'T PACK) assocaited with
 				 * char *allow_qos but used internally */
 	char *alternate; 	/* name of alternate partition */
+	double *billing_weights;    /* array of TRES billing weights */
+	char   *billing_weights_str;/* per TRES billing weight string */
 	uint32_t def_mem_per_cpu; /* default MB memory per allocated CPU */
 	uint32_t default_time;	/* minutes, NO_VAL or INFINITE */
 	char *deny_accounts;	/* comma delimited list of denied accounts */
@@ -348,10 +359,20 @@ struct part_record {
 				 * jobs (DON'T PACK) */
 	uint16_t preempt_mode;	/* See PREEMPT_MODE_* in slurm/slurm.h */
 	uint16_t priority;	/* scheduling priority for jobs */
+	char *qos_char;         /* requested QOS from slurm.conf */
+	void *qos_ptr;          /* pointer to the quality of
+				 * service record attached to this
+				 * partition, it is void* because of
+				 * interdependencies in the header
+				 * files, confirm the value before use */
 	uint16_t state_up;	/* See PARTITION_* states in slurm.h */
 	uint32_t total_nodes;	/* total number of nodes in the partition */
 	uint32_t total_cpus;	/* total number of cpus in the partition */
+	uint32_t max_cpu_cnt;	/* max # of cpus on a node in the partition */
+	uint32_t max_core_cnt;	/* max # of cores on a node in the partition */
 	uint16_t cr_type;	/* Custom CR values for partition (if supported by select plugin) */
+	uint64_t *tres_cnt;	/* array of total TRES in partition. NO_PACK */
+	char     *tres_fmt_str;	/* str of configured TRES in partition */
 };
 
 extern List part_list;			/* list of part_record entries */
@@ -371,7 +392,8 @@ typedef struct slurmctld_resv {
 	char **account_list;	/* list of accounts permitted to use	*/
 	bool account_not;	/* account_list users NOT permitted to use */
 	char *assoc_list;	/* list of associations			*/
-	uint32_t cpu_cnt;	/* number of reserved CPUs		*/
+	uint32_t core_cnt;	/* number of reserved cores		*/
+	char *burst_buffer;	/* burst buffer resources		*/
 	bitstr_t *core_bitmap;	/* bitmap of reserved cores		*/
 	uint32_t duration;	/* time in seconds for this
 				 * reservation to last                  */
@@ -393,6 +415,7 @@ typedef struct slurmctld_resv {
 	char *partition;	/* name of partition to be used		*/
 	struct part_record *part_ptr;	/* pointer to partition used	*/
 	uint32_t resv_id;	/* unique reservation ID, internal use	*/
+	uint32_t resv_watts;	/* amount of power to reserve */
 	bool run_epilog;	/* set if epilog has been executed	*/
 	bool run_prolog;	/* set if prolog has been executed	*/
 	time_t start_time;	/* start time of reservation		*/
@@ -400,6 +423,8 @@ typedef struct slurmctld_resv {
 	time_t start_time_prev;	/* If start time was changed this is
 				 * the pervious start time.  Needed
 				 * for accounting */
+	char *tres_fmt_str;     /* formatted string of tres to deal with */
+	char *tres_str;         /* simple string of tres to deal with */
 	char *users;		/* names of users permitted to use	*/
 	int user_cnt;		/* count of users permitted to use	*/
 	uid_t *user_list;	/* array of users permitted to use	*/
@@ -441,7 +466,8 @@ struct job_details {
 	char *ckpt_dir;			/* directory to store checkpoint
 					 * images */
 	uint16_t contiguous;		/* set if requires contiguous nodes */
-	uint16_t core_spec;		/* specialized core count */
+	uint16_t core_spec;		/* specialized core/thread count,
+					 * threads if CORE_SPEC_THREAD flag set */
 	char *cpu_bind;			/* binding map for map/mask_cpu - This
 					 * currently does not matter to the
 					 * job allocation, setting this does
@@ -450,6 +476,9 @@ struct job_details {
 					 * currently does not matter to the
 					 * job allocation, setting this does
 					 * not do anything for steps. */
+	uint32_t cpu_freq_min;  	/* Minimum cpu frequency  */
+	uint32_t cpu_freq_max;  	/* Maximum cpu frequency  */
+	uint32_t cpu_freq_gov;  	/* cpu frequency governor */
 	uint16_t cpus_per_task;		/* number of processors required for
 					 * each task */
 	List depend_list;		/* list of job_ptr:state pairs */
@@ -504,11 +533,12 @@ struct job_details {
 	char *std_in;			/* pathname of job's stdin file */
 	char *std_out;			/* pathname of job's stdout file */
 	time_t submit_time;		/* time of submission */
-	uint16_t task_dist;		/* task layout for this job. Only
+	uint32_t task_dist;		/* task layout for this job. Only
 					 * useful when Consumable Resources
 					 * is enabled */
 	uint32_t usable_nodes;		/* node count needed by preemption */
-	uint8_t whole_node;		/* job requested exclusive node use */
+	uint8_t whole_node;		/* 1: --exclusive
+					 * 2: --exclusive=user */
 	char *work_dir;			/* pathname of working directory */
 };
 
@@ -525,6 +555,14 @@ typedef struct job_array_struct {
 	uint32_t tot_comp_tasks;	/* Completed task count */
 } job_array_struct_t;
 
+#define ADMIN_SET_LIMIT 0xffff
+
+typedef struct {
+	uint16_t qos;
+	uint16_t time;
+	uint16_t *tres;
+} acct_policy_limit_set_t;
+
 /*
  * NOTE: When adding fields to the job_record, or any underlying structures,
  * be sure to sync with _rec_job_copy.
@@ -540,13 +578,15 @@ struct job_record {
 	job_array_struct_t *array_recs;	/* job array details,
 					 * only in meta-job record */
 	uint32_t assoc_id;              /* used for accounting plugins */
-	void    *assoc_ptr;		/* job's association record ptr, it is
+	void    *assoc_ptr;		/* job's assoc record ptr, it is
 					 * void* because of interdependencies
 					 * in the header files, confirm the
 					 * value before use */
 	uint16_t batch_flag;		/* 1 or 2 if batch job (with script),
 					 * 2 indicates retry mode (one retry) */
 	char *batch_host;		/* host executing batch script */
+	uint32_t bit_flags;             /* various flags */
+	char *burst_buffer;		/* burst buffer specification */
 	check_jobinfo_t check_job;      /* checkpoint context, opaque */
 	uint16_t ckpt_interval;		/* checkpoint interval in minutes */
 	time_t ckpt_time;		/* last time job was periodically
@@ -556,6 +596,11 @@ struct job_record {
 					 * by the job, decremented while job is
 					 * completing (N/A for bluegene
 					 * systems) */
+	double billable_tres;		/* calculated billable tres for the
+					 * job, as defined by the partition's
+					 * billing weight. Recalculated upon job
+					 * resize.  Cannot be calculated until
+					 * the job is allocated resources. */
 	uint16_t cr_enabled;            /* specify if Consumable Resources
 					 * is enabled. Needed since CR deals
 					 * with a finer granularity in its
@@ -595,25 +640,16 @@ struct job_record {
 	struct job_record *job_array_next_j; /* job array linked list by job_id */
 	struct job_record *job_array_next_t; /* job array linked list by task_id */
 	job_resources_t *job_resrcs;	/* details of allocated cores */
-	uint16_t job_state;		/* state of the job */
+	uint32_t job_state;		/* state of the job */
 	uint16_t kill_on_node_fail;	/* 1 if job should be killed on
 					 * node failure */
 	char *licenses;			/* licenses required by the job */
 	List license_list;		/* structure with license info */
-	uint16_t limit_set_max_cpus;	/* if max_cpus was set from
-					 * a limit false if user set */
-	uint16_t limit_set_max_nodes;	/* if max_nodes was set from
-					 * a limit false if user set */
-	uint16_t limit_set_min_cpus;	/* if max_cpus was set from
-					 * a limit false if user set */
-	uint16_t limit_set_min_nodes;	/* if max_nodes was set from
-					 * a limit false if user set */
-	uint16_t limit_set_pn_min_memory; /* if pn_min_memory was set from
-					 * a limit false if user set */
-	uint16_t limit_set_time;    	/* if time_limit was set from
-					 * a limit false if user set */
-	uint16_t limit_set_qos;	   	/* if qos_limit was set from
-					 * a limit false if user set */
+	acct_policy_limit_set_t limit_set; /* flags if indicate an
+					    * associated limit was set from
+					    * a limit instead of from
+					    * the request, or if the
+					    * limit was set from admin */
 	uint16_t mail_type;		/* see MAIL_JOB_* in slurm.h */
 	char *mail_user;		/* user to get e-mail notification */
 	uint32_t magic;			/* magic cookie for data integrity */
@@ -645,6 +681,8 @@ struct job_record {
 	bool part_nodes_missing;	/* set if job's nodes removed from this
 					 * partition */
 	struct part_record *part_ptr;	/* pointer to the partition record */
+	uint8_t power_flags;		/* power management flags,
+					 * see SLURM_POWER_FLAGS_ */
 	time_t pre_sus_time;		/* time job ran prior to last suspend */
 	time_t preempt_time;		/* job preemption signal time */
 	bool preempt_in_progress;	/* Premption of other jobs in progress
@@ -673,12 +711,16 @@ struct job_record {
 	char *resp_host;		/* host for srun communications */
 	char *sched_nodes;		/* list of nodes scheduled for job */
 	dynamic_plugin_data_t *select_jobinfo;/* opaque data, BlueGene */
+	uint8_t sicp_mode;		/* set for inter-cluster jobs */
 	char **spank_job_env;		/* environment variables for job prolog
 					 * and epilog scripts as set by SPANK
 					 * plugins */
 	uint32_t spank_job_env_size;	/* element count in spank_env */
-	uint16_t start_protocol_ver;	/* Slurm version step was
-					 * started with */
+	uint16_t start_protocol_ver;	/* Slurm version job was
+					 * started with either the
+					 * creating message or the
+					 * lowest slurmd in the
+					 * allocation */
 	time_t start_time;		/* time execution begins,
 					 * actual or expected */
 	char *state_desc;		/* optional details for state_reason */
@@ -697,6 +739,18 @@ struct job_record {
 					 * for accounting */
 	uint32_t total_nodes;		/* number of allocated nodes
 					 * for accounting */
+	uint64_t *tres_req_cnt;         /* array of tres counts requested
+					 * based off g_tres_count in
+					 * assoc_mgr */
+	char *tres_req_str;             /* string format of
+					 * tres_req_cnt primarily
+					 * used for state */
+	char *tres_fmt_req_str;         /* formatted req tres string for job */
+	uint64_t *tres_alloc_cnt;       /* array of tres counts allocated
+					 * based off g_tres_count in
+					 * assoc_mgr */
+	char *tres_alloc_str;           /* simple tres string for job */
+	char *tres_fmt_alloc_str;       /* formatted tres string for job */
 	uint32_t user_id;		/* user the job runs as */
 	uint16_t wait_all_nodes;	/* if set, wait for all nodes to boot
 					 * before starting the job */
@@ -722,9 +776,13 @@ struct job_record {
 #define SLURM_DEPEND_SINGLETON		5	/* Only one job for this
 						 * user/name at a time */
 #define SLURM_DEPEND_EXPAND		6	/* Expand running job */
+
+#define SLURM_FLAGS_OR			1	/* OR job dependencies */
+
 struct	depend_spec {
 	uint32_t	array_task_id;	/* INFINITE for all array tasks */
 	uint16_t	depend_type;	/* SLURM_DEPEND_* type */
+	uint16_t	depend_flags;	/* SLURM_FLAGS_* type */
 	uint32_t	job_id;		/* SLURM job_id */
 	struct job_record *job_ptr;	/* pointer to this job */
 };
@@ -739,7 +797,9 @@ struct 	step_record {
 					 * step relative to job's nodes,
 					 * see src/common/job_resources.h */
 	uint32_t cpu_count;		/* count of step's CPUs */
-	uint32_t cpu_freq;		/* requested cpu frequency */
+	uint32_t cpu_freq_min; 		/* Minimum cpu frequency  */
+	uint32_t cpu_freq_max; 		/* Maximum cpu frequency  */
+	uint32_t cpu_freq_gov; 		/* cpu frequency governor */
 	uint16_t cpus_per_task;		/* cpus per task initiated */
 	uint16_t cyclic_alloc;		/* set for cyclic task allocation
 					 * across nodes */
@@ -762,7 +822,9 @@ struct 	step_record {
 	uint16_t port;			/* port for srun communications */
 	time_t pre_sus_time;		/* time step ran prior to last suspend */
 	uint16_t start_protocol_ver;	/* Slurm version step was
-					 * started with */
+					 * started with either srun
+					 * or the lowest slurmd
+					 * version it is talking to */
 	int *resv_port_array;		/* reserved port indexes */
 	uint16_t resv_port_cnt;		/* count of ports reserved per node */
 	char *resv_ports;		/* ports reserved for job */
@@ -770,7 +832,7 @@ struct 	step_record {
 	time_t start_time;		/* step allocation start time */
 	uint32_t time_limit;	  	/* step allocation time limit */
 	dynamic_plugin_data_t *select_jobinfo;/* opaque data, BlueGene */
-	uint16_t state;			/* state of the step. See job_states */
+	uint32_t state;			/* state of the step. See job_states */
 	uint32_t step_id;		/* step number */
 	slurm_step_layout_t *step_layout;/* info about how tasks are laid out
 					  * in the step */
@@ -782,6 +844,8 @@ struct 	step_record {
 	switch_jobinfo_t *switch_job;	/* switch context, opaque */
 	time_t time_last_active;	/* time step was last found on node */
 	time_t tot_sus_time;		/* total time in suspended state */
+	char *tres_alloc_str;           /* simple tres string for step */
+	char *tres_fmt_alloc_str;       /* formatted tres string for step */
 };
 
 extern List job_list;			/* list of job_record entries */
@@ -858,6 +922,18 @@ extern void  build_config_feature_list(struct config_record *config_ptr);
  */
 extern struct part_record *create_part_record (void);
 
+/*
+ * build_part_bitmap - update the total_cpus, total_nodes, and node_bitmap
+ *	for the specified partition, also reset the partition pointers in
+ *	the node back to this partition.
+ * IN part_ptr - pointer to the partition
+ * RET 0 if no error, errno otherwise
+ * global: node_record_table_ptr - pointer to global node table
+ * NOTE: this does not report nodes defined in more than one partition. this
+ *	is checked only upon reading the configuration file, not on an update
+ */
+extern int build_part_bitmap(struct part_record *part_ptr);
+
 /*
  * job_limits_check - check the limits specified for the job.
  * IN job_ptr - pointer to job table entry.
@@ -1026,10 +1102,12 @@ extern uint32_t get_next_job_id(void);
 /*
  * get_part_list - find record for named partition(s)
  * IN name - partition name(s) in a comma separated list
+ * OUT err_part - The first invalid partition name.
  * RET List of pointers to the partitions or NULL if not found
  * NOTE: Caller must free the returned list
+ * NOTE: Caller must free err_part
  */
-extern List get_part_list(char *name);
+extern List get_part_list(char *name, char **err_part);
 
 /*
  * init_job_conf - initialize the job configuration tables and values.
@@ -1052,7 +1130,7 @@ extern int init_job_conf (void);
  *         hash_table - table of hash indexes
  *         last_node_update - time of last node table update
  */
-extern int init_node_conf ();
+extern int init_node_conf (void);
 
 /*
  * init_part_conf - initialize the default partition configuration values
@@ -1065,9 +1143,7 @@ extern int init_node_conf ();
 extern int init_part_conf (void);
 
 /* init_requeue_policy()
- *
- * Build the arrays holding the job exit code upon
- * which jobs should get requeued.
+ * Initialize the requeue exit/hold bitmaps.
  */
 extern void init_requeue_policy(void);
 
@@ -1129,6 +1205,13 @@ extern void job_array_pre_sched(struct job_record *job_ptr);
 /* If this is a job array meta-job, clean up after scheduling attempt */
 extern void job_array_post_sched(struct job_record *job_ptr);
 
+/* Create an exact copy of an existing job record for a job array.
+ * IN job_ptr - META job record for a job array, which is to become an
+ *		individual task of the job array.
+ *		Set the job's array_task_id to the task to be split out.
+ * RET - The new job record, which is the new META job record. */
+extern struct job_record *job_array_split(struct job_record *job_ptr);
+
 /* Record the start of one job array task */
 extern void job_array_start(struct job_record *job_ptr);
 
@@ -1161,6 +1244,11 @@ extern int job_checkpoint(checkpoint_msg_t *ckpt_ptr, uid_t uid,
 /* log the completion of the specified job */
 extern void job_completion_logger(struct job_record  *job_ptr, bool requeue);
 
+/* Convert a pn_min_memory into total memory for the job either cpu or
+ * node based. */
+extern uint64_t job_get_tres_mem(uint32_t pn_min_memory,
+				 uint32_t cpu_cnt, uint32_t node_cnt);
+
 /*
  * job_epilog_complete - Note the completion of the epilog script for a
  *	given job
@@ -1190,7 +1278,7 @@ extern void job_fini (void);
  * IN job_state - desired job state (JOB_BOOT_FAIL, JOB_NODE_FAIL, etc.)
  * RET 0 on success, otherwise ESLURM error code
  */
-extern int job_fail(uint32_t job_id, uint16_t job_state);
+extern int job_fail(uint32_t job_id, uint32_t job_state);
 
 
 /* job_hold_requeue()
@@ -1318,7 +1406,7 @@ extern int job_suspend2(suspend_msg_t *sus_ptr, uid_t uid,
  * IN job_id - id of the job which completed
  * IN uid - user id of user issuing the RPC
  * IN requeue - job should be run again if possible
- * IN node_fail - true of job terminated due to node failure
+ * IN node_fail - true if job terminated due to node failure
  * IN job_return_code - job's return code, if set then set state to JOB_FAILED
  * RET - 0 on success, otherwise ESLURM error code
  * global: job_list - pointer global job list
@@ -1408,14 +1496,26 @@ extern int job_step_signal(uint32_t job_id, uint32_t step_id,
  */
 extern void job_time_limit (void);
 
+/* Builds the tres_req_cnt and tres_req_str of a job.
+ * Only set when job is pending.
+ * NOTE: job write lock must be locked before calling this */
+extern void job_set_req_tres(struct job_record *job_ptr, bool assoc_mgr_locked);
+
 /*
- * job_update_cpu_cnt - when job is completing remove allocated cpus
+ * job_set_alloc_tres - set the tres up when allocating the job.
+ * Only set when job is running.
+ * NOTE: job write lock must be locked before calling this */
+extern void job_set_alloc_tres(
+	struct job_record *job_ptr, bool assoc_mgr_locked);
+
+/*
+ * job_update_tres_cnt - when job is completing remove allocated tres
  *                      from count.
  * IN/OUT job_ptr - job structure to be updated
  * IN node_inx    - node bit that is finished with job.
  * RET SLURM_SUCCES on success SLURM_ERROR on cpu_cnt underflow
  */
-extern int job_update_cpu_cnt(struct job_record *job_ptr, int node_inx);
+extern int job_update_tres_cnt(struct job_record *job_ptr, int node_inx);
 
 /*
  * check_job_step_time_limit - terminate jobsteps which have exceeded
@@ -1556,7 +1656,7 @@ extern void make_node_comp(struct node_record *node_ptr,
 /*
  * make_node_idle - flag specified node as having finished with a job
  * IN node_ptr - pointer to node reporting job completion
- * IN job_ptr  - pointer to job that just completed
+ * IN job_ptr - pointer to job that just completed or NULL if not applicable
  */
 extern void make_node_idle(struct node_record *node_ptr,
 			   struct job_record *job_ptr);
@@ -1591,6 +1691,10 @@ extern void node_not_resp (char *name, time_t msg_time,
  * and log that the node is not responding using a hostlist expression */
 extern void node_no_resp_msg(void);
 
+/* For a given job ID return the number of PENDING tasks which have their
+ * own separate job_record (do not count tasks in pending META job record) */
+extern int num_pending_job_array_tasks(uint32_t array_job_id);
+
 /*
  * pack_all_jobs - dump all job information for all jobs in
  *	machine independent form (for network transmission)
@@ -1626,6 +1730,18 @@ extern void pack_all_node (char **buffer_ptr, int *buffer_size,
 			   uint16_t show_flags, uid_t uid,
 			   uint16_t protocol_version);
 
+/*
+ * pack_all_sicp - dump inter-cluster job state information
+ * OUT buffer_ptr - the pointer is set to the allocated buffer.
+ * OUT buffer_size - set to size of the buffer in bytes
+ * IN uid - uid of user making request (for job/partition filtering)
+ * NOTE: the buffer at *buffer_ptr must be xfreed by the caller
+ * NOTE: change _unpack_sicp_msg() in common/slurm_protocol_pack.c
+ *	whenever the data format changes
+ */
+extern void pack_all_sicp(char **buffer_ptr, int *buffer_size,
+			  uid_t uid, uint16_t protocol_version);
+
 /* Pack all scheduling statistics */
 extern void pack_all_stat(int resp, char **buffer_ptr, int *buffer_size,
 			  uint16_t protocol_version);
@@ -1826,6 +1942,9 @@ extern void reset_job_bitmaps (void);
 /* Reset a node's CPU load value */
 extern void reset_node_load(char *node_name, uint32_t cpu_load);
 
+/* Reset a node's free memory value */
+extern void reset_node_free_mem(char *node_name, uint32_t free_mem);
+
 /* Reset all scheduling statistics
  * level IN - clear backfilled_jobs count if set */
 extern void reset_stats(int level);
@@ -1861,18 +1980,24 @@ extern void send_all_to_accounting(time_t event_time);
 
 /* A slurmctld lock needs to at least have a node read lock set before
  * this is called */
-extern void set_cluster_cpus(void);
+extern void set_cluster_tres(bool assoc_mgr_locked);
 
 /* sends all jobs in eligible state to accounting.  Only needed at
  * first registration
  */
-extern int send_jobs_to_accounting();
+extern int send_jobs_to_accounting(void);
 
 /* send all nodes in a down like state to accounting.  Only needed at
  * first registration
  */
 extern int send_nodes_to_accounting(time_t event_time);
 
+/* Decrement slurmctld thread count (as applies to thread limit) */
+extern void server_thread_decr(void);
+
+/* Increment slurmctld thread count (as applies to thread limit) */
+extern void server_thread_incr(void);
+
 /* Set a job's alias_list string */
 extern void set_job_alias_list(struct job_record *job_ptr);
 
@@ -1945,12 +2070,14 @@ extern void step_alloc_lps(struct step_record *step_ptr);
  * IN step_specs - job step specifications
  * OUT new_step_record - pointer to the new step_record (NULL on error)
  * IN batch_step - set if step is a batch script
- * RET - 0 or error code
+ * IN protocol_version - slurm protocol version of client
+ * RET - 0 or error code
  * NOTE: don't free the returned step_record because that is managed through
  * 	the job.
  */
 extern int step_create(job_step_create_request_msg_t *step_specs,
-		       struct step_record** new_step_record, bool batch_step);
+		       struct step_record** new_step_record, bool batch_step,
+		       uint16_t protocol_version);
 
 /*
  * step_layout_create - creates a step_layout according to the inputs.
@@ -1970,7 +2097,7 @@ extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr,
 					       uint32_t node_count,
 					       uint32_t num_tasks,
 					       uint16_t cpus_per_task,
-					       uint16_t task_dist,
+					       uint32_t task_dist,
 					       uint16_t plane_size);
 
 /*
@@ -2021,6 +2148,9 @@ extern bool test_job_array_complete(uint32_t array_job_id);
 /* Return true if ALL tasks of specific array job ID are completed */
 extern bool test_job_array_completed(uint32_t array_job_id);
 
+/* Return true if ALL tasks of specific array job ID are finished */
+extern bool test_job_array_finished(uint32_t array_job_id);
+
 /* Return true if ANY tasks of specific array job ID are pending */
 extern bool test_job_array_pending(uint32_t array_job_id);
 
@@ -2216,14 +2346,29 @@ extern bool validate_operator(uid_t uid);
 extern void cleanup_completing(struct job_record *);
 
 /*
- * jobid2str() - print all the parts that uniquely
- *               identify a job.
+ * jobid2fmt() - print a job ID including job array information.
+ */
+extern char *jobid2fmt(struct job_record *job_ptr, char *buf, int buf_size);
+
+/*
+ * jobid2str() - print all the parts that uniquely identify a job.
  */
-extern char *jobid2str(struct job_record *, char *);
+extern char *jobid2str(struct job_record *job_ptr, char *buf, int buf_size);
+
 
 /* trace_job() - print the job details if
  *               the DEBUG_FLAG_TRACE_JOBS is set
  */
 extern void trace_job(struct job_record *, const char *, const char *);
 
+/*
+ */
+int
+waitpid_timeout(const char *, pid_t, int *, int);
+
+/*
+ * Calculate and populate the number of TRES for all partitions.
+ */
+extern void set_partition_tres();
+
 #endif /* !_HAVE_SLURMCTLD_H */
diff --git a/src/slurmctld/srun_comm.c b/src/slurmctld/srun_comm.c
index 25084e0e1..d039310fb 100644
--- a/src/slurmctld/srun_comm.c
+++ b/src/slurmctld/srun_comm.c
@@ -81,6 +81,7 @@ static void _srun_agent_launch(slurm_addr_t *addr, char *host,
 extern void srun_allocate (uint32_t job_id)
 {
 	struct job_record *job_ptr = find_job_record (job_id);
+	int i;
 
 	xassert(job_ptr);
 	if (job_ptr && job_ptr->alloc_resp_port && job_ptr->alloc_node &&
@@ -104,6 +105,19 @@ extern void srun_allocate (uint32_t job_id)
 		if (job_ptr->details) {
 			msg_arg->pn_min_memory = job_ptr->details->
 						 pn_min_memory;
+			msg_arg->cpu_freq_min = job_ptr->details->cpu_freq_min;
+			msg_arg->cpu_freq_max = job_ptr->details->cpu_freq_max;
+			msg_arg->cpu_freq_gov = job_ptr->details->cpu_freq_gov;
+			if (job_ptr->details->env_cnt) {
+				msg_arg->env_size = job_ptr->details->env_cnt;
+				msg_arg->environment = xmalloc(
+					sizeof(char *) * msg_arg->env_size);
+				for (i = 0; i < msg_arg->env_size; i++) {
+					msg_arg->environment[i] =
+						xstrdup(job_ptr->details->
+							env_sup[i]);
+				}
+			}
 		}
 		memcpy(msg_arg->cpus_per_node,
 		       job_resrcs_ptr->cpu_array_value,
@@ -117,6 +131,7 @@ extern void srun_allocate (uint32_t job_id)
 		msg_arg->select_jobinfo = select_g_select_jobinfo_copy(
 				job_ptr->select_jobinfo);
 		msg_arg->error_code	= SLURM_SUCCESS;
+
 		_srun_agent_launch(addr, job_ptr->alloc_node,
 				   RESPONSE_RESOURCE_ALLOCATION, msg_arg,
 				   job_ptr->start_protocol_ver);
diff --git a/src/slurmctld/statistics.c b/src/slurmctld/statistics.c
index 89706a959..9f1ac8f87 100644
--- a/src/slurmctld/statistics.c
+++ b/src/slurmctld/statistics.c
@@ -58,14 +58,68 @@ extern void pack_all_stat(int resp, char **buffer_ptr, int *buffer_size,
 	int parts_packed;
 	int agent_queue_size;
 	time_t now = time(NULL);
+	uint32_t uint32_tmp = 0;
 
 	buffer_ptr[0] = NULL;
 	*buffer_size = 0;
 
 	buffer = init_buf(BUF_SIZE);
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
+		parts_packed = resp;
+		pack32(parts_packed, buffer);
+
+		if (resp) {
+			pack_time(now, buffer);
+			debug3("pack_all_stat: time = %u",
+			       (uint32_t) last_proc_req_start);
+			pack_time(last_proc_req_start, buffer);
+
+			debug3("pack_all_stat: server_thread_count = %u",
+			       slurmctld_config.server_thread_count);
+			pack32(slurmctld_config.server_thread_count, buffer);
+
+			agent_queue_size = retry_list_size();
+			pack32(agent_queue_size, buffer);
 
-	if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+			pack32(slurmctld_diag_stats.jobs_submitted, buffer);
+			pack32(slurmctld_diag_stats.jobs_started, buffer);
+			pack32(slurmctld_diag_stats.jobs_completed, buffer);
+			pack32(slurmctld_diag_stats.jobs_canceled, buffer);
+			pack32(slurmctld_diag_stats.jobs_failed, buffer);
+
+			pack32(slurmctld_diag_stats.schedule_cycle_max,
+			       buffer);
+			pack32(slurmctld_diag_stats.schedule_cycle_last,
+			       buffer);
+			pack32(slurmctld_diag_stats.schedule_cycle_sum,
+			       buffer);
+			pack32(slurmctld_diag_stats.schedule_cycle_counter,
+			       buffer);
+			pack32(slurmctld_diag_stats.schedule_cycle_depth,
+			       buffer);
+			pack32(slurmctld_diag_stats.schedule_queue_len, buffer);
+
+			pack32(slurmctld_diag_stats.backfilled_jobs, buffer);
+			pack32(slurmctld_diag_stats.last_backfilled_jobs,
+			       buffer);
+			pack32(slurmctld_diag_stats.bf_cycle_counter, buffer);
+			pack64(slurmctld_diag_stats.bf_cycle_sum, buffer);
+			pack32(slurmctld_diag_stats.bf_cycle_last, buffer);
+			pack32(slurmctld_diag_stats.bf_last_depth, buffer);
+			pack32(slurmctld_diag_stats.bf_last_depth_try, buffer);
+
+			pack32(slurmctld_diag_stats.bf_queue_len, buffer);
+			pack32(slurmctld_diag_stats.bf_cycle_max, buffer);
+			pack_time(slurmctld_diag_stats.bf_when_last_cycle,
+				  buffer);
+			pack32(slurmctld_diag_stats.bf_depth_sum, buffer);
+			pack32(slurmctld_diag_stats.bf_depth_try_sum, buffer);
+			pack32(slurmctld_diag_stats.bf_queue_len_sum, buffer);
+			pack32(slurmctld_diag_stats.bf_active,	 buffer);
+		}
+	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
 		parts_packed = resp;
+
 		pack32(parts_packed, buffer);
 
 		if (resp) {
@@ -103,7 +157,8 @@ extern void pack_all_stat(int resp, char **buffer_ptr, int *buffer_size,
 			pack32(slurmctld_diag_stats.last_backfilled_jobs,
 			       buffer);
 			pack32(slurmctld_diag_stats.bf_cycle_counter, buffer);
-			pack32(slurmctld_diag_stats.bf_cycle_sum, buffer);
+			uint32_tmp = (uint32_t)slurmctld_diag_stats.bf_cycle_sum;
+			pack32(uint32_tmp, buffer);
 			pack32(slurmctld_diag_stats.bf_cycle_last, buffer);
 			pack32(slurmctld_diag_stats.bf_last_depth, buffer);
 			pack32(slurmctld_diag_stats.bf_last_depth_try, buffer);
diff --git a/src/slurmctld/step_mgr.c b/src/slurmctld/step_mgr.c
index 9c239e4a3..293f1c94a 100644
--- a/src/slurmctld/step_mgr.c
+++ b/src/slurmctld/step_mgr.c
@@ -82,7 +82,8 @@ static void _build_pending_step(struct job_record  *job_ptr,
 				job_step_create_request_msg_t *step_specs);
 static int  _count_cpus(struct job_record *job_ptr, bitstr_t *bitmap,
 			uint32_t *usable_cpu_cnt);
-static struct step_record * _create_step_record(struct job_record *job_ptr);
+static struct step_record * _create_step_record(struct job_record *job_ptr,
+						uint16_t protocol_version);
 static void _dump_step_layout(struct step_record *step_ptr);
 static void _free_step_rec(struct step_record *step_ptr);
 static bool _is_mem_resv(void);
@@ -157,10 +158,12 @@ static int _opt_node_cnt(uint32_t step_min_nodes, uint32_t step_max_nodes,
 /*
  * _create_step_record - create an empty step_record for the specified job.
  * IN job_ptr - pointer to job table entry to have step record added
+ * IN protocol_version - slurm protocol version of client
  * RET a pointer to the record or NULL if error
  * NOTE: allocates memory that should be xfreed with delete_step_record
  */
-static struct step_record * _create_step_record(struct job_record *job_ptr)
+static struct step_record * _create_step_record(struct job_record *job_ptr,
+						uint16_t protocol_version)
 {
 	struct step_record *step_ptr;
 
@@ -181,7 +184,11 @@ static struct step_record * _create_step_record(struct job_record *job_ptr)
 	step_ptr->time_limit = INFINITE;
 	step_ptr->jobacct    = jobacctinfo_create(NULL);
 	step_ptr->requid     = -1;
-	step_ptr->start_protocol_ver = SLURM_PROTOCOL_VERSION;
+	if (protocol_version)
+		step_ptr->start_protocol_ver = protocol_version;
+	else
+		step_ptr->start_protocol_ver = job_ptr->start_protocol_ver;
+
 	(void) list_append (job_ptr->step_list, step_ptr);
 
 	return step_ptr;
@@ -197,34 +204,39 @@ static void _build_pending_step(struct job_record *job_ptr,
 	if ((step_specs->host == NULL) || (step_specs->port == 0))
 		return;
 
-	step_ptr = _create_step_record(job_ptr);
+	step_ptr = _create_step_record(job_ptr, 0);
 	if (step_ptr == NULL)
 		return;
 
-	step_ptr->port      = step_specs->port;
-	step_ptr->host      = xstrdup(step_specs->host);
-	step_ptr->state     = JOB_PENDING;
-	step_ptr->cpu_count = step_specs->num_tasks;
+	step_ptr->cpu_count	= step_specs->num_tasks;
+	step_ptr->port		= step_specs->port;
+	step_ptr->host		= xstrdup(step_specs->host);
+	step_ptr->state		= JOB_PENDING;
+	step_ptr->step_id	= SLURM_EXTERN_CONT;
+	if (job_ptr->node_bitmap)
+		step_ptr->step_node_bitmap = bit_copy(job_ptr->node_bitmap);
 	step_ptr->time_last_active = time(NULL);
-	step_ptr->step_id   = INFINITE;
+
 }
 
 static void _internal_step_complete(struct job_record *job_ptr,
 				    struct step_record *step_ptr)
 {
 	jobacct_storage_g_step_complete(acct_db_conn, step_ptr);
-	job_ptr->derived_ec = MAX(job_ptr->derived_ec,
-				  step_ptr->exit_code);
-
-	/* This operations are needed for Cray systems and also provide a
-	 * cleaner state for requeued jobs. */
-	step_ptr->state = JOB_COMPLETING;
-	select_g_step_finish(step_ptr);
+	if (step_ptr->step_id != SLURM_EXTERN_CONT) {
+		job_ptr->derived_ec = MAX(job_ptr->derived_ec,
+					  step_ptr->exit_code);
+
+		/* These operations are needed for Cray systems and also provide
+		 * a cleaner state for requeued jobs. */
+		step_ptr->state = JOB_COMPLETING;
+		select_g_step_finish(step_ptr);
 #if !defined(HAVE_NATIVE_CRAY) && !defined(HAVE_CRAY_NETWORK)
-	/* On native Cray, post_job_step is called after NHC completes.
-	 * IF SIMULATING A CRAY THIS NEEDS TO BE COMMENTED OUT!!!! */
-	post_job_step(step_ptr);
+		/* On native Cray, post_job_step is called after NHC completes.
+		 * IF SIMULATING A CRAY THIS NEEDS TO BE COMMENTED OUT!!!! */
+		post_job_step(step_ptr);
 #endif
+	}
 }
 
 /*
@@ -245,7 +257,7 @@ extern void delete_step_records (struct job_record *job_ptr)
 	step_iterator = list_iterator_create(job_ptr->step_list);
 	while ((step_ptr = (struct step_record *) list_next (step_iterator))) {
 		/* Only check if not a pending step */
-		if (step_ptr->step_id != INFINITE) {
+		if (step_ptr->step_id != SLURM_EXTERN_CONT) {
 			uint16_t cleaning = 0;
 			select_g_select_jobinfo_get(step_ptr->select_jobinfo,
 						    SELECT_JOBDATA_CLEANING,
@@ -255,6 +267,7 @@ extern void delete_step_records (struct job_record *job_ptr)
 			/* _internal_step_complete() will purge step record */
 			_internal_step_complete(job_ptr, step_ptr);
 		} else {
+			_internal_step_complete(job_ptr, step_ptr);
 			list_remove (step_iterator);
 			_free_step_rec(step_ptr);
 		}
@@ -265,6 +278,7 @@ extern void delete_step_records (struct job_record *job_ptr)
 /*
  * step_list_purge - Simple purge of a job's step list records. No testing is
  *	performed to insure the step records has no active references.
+ *	**ONLY CALL WHEN FREEING MEMORY AT END OF JOB**
  * IN job_ptr - pointer to job table entry to have step records removed
  */
 extern void step_list_purge(struct job_record *job_ptr)
@@ -282,7 +296,7 @@ extern void step_list_purge(struct job_record *job_ptr)
 		_free_step_rec(step_ptr);
 	}
 	list_iterator_destroy(step_iterator);
-	list_destroy(job_ptr->step_list);
+	FREE_NULL_LIST(job_ptr->step_list);
 }
 
 /* _free_step_rec - delete a step record's data structures */
@@ -312,9 +326,10 @@ static void _free_step_rec(struct step_record *step_ptr)
 	xfree(step_ptr->network);
 	xfree(step_ptr->ckpt_dir);
 	xfree(step_ptr->gres);
-	if (step_ptr->gres_list)
-		list_destroy(step_ptr->gres_list);
+	FREE_NULL_LIST(step_ptr->gres_list);
 	select_g_select_jobinfo_free(step_ptr->select_jobinfo);
+	xfree(step_ptr->tres_alloc_str);
+	xfree(step_ptr->tres_fmt_alloc_str);
 	xfree(step_ptr->ext_sensors);
 	step_ptr->job_ptr = NULL;
 	xfree(step_ptr);
@@ -377,12 +392,22 @@ dump_step_desc(job_step_create_request_msg_t *step_spec)
 		mem_type   = "cpu";
 	}
 
-	debug3("StepDesc: user_id=%u job_id=%u node_count=%u-%u cpu_count=%u",
+	if (slurmctld_conf.debug_flags & DEBUG_FLAG_CPU_FREQ) {
+		info("StepDesc: user_id=%u job_id=%u "
+		     "cpu_freq_gov=%u cpu_freq_max=%u cpu_freq_min=%u",
+		     step_spec->user_id, step_spec->job_id,
+		     step_spec->cpu_freq_gov, step_spec->cpu_freq_max,
+		     step_spec->cpu_freq_min);
+	}
+	debug3("StepDesc: user_id=%u job_id=%u node_count=%u-%u cpu_count=%u "
+	       "num_tasks=%u",
 	       step_spec->user_id, step_spec->job_id,
 	       step_spec->min_nodes, step_spec->max_nodes,
-	       step_spec->cpu_count);
-	debug3("   cpu_freq=%u num_tasks=%u relative=%u task_dist=%u plane=%u",
-	       step_spec->cpu_freq, step_spec->num_tasks, step_spec->relative,
+	       step_spec->cpu_count, step_spec->num_tasks);
+	debug3("   cpu_freq_gov=%u cpu_freq_max=%u cpu_freq_min=%u "
+	       "relative=%u task_dist=0x%X plane=%u",
+	       step_spec->cpu_freq_gov, step_spec->cpu_freq_max,
+	       step_spec->cpu_freq_min, step_spec->relative,
 	       step_spec->task_dist, step_spec->plane_size);
 	debug3("   node_list=%s  constraints=%s",
 	       step_spec->node_list, step_spec->features);
@@ -712,7 +737,7 @@ int job_step_complete(uint32_t job_id, uint32_t step_id, uid_t uid,
 	if (step_ptr == NULL)
 		return ESLURM_INVALID_JOB_ID;
 
-	if (step_ptr->step_id == INFINITE)	/* batch step */
+	if (step_ptr->step_id == SLURM_EXTERN_CONT)
 		return SLURM_SUCCESS;
 
 	/* If the job is already cleaning we have already been here
@@ -957,8 +982,11 @@ _pick_step_nodes (struct job_record  *job_ptr,
 				return NULL;
 			}
 		}
-		job_ptr->job_state &= (~JOB_CONFIGURING);
-		debug("Configuration for job %u complete", job_ptr->job_id);
+		if (job_ptr->details
+		    && job_ptr->details->prolog_running == 0) {
+			job_ptr->job_state &= (~JOB_CONFIGURING);
+			debug("Configuration for job %u complete", job_ptr->job_id);
+		}
 	}
 
 	/* In exclusive mode, just satisfy the processor count.
@@ -967,7 +995,8 @@ _pick_step_nodes (struct job_record  *job_ptr,
 	if (step_spec->exclusive) {
 		int avail_cpus, avail_tasks, total_cpus, total_tasks, node_inx;
 		int i_first, i_last;
-		uint32_t avail_mem, total_mem, gres_cnt;
+		uint32_t avail_mem, total_mem;
+		uint64_t gres_cnt;
 		uint32_t nodes_picked_cnt = 0;
 		uint32_t tasks_picked_cnt = 0, total_task_cnt = 0;
 		bitstr_t *selected_nodes = NULL, *non_selected_nodes = NULL;
@@ -1062,9 +1091,9 @@ _pick_step_nodes (struct job_record  *job_ptr,
 							 node_inx, false,
 							 job_ptr->job_id,
 							 NO_VAL);
-			if ((gres_cnt != NO_VAL) && (cpus_per_task > 0))
+			if ((gres_cnt != NO_VAL64) && (cpus_per_task > 0))
 				gres_cnt /= cpus_per_task;
-			avail_tasks = MIN(avail_tasks, gres_cnt);
+			avail_tasks = MIN((uint64_t)avail_tasks, gres_cnt);
 			gres_cnt = gres_plugin_step_test(step_gres_list,
 							 job_ptr->gres_list,
 							 node_inx, true,
@@ -1072,7 +1101,7 @@ _pick_step_nodes (struct job_record  *job_ptr,
 							 NO_VAL);
 			if ((gres_cnt != NO_VAL) && (cpus_per_task > 0))
 				gres_cnt /= cpus_per_task;
-			total_tasks = MIN(total_tasks, gres_cnt);
+			total_tasks = MIN((uint64_t)total_tasks, gres_cnt);
 			if (step_spec->plane_size &&
 			    step_spec->plane_size != (uint16_t) NO_VAL) {
 				if (avail_tasks < step_spec->plane_size)
@@ -1314,7 +1343,8 @@ _pick_step_nodes (struct job_record  *job_ptr,
 			FREE_NULL_BITMAP(selected_nodes);
 			goto cleanup;
 		}
-		if (step_spec->task_dist == SLURM_DIST_ARBITRARY) {
+		if ((step_spec->task_dist & SLURM_DIST_STATE_BASE) ==
+		    SLURM_DIST_ARBITRARY) {
 			/* if we are in arbitrary mode we need to make
 			 * sure we aren't running on an elan switch.
 			 * If we aren't change the number of nodes
@@ -1327,7 +1357,8 @@ _pick_step_nodes (struct job_record  *job_ptr,
 				     "switch type elan. Switching DIST type "
 				     "to BLOCK");
 				xfree(step_spec->node_list);
-				step_spec->task_dist = SLURM_DIST_BLOCK;
+				step_spec->task_dist &= SLURM_DIST_STATE_FLAGS;
+				step_spec->task_dist |= SLURM_DIST_BLOCK;
 				FREE_NULL_BITMAP(selected_nodes);
 				step_spec->min_nodes =
 					bit_set_count(nodes_avail);
@@ -2075,18 +2106,25 @@ static int _test_strlen(char *test_str, char *str_name, int max_str_len)
  */
 extern int
 step_create(job_step_create_request_msg_t *step_specs,
-	    struct step_record** new_step_record, bool batch_step)
+	    struct step_record** new_step_record, bool batch_step,
+	    uint16_t protocol_version)
 {
 	struct step_record *step_ptr;
 	struct job_record  *job_ptr;
 	bitstr_t *nodeset;
 	int cpus_per_task, ret_code, i;
 	uint32_t node_count = 0;
+	uint64_t cpu_count, tres_count;
 	time_t now = time(NULL);
 	char *step_node_list = NULL;
 	uint32_t orig_cpu_count;
 	List step_gres_list = (List) NULL;
 	dynamic_plugin_data_t *select_jobinfo = NULL;
+	uint32_t task_dist;
+	char *tmp_tres_str = NULL;
+	assoc_mgr_lock_t locks = { READ_LOCK, NO_LOCK, NO_LOCK, NO_LOCK,
+				   READ_LOCK, NO_LOCK, NO_LOCK };
+
 #ifdef HAVE_ALPS_CRAY
 	uint32_t resv_id = 0;
 #endif
@@ -2128,19 +2166,48 @@ step_create(job_step_create_request_msg_t *step_specs,
 	    (job_ptr->end_time <= time(NULL)))
 		return ESLURM_ALREADY_DONE;
 
-	if ((step_specs->task_dist != SLURM_DIST_CYCLIC) &&
-	    (step_specs->task_dist != SLURM_DIST_BLOCK) &&
-	    (step_specs->task_dist != SLURM_DIST_CYCLIC_CYCLIC) &&
-	    (step_specs->task_dist != SLURM_DIST_BLOCK_CYCLIC) &&
-	    (step_specs->task_dist != SLURM_DIST_CYCLIC_BLOCK) &&
-	    (step_specs->task_dist != SLURM_DIST_BLOCK_BLOCK) &&
-	    (step_specs->task_dist != SLURM_DIST_CYCLIC_CFULL) &&
-	    (step_specs->task_dist != SLURM_DIST_BLOCK_CFULL) &&
-	    (step_specs->task_dist != SLURM_DIST_PLANE) &&
-	    (step_specs->task_dist != SLURM_DIST_ARBITRARY))
+	task_dist = step_specs->task_dist & SLURM_DIST_STATE_BASE;
+	/* Set to block in the case that mem is 0. srun leaves the dist
+	 * set to unknown if mem is 0.
+	 * ex. SallocDefaultCommand=srun -n1 -N1 --mem=0 ... */
+	if ((task_dist == SLURM_DIST_UNKNOWN) &&
+	    (!(step_specs->pn_min_memory &(~MEM_PER_CPU)))) {
+		step_specs->task_dist &= SLURM_DIST_STATE_FLAGS;
+		step_specs->task_dist |= SLURM_DIST_BLOCK;
+		task_dist = SLURM_DIST_BLOCK;
+	}
+
+	if ((task_dist != SLURM_DIST_CYCLIC) &&
+	    (task_dist != SLURM_DIST_BLOCK) &&
+	    (task_dist != SLURM_DIST_CYCLIC_CYCLIC) &&
+	    (task_dist != SLURM_DIST_BLOCK_CYCLIC) &&
+	    (task_dist != SLURM_DIST_CYCLIC_BLOCK) &&
+	    (task_dist != SLURM_DIST_BLOCK_BLOCK) &&
+	    (task_dist != SLURM_DIST_CYCLIC_CFULL) &&
+	    (task_dist != SLURM_DIST_BLOCK_CFULL) &&
+	    (task_dist != SLURM_DIST_CYCLIC_CYCLIC_CYCLIC) &&
+	    (task_dist != SLURM_DIST_CYCLIC_CYCLIC_BLOCK) &&
+	    (task_dist != SLURM_DIST_CYCLIC_CYCLIC_CFULL) &&
+	    (task_dist != SLURM_DIST_CYCLIC_BLOCK_CYCLIC) &&
+	    (task_dist != SLURM_DIST_CYCLIC_BLOCK_BLOCK) &&
+	    (task_dist != SLURM_DIST_CYCLIC_BLOCK_CFULL) &&
+	    (task_dist != SLURM_DIST_CYCLIC_CFULL_CYCLIC) &&
+	    (task_dist != SLURM_DIST_CYCLIC_CFULL_BLOCK) &&
+	    (task_dist != SLURM_DIST_CYCLIC_CFULL_CFULL) &&
+	    (task_dist != SLURM_DIST_BLOCK_CYCLIC_CYCLIC) &&
+	    (task_dist != SLURM_DIST_BLOCK_CYCLIC_BLOCK) &&
+	    (task_dist != SLURM_DIST_BLOCK_CYCLIC_CFULL) &&
+	    (task_dist != SLURM_DIST_BLOCK_BLOCK_CYCLIC) &&
+	    (task_dist != SLURM_DIST_BLOCK_BLOCK_BLOCK) &&
+	    (task_dist != SLURM_DIST_BLOCK_BLOCK_CFULL) &&
+	    (task_dist != SLURM_DIST_BLOCK_CFULL_CYCLIC) &&
+	    (task_dist != SLURM_DIST_BLOCK_CFULL_BLOCK) &&
+	    (task_dist != SLURM_DIST_BLOCK_CFULL_CFULL) &&
+	    (task_dist != SLURM_DIST_PLANE) &&
+	    (task_dist != SLURM_DIST_ARBITRARY))
 		return ESLURM_BAD_DIST;
 
-	if ((step_specs->task_dist == SLURM_DIST_ARBITRARY) &&
+	if ((task_dist == SLURM_DIST_ARBITRARY) &&
 	    (!strcmp(slurmctld_conf.switch_type, "switch/elan"))) {
 		return ESLURM_TASKDIST_ARBITRARY_UNSUPPORTED;
 	}
@@ -2149,8 +2216,7 @@ step_create(job_step_create_request_msg_t *step_specs,
 	    _test_strlen(step_specs->gres, "gres", 1024)		||
 	    _test_strlen(step_specs->host, "host", 1024)		||
 	    _test_strlen(step_specs->name, "name", 1024)		||
-	    _test_strlen(step_specs->network, "network", 1024)		||
-	    _test_strlen(step_specs->node_list, "node_list", 1024*64))
+	    _test_strlen(step_specs->network, "network", 1024))
 		return ESLURM_PATHNAME_TOO_LONG;
 
 	if (job_ptr->next_step_id >= slurmctld_conf.max_step_cnt)
@@ -2240,8 +2306,7 @@ step_create(job_step_create_request_msg_t *step_specs,
 					    job_ptr->gres_list, job_ptr->job_id,
 					    NO_VAL);
 	if (i != SLURM_SUCCESS) {
-		if (step_gres_list)
-			list_destroy(step_gres_list);
+		FREE_NULL_LIST(step_gres_list);
 		return i;
 	}
 
@@ -2254,8 +2319,7 @@ step_create(job_step_create_request_msg_t *step_specs,
 				   cpus_per_task, node_count, select_jobinfo,
 				   &ret_code);
 	if (nodeset == NULL) {
-		if (step_gres_list)
-			list_destroy(step_gres_list);
+		FREE_NULL_LIST(step_gres_list);
 		select_g_select_jobinfo_free(select_jobinfo);
 		if ((ret_code == ESLURM_NODES_BUSY) ||
 		    (ret_code == ESLURM_PORTS_BUSY) ||
@@ -2290,17 +2354,15 @@ step_create(job_step_create_request_msg_t *step_specs,
 	if (step_specs->num_tasks > max_tasks) {
 		error("step has invalid task count: %u max is %u",
 		      step_specs->num_tasks, max_tasks);
-		if (step_gres_list)
-			list_destroy(step_gres_list);
+		FREE_NULL_LIST(step_gres_list);
 		FREE_NULL_BITMAP(nodeset);
 		select_g_select_jobinfo_free(select_jobinfo);
 		return ESLURM_BAD_TASK_COUNT;
 	}
 #endif
-	step_ptr = _create_step_record(job_ptr);
+	step_ptr = _create_step_record(job_ptr, protocol_version);
 	if (step_ptr == NULL) {
-		if (step_gres_list)
-			list_destroy(step_gres_list);
+		FREE_NULL_LIST(step_gres_list);
 		FREE_NULL_BITMAP(nodeset);
 		select_g_select_jobinfo_free(select_jobinfo);
 		return ESLURMD_TOOMANYSTEPS;
@@ -2311,7 +2373,8 @@ step_create(job_step_create_request_msg_t *step_specs,
 
 	/* Here is where the node list is set for the step */
 	if (step_specs->node_list &&
-	    (step_specs->task_dist == SLURM_DIST_ARBITRARY)) {
+	    ((step_specs->task_dist & SLURM_DIST_STATE_BASE) ==
+	     SLURM_DIST_ARBITRARY)) {
 		step_node_list = xstrdup(step_specs->node_list);
 		xfree(step_specs->node_list);
 		step_specs->node_list = bitmap2node_name(nodeset);
@@ -2326,7 +2389,7 @@ step_create(job_step_create_request_msg_t *step_specs,
 	}
 	step_ptr->step_node_bitmap = nodeset;
 
-	switch(step_specs->task_dist) {
+	switch (step_specs->task_dist & SLURM_DIST_NODESOCKMASK) {
 	case SLURM_DIST_CYCLIC:
 	case SLURM_DIST_CYCLIC_CYCLIC:
 	case SLURM_DIST_CYCLIC_CFULL:
@@ -2348,7 +2411,17 @@ step_create(job_step_create_request_msg_t *step_specs,
 	step_ptr->port = step_specs->port;
 	step_ptr->host = xstrdup(step_specs->host);
 	step_ptr->batch_step = batch_step;
-	step_ptr->cpu_freq = step_specs->cpu_freq;
+	if ((step_specs->cpu_freq_min == NO_VAL) &&
+	    (step_specs->cpu_freq_max == NO_VAL) &&
+	    (step_specs->cpu_freq_gov == NO_VAL)) {
+		step_ptr->cpu_freq_min = job_ptr->details->cpu_freq_min;
+		step_ptr->cpu_freq_max = job_ptr->details->cpu_freq_max;
+		step_ptr->cpu_freq_gov = job_ptr->details->cpu_freq_gov;
+	} else {
+		step_ptr->cpu_freq_min = step_specs->cpu_freq_min;
+		step_ptr->cpu_freq_max = step_specs->cpu_freq_max;
+		step_ptr->cpu_freq_gov = step_specs->cpu_freq_gov;
+	}
 	step_ptr->cpus_per_task = (uint16_t)cpus_per_task;
 	step_ptr->pn_min_memory = step_specs->pn_min_memory;
 	step_ptr->ckpt_interval = step_specs->ckpt_interval;
@@ -2395,6 +2468,8 @@ step_create(job_step_create_request_msg_t *step_specs,
 
 	/* a batch script does not need switch info */
 	if (!batch_step) {
+		char *mpi_params;
+
 		step_ptr->step_layout =
 			step_layout_create(step_ptr,
 					   step_node_list, node_count,
@@ -2409,8 +2484,10 @@ step_create(job_step_create_request_msg_t *step_specs,
 				return ESLURM_INVALID_TASK_MEMORY;
 			return SLURM_ERROR;
 		}
-		if ((step_specs->resv_port_cnt != (uint16_t) NO_VAL)
-		    && (step_specs->resv_port_cnt == 0)) {
+		if (step_specs->resv_port_cnt == (uint16_t) NO_VAL
+		    && (mpi_params = slurm_get_mpi_params())) {
+
+			step_specs->resv_port_cnt = 0;
 			/* reserved port count set to maximum task count on
 			 * any node plus one */
 			for (i = 0; i < step_ptr->step_layout->node_cnt; i++) {
@@ -2419,6 +2496,7 @@ step_create(job_step_create_request_msg_t *step_specs,
 					    step_ptr->step_layout->tasks[i]);
 			}
 			step_specs->resv_port_cnt++;
+			xfree(mpi_params);
 		}
 		if (step_specs->resv_port_cnt != (uint16_t) NO_VAL
 		    && step_specs->resv_port_cnt != 0) {
@@ -2456,6 +2534,48 @@ step_create(job_step_create_request_msg_t *step_specs,
 
 	select_g_step_start(step_ptr);
 
+#ifdef HAVE_BG_L_P
+	/* Only L and P use this code */
+	if (step_ptr->job_ptr->details)
+		cpu_count = (uint64_t)step_ptr->job_ptr->details->min_cpus;
+	else
+		cpu_count = (uint64_t)step_ptr->job_ptr->cpu_cnt;
+#else
+	if (!step_ptr->step_layout || !step_ptr->step_layout->task_cnt)
+		cpu_count = (uint64_t)step_ptr->job_ptr->total_cpus;
+	else
+		cpu_count = (uint64_t)step_ptr->cpu_count;
+#endif
+	xfree(step_ptr->tres_alloc_str);
+
+	xstrfmtcat(step_ptr->tres_alloc_str, "%s%u=%"PRIu64,
+		   step_ptr->tres_alloc_str ? "," : "",
+		   TRES_CPU, cpu_count);
+
+	tres_count = (uint64_t)step_ptr->pn_min_memory;
+	if (tres_count & MEM_PER_CPU) {
+		tres_count &= (~MEM_PER_CPU);
+		tres_count *= cpu_count;
+	} else
+		tres_count *= node_count;
+
+	xstrfmtcat(step_ptr->tres_alloc_str, "%s%u=%"PRIu64,
+		   step_ptr->tres_alloc_str ? "," : "",
+		   TRES_MEM, tres_count);
+
+	if ((tmp_tres_str = gres_2_tres_str(step_ptr->gres_list, 0, true))) {
+		xstrfmtcat(step_ptr->tres_alloc_str, "%s%s",
+			   step_ptr->tres_alloc_str ? "," : "",
+			   tmp_tres_str);
+		xfree(tmp_tres_str);
+	}
+
+	xfree(step_ptr->tres_fmt_alloc_str);
+	assoc_mgr_lock(&locks);
+	step_ptr->tres_fmt_alloc_str = slurmdb_make_tres_string_from_simple(
+		step_ptr->tres_alloc_str, assoc_mgr_tres_list);
+	assoc_mgr_unlock(&locks);
+
 	jobacct_storage_g_step_start(acct_db_conn, step_ptr);
 	return SLURM_SUCCESS;
 }
@@ -2465,7 +2585,7 @@ extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr,
 					       uint32_t node_count,
 					       uint32_t num_tasks,
 					       uint16_t cpus_per_task,
-					       uint16_t task_dist,
+					       uint32_t task_dist,
 					       uint16_t plane_size)
 {
 	uint16_t cpus_per_node[node_count];
@@ -2496,6 +2616,14 @@ extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr,
 	} else if (step_ptr->pn_min_memory == MEM_PER_CPU)
 		step_ptr->pn_min_memory = 0;	/* clear MEM_PER_CPU flag */
 
+#ifdef HAVE_FRONT_END
+	if (step_ptr->job_ptr->front_end_ptr &&
+	    (step_ptr->start_protocol_ver >
+	     step_ptr->job_ptr->front_end_ptr->protocol_version))
+		step_ptr->start_protocol_ver =
+			step_ptr->job_ptr->front_end_ptr->protocol_version;
+#endif
+
 #ifdef HAVE_BGQ
 	/* Since we have to deal with a conversion between cnodes and
 	   midplanes here the math is really easy, and already has
@@ -2510,22 +2638,56 @@ extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr,
 	 * used by this job step */
 	first_bit = bit_ffs(job_ptr->node_bitmap);
 	last_bit  = bit_fls(job_ptr->node_bitmap);
+
 	for (i = first_bit; i <= last_bit; i++) {
+		uint16_t cpus, cpus_used;
+
 		if (!bit_test(job_ptr->node_bitmap, i))
 			continue;
 		job_node_offset++;
 		if (bit_test(step_ptr->step_node_bitmap, i)) {
+			struct node_record *node_ptr =
+				node_record_table_ptr + i;
+
+#ifndef HAVE_FRONT_END
+			if (step_ptr->start_protocol_ver >
+			    node_ptr->protocol_version)
+				step_ptr->start_protocol_ver =
+					node_ptr->protocol_version;
+#endif
+
 			/* find out the position in the job */
 			pos = bit_get_pos_num(job_resrcs_ptr->node_bitmap, i);
 			if (pos == -1)
 				return NULL;
 			if (pos >= job_resrcs_ptr->nhosts)
 				fatal("step_layout_create: node index bad");
+
+			cpus = job_resrcs_ptr->cpus[pos];
+			cpus_used = job_resrcs_ptr->cpus_used[pos];
+			/* Here we are trying to figure out the number
+			 * of cpus available if we only want to run 1
+			 * thread per core.
+			 */
+			if ((job_resrcs_ptr->whole_node != 1)
+			    && (slurmctld_conf.select_type_param
+				& (CR_CORE | CR_SOCKET))
+			    && (job_ptr->details->cpu_bind_type
+				& CPU_BIND_ONE_THREAD_PER_CORE)) {
+				uint16_t threads;
+				if (slurmctld_conf.fast_schedule)
+					threads = node_ptr->config_ptr->threads;
+				else
+					threads = node_ptr->threads;
+
+				cpus /= threads;
+				cpus_used /= threads;
+			}
+
 			if (step_ptr->exclusive) {
-				usable_cpus = job_resrcs_ptr->cpus[pos] -
-					      job_resrcs_ptr->cpus_used[pos];
+				usable_cpus = cpus - cpus_used;
 			} else
-				usable_cpus = job_resrcs_ptr->cpus[pos];
+				usable_cpus = cpus;
 			if ((step_ptr->pn_min_memory & MEM_PER_CPU) &&
 			    _is_mem_resv()) {
 				uint32_t mem_use = step_ptr->pn_min_memory;
@@ -2601,6 +2763,41 @@ extern slurm_step_layout_t *step_layout_create(struct step_record *step_ptr,
 					plane_size);
 }
 
+/* Translate v14.11 CPU frequency data between old and new formats */
+static void _cpu_freq_new2old(uint32_t *old, uint32_t new_min, uint32_t new_max,
+			      uint32_t new_gov)
+{
+	if (new_gov == CPU_FREQ_CONSERVATIVE) {
+		*old = CPU_FREQ_CONSERVATIVE_OLD;
+	} else if (new_gov == CPU_FREQ_ONDEMAND) {
+		*old = CPU_FREQ_ONDEMAND_OLD;
+	} else if (new_gov == CPU_FREQ_PERFORMANCE) {
+		*old = CPU_FREQ_PERFORMANCE_OLD;
+	} else if (new_gov == CPU_FREQ_POWERSAVE) {
+		*old = CPU_FREQ_POWERSAVE_OLD;
+	} else {
+		*old = new_max;
+	}
+}
+static void _cpu_freq_old2new(uint32_t old, uint32_t *new_min,
+			      uint32_t *new_max, uint32_t *new_gov)
+{
+	*new_min = NO_VAL;
+	*new_max = NO_VAL;
+	if (old == CPU_FREQ_CONSERVATIVE_OLD) {
+		*new_gov = CPU_FREQ_CONSERVATIVE;
+	} else if (old == CPU_FREQ_ONDEMAND_OLD) {
+		*new_gov = CPU_FREQ_ONDEMAND;
+	} else if (old == CPU_FREQ_PERFORMANCE_OLD) {
+		*new_gov = CPU_FREQ_PERFORMANCE;
+	} else if (old == CPU_FREQ_POWERSAVE_OLD) {
+		*new_gov = CPU_FREQ_POWERSAVE;
+	} else {
+		*new_gov = CPU_FREQ_USERSPACE;
+		*new_max = old;
+	}
+}
+
 /* Pack the data for a specific job step record
  * IN step - pointer to a job step record
  * IN/OUT buffer - location to store data, pointers automatically advanced
@@ -2641,7 +2838,7 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer,
 	cpu_cnt = step_ptr->cpu_count;
 #endif
 
-	if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		pack32(step_ptr->job_ptr->array_job_id, buffer);
 		pack32(step_ptr->job_ptr->array_task_id, buffer);
 		pack32(step_ptr->job_ptr->job_id, buffer);
@@ -2649,10 +2846,16 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer,
 		pack16(step_ptr->ckpt_interval, buffer);
 		pack32(step_ptr->job_ptr->user_id, buffer);
 		pack32(cpu_cnt, buffer);
-		pack32(step_ptr->cpu_freq, buffer);
+		pack32(step_ptr->cpu_freq_min, buffer);
+		pack32(step_ptr->cpu_freq_max, buffer);
+		pack32(step_ptr->cpu_freq_gov, buffer);
 		pack32(task_cnt, buffer);
+		if (step_ptr->step_layout)
+			pack32(step_ptr->step_layout->task_dist, buffer);
+		else
+			pack32((uint32_t) SLURM_DIST_UNKNOWN, buffer);
 		pack32(step_ptr->time_limit, buffer);
-		pack16(step_ptr->state, buffer);
+		pack32(step_ptr->state, buffer);
 
 		pack_time(step_ptr->start_time, buffer);
 		if (IS_JOB_SUSPENDED(step_ptr->job_ptr)) {
@@ -2678,15 +2881,20 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer,
 		packstr(step_ptr->gres, buffer);
 		select_g_select_jobinfo_pack(step_ptr->select_jobinfo, buffer,
 					     protocol_version);
-	} else if (protocol_version >= SLURM_2_6_PROTOCOL_VERSION) {
+		packstr(step_ptr->tres_fmt_alloc_str, buffer);
+	} else if (protocol_version >= SLURM_14_03_PROTOCOL_VERSION) {
+		uint32_t utmp32 = 0;
 		pack32(step_ptr->job_ptr->array_job_id, buffer);
-		pack16((uint16_t) step_ptr->job_ptr->array_task_id, buffer);
+		pack32(step_ptr->job_ptr->array_task_id, buffer);
 		pack32(step_ptr->job_ptr->job_id, buffer);
 		pack32(step_ptr->step_id, buffer);
 		pack16(step_ptr->ckpt_interval, buffer);
 		pack32(step_ptr->job_ptr->user_id, buffer);
 		pack32(cpu_cnt, buffer);
-		pack32(step_ptr->cpu_freq, buffer);
+		_cpu_freq_new2old(&utmp32, step_ptr->cpu_freq_min,
+				  step_ptr->cpu_freq_max,
+				  step_ptr->cpu_freq_gov);
+		pack32(utmp32, buffer);
 		pack32(task_cnt, buffer);
 		pack32(step_ptr->time_limit, buffer);
 		pack16(step_ptr->state, buffer);
@@ -2702,7 +2910,10 @@ static void _pack_ctld_job_step_info(struct step_record *step_ptr, Buf buffer,
 		}
 		pack_time(run_time, buffer);
 
-		packstr(step_ptr->job_ptr->partition, buffer);
+		if (step_ptr->job_ptr->part_ptr)
+			packstr(step_ptr->job_ptr->part_ptr->name, buffer);
+		else
+			packstr(step_ptr->job_ptr->partition, buffer);
 		packstr(step_ptr->resv_ports, buffer);
 		packstr(node_list, buffer);
 		packstr(step_ptr->name, buffer);
@@ -3070,6 +3281,22 @@ extern int step_partial_comp(step_complete_msg_t *req, uid_t uid,
 	}
 
 	step_ptr = find_step_record(job_ptr, req->job_step_id);
+	if ((step_ptr == NULL) && (req->job_step_id == SLURM_EXTERN_CONT)) {
+		step_ptr = _create_step_record(job_ptr, 0);
+		checkpoint_alloc_jobinfo(&step_ptr->check_job);
+		step_ptr->ext_sensors = ext_sensors_alloc();
+		step_ptr->name = xstrdup("extern");
+		step_ptr->select_jobinfo = select_g_select_jobinfo_alloc();
+		step_ptr->state = JOB_RUNNING;
+		step_ptr->start_time = job_ptr->start_time;
+		step_ptr->step_id = SLURM_EXTERN_CONT;
+		if (job_ptr->node_bitmap) {
+			step_ptr->step_node_bitmap =
+				bit_copy(job_ptr->node_bitmap);
+		}
+		step_ptr->time_last_active = time(NULL);
+		jobacct_storage_g_step_start(acct_db_conn, step_ptr);
+	}
 	if (step_ptr == NULL) {
 		info("step_partial_comp: StepID=%u.%u invalid",
 		     req->job_id, req->job_step_id);
@@ -3365,7 +3592,9 @@ extern void dump_job_step_state(struct job_record *job_ptr,
 	} else
 		pack32((uint32_t) 0, buffer);
 	pack32(step_ptr->time_limit, buffer);
-	pack32(step_ptr->cpu_freq, buffer);
+	pack32(step_ptr->cpu_freq_min, buffer);
+	pack32(step_ptr->cpu_freq_max, buffer);
+	pack32(step_ptr->cpu_freq_gov, buffer);
 
 	pack_time(step_ptr->start_time, buffer);
 	pack_time(step_ptr->pre_sus_time, buffer);
@@ -3394,7 +3623,8 @@ extern void dump_job_step_state(struct job_record *job_ptr,
 				SLURM_PROTOCOL_VERSION);
 	select_g_select_jobinfo_pack(step_ptr->select_jobinfo, buffer,
 				     SLURM_PROTOCOL_VERSION);
-
+	packstr(step_ptr->tres_alloc_str, buffer);
+	packstr(step_ptr->tres_fmt_alloc_str, buffer);
 }
 
 /*
@@ -3412,18 +3642,19 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 	uint16_t start_protocol_ver = SLURM_MIN_PROTOCOL_VERSION;
 	uint16_t ckpt_interval, cpus_per_task, resv_port_cnt, state;
 	uint32_t core_size, cpu_count, exit_code, pn_min_memory, name_len;
-	uint32_t step_id, time_limit, cpu_freq;
+	uint32_t step_id, time_limit, cpu_freq_min, cpu_freq_max, cpu_freq_gov;
 	time_t start_time, pre_sus_time, tot_sus_time, ckpt_time;
 	char *host = NULL, *ckpt_dir = NULL, *core_job = NULL;
 	char *resv_ports = NULL, *name = NULL, *network = NULL;
 	char *bit_fmt = NULL, *gres = NULL;
+	char *tres_alloc_str = NULL, *tres_fmt_alloc_str = NULL;
 	switch_jobinfo_t *switch_tmp = NULL;
 	check_jobinfo_t check_tmp = NULL;
 	slurm_step_layout_t *step_layout = NULL;
 	List gres_list = NULL;
 	dynamic_plugin_data_t *select_jobinfo = NULL;
 
-	if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+	if (protocol_version >= SLURM_15_08_PROTOCOL_VERSION) {
 		safe_unpack32(&step_id, buffer);
 		safe_unpack16(&cyclic_alloc, buffer);
 		safe_unpack16(&port, buffer);
@@ -3446,7 +3677,76 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 		if (core_size)
 			safe_unpackstr_xmalloc(&core_job, &name_len, buffer);
 		safe_unpack32(&time_limit, buffer);
-		safe_unpack32(&cpu_freq, buffer);
+		safe_unpack32(&cpu_freq_min, buffer);
+		safe_unpack32(&cpu_freq_max, buffer);
+		safe_unpack32(&cpu_freq_gov, buffer);
+
+		safe_unpack_time(&start_time, buffer);
+		safe_unpack_time(&pre_sus_time, buffer);
+		safe_unpack_time(&tot_sus_time, buffer);
+		safe_unpack_time(&ckpt_time, buffer);
+
+		safe_unpackstr_xmalloc(&host, &name_len, buffer);
+		safe_unpackstr_xmalloc(&resv_ports, &name_len, buffer);
+		safe_unpackstr_xmalloc(&name, &name_len, buffer);
+		safe_unpackstr_xmalloc(&network, &name_len, buffer);
+		safe_unpackstr_xmalloc(&ckpt_dir, &name_len, buffer);
+
+		safe_unpackstr_xmalloc(&gres, &name_len, buffer);
+		if (gres_plugin_step_state_unpack(&gres_list, buffer,
+						  job_ptr->job_id, step_id,
+						  protocol_version)
+		    != SLURM_SUCCESS)
+			goto unpack_error;
+
+		safe_unpack16(&batch_step, buffer);
+		if (!batch_step) {
+			if (unpack_slurm_step_layout(&step_layout, buffer,
+						     protocol_version))
+				goto unpack_error;
+			switch_g_alloc_jobinfo(&switch_tmp,
+					       job_ptr->job_id, step_id);
+			if (switch_g_unpack_jobinfo(switch_tmp, buffer,
+						    protocol_version))
+				goto unpack_error;
+		}
+		checkpoint_alloc_jobinfo(&check_tmp);
+		if (checkpoint_unpack_jobinfo(check_tmp, buffer,
+					      protocol_version))
+			goto unpack_error;
+
+		if (select_g_select_jobinfo_unpack(&select_jobinfo, buffer,
+						   protocol_version))
+			goto unpack_error;
+		safe_unpackstr_xmalloc(&tres_alloc_str, &name_len, buffer);
+		safe_unpackstr_xmalloc(&tres_fmt_alloc_str, &name_len, buffer);
+	} else if (protocol_version >= SLURM_14_11_PROTOCOL_VERSION) {
+		uint32_t utmp32 = 0;
+		safe_unpack32(&step_id, buffer);
+		safe_unpack16(&cyclic_alloc, buffer);
+		safe_unpack16(&port, buffer);
+		safe_unpack16(&ckpt_interval, buffer);
+		safe_unpack16(&cpus_per_task, buffer);
+		safe_unpack16(&resv_port_cnt, buffer);
+		safe_unpack16(&state, buffer);
+		safe_unpack16(&start_protocol_ver, buffer);
+
+		safe_unpack8(&no_kill, buffer);
+
+		safe_unpack32(&cpu_count, buffer);
+		safe_unpack32(&pn_min_memory, buffer);
+		safe_unpack32(&exit_code, buffer);
+		if (exit_code != NO_VAL) {
+			safe_unpackstr_xmalloc(&bit_fmt, &name_len, buffer);
+			safe_unpack16(&bit_cnt, buffer);
+		}
+		safe_unpack32(&core_size, buffer);
+		if (core_size)
+			safe_unpackstr_xmalloc(&core_job, &name_len, buffer);
+		safe_unpack32(&time_limit, buffer);
+		safe_unpack32(&utmp32, buffer);
+		_cpu_freq_old2new(utmp32, &cpu_freq_min, &cpu_freq_max,
+				  &cpu_freq_gov);
 
 		safe_unpack_time(&start_time, buffer);
 		safe_unpack_time(&pre_sus_time, buffer);
@@ -3486,6 +3786,7 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 						   protocol_version))
 			goto unpack_error;
 	} else if (protocol_version >= SLURM_MIN_PROTOCOL_VERSION) {
+		uint32_t utmp32 = 0;
 		safe_unpack32(&step_id, buffer);
 		safe_unpack16(&cyclic_alloc, buffer);
 		safe_unpack16(&port, buffer);
@@ -3507,7 +3808,9 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 		if (core_size)
 			safe_unpackstr_xmalloc(&core_job, &name_len, buffer);
 		safe_unpack32(&time_limit, buffer);
-		safe_unpack32(&cpu_freq, buffer);
+		safe_unpack32(&utmp32, buffer);
+		_cpu_freq_old2new(utmp32, &cpu_freq_min, &cpu_freq_max,
+				  &cpu_freq_gov);
 
 		safe_unpack_time(&start_time, buffer);
 		safe_unpack_time(&pre_sus_time, buffer);
@@ -3566,7 +3869,7 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 
 	step_ptr = find_step_record(job_ptr, step_id);
 	if (step_ptr == NULL)
-		step_ptr = _create_step_record(job_ptr);
+		step_ptr = _create_step_record(job_ptr, start_protocol_ver);
 	if (step_ptr == NULL)
 		goto unpack_error;
 
@@ -3604,8 +3907,19 @@ extern int load_step_state(struct job_record *job_ptr, Buf buffer,
 	step_ptr->step_layout  = step_layout;
 
 	step_ptr->switch_job   = switch_tmp;
+
+	xfree(step_ptr->tres_alloc_str);
+	step_ptr->tres_alloc_str     = tres_alloc_str;
+	tres_alloc_str = NULL;
+
+	xfree(step_ptr->tres_fmt_alloc_str);
+	step_ptr->tres_fmt_alloc_str = tres_fmt_alloc_str;
+	tres_fmt_alloc_str = NULL;
+
 	step_ptr->check_job    = check_tmp;
-	step_ptr->cpu_freq     = cpu_freq;
+	step_ptr->cpu_freq_min = cpu_freq_min;
+	step_ptr->cpu_freq_max = cpu_freq_max;
+	step_ptr->cpu_freq_gov = cpu_freq_gov;
 	step_ptr->state        = state;
 	step_ptr->start_protocol_ver = start_protocol_ver;
 
@@ -3649,14 +3963,16 @@ unpack_error:
 	xfree(network);
 	xfree(ckpt_dir);
 	xfree(gres);
-	if (gres_list)
-		list_destroy(gres_list);
+	FREE_NULL_LIST(gres_list);
 	xfree(bit_fmt);
 	xfree(core_job);
 	if (switch_tmp)
 		switch_g_free_jobinfo(switch_tmp);
 	slurm_step_layout_destroy(step_layout);
 	select_g_select_jobinfo_free(select_jobinfo);
+	xfree(tres_alloc_str);
+	xfree(tres_fmt_alloc_str);
+
 	return SLURM_FAILURE;
 }
 
diff --git a/src/slurmctld/trigger_mgr.c b/src/slurmctld/trigger_mgr.c
index a39c48799..534b07615 100644
--- a/src/slurmctld/trigger_mgr.c
+++ b/src/slurmctld/trigger_mgr.c
@@ -1734,10 +1734,7 @@ extern void trigger_process(void)
 /* Free all allocated memory */
 extern void trigger_fini(void)
 {
-	if (trigger_list != NULL) {
-		list_destroy(trigger_list);
-		trigger_list = NULL;
-	}
+	FREE_NULL_LIST(trigger_list);
 	FREE_NULL_BITMAP(trigger_down_front_end_bitmap);
 	FREE_NULL_BITMAP(trigger_up_front_end_bitmap);
 	FREE_NULL_BITMAP(trigger_down_nodes_bitmap);
diff --git a/src/slurmd/Makefile.in b/src/slurmd/Makefile.in
index fd7d969ad..cd05a5971 100644
--- a/src/slurmd/Makefile.in
+++ b/src/slurmd/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -249,6 +252,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -298,8 +303,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -318,6 +327,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -361,6 +373,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -384,6 +397,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/slurmd/common/Makefile.in b/src/slurmd/common/Makefile.in
index bd0ff4e65..0911e842a 100644
--- a/src/slurmd/common/Makefile.in
+++ b/src/slurmd/common/Makefile.in
@@ -102,6 +102,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -110,10 +111,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -126,7 +129,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -262,6 +265,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -311,8 +316,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -331,6 +340,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -374,6 +386,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -397,6 +410,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/slurmd/common/reverse_tree.h b/src/slurmd/common/reverse_tree.h
index 5fc004e72..52b81b45f 100644
--- a/src/slurmd/common/reverse_tree.h
+++ b/src/slurmd/common/reverse_tree.h
@@ -6,32 +6,32 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
- *  
+ *
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://slurm.schedmd.com/>.
  *  Please also read the included file: DISCLAIMER.
- *  
+ *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
  *  Software Foundation; either version 2 of the License, or (at your option)
  *  any later version.
  *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
  *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
+ *  version.  If you delete this exception statement from all source files in
  *  the program, then also delete it here.
- *  
+ *
  *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
  *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
  *  details.
- *  
+ *
  *  You should have received a copy of the GNU General Public License along
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
diff --git a/src/slurmd/common/reverse_tree_math.c b/src/slurmd/common/reverse_tree_math.c
index 7c8f55403..4ca21d9c3 100644
--- a/src/slurmd/common/reverse_tree_math.c
+++ b/src/slurmd/common/reverse_tree_math.c
@@ -6,32 +6,36 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
- *  
+ *  Portions copyright (C) 2014 Institute of Semiconductor Physics
+ *                     Siberian Branch of Russian Academy of Science
+ *  Written by Artem Polyakov <artpol84@gmail.com>.
+ *  All rights reserved.
+ *
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://slurm.schedmd.com/>.
  *  Please also read the included file: DISCLAIMER.
- *  
+ *
  *  SLURM is free software; you can redistribute it and/or modify it under
  *  the terms of the GNU General Public License as published by the Free
  *  Software Foundation; either version 2 of the License, or (at your option)
  *  any later version.
  *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
+ *  In addition, as a special exception, the copyright holders give permission
+ *  to link the code of portions of this program with the OpenSSL library under
+ *  certain conditions as described in each individual source file, and
+ *  distribute linked combinations including the two. You must obey the GNU
+ *  General Public License in all respects for all of the code used other than
+ *  OpenSSL. If you modify file(s) with this exception, you may extend this
+ *  exception to your version of the file(s), but you are not obligated to do
  *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
+ *  version.  If you delete this exception statement from all source files in
  *  the program, then also delete it here.
- *  
+ *
  *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
  *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
  *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
  *  details.
- *  
+ *
  *  You should have received a copy of the GNU General Public License along
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
@@ -138,7 +142,7 @@ reverse_tree_info(int rank, int num_nodes, int width,
 	max_children = geometric_series(width, *max_depth);
 	*depth = 0;
 	search_tree(rank, 0, max_children, width, &p, &c, depth);
-	
+
 	if ((rank + c) >= num_nodes)
 		c = num_nodes - rank - 1;
 
@@ -147,19 +151,87 @@ reverse_tree_info(int rank, int num_nodes, int width,
 	return;
 }
 
-#if 0
-main()
+int reverse_tree_direct_children(int rank, int num_nodes, int width,
+				 int depth, int *children)
 {
+	int current, child_distance;
+	int max_depth, sub_depth, max_rank_children;
 	int i;
+
+	max_depth = dep(num_nodes, width);
+	sub_depth = max_depth - depth;
+	if( sub_depth == 0 ){
+		return 0;
+	}
+	max_rank_children = geometric_series(width, sub_depth);
+	current = rank + 1;
+	child_distance = (max_rank_children / width);
+	for (i = 0; i < width && current < num_nodes; i++) {
+		children[i] = current;
+		current += child_distance;
+	}
+	return i;
+}
+
+#if 0
+
+// Dumb brute force function
+static int dumb_direct_children(int *children, int width, int id,
+				int max_node_id)
+{
+	int child;
+	int count = 0;
+	for(child = id+1; child < max_node_id; child++){
+		int parent_id, child_num, depth, max_depth;
+		reverse_tree_info(child, max_node_id, width,
+				  &parent_id, &child_num,
+				  &depth, &max_depth);
+		if( parent_id == id ){
+			children[count++] = child;
+		}
+	}
+	return count;
+}
+
+int
+main(int argc, char **argv)
+{
+	int i, j;
 	int n = 8192;
 	int w = 5;
 
-	int parent, children;
+	int parent, children, depth, maxdepth;
 
 	for (i = 0; i < n; i++) {
-		tree_parent_and_children(i, n, w, &parent, &children);
-		printf("%d : %d %d\n", i, parent, children);
+		int children1[w], children2[w];
+		int cnt1, cnt2;
+
+		reverse_tree_info(i, n, w, &parent,
+				  &children, &depth, &maxdepth);
+		printf("\
+%d : par: %d nchild: %d depth: %d, maxdepth: %d\n",
+		       i, parent, children, depth, maxdepth);
+		cnt1 = dumb_direct_children(children1, w, i, n);
+		cnt2 = reverce_tree_direct_children(i, n, w, depth, children2);
+		if (cnt1 != cnt2 ) {
+			printf("\
+Direct children sanity check error: cnt1 = %d, cnt2 = %d\n", cnt1, cnt2);
+			return -1;
+		}
+
+		for(j = 0; j < cnt1; j++){
+
+			if (children1[j] != children2[j]) {
+				printf("\
+Direct children sanity check error: cnt1 = %d, cnt2 = %d\n", cnt1, cnt2);
+				printf("\
+Failed on %d'th element: children1[%d] = %d, children2[%d] = %d\n",
+				       j, j, children1[j], j, children2[j]);
+				return -1;
+			}
+		}
 	}
 
+	return 0;
 }
 #endif
diff --git a/src/slurmd/common/reverse_tree_math.h b/src/slurmd/common/reverse_tree_math.h
index 071875282..f11d6869f 100644
--- a/src/slurmd/common/reverse_tree_math.h
+++ b/src/slurmd/common/reverse_tree_math.h
@@ -6,6 +6,10 @@
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Christopher J. Morrone <morrone2@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
+ *  Portions copyright (C) 2014 Institute of Semiconductor Physics
+ *                     Siberian Branch of Russian Academy of Science
+ *  Written by Artem Polyakov <artpol84@gmail.com>.
+ *  All rights reserved.
  *  
  *  This file is part of SLURM, a resource management program.
  *  For details, see <http://slurm.schedmd.com/>.
@@ -58,5 +62,7 @@ void reverse_tree_info(int rank, int num_nodes, int width,
 		       int *parent, int *num_children,
 		       int *depth, int *total_depth);
 
+int reverse_tree_direct_children(int rank, int num_nodes,
+			int width, int depth, int *children);
 
 #endif /* !_REVERSE_TREE_MATH_H */
diff --git a/src/slurmd/common/run_script.c b/src/slurmd/common/run_script.c
index 9e5f8bb53..bc2070cc5 100644
--- a/src/slurmd/common/run_script.c
+++ b/src/slurmd/common/run_script.c
@@ -76,18 +76,16 @@ int waitpid_timeout (const char *name, pid_t pid, int *pstatus, int timeout)
 		if (rc < 0) {
 			if (errno == EINTR)
 				continue;
-			error("waidpid: %m");
+			error("waitpid: %m");
 			return (-1);
-		}
-		else if (timeout_ms <= 0) {
+		} else if (timeout_ms <= 0) {
 			info ("%s%stimeout after %ds: killing pgid %d",
 			      name != NULL ? name : "",
 			      name != NULL ? ": " : "",
 			      timeout, pid);
 			killpg(pid, SIGKILL);
 			options = 0;
-		}
-		else {
+		} else {
 			poll(NULL, 0, delay);
 			timeout_ms -= delay;
 			delay = MIN (timeout_ms, MIN(max_delay, delay*2));
@@ -236,7 +234,7 @@ int run_script(const char *name, const char *pattern, uint32_t job_id,
 
 	}
 	list_iterator_destroy (i);
-	list_destroy (l);
+	FREE_NULL_LIST (l);
 
 	return rc;
 }
diff --git a/src/slurmd/common/slurmd_cgroup.c b/src/slurmd/common/slurmd_cgroup.c
index c3c26769e..f19ad6e7e 100644
--- a/src/slurmd/common/slurmd_cgroup.c
+++ b/src/slurmd/common/slurmd_cgroup.c
@@ -438,6 +438,13 @@ extern int set_system_cgroup_mem_limit(uint32_t mem_spec_limit)
 	return SLURM_SUCCESS;
 }
 
+extern int disable_system_cgroup_mem_oom()
+{
+	/* 1: disables the oom killer */
+	return xcgroup_set_uint64_param(&system_memory_cg, "memory.oom_control",
+					1);
+}
+
 extern int attach_system_cpuset_pid(pid_t pid)
 {
 	if (xcgroup_add_pids(&system_cpuset_cg, &pid, 1) != XCGROUP_SUCCESS)
diff --git a/src/slurmd/common/slurmd_cgroup.h b/src/slurmd/common/slurmd_cgroup.h
index 9d8300332..babb4f8f5 100644
--- a/src/slurmd/common/slurmd_cgroup.h
+++ b/src/slurmd/common/slurmd_cgroup.h
@@ -58,6 +58,9 @@ extern int set_system_cgroup_cpus(char *phys_core_str);
 /* Set memory limit in system memory cgroup */
 extern int set_system_cgroup_mem_limit(uint32_t mem_spec_limit);
 
+/* Disable OOM killer in system memory cgroup */
+extern int disable_system_cgroup_mem_oom();
+
 /* Attach pid to system cpuset cgroup */
 extern int attach_system_cpuset_pid(pid_t pid);
 
diff --git a/src/slurmd/common/slurmstepd_init.c b/src/slurmd/common/slurmstepd_init.c
index 6863bcad1..501c172b1 100644
--- a/src/slurmd/common/slurmstepd_init.c
+++ b/src/slurmd/common/slurmstepd_init.c
@@ -67,12 +67,13 @@ extern void pack_slurmd_conf_lite(slurmd_conf_t *conf, Buf buffer)
 	pack32(conf->daemonize, buffer);
 	pack32((uint32_t)conf->slurm_user_id, buffer);
 	pack16(conf->use_pam, buffer);
-	pack16(conf->task_plugin_param, buffer);
+	pack32(conf->task_plugin_param, buffer);
 	packstr(conf->node_topo_addr, buffer);
 	packstr(conf->node_topo_pattern, buffer);
 	pack32((uint32_t)conf->port, buffer);
 	pack16(conf->log_fmt, buffer);
 	pack16(conf->mem_limit_enforce, buffer);
+	pack64(conf->msg_aggr_window_msgs, buffer);
 }
 
 extern int unpack_slurmd_conf_lite_no_alloc(slurmd_conf_t *conf, Buf buffer)
@@ -107,13 +108,15 @@ extern int unpack_slurmd_conf_lite_no_alloc(slurmd_conf_t *conf, Buf buffer)
 	safe_unpack32(&uint32_tmp, buffer);
 	conf->slurm_user_id = (uid_t)uint32_tmp;
 	safe_unpack16(&conf->use_pam, buffer);
-	safe_unpack16(&conf->task_plugin_param, buffer);
+	safe_unpack32(&conf->task_plugin_param, buffer);
 	safe_unpackstr_xmalloc(&conf->node_topo_addr, &uint32_tmp, buffer);
 	safe_unpackstr_xmalloc(&conf->node_topo_pattern, &uint32_tmp, buffer);
 	safe_unpack32(&uint32_tmp, buffer);
+	conf->port = uint32_tmp;
 	safe_unpack16(&conf->log_fmt, buffer);
 	safe_unpack16(&conf->mem_limit_enforce, buffer);
-	conf->port = uint32_tmp;
+	safe_unpack64(&conf->msg_aggr_window_msgs, buffer);
+
 	return SLURM_SUCCESS;
 
 unpack_error:
diff --git a/src/slurmd/common/xcgroup.c b/src/slurmd/common/xcgroup.c
index 6c209c5bf..f37bd5d95 100644
--- a/src/slurmd/common/xcgroup.c
+++ b/src/slurmd/common/xcgroup.c
@@ -276,6 +276,7 @@ int xcgroup_ns_is_available(xcgroup_ns_t* cgns)
 
 	if (xcgroup_create(cgns, &cg, "/", 0, 0) == XCGROUP_ERROR)
 		return 0;
+
 	if (xcgroup_get_param(&cg, "release_agent",
 			      &value, &s) != XCGROUP_SUCCESS)
 		fstatus = 0;
@@ -538,10 +539,15 @@ int xcgroup_load(xcgroup_ns_t* cgns, xcgroup_t* cg, char* uri)
 
 int xcgroup_delete(xcgroup_t* cg)
 {
-	if (rmdir(cg->path))
+	/*
+	 *  Simply delete cgroup with rmdir(2). If cgroup doesn't
+	 *   exist, do not propagate error back to caller.
+	 */
+	if ((rmdir(cg->path) < 0) && (errno != ENOENT)) {
+		verbose ("xcgroup: rmdir(%s): %m", cg->path);
 		return XCGROUP_ERROR;
-	else
-		return XCGROUP_SUCCESS;
+	}
+	return XCGROUP_SUCCESS;
 }
 
 static int cgroup_procs_readable (xcgroup_t *cg)
diff --git a/src/slurmd/slurmd/Makefile.am b/src/slurmd/slurmd/Makefile.am
index b191bbf68..8e82a16f6 100644
--- a/src/slurmd/slurmd/Makefile.am
+++ b/src/slurmd/slurmd/Makefile.am
@@ -19,7 +19,6 @@ SLURMD_SOURCES = \
 	req.c req.h \
 	get_mach_stat.c get_mach_stat.h	\
 	read_proc.c 	        	\
-	xcpu.c xcpu.h \
 	slurmd_plugstack.c slurmd_plugstack.h
 
 slurmd_SOURCES = $(SLURMD_SOURCES)
diff --git a/src/slurmd/slurmd/Makefile.in b/src/slurmd/slurmd/Makefile.in
index 3a40b0517..724aab846 100644
--- a/src/slurmd/slurmd/Makefile.in
+++ b/src/slurmd/slurmd/Makefile.in
@@ -102,6 +102,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -110,10 +111,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -126,7 +129,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -136,7 +139,7 @@ CONFIG_CLEAN_VPATH_FILES =
 am__installdirs = "$(DESTDIR)$(sbindir)"
 PROGRAMS = $(sbin_PROGRAMS)
 am__objects_1 = slurmd.$(OBJEXT) req.$(OBJEXT) get_mach_stat.$(OBJEXT) \
-	read_proc.$(OBJEXT) xcpu.$(OBJEXT) slurmd_plugstack.$(OBJEXT)
+	read_proc.$(OBJEXT) slurmd_plugstack.$(OBJEXT)
 am_slurmd_OBJECTS = $(am__objects_1)
 slurmd_OBJECTS = $(am_slurmd_OBJECTS)
 am__DEPENDENCIES_1 =
@@ -253,6 +256,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -302,8 +307,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -322,6 +331,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -365,6 +377,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -388,6 +401,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -464,7 +478,6 @@ SLURMD_SOURCES = \
 	req.c req.h \
 	get_mach_stat.c get_mach_stat.h	\
 	read_proc.c 	        	\
-	xcpu.c xcpu.h \
 	slurmd_plugstack.c slurmd_plugstack.h
 
 slurmd_SOURCES = $(SLURMD_SOURCES)
@@ -572,7 +585,6 @@ distclean-compile:
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/req.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmd.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/slurmd_plugstack.Po@am__quote@
-@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/xcpu.Po@am__quote@
 
 .c.o:
 @am__fastdepCC_TRUE@	$(AM_V_CC)$(COMPILE) -MT $@ -MD -MP -MF $(DEPDIR)/$*.Tpo -c -o $@ $<
diff --git a/src/slurmd/slurmd/get_mach_stat.c b/src/slurmd/slurmd/get_mach_stat.c
index d7c5eb1a0..78fe80328 100644
--- a/src/slurmd/slurmd/get_mach_stat.c
+++ b/src/slurmd/slurmd/get_mach_stat.c
@@ -308,8 +308,17 @@ extern int get_up_time(uint32_t *up_time)
 		*up_time = 0;
 		return errno;
 	}
-
+#if defined(_TEST_REBOOT)
+{
+	/* Make node look like it rebooted when slurmd started, for testing */
+	static uint32_t orig_uptime = 0;
+	if (orig_uptime == 0)
+		orig_uptime = info.uptime;
+	*up_time = info.uptime - orig_uptime;
+}
+#else
 	*up_time = info.uptime;
+#endif
 #endif
 	return 0;
 }
@@ -352,6 +361,25 @@ extern int get_cpu_load(uint32_t *cpu_load)
 	return 0;
 }
 
+extern int get_free_mem(uint32_t *free_mem)
+{
+#if defined(HAVE_AIX) || defined(__sun) || defined(__APPLE__) || defined(__NetBSD__) || defined(__FreeBSD__) || defined(__CYGWIN__)
+	/* Not sure how to get CPU load on above systems.
+	 * Perhaps some method below works. */
+	*free_mem = 0;
+#else
+	struct sysinfo info;
+
+	if (sysinfo(&info) < 0) {
+		*free_mem = 0;
+		return errno;
+	}
+
+	*free_mem = (((uint64_t )info.freeram)*info.mem_unit)/(1024*1024);
+#endif
+	return 0;
+}
+
 #ifdef USE_CPU_SPEED
 /* _chk_cpuinfo_str
  *	check a line of cpuinfo data (buffer) for a keyword.  If it
diff --git a/src/slurmd/slurmd/get_mach_stat.h b/src/slurmd/slurmd/get_mach_stat.h
index 83bf9a433..951586419 100644
--- a/src/slurmd/slurmd/get_mach_stat.h
+++ b/src/slurmd/slurmd/get_mach_stat.h
@@ -52,6 +52,7 @@
 #endif  /*  HAVE_CONFIG_H */
 
 extern int get_cpu_load(uint32_t *cpu_load);
+extern int get_free_mem(uint32_t *free_mem);
 extern int get_mach_name(char *node_name);
 extern int get_memory(uint32_t *real_memory);
 extern int get_tmp_disk(uint32_t *tmp_disk, char *tmp_fs);
diff --git a/src/slurmd/slurmd/req.c b/src/slurmd/slurmd/req.c
index bc53e1f4d..4cb9e0c8f 100644
--- a/src/slurmd/slurmd/req.c
+++ b/src/slurmd/slurmd/req.c
@@ -57,6 +57,7 @@
 #include <unistd.h>
 #include <utime.h>
 
+#include "src/common/callerid.h"
 #include "src/common/cpu_frequency.h"
 #include "src/common/env.h"
 #include "src/common/fd.h"
@@ -66,6 +67,7 @@
 #include "src/common/list.h"
 #include "src/common/log.h"
 #include "src/common/macros.h"
+#include "src/common/msg_aggr.h"
 #include "src/common/node_select.h"
 #include "src/common/plugstack.h"
 #include "src/common/read_config.h"
@@ -76,6 +78,7 @@
 #include "src/common/slurm_protocol_defs.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_protocol_interface.h"
+#include "src/common/slurm_strcasestr.h"
 #include "src/common/stepd_api.h"
 #include "src/common/uid.h"
 #include "src/common/util-net.h"
@@ -84,7 +87,6 @@
 
 #include "src/slurmd/slurmd/get_mach_stat.h"
 #include "src/slurmd/slurmd/slurmd.h"
-#include "src/slurmd/slurmd/xcpu.h"
 
 #include "src/slurmd/common/job_container_plugin.h"
 #include "src/slurmd/common/proctrack.h"
@@ -98,6 +100,8 @@
 #define RETRY_DELAY 15		/* retry every 15 seconds */
 #define MAX_RETRY   240		/* retry 240 times (one hour max) */
 
+#define EPIL_RETRY_MAX 2	/* max retries of epilog complete message */
+
 #ifndef MAXHOSTNAMELEN
 #define MAXHOSTNAMELEN	64
 #endif
@@ -156,6 +160,7 @@ static void _launch_complete_rm(uint32_t job_id);
 static void _launch_complete_wait(uint32_t job_id);
 static int  _launch_job_fail(uint32_t job_id, uint32_t slurm_rc);
 static void _note_batch_job_finished(uint32_t job_id);
+static int  _prolog_is_running (uint32_t jobid);
 static int  _step_limits_match(void *x, void *key);
 static int  _terminate_all_steps(uint32_t jobid, bool batch);
 static void _rpc_launch_tasks(slurm_msg_t *);
@@ -183,12 +188,14 @@ static int  _rpc_health_check(slurm_msg_t *);
 static int  _rpc_acct_gather_update(slurm_msg_t *);
 static int  _rpc_acct_gather_energy(slurm_msg_t *);
 static int  _rpc_step_complete(slurm_msg_t *msg);
+static int  _rpc_step_complete_aggr(slurm_msg_t *msg);
 static int  _rpc_stat_jobacct(slurm_msg_t *msg);
 static int  _rpc_list_pids(slurm_msg_t *msg);
 static int  _rpc_daemon_status(slurm_msg_t *msg);
 static int  _run_epilog(job_env_t *job_env);
 static int  _run_prolog(job_env_t *job_env, slurm_cred_t *cred);
 static void _rpc_forward_data(slurm_msg_t *msg);
+static int  _rpc_network_callerid(slurm_msg_t *msg);
 
 
 static bool _pause_for_job_completion(uint32_t jobid, char *nodes,
@@ -206,15 +213,15 @@ static uid_t _get_job_uid(uint32_t jobid);
 
 static gids_t *_gids_cache_lookup(char *user, gid_t gid);
 
-static int  _add_starting_step(slurmd_step_type_t type, void *req);
-static int  _remove_starting_step(slurmd_step_type_t type, void *req);
+static int  _add_starting_step(uint16_t type, void *req);
+static int  _remove_starting_step(uint16_t type, void *req);
 static int  _compare_starting_steps(void *s0, void *s1);
 static int  _wait_for_starting_step(uint32_t job_id, uint32_t step_id);
 static bool _step_is_starting(uint32_t job_id, uint32_t step_id);
 
 static void _add_job_running_prolog(uint32_t job_id);
 static void _remove_job_running_prolog(uint32_t job_id);
-static int  _compare_job_running_prolog(void *s0, void *s1);
+static int  _match_jobid(void *s0, void *s1);
 static void _wait_for_job_running_prolog(uint32_t job_id);
 
 /*
@@ -252,6 +259,7 @@ static uint32_t active_job_id[JOB_STATE_CNT];
 
 static pthread_mutex_t prolog_mutex = PTHREAD_MUTEX_INITIALIZER;
 
+
 void
 slurmd_req(slurm_msg_t *msg)
 {
@@ -260,14 +268,10 @@ slurmd_req(slurm_msg_t *msg)
 	if (msg == NULL) {
 		if (startup == 0)
 			startup = time(NULL);
-		if (waiters) {
-			list_destroy(waiters);
-			waiters = NULL;
-		}
+		FREE_NULL_LIST(waiters);
 		slurm_mutex_lock(&job_limits_mutex);
 		if (job_limits_list) {
-			list_destroy(job_limits_list);
-			job_limits_list = NULL;
+			FREE_NULL_LIST(job_limits_list);
 			job_limits_loaded = false;
 		}
 		slurm_mutex_unlock(&job_limits_mutex);
@@ -424,6 +428,10 @@ slurmd_req(slurm_msg_t *msg)
 		(void) _rpc_step_complete(msg);
 		slurm_free_step_complete_msg(msg->data);
 		break;
+	case REQUEST_STEP_COMPLETE_AGGR:
+		(void) _rpc_step_complete_aggr(msg);
+		slurm_free_step_complete_msg(msg->data);
+		break;
 	case REQUEST_JOB_STEP_STAT:
 		(void) _rpc_stat_jobacct(msg);
 		slurm_free_job_step_id_msg(msg->data);
@@ -444,6 +452,21 @@ slurmd_req(slurm_msg_t *msg)
 		_rpc_forward_data(msg);
 		slurm_free_forward_data_msg(msg->data);
 		break;
+	case REQUEST_NETWORK_CALLERID:
+		debug2("Processing RPC: REQUEST_NETWORK_CALLERID");
+		_rpc_network_callerid(msg);
+		slurm_free_network_callerid_msg(msg->data);
+		break;
+	case MESSAGE_COMPOSITE:
+		error("Processing RPC: MESSAGE_COMPOSITE: "
+		      "This should never happen");
+		msg_aggr_add_msg(msg, 0, NULL);
+		break;
+	case RESPONSE_MESSAGE_COMPOSITE:
+		debug2("Processing RPC: RESPONSE_MESSAGE_COMPOSITE");
+		msg_aggr_resp(msg);
+		slurm_free_composite_msg(msg->data);
+		break;
 	case REQUEST_SUSPEND:	/* Defunct, see REQUEST_SUSPEND_INT */
 	default:
 		error("slurmd_req: invalid request msg type %d",
@@ -470,9 +493,9 @@ static int _send_slurmd_conf_lite (int fd, slurmd_conf_t *cf)
 }
 
 static int
-_send_slurmstepd_init(int fd, slurmd_step_type_t type, void *req,
+_send_slurmstepd_init(int fd, int type, void *req,
 		      slurm_addr_t *cli, slurm_addr_t *self,
-		      hostset_t step_hset)
+		      hostset_t step_hset, uint16_t protocol_version)
 {
 	int len = 0;
 	Buf buffer = NULL;
@@ -481,7 +504,7 @@ _send_slurmstepd_init(int fd, slurmd_step_type_t type, void *req,
 	gid_t gid = (uid_t)-1;
 	gids_t *gids = NULL;
 
-	int rank;
+	int rank, proto;
 	int parent_rank, children, depth, max_depth;
 	char *parent_alias = NULL;
 	char *user_name = NULL;
@@ -493,17 +516,22 @@ _send_slurmstepd_init(int fd, slurmd_step_type_t type, void *req,
 	/* send type over to slurmstepd */
 	safe_write(fd, &type, sizeof(int));
 
-	/* step_hset can be NULL for batch scripts, OR if the user is
-	 * the SlurmUser, and the job credential did not validate in
-	 * _check_job_credential.  If the job credential did not validate,
-	 * then it did not come from the controller and there is no reason
-	 * to send step completion messages to the controller.
+	/* step_hset can be NULL for batch scripts OR if the job was submitted
+	 * by SlurmUser or root using the --no-allocate/-Z option and the job
+	 * job credential validation by _check_job_credential() failed. If the
+	 * job credential did not validate, then it did not come from slurmctld
+	 * and there is no reason to send step completion messages to slurmctld.
 	 */
 	if (step_hset == NULL) {
+		bool send_error = false;
 		if (type == LAUNCH_TASKS) {
-			info("task rank unavailable due to invalid job "
-			     "credential, step completion RPC impossible");
+			launch_tasks_request_msg_t *launch_req;
+			launch_req = (launch_tasks_request_msg_t *) req;
+			if (launch_req->job_step_id != SLURM_EXTERN_CONT)
+				send_error = true;
 		}
+		if (send_error)
+			info("task rank unavailable due to invalid job credential, step completion RPC impossible");
 		rank = -1;
 		parent_rank = -1;
 		children = 0;
@@ -626,9 +654,18 @@ _send_slurmstepd_init(int fd, slurmd_step_type_t type, void *req,
 	}
 	buffer = init_buf(0);
 	msg.data = req;
-	msg.protocol_version = SLURM_PROTOCOL_VERSION;
+
+	if (protocol_version == (uint16_t)NO_VAL)
+		proto = SLURM_PROTOCOL_VERSION;
+	else
+		proto = protocol_version;
+
+	msg.protocol_version = (uint16_t)proto;
 	pack_msg(&msg, buffer);
 	len = get_buf_offset(buffer);
+
+	safe_write(fd, &proto, sizeof(int));
+
 	safe_write(fd, &len, sizeof(int));
 	safe_write(fd, get_buf_data(buffer), len);
 	free_buf(buffer);
@@ -706,9 +743,9 @@ rwfail:
  * will be init, not slurmd.
  */
 static int
-_forkexec_slurmstepd(slurmd_step_type_t type, void *req,
+_forkexec_slurmstepd(uint16_t type, void *req,
 		     slurm_addr_t *cli, slurm_addr_t *self,
-		     const hostset_t step_hset)
+		     const hostset_t step_hset, uint16_t protocol_version)
 {
 	pid_t pid;
 	int to_stepd[2] = {-1, -1};
@@ -750,7 +787,8 @@ _forkexec_slurmstepd(slurmd_step_type_t type, void *req,
 
 		if ((rc = _send_slurmstepd_init(to_stepd[1], type,
 						req, cli, self,
-						step_hset)) != 0) {
+						step_hset,
+						protocol_version)) != 0) {
 			error("Unable to init slurmstepd");
 			goto done;
 		}
@@ -871,16 +909,16 @@ _check_job_credential(launch_tasks_request_msg_t *req, uid_t uid,
 		      uint16_t protocol_version)
 {
 	slurm_cred_arg_t arg;
-	hostset_t        s_hset = NULL;
-	bool             user_ok = _slurm_authorized_user(uid);
-	bool             verified = true;
-	int              host_index = -1;
-	int              rc;
+	hostset_t	s_hset = NULL;
+	bool		user_ok = _slurm_authorized_user(uid);
+	bool		verified = true;
+	int		host_index = -1;
+	int		rc;
 	slurm_cred_t    *cred = req->cred;
-	uint32_t         jobid = req->job_id;
-	uint32_t         stepid = req->job_step_id;
-	int              tasks_to_launch = req->tasks_to_launch[node_id];
-	uint32_t         job_cpus = 0, step_cpus = 0;
+	uint32_t	jobid = req->job_id;
+	uint32_t	stepid = req->job_step_id;
+	int		tasks_to_launch = req->tasks_to_launch[node_id];
+	uint32_t	job_cpus = 0, step_cpus = 0;
 
 	/*
 	 * First call slurm_cred_verify() so that all valid
@@ -1107,7 +1145,6 @@ _rpc_launch_tasks(slurm_msg_t *msg)
 #endif
 	slurm_addr_t self;
 	slurm_addr_t *cli = &msg->orig_addr;
-	socklen_t adlen;
 	hostset_t step_hset = NULL;
 	job_mem_limits_t *job_limits_ptr;
 	int nodeid = 0;
@@ -1115,7 +1152,7 @@ _rpc_launch_tasks(slurm_msg_t *msg)
 	/* It is always 0 for front end systems */
 	nodeid = nodelist_find(req->complete_nodelist, conf->node_name);
 #endif
-	req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	req_uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	memcpy(&req->orig_addr, &msg->orig_addr, sizeof(slurm_addr_t));
 
 	super_user = _slurm_authorized_user(req_uid);
@@ -1137,6 +1174,14 @@ _rpc_launch_tasks(slurm_msg_t *msg)
 	req->envc = envcount(req->env);
 
 #ifndef HAVE_FRONT_END
+	/*
+	 *  Do not launch a new job step while prolog in progress:
+	 */
+	if (_prolog_is_running (req->job_id)) {
+		info("[job %u] prolog in progress\n", req->job_id);
+		errnum = EINPROGRESS;
+		goto done;
+	}
 	slurm_mutex_lock(&prolog_mutex);
 	first_job_run = !slurm_cred_jobid_cached(conf->vctx, req->job_id);
 #endif
@@ -1224,12 +1269,11 @@ _rpc_launch_tasks(slurm_msg_t *msg)
 		slurm_mutex_unlock(&job_limits_mutex);
 	}
 
-	adlen = sizeof(self);
-	_slurm_getsockname(msg->conn_fd, (struct sockaddr *)&self, &adlen);
+	slurm_get_stream_addr(msg->conn_fd, &self);
 
 	debug3("_rpc_launch_tasks: call to _forkexec_slurmstepd");
 	errnum = _forkexec_slurmstepd(LAUNCH_TASKS, (void *)req, cli, &self,
-				      step_hset);
+				      step_hset, msg->protocol_version);
 	debug3("_rpc_launch_tasks: return from _forkexec_slurmstepd");
 	_launch_complete_add(req->job_id);
 
@@ -1468,16 +1512,190 @@ static void _notify_slurmctld_prolog_fini(
 		error("Error sending prolog completion notification: %m");
 }
 
+/* Convert memory limits from per-CPU to per-node */
+static void _convert_job_mem(slurm_msg_t *msg)
+{
+	prolog_launch_msg_t *req = (prolog_launch_msg_t *)msg->data;
+	slurm_cred_arg_t arg;
+	hostset_t j_hset = NULL;
+	int rc, hi, host_index, job_cpus;
+	int i, i_first_bit = 0, i_last_bit = 0;
+
+	rc = slurm_cred_verify(conf->vctx, req->cred, &arg,
+			       msg->protocol_version);
+	if (rc < 0) {
+		error("%s: slurm_cred_verify failed: %m", __func__);
+		req->nnodes = 1;	/* best guess */
+		return;
+	}
+
+	req->nnodes = arg.job_nhosts;
+
+	if (arg.job_mem_limit == 0)
+		goto fini;
+	if ((arg.job_mem_limit & MEM_PER_CPU) == 0) {
+		req->job_mem_limit = arg.job_mem_limit;
+		goto fini;
+	}
+
+	/* Assume 1 CPU on error */
+	req->job_mem_limit = arg.job_mem_limit & (~MEM_PER_CPU);
+
+	if (!(j_hset = hostset_create(arg.job_hostlist))) {
+		error("%s: Unable to parse credential hostlist: `%s'",
+		      __func__, arg.step_hostlist);
+		goto fini;
+	}
+	host_index = hostset_find(j_hset, conf->node_name);
+	hostset_destroy(j_hset);
+
+	hi = host_index + 1;	/* change from 0-origin to 1-origin */
+	for (i = 0; hi; i++) {
+		if (hi > arg.sock_core_rep_count[i]) {
+			i_first_bit += arg.sockets_per_node[i] *
+				       arg.cores_per_socket[i] *
+				       arg.sock_core_rep_count[i];
+			i_last_bit = i_first_bit +
+				     arg.sockets_per_node[i] *
+				     arg.cores_per_socket[i] *
+				     arg.sock_core_rep_count[i];
+			hi -= arg.sock_core_rep_count[i];
+		} else {
+			i_first_bit += arg.sockets_per_node[i] *
+				       arg.cores_per_socket[i] * (hi - 1);
+			i_last_bit = i_first_bit +
+				     arg.sockets_per_node[i] *
+				     arg.cores_per_socket[i];
+			break;
+		}
+	}
+
+	/* Now count the allocated processors on this node */
+	job_cpus = 0;
+	for (i = i_first_bit; i < i_last_bit; i++) {
+		if (bit_test(arg.job_core_bitmap, i))
+			job_cpus++;
+	}
+
+	/* NOTE: alloc_lps is the count of allocated resources
+	 * (typically cores). Convert to CPU count as needed */
+	if (i_last_bit > i_first_bit) {
+		i = conf->cpus / (i_last_bit - i_first_bit);
+		if (i > 1)
+			job_cpus *= i;
+	}
+
+	req->job_mem_limit *= job_cpus;
+
+fini:	slurm_cred_free_args(&arg);
+}
+
+static void _make_prolog_mem_container(slurm_msg_t *msg)
+{
+	prolog_launch_msg_t *req = (prolog_launch_msg_t *)msg->data;
+	job_mem_limits_t *job_limits_ptr;
+	step_loc_t step_info;
+
+	_convert_job_mem(msg);	/* Convert per-CPU mem limit */
+	if (req->job_mem_limit) {
+		slurm_mutex_lock(&job_limits_mutex);
+		if (!job_limits_list)
+			job_limits_list = list_create(_job_limits_free);
+		step_info.jobid  = req->job_id;
+		step_info.stepid = SLURM_EXTERN_CONT;
+		job_limits_ptr = list_find_first (job_limits_list,
+						  _step_limits_match,
+						  &step_info);
+		if (!job_limits_ptr) {
+			job_limits_ptr = xmalloc(sizeof(job_mem_limits_t));
+			job_limits_ptr->job_id   = req->job_id;
+			job_limits_ptr->job_mem  = req->job_mem_limit;
+			job_limits_ptr->step_id  = SLURM_EXTERN_CONT;
+			job_limits_ptr->step_mem = req->job_mem_limit;
+#if _LIMIT_INFO
+			info("AddLim step:%u.%u job_mem:%u step_mem:%u",
+			      job_limits_ptr->job_id, job_limits_ptr->step_id,
+			      job_limits_ptr->job_mem,
+			      job_limits_ptr->step_mem);
+#endif
+			list_append(job_limits_list, job_limits_ptr);
+		}
+		slurm_mutex_unlock(&job_limits_mutex);
+	}
+}
+
+static void _spawn_prolog_stepd(slurm_msg_t *msg)
+{
+	prolog_launch_msg_t *req = (prolog_launch_msg_t *)msg->data;
+	launch_tasks_request_msg_t *launch_req;
+	slurm_addr_t self;
+	slurm_addr_t *cli = &msg->orig_addr;
+	int i;
+
+	launch_req = xmalloc(sizeof(launch_tasks_request_msg_t));
+	launch_req->alias_list		= req->alias_list;
+	launch_req->complete_nodelist	= req->nodes;
+	launch_req->cpus_per_task	= 1;
+	launch_req->cred		= req->cred;
+	launch_req->cwd			= req->work_dir;
+	launch_req->efname		= "/dev/null";
+	launch_req->gid			= req->gid;
+	launch_req->global_task_ids	= xmalloc(sizeof(uint32_t *)
+						  * req->nnodes);
+	launch_req->ifname		= "/dev/null";
+	launch_req->job_id		= req->job_id;
+	launch_req->job_mem_lim		= req->job_mem_limit;
+	launch_req->job_step_id		= SLURM_EXTERN_CONT;
+	launch_req->nnodes		= req->nnodes;
+	launch_req->ntasks		= req->nnodes;
+	launch_req->ofname		= "/dev/null";
+	launch_req->partition		= req->partition;
+	launch_req->spank_job_env_size	= req->spank_job_env_size;
+	launch_req->spank_job_env	= req->spank_job_env;
+	launch_req->step_mem_lim	= req->job_mem_limit;
+	launch_req->tasks_to_launch	= xmalloc(sizeof(uint16_t)
+						  * req->nnodes);
+	launch_req->uid			= req->uid;
+
+	for (i = 0; i < req->nnodes; i++) {
+		uint32_t *tmp32 = xmalloc(sizeof(uint32_t));
+		*tmp32 = i;
+		launch_req->global_task_ids[i] = tmp32;
+		launch_req->tasks_to_launch[i] = 1;
+	}
+
+	slurm_get_stream_addr(msg->conn_fd, &self);
+
+	debug3("%s: call to _forkexec_slurmstepd", __func__);
+	(void) _forkexec_slurmstepd(LAUNCH_TASKS, (void *)launch_req, cli,
+				     &self, NULL, msg->protocol_version);
+	debug3("%s: return from _forkexec_slurmstepd", __func__);
+
+	for (i = 0; i < req->nnodes; i++)
+		xfree(launch_req->global_task_ids[i]);
+	xfree(launch_req->global_task_ids);
+	xfree(launch_req->tasks_to_launch);
+	xfree(launch_req);
+}
+
 static void _rpc_prolog(slurm_msg_t *msg)
 {
 	int rc = SLURM_SUCCESS;
 	prolog_launch_msg_t *req = (prolog_launch_msg_t *)msg->data;
 	job_env_t job_env;
 	bool     first_job_run;
+	uid_t    req_uid;
 
 	if (req == NULL)
 		return;
 
+	req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	if (!_slurm_authorized_user(req_uid)) {
+		error("REQUEST_LAUNCH_PROLOG request from uid %u",
+		      (unsigned int) req_uid);
+		return;
+	}
+
 	if (slurm_send_rc_msg(msg, rc) < 0) {
 		error("Error starting prolog: %m");
 	}
@@ -1495,6 +1713,9 @@ static void _rpc_prolog(slurm_msg_t *msg)
 		rc = ESLURMD_PROLOG_FAILED;
 	}
 
+	if (slurmctld_conf.prolog_flags & PROLOG_FLAG_CONTAIN)
+		_make_prolog_mem_container(msg);
+
 	if (container_g_create(req->job_id))
 		error("container_g_create(%u): %m", req->job_id);
 
@@ -1523,7 +1744,7 @@ static void _rpc_prolog(slurm_msg_t *msg)
 		job_env.resv_id = select_g_select_jobinfo_xstrdup(
 			req->select_jobinfo, SELECT_PRINT_RESV_ID);
 #endif
-		rc = _run_prolog(&job_env, NULL);
+		rc = _run_prolog(&job_env, req->cred);
 
 		if (rc) {
 			int term_sig, exit_status;
@@ -1543,6 +1764,9 @@ static void _rpc_prolog(slurm_msg_t *msg)
 
 	if (!(slurmctld_conf.prolog_flags & PROLOG_FLAG_NOHOLD))
 		_notify_slurmctld_prolog_fini(req->job_id, rc);
+
+	if (slurmctld_conf.prolog_flags & PROLOG_FLAG_CONTAIN)
+		_spawn_prolog_stepd(msg);
 }
 
 static void
@@ -1555,7 +1779,8 @@ _rpc_batch_job(slurm_msg_t *msg, bool new_msg)
 	slurm_addr_t *cli = &msg->orig_addr;
 
 	if (new_msg) {
-		uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+		uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+						     slurm_get_auth_info());
 		if (!_slurm_authorized_user(req_uid)) {
 			error("Security violation, batch launch RPC from uid %d",
 			      req_uid);
@@ -1673,7 +1898,7 @@ _rpc_batch_job(slurm_msg_t *msg, bool new_msg)
 
 	debug3("_rpc_batch_job: call to _forkexec_slurmstepd");
 	rc = _forkexec_slurmstepd(LAUNCH_BATCH_JOB, (void *)req, cli, NULL,
-				  (hostset_t)NULL);
+				  (hostset_t)NULL, SLURM_PROTOCOL_VERSION);
 	debug3("_rpc_batch_job: return from _forkexec_slurmstepd: %d", rc);
 
 	slurm_mutex_unlock(&launch_mutex);
@@ -1739,7 +1964,8 @@ static void
 _rpc_job_notify(slurm_msg_t *msg)
 {
 	job_notify_msg_t *req = msg->data;
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 	uid_t job_uid;
 	List steps;
 	ListIterator i;
@@ -1788,7 +2014,7 @@ _rpc_job_notify(slurm_msg_t *msg)
 		close(fd);
 	}
 	list_iterator_destroy(i);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 
 no_job:
 	if (step_cnt == 0) {
@@ -1829,10 +2055,11 @@ _launch_job_fail(uint32_t job_id, uint32_t slurm_rc)
 	} else {
 		req_msg.job_id = job_id;
 		req_msg.job_id_str = NULL;
-		if (requeue_no_hold)
+		if (requeue_no_hold) {
 			req_msg.state = JOB_PENDING;
-		else
-			req_msg.state = JOB_REQUEUE_HOLD;
+		} else {
+			req_msg.state = (JOB_REQUEUE_HOLD|JOB_LAUNCH_FAILED);
+		}
 		resp_msg.msg_type = REQUEST_JOB_REQUEUE;
 		resp_msg.data = &req_msg;
 	}
@@ -1865,7 +2092,8 @@ _abort_step(uint32_t job_id, uint32_t step_id)
 static void
 _rpc_reconfig(slurm_msg_t *msg)
 {
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 
 	if (!_slurm_authorized_user(req_uid))
 		error("Security violation, reconfig RPC from uid %d",
@@ -1879,7 +2107,8 @@ _rpc_reconfig(slurm_msg_t *msg)
 static void
 _rpc_shutdown(slurm_msg_t *msg)
 {
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 
 	forward_wait(msg);
 	if (!_slurm_authorized_user(req_uid))
@@ -1898,7 +2127,8 @@ _rpc_reboot(slurm_msg_t *msg)
 {
 	char *reboot_program, *sp;
 	slurm_ctl_conf_t *cfg;
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 	int exit_code;
 
 	if (!_slurm_authorized_user(req_uid))
@@ -1934,7 +2164,6 @@ static void _job_limits_free(void *x)
 	xfree(x);
 }
 
-
 static int _job_limits_match(void *x, void *key)
 {
 	job_mem_limits_t *job_limits_ptr = (job_mem_limits_t *) x;
@@ -2014,7 +2243,7 @@ _load_job_limits(void)
 		close(fd);
 	}
 	list_iterator_destroy(step_iter);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 }
 
 static void
@@ -2163,7 +2392,7 @@ _enforce_job_mem_limit(void)
 		close(fd);
 	}
 	list_iterator_destroy(step_iter);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 
 	for (i=0; i<job_cnt; i++) {
 		if (job_mem_info_ptr[i].mem_used == 0) {
@@ -2203,7 +2432,8 @@ static int
 _rpc_ping(slurm_msg_t *msg)
 {
 	int        rc = SLURM_SUCCESS;
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 	static bool first_msg = true;
 
 	if (!_slurm_authorized_user(req_uid)) {
@@ -2234,6 +2464,7 @@ _rpc_ping(slurm_msg_t *msg)
 		slurm_msg_t resp_msg;
 		ping_slurmd_resp_msg_t ping_resp;
 		get_cpu_load(&ping_resp.cpu_load);
+		get_free_mem(&ping_resp.free_mem);
 		slurm_msg_t_copy(&resp_msg, msg);
 		resp_msg.msg_type = RESPONSE_PING_SLURMD;
 		resp_msg.data     = &ping_resp;
@@ -2250,7 +2481,8 @@ static int
 _rpc_health_check(slurm_msg_t *msg)
 {
 	int        rc = SLURM_SUCCESS;
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 
 	if (!_slurm_authorized_user(req_uid)) {
 		error("Security violation, health check RPC from uid %d",
@@ -2286,7 +2518,8 @@ static int
 _rpc_acct_gather_update(slurm_msg_t *msg)
 {
 	int        rc = SLURM_SUCCESS;
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 	static bool first_msg = true;
 
 	if (!_slurm_authorized_user(req_uid)) {
@@ -2322,9 +2555,10 @@ _rpc_acct_gather_update(slurm_msg_t *msg)
 
 		memset(&acct_msg, 0, sizeof(acct_gather_node_resp_msg_t));
 		acct_msg.node_name = conf->node_name;
-		acct_msg.energy = acct_gather_energy_alloc();
+		acct_msg.sensor_cnt = 1;
+		acct_msg.energy = acct_gather_energy_alloc(acct_msg.sensor_cnt);
 		acct_gather_energy_g_get_data(
-			ENERGY_DATA_STRUCT, acct_msg.energy);
+			ENERGY_DATA_NODE_ENERGY, acct_msg.energy);
 
 		slurm_msg_t_copy(&resp_msg, msg);
 		resp_msg.msg_type = RESPONSE_ACCT_GATHER_UPDATE;
@@ -2341,7 +2575,8 @@ static int
 _rpc_acct_gather_energy(slurm_msg_t *msg)
 {
 	int        rc = SLURM_SUCCESS;
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 	static bool first_msg = true;
 
 	if (!_slurm_authorized_user(req_uid)) {
@@ -2363,10 +2598,13 @@ _rpc_acct_gather_energy(slurm_msg_t *msg)
 		acct_gather_node_resp_msg_t acct_msg;
 		time_t now = time(NULL), last_poll = 0;
 		int data_type = ENERGY_DATA_STRUCT;
+		uint16_t sensor_cnt;
 		acct_gather_energy_req_msg_t *req = msg->data;
 
 		acct_gather_energy_g_get_data(ENERGY_DATA_LAST_POLL,
 					      &last_poll);
+		acct_gather_energy_g_get_data(ENERGY_DATA_SENSOR_CNT,
+					      &sensor_cnt);
 
 		/* If we polled later than delta seconds then force a
 		   new poll.
@@ -2375,7 +2613,9 @@ _rpc_acct_gather_energy(slurm_msg_t *msg)
 			data_type = ENERGY_DATA_JOULES_TASK;
 
 		memset(&acct_msg, 0, sizeof(acct_gather_node_resp_msg_t));
-		acct_msg.energy = acct_gather_energy_alloc();
+		acct_msg.sensor_cnt = sensor_cnt;
+		acct_msg.energy = acct_gather_energy_alloc(acct_msg.sensor_cnt);
+
 		acct_gather_energy_g_get_data(data_type, acct_msg.energy);
 
 		slurm_msg_t_copy(&resp_msg, msg);
@@ -2397,6 +2637,15 @@ _signal_jobstep(uint32_t jobid, uint32_t stepid, uid_t req_uid,
 	uid_t uid;
 	uint16_t protocol_version;
 
+	/*  There will be no stepd if the prolog is still running
+	 *   Return failure so caller can retry.
+	 */
+	if (_prolog_is_running (jobid)) {
+		info ("signal %d req for %u.%u while prolog is running."
+		      " Returning failure.", signal, jobid, stepid);
+		return SLURM_FAILURE;
+	}
+
 	fd = stepd_connect(conf->spooldir, conf->node_name, jobid, stepid,
 			   &protocol_version);
 	if (fd == -1) {
@@ -2448,18 +2697,12 @@ static void
 _rpc_signal_tasks(slurm_msg_t *msg)
 {
 	int               rc = SLURM_SUCCESS;
-	uid_t             req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t             req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+							 slurm_get_auth_info());
 	kill_tasks_msg_t *req = (kill_tasks_msg_t *) msg->data;
 	uint32_t flag;
 	uint32_t sig;
 
-#ifdef HAVE_XCPU
-	if (!_slurm_authorized_user(req_uid)) {
-		error("REQUEST_SIGNAL_TASKS not support with XCPU system");
-		return ESLURM_NOT_SUPPORTED;
-	}
-#endif
-
 	flag = req->signal >> 24;
 	sig  = req->signal & 0xfff;
 
@@ -2481,7 +2724,8 @@ _rpc_checkpoint_tasks(slurm_msg_t *msg)
 {
 	int               fd;
 	int               rc = SLURM_SUCCESS;
-	uid_t             req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t             req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+							 slurm_get_auth_info());
 	checkpoint_tasks_msg_t *req = (checkpoint_tasks_msg_t *) msg->data;
 	uint16_t protocol_version;
 	uid_t uid;
@@ -2548,7 +2792,7 @@ _rpc_terminate_tasks(slurm_msg_t *msg)
 		goto done2;
 	}
 
-	req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	req_uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	if ((req_uid != uid)
 	    && (!_slurm_authorized_user(req_uid))) {
 		debug("kill req from uid %ld for job %u.%u owned by uid %ld",
@@ -2589,7 +2833,7 @@ _rpc_step_complete(slurm_msg_t *msg)
 
 	/* step completion messages are only allowed from other slurmstepd,
 	   so only root or SlurmUser is allowed here */
-	req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	req_uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	if (!_slurm_authorized_user(req_uid)) {
 		debug("step completion from uid %ld for job %u.%u",
 		      (long) req_uid, req->job_id, req->job_step_id);
@@ -2609,6 +2853,55 @@ done:
 	return rc;
 }
 
+static void _setup_step_complete_msg(slurm_msg_t *msg, void *data)
+{
+	slurm_msg_t_init(msg);
+	msg->msg_type = REQUEST_STEP_COMPLETE;
+	msg->data = data;
+}
+
+/* This step_complete RPC came from slurmstepd because we are using
+ * message aggregation configured and we are at the head of the tree.
+ * This just adds the message to the list and goes on it's merry way. */
+static int
+_rpc_step_complete_aggr(slurm_msg_t *msg)
+{
+	int rc;
+	uid_t uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+
+	if (!_slurm_authorized_user(uid)) {
+		error("Security violation: step_complete_aggr from uid %d",
+		      uid);
+		if (msg->conn_fd >= 0)
+			slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
+		return SLURM_ERROR;
+	}
+
+	if (conf->msg_aggr_window_msgs > 1) {
+		slurm_msg_t *req = xmalloc_nz(sizeof(slurm_msg_t));
+		_setup_step_complete_msg(req, msg->data);
+		msg->data = NULL;
+
+		msg_aggr_add_msg(req, 1, NULL);
+	} else {
+		slurm_msg_t req;
+		_setup_step_complete_msg(&req, msg->data);
+
+		while (slurm_send_recv_controller_rc_msg(&req, &rc) < 0) {
+			error("Unable to send step complete, "
+			      "trying again in a minute: %m");
+		}
+	}
+
+	/* Finish communication with the stepd, we have to wait for
+	 * the message back from the slurmctld or we will cause a race
+	 * condition with srun.
+	 */
+	slurm_send_rc_msg(msg, SLURM_SUCCESS);
+
+	return SLURM_SUCCESS;
+}
+
 /* Get list of active jobs and steps, xfree returned value */
 static char *
 _get_step_list(void)
@@ -2651,7 +2944,7 @@ _get_step_list(void)
 		}
 	}
 	list_iterator_destroy(i);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 
 	if (step_list == NULL)
 		xstrcat(step_list, "NONE");
@@ -2702,7 +2995,7 @@ _rpc_stat_jobacct(slurm_msg_t *msg)
 	debug3("Entering _rpc_stat_jobacct");
 	/* step completion messages are only allowed from other slurmstepd,
 	   so only root or SlurmUser is allowed here */
-	req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	req_uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	fd = stepd_connect(conf->spooldir, conf->node_name,
 			   req->job_id, req->step_id, &protocol_version);
@@ -2754,9 +3047,9 @@ _rpc_stat_jobacct(slurm_msg_t *msg)
 	*/
 	if (stepd_list_pids(fd, protocol_version, &resp->step_pids->pid,
 			    &resp->step_pids->pid_cnt) == SLURM_ERROR) {
-                debug("No pids for nonexistent job %u.%u requested",
-                      req->job_id, req->step_id);
-        }
+		debug("No pids for nonexistent job %u.%u requested",
+		      req->job_id, req->step_id);
+	}
 
 	close(fd);
 
@@ -2768,6 +3061,99 @@ _rpc_stat_jobacct(slurm_msg_t *msg)
 	return SLURM_SUCCESS;
 }
 
+static int
+_callerid_find_job(callerid_conn_t conn, uint32_t *job_id)
+{
+	ino_t inode;
+	pid_t pid;
+	int rc;
+
+	rc = callerid_find_inode_by_conn(conn, &inode);
+	if (rc != SLURM_SUCCESS) {
+		debug3("network_callerid inode not found");
+		return ESLURM_INVALID_JOB_ID;
+	}
+	debug3("network_callerid found inode %lu", (long unsigned int)inode);
+
+	rc = find_pid_by_inode(&pid, inode);
+	if (rc != SLURM_SUCCESS) {
+		debug3("network_callerid process not found");
+		return ESLURM_INVALID_JOB_ID;
+	}
+	debug3("network_callerid found process %d", (pid_t)pid);
+
+	rc = slurm_pid2jobid(pid, job_id);
+	if (rc != SLURM_SUCCESS) {
+		debug3("network_callerid job not found");
+		return ESLURM_INVALID_JOB_ID;
+	}
+	debug3("network_callerid found job %u", *job_id);
+	return SLURM_SUCCESS;
+}
+
+static int
+_rpc_network_callerid(slurm_msg_t *msg)
+{
+	network_callerid_msg_t *req = (network_callerid_msg_t *)msg->data;
+	slurm_msg_t resp_msg;
+	network_callerid_resp_t *resp = NULL;
+
+	uid_t req_uid = -1;
+	uid_t job_uid = -1;
+	uint32_t job_id = (uint32_t)NO_VAL;
+	callerid_conn_t conn;
+	int rc = ESLURM_INVALID_JOB_ID;
+	char ip_src_str[INET6_ADDRSTRLEN];
+	char ip_dst_str[INET6_ADDRSTRLEN];
+
+	debug3("Entering _rpc_network_callerid");
+
+	resp = xmalloc(sizeof(network_callerid_resp_t));
+	slurm_msg_t_copy(&resp_msg, msg);
+
+	/* Ideally this would be in an if block only when debug3 is enabled */
+	inet_ntop(req->af, req->ip_src, ip_src_str, INET6_ADDRSTRLEN);
+	inet_ntop(req->af, req->ip_dst, ip_dst_str, INET6_ADDRSTRLEN);
+	debug3("network_callerid checking %s:%u => %s:%u",
+		ip_src_str, req->port_src, ip_dst_str, req->port_dst);
+
+	/* My remote is the other's source */
+	memcpy((void*)&conn.ip_dst, (void*)&req->ip_src, 16);
+	memcpy((void*)&conn.ip_src, (void*)&req->ip_dst, 16);
+	conn.port_src = req->port_dst;
+	conn.port_dst = req->port_src;
+	conn.af = req->af;
+
+	/* Find the job id */
+	rc = _callerid_find_job(conn, &job_id);
+	if (rc == SLURM_SUCCESS) {
+		/* We found the job */
+		req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+		if (!_slurm_authorized_user(req_uid)) {
+			/* Requestor is not root or SlurmUser */
+			job_uid = _get_job_uid(job_id);
+			if (job_uid != req_uid) {
+				/* RPC call sent by non-root user who does not
+				 * own this job. Do not send them the job ID. */
+				error("Security violation, REQUEST_NETWORK_CALLERID from uid=%d",
+				      req_uid);
+				job_id = NO_VAL;
+				rc = ESLURM_INVALID_JOB_ID;
+			}
+		}
+	}
+
+	resp->job_id = job_id;
+	resp->node_name = xstrdup(conf->node_name);
+
+	resp_msg.msg_type = RESPONSE_NETWORK_CALLERID;
+	resp_msg.data     = resp;
+
+	slurm_send_node_msg(msg->conn_fd, &resp_msg);
+	slurm_free_network_callerid_resp(resp);
+	return rc;
+}
+
 static int
 _rpc_list_pids(slurm_msg_t *msg)
 {
@@ -2779,66 +3165,66 @@ _rpc_list_pids(slurm_msg_t *msg)
 	uid_t job_uid;
 	uint16_t protocol_version = 0;
 
-        debug3("Entering _rpc_list_pids");
-        /* step completion messages are only allowed from other slurmstepd,
-           so only root or SlurmUser is allowed here */
-        req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	debug3("Entering _rpc_list_pids");
+	/* step completion messages are only allowed from other slurmstepd,
+	 * so only root or SlurmUser is allowed here */
+	req_uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 
 	job_uid = _get_job_uid(req->job_id);
 
-        if ((int)job_uid < 0) {
-                error("stat_pid for invalid job_id: %u",
+	if ((int)job_uid < 0) {
+		error("stat_pid for invalid job_id: %u",
 		      req->job_id);
-                if (msg->conn_fd >= 0)
-                        slurm_send_rc_msg(msg, ESLURM_INVALID_JOB_ID);
-                return  ESLURM_INVALID_JOB_ID;
-        }
-
-        /*
-         * check that requesting user ID is the SLURM UID or root
-         */
-        if ((req_uid != job_uid)
+		if (msg->conn_fd >= 0)
+			slurm_send_rc_msg(msg, ESLURM_INVALID_JOB_ID);
+		return  ESLURM_INVALID_JOB_ID;
+	}
+
+	/*
+	 * check that requesting user ID is the SLURM UID or root
+	 */
+	if ((req_uid != job_uid)
 	    && (!_slurm_authorized_user(req_uid))) {
-                error("stat_pid from uid %ld for job %u "
-                      "owned by uid %ld",
-                      (long) req_uid, req->job_id, (long) job_uid);
-
-                if (msg->conn_fd >= 0) {
-                        slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
-                        return ESLURM_USER_ID_MISSING;/* or bad in this case */
-                }
-        }
-
-        resp = xmalloc(sizeof(job_step_pids_t));
-        slurm_msg_t_copy(&resp_msg, msg);
- 	resp->node_name = xstrdup(conf->node_name);
+		error("stat_pid from uid %ld for job %u "
+		      "owned by uid %ld",
+		      (long) req_uid, req->job_id, (long) job_uid);
+
+		if (msg->conn_fd >= 0) {
+			slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
+			return ESLURM_USER_ID_MISSING;/* or bad in this case */
+		}
+	}
+
+	resp = xmalloc(sizeof(job_step_pids_t));
+	slurm_msg_t_copy(&resp_msg, msg);
+	resp->node_name = xstrdup(conf->node_name);
 	resp->pid_cnt = 0;
 	resp->pid = NULL;
-        fd = stepd_connect(conf->spooldir, conf->node_name,
-                           req->job_id, req->step_id, &protocol_version);
-        if (fd == -1) {
-                error("stepd_connect to %u.%u failed: %m",
-                      req->job_id, req->step_id);
-                slurm_send_rc_msg(msg, ESLURM_INVALID_JOB_ID);
-                slurm_free_job_step_pids(resp);
-                return  ESLURM_INVALID_JOB_ID;
+	fd = stepd_connect(conf->spooldir, conf->node_name,
+			   req->job_id, req->step_id, &protocol_version);
+	if (fd == -1) {
+		error("stepd_connect to %u.%u failed: %m",
+		      req->job_id, req->step_id);
+		slurm_send_rc_msg(msg, ESLURM_INVALID_JOB_ID);
+		slurm_free_job_step_pids(resp);
+		return  ESLURM_INVALID_JOB_ID;
 
-        }
+	}
 
 	if (stepd_list_pids(fd, protocol_version,
 			    &resp->pid, &resp->pid_cnt) == SLURM_ERROR) {
-                debug("No pids for nonexistent job %u.%u requested",
-                      req->job_id, req->step_id);
-        }
+		debug("No pids for nonexistent job %u.%u requested",
+		      req->job_id, req->step_id);
+	}
 
-        close(fd);
+	close(fd);
 
-        resp_msg.msg_type = RESPONSE_JOB_STEP_PIDS;
-        resp_msg.data     = resp;
+	resp_msg.msg_type = RESPONSE_JOB_STEP_PIDS;
+	resp_msg.data     = resp;
 
-        slurm_send_node_msg(msg->conn_fd, &resp_msg);
-        slurm_free_job_step_pids(resp);
-        return SLURM_SUCCESS;
+	slurm_send_node_msg(msg->conn_fd, &resp_msg);
+	slurm_free_job_step_pids(resp);
+	return SLURM_SUCCESS;
 }
 
 /*
@@ -2848,7 +3234,8 @@ _rpc_list_pids(slurm_msg_t *msg)
 static void
 _rpc_timelimit(slurm_msg_t *msg)
 {
-	uid_t           uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t           uid = g_slurm_auth_get_uid(msg->auth_cred,
+						   slurm_get_auth_info());
 	kill_job_msg_t *req = msg->data;
 	int             nsteps, rc;
 
@@ -2863,7 +3250,7 @@ _rpc_timelimit(slurm_msg_t *msg)
 	 *  Indicate to slurmctld that we've received the message
 	 */
 	slurm_send_rc_msg(msg, SLURM_SUCCESS);
-	slurm_close_accepted_conn(msg->conn_fd);
+	slurm_close(msg->conn_fd);
 	msg->conn_fd = -1;
 
 	if (req->step_id != NO_VAL) {
@@ -2904,8 +3291,7 @@ _rpc_timelimit(slurm_msg_t *msg)
 		_kill_all_active_steps(req->job_id, SIG_TIME_LIMIT, true);
 	else /* (msg->type == REQUEST_KILL_PREEMPTED) */
 		_kill_all_active_steps(req->job_id, SIG_PREEMPTED, true);
-	nsteps = xcpu_signal(SIGTERM, req->nodes) +
-		_kill_all_active_steps(req->job_id, SIGTERM, false);
+	nsteps = _kill_all_active_steps(req->job_id, SIGTERM, false);
 	verbose( "Job %u: timeout: sent SIGTERM to %d active steps",
 		 req->job_id, nsteps );
 
@@ -2948,7 +3334,7 @@ static void  _rpc_pid2jid(slurm_msg_t *msg)
 		close(fd);
 	}
 	list_iterator_destroy(i);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 
 	if (found) {
 		debug3("_rpc_pid2jid: pid(%u) found in %u",
@@ -2983,8 +3369,8 @@ _get_grouplist(char **user_name, uid_t my_uid, gid_t my_gid,
 	*groups = (gid_t *) xmalloc(*ngroups * sizeof(gid_t));
 
 	if (getgrouplist(*user_name, my_gid, *groups, ngroups) < 0) {
-	        *groups = xrealloc(*groups, *ngroups * sizeof(gid_t));
-	        getgrouplist(*user_name, my_gid, *groups, ngroups);
+		*groups = xrealloc(*groups, *ngroups * sizeof(gid_t));
+		getgrouplist(*user_name, my_gid, *groups, ngroups);
 	}
 
 	return 0;
@@ -3036,8 +3422,10 @@ _rpc_file_bcast(slurm_msg_t *msg)
 	int fd, flags, offset, inx, rc;
 	int ngroups = 16;
 	gid_t *groups;
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
-	gid_t req_gid = g_slurm_auth_get_gid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
+	gid_t req_gid = g_slurm_auth_get_gid(msg->auth_cred,
+					     slurm_get_auth_info());
 	pid_t child;
 	uint32_t job_id;
 
@@ -3110,10 +3498,10 @@ _rpc_file_bcast(slurm_msg_t *msg)
 	 * Change the code below with caution.
 	\*********************************************************************/
 
-        if (setgroups(ngroups, groups) < 0) {
-	        error("sbcast: uid: %u setgroups: %s", req_uid,
+	if (setgroups(ngroups, groups) < 0) {
+		error("sbcast: uid: %u setgroups: %s", req_uid,
 		      strerror(errno));
-	        exit(errno);
+		exit(errno);
 	}
 
 	if (setgid(req_gid) < 0) {
@@ -3220,7 +3608,7 @@ _rpc_reattach_tasks(slurm_msg_t *msg)
 
 	debug2("_rpc_reattach_tasks: nodeid %d in the job step", nodeid);
 
-	req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	req_uid = g_slurm_auth_get_uid(msg->auth_cred, slurm_get_auth_info());
 	if ((req_uid != uid) && (!_slurm_authorized_user(req_uid))) {
 		error("uid %ld attempt to attach to job %u.%u owned by %ld",
 		      (long) req_uid, req->job_id, req->job_step_id,
@@ -3263,7 +3651,13 @@ _rpc_reattach_tasks(slurm_msg_t *msg)
 
 	resp->gtids = NULL;
 	resp->local_pids = NULL;
-	/* Following call fills in gtids and local_pids when successful */
+
+	 /* NOTE: We need to use the protocol_version from
+	  * sattach here since responses will be sent back to it. */
+	if (msg->protocol_version < protocol_version)
+		protocol_version = msg->protocol_version;
+
+	/* Following call fills in gtids and local_pids when successful. */
 	rc = stepd_attach(fd, protocol_version, &ioaddr,
 			  &resp_msg.address, job_cred_sig, resp);
 	if (rc != SLURM_SUCCESS) {
@@ -3319,7 +3713,7 @@ static uid_t _get_job_uid(uint32_t jobid)
 		break;
 	}
 	list_iterator_destroy(i);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 
 	return uid;
 }
@@ -3372,7 +3766,7 @@ _kill_all_active_steps(uint32_t jobid, int sig, bool batch)
 		close(fd);
 	}
 	list_iterator_destroy(i);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 	if (step_cnt == 0)
 		debug2("No steps in jobid %u to send signal %d", jobid, sig);
 	return step_cnt;
@@ -3424,7 +3818,7 @@ _terminate_all_steps(uint32_t jobid, bool batch)
 		close(fd);
 	}
 	list_iterator_destroy(i);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 	if (step_cnt == 0)
 		debug2("No steps in job %u to terminate", jobid);
 	return step_cnt;
@@ -3459,7 +3853,7 @@ _job_still_running(uint32_t job_id)
 		}
 	}
 	list_iterator_destroy(i);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 
 	return retval;
 }
@@ -3519,41 +3913,64 @@ _steps_completed_now(uint32_t jobid)
 		}
 	}
 	list_iterator_destroy(i);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 
 	return rc;
 }
 
+static void _epilog_complete_msg_setup(
+	slurm_msg_t *msg, epilog_complete_msg_t *req, uint32_t jobid, int rc)
+{
+	slurm_msg_t_init(msg);
+	memset(req, 0, sizeof(epilog_complete_msg_t));
+
+	req->job_id      = jobid;
+	req->return_code = rc;
+	req->node_name   = conf->node_name;
+
+	msg->msg_type    = MESSAGE_EPILOG_COMPLETE;
+	msg->data        = req;
+}
+
 /*
  *  Send epilog complete message to currently active controller.
+ *  If enabled, use message aggregation.
  *   Returns SLURM_SUCCESS if message sent successfully,
  *           SLURM_FAILURE if epilog complete message fails to be sent.
  */
 static int
 _epilog_complete(uint32_t jobid, int rc)
 {
-	int                    ret = SLURM_SUCCESS;
-	slurm_msg_t            msg;
-	epilog_complete_msg_t  req;
+	int ret = SLURM_SUCCESS;
 
-	slurm_msg_t_init(&msg);
+	if (conf->msg_aggr_window_msgs > 1) {
+		/* message aggregation is enabled */
+		slurm_msg_t *msg = xmalloc(sizeof(slurm_msg_t));
+		epilog_complete_msg_t *req =
+			xmalloc(sizeof(epilog_complete_msg_t));
 
-	req.job_id      = jobid;
-	req.return_code = rc;
-	req.node_name   = conf->node_name;
+		_epilog_complete_msg_setup(msg, req, jobid, rc);
 
-	msg.msg_type    = MESSAGE_EPILOG_COMPLETE;
-	msg.data        = &req;
+		/* we need to copy this symbol */
+		req->node_name   = xstrdup(conf->node_name);
 
-	/* Note: No return code to message, slurmctld will resend
-	 * TERMINATE_JOB request if message send fails */
-	if (slurm_send_only_controller_msg(&msg) < 0) {
-		error("Unable to send epilog complete message: %m");
-		ret = SLURM_ERROR;
+		msg_aggr_add_msg(msg, 0, NULL);
 	} else {
-		debug ("Job %u: sent epilog complete msg: rc = %d", jobid, rc);
-	}
+		slurm_msg_t msg;
+		epilog_complete_msg_t req;
 
+		_epilog_complete_msg_setup(&msg, &req, jobid, rc);
+
+		/* Note: No return code to message, slurmctld will resend
+		 * TERMINATE_JOB request if message send fails */
+		if (slurm_send_only_controller_msg(&msg) < 0) {
+			error("Unable to send epilog complete message: %m");
+			ret = SLURM_ERROR;
+		} else {
+			debug("Job %u: sent epilog complete msg: rc = %d",
+			      jobid, rc);
+		}
+	}
 	return ret;
 }
 
@@ -3566,7 +3983,8 @@ static void
 _rpc_signal_job(slurm_msg_t *msg)
 {
 	signal_job_msg_t *req = msg->data;
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 	uid_t job_uid;
 	List steps;
 	ListIterator i;
@@ -3574,20 +3992,6 @@ _rpc_signal_job(slurm_msg_t *msg)
 	int step_cnt  = 0;
 	int fd;
 
-#ifdef HAVE_XCPU
-	if (!_slurm_authorized_user(req_uid)) {
-		error("REQUEST_SIGNAL_JOB not supported with XCPU system");
-		if (msg->conn_fd >= 0) {
-			slurm_send_rc_msg(msg, ESLURM_NOT_SUPPORTED);
-			if (slurm_close_accepted_conn(msg->conn_fd) < 0)
-				error ("_rpc_signal_job: close(%d): %m",
-				       msg->conn_fd);
-			msg->conn_fd = -1;
-		}
-		return;
-	}
-#endif
-
 	debug("_rpc_signal_job, uid = %d, signal = %d", req_uid, req->signal);
 	job_uid = _get_job_uid(req->job_id);
 	if ((int)job_uid < 0)
@@ -3601,7 +4005,7 @@ _rpc_signal_job(slurm_msg_t *msg)
 		      req->job_id, req_uid);
 		if (msg->conn_fd >= 0) {
 			slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
-			if (slurm_close_accepted_conn(msg->conn_fd) < 0)
+			if (slurm_close(msg->conn_fd) < 0)
 				error ("_rpc_signal_job: close(%d): %m",
 				       msg->conn_fd);
 			msg->conn_fd = -1;
@@ -3647,7 +4051,7 @@ _rpc_signal_job(slurm_msg_t *msg)
 		close(fd);
 	}
 	list_iterator_destroy(i);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 
 no_job:
 	if (step_cnt == 0) {
@@ -3661,7 +4065,7 @@ no_job:
 	 */
 	if (msg->conn_fd >= 0) {
 		slurm_send_rc_msg(msg, SLURM_SUCCESS);
-		if (slurm_close_accepted_conn(msg->conn_fd) < 0)
+		if (slurm_close(msg->conn_fd) < 0)
 			error ("_rpc_signal_job: close(%d): %m", msg->conn_fd);
 		msg->conn_fd = -1;
 	}
@@ -3731,7 +4135,7 @@ extern void record_launched_jobs(void)
 		_launch_complete_add(stepd->jobid);
 	}
 	list_iterator_destroy(i);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 }
 
 /*
@@ -3743,7 +4147,8 @@ _rpc_suspend_job(slurm_msg_t *msg)
 {
 	int time_slice = -1;
 	suspend_int_msg_t *req = msg->data;
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 	List steps;
 	ListIterator i;
 	step_loc_t *stepd;
@@ -3771,7 +4176,7 @@ _rpc_suspend_job(slurm_msg_t *msg)
 	 * detected with the request */
 	if (msg->conn_fd >= 0) {
 		slurm_send_rc_msg(msg, rc);
-		if (slurm_close_accepted_conn(msg->conn_fd) < 0)
+		if (slurm_close(msg->conn_fd) < 0)
 			error("_rpc_suspend_job: close(%d): %m",
 			      msg->conn_fd);
 		msg->conn_fd = -1;
@@ -3913,7 +4318,7 @@ _rpc_suspend_job(slurm_msg_t *msg)
 			break;
 	}
 	list_iterator_destroy(i);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 
 	if ((req->op == RESUME_JOB) && (req->indf_susp))
 		switch_g_job_resume(req->switch_info, 5);
@@ -3943,7 +4348,8 @@ static void
 _rpc_abort_job(slurm_msg_t *msg)
 {
 	kill_job_msg_t *req    = msg->data;
-	uid_t           uid    = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t           uid    = g_slurm_auth_get_uid(msg->auth_cred,
+						      slurm_get_auth_info());
 	job_env_t       job_env;
 
 	debug("_rpc_abort_job, uid = %d", uid);
@@ -3977,13 +4383,12 @@ _rpc_abort_job(slurm_msg_t *msg)
 	 */
 	if (msg->conn_fd >= 0) {
 		slurm_send_rc_msg(msg, SLURM_SUCCESS);
-		if (slurm_close_accepted_conn(msg->conn_fd) < 0)
+		if (slurm_close(msg->conn_fd) < 0)
 			error ("rpc_abort_job: close(%d): %m", msg->conn_fd);
 		msg->conn_fd = -1;
 	}
 
-	if ((xcpu_signal(SIGKILL, req->nodes) +
-	     _kill_all_active_steps(req->job_id, SIG_ABORT, true)) ) {
+	if (_kill_all_active_steps(req->job_id, SIG_ABORT, true)) {
 		/*
 		 *  Block until all user processes are complete.
 		 */
@@ -4140,6 +4545,23 @@ _rpc_terminate_batch_job(uint32_t job_id, uint32_t user_id, char *node_name)
 	_waiter_complete(job_id);
 }
 
+static void _handle_old_batch_job_launch(slurm_msg_t *msg)
+{
+	if (msg->msg_type != REQUEST_BATCH_JOB_LAUNCH) {
+		error("_handle_batch_job_launch: "
+		      "Invalid response msg_type (%u)", msg->msg_type);
+		return;
+	}
+
+	/* (resp_msg.msg_type == REQUEST_BATCH_JOB_LAUNCH) */
+	debug2("Processing RPC: REQUEST_BATCH_JOB_LAUNCH");
+	last_slurmctld_msg = time(NULL);
+	_rpc_batch_job(msg, false);
+	slurm_free_job_launch_msg(msg->data);
+	msg->data = NULL;
+
+}
+
 /* This complete batch RPC came from slurmstepd because we have select/serial
  * configured. Terminate the job here. Forward the batch completion RPC to
  * slurmctld and possible get a new batch launch RPC in response. */
@@ -4147,9 +4569,21 @@ static void
 _rpc_complete_batch(slurm_msg_t *msg)
 {
 	int		i, rc, msg_rc;
-	slurm_msg_t	req_msg, resp_msg;
-	uid_t           uid    = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	slurm_msg_t	resp_msg;
+	uid_t           uid    = g_slurm_auth_get_uid(msg->auth_cred,
+						      slurm_get_auth_info());
 	complete_batch_script_msg_t *req = msg->data;
+	static int	running_serial = -1;
+	uint16_t msg_type;
+
+	if (running_serial == -1) {
+		char *select_type = slurm_get_select_type();
+		if (!strcmp(select_type, "select/serial"))
+			running_serial = 1;
+		else
+			running_serial = 0;
+		xfree(select_type);
+	}
 
 	if (!_slurm_authorized_user(uid)) {
 		error("Security violation: complete_batch(%u) from uid %d",
@@ -4158,17 +4592,39 @@ _rpc_complete_batch(slurm_msg_t *msg)
 			slurm_send_rc_msg(msg, ESLURM_USER_ID_MISSING);
 		return;
 	}
-
+	info("got batch finish");
 	slurm_send_rc_msg(msg, SLURM_SUCCESS);
-	_rpc_terminate_batch_job(req->job_id, req->user_id, req->node_name);
 
-	slurm_msg_t_init(&req_msg);
-	req_msg.msg_type= REQUEST_COMPLETE_BATCH_JOB;
-	req_msg.data	= msg->data;
+	if (running_serial) {
+		_rpc_terminate_batch_job(
+			req->job_id, req->user_id, req->node_name);
+		msg_type = REQUEST_COMPLETE_BATCH_JOB;
+	} else
+		msg_type = msg->msg_type;
+
 	for (i = 0; i <= MAX_RETRY; i++) {
-		msg_rc = slurm_send_recv_controller_msg(&req_msg, &resp_msg);
-		if (msg_rc == SLURM_SUCCESS)
-			break;
+		if (conf->msg_aggr_window_msgs > 1) {
+			slurm_msg_t *req_msg =
+				xmalloc_nz(sizeof(slurm_msg_t));
+			slurm_msg_t_init(req_msg);
+			req_msg->msg_type = msg_type;
+			req_msg->data = msg->data;
+			msg->data = NULL;
+
+			msg_aggr_add_msg(req_msg, 1,
+					 _handle_old_batch_job_launch);
+			return;
+		} else {
+			slurm_msg_t req_msg;
+			slurm_msg_t_init(&req_msg);
+			req_msg.msg_type = msg_type;
+			req_msg.data	 = msg->data;
+			msg_rc = slurm_send_recv_controller_msg(
+				&req_msg, &resp_msg);
+
+			if (msg_rc == SLURM_SUCCESS)
+				break;
+		}
 		info("Retrying job complete RPC for job %u", req->job_id);
 		sleep(RETRY_DELAY);
 	}
@@ -4188,17 +4644,7 @@ _rpc_complete_batch(slurm_msg_t *msg)
 		return;
 	}
 
-	if (resp_msg.msg_type != REQUEST_BATCH_JOB_LAUNCH) {
-		error("Invalid response msg_type (%u) to complete_batch RPC "
-		      "for job %u", resp_msg.msg_type, req->job_id);
-		return;
-	}
-
-	/* (resp_msg.msg_type == REQUEST_BATCH_JOB_LAUNCH) */
-	debug2("Processing RPC: REQUEST_BATCH_JOB_LAUNCH");
-	last_slurmctld_msg = time(NULL);
-	_rpc_batch_job(&resp_msg, false);
-	slurm_free_job_launch_msg(resp_msg.data);
+	_handle_old_batch_job_launch(&resp_msg);
 }
 
 static void
@@ -4209,9 +4655,12 @@ _rpc_terminate_job(slurm_msg_t *msg)
 #endif
 	int             rc     = SLURM_SUCCESS;
 	kill_job_msg_t *req    = msg->data;
-	uid_t           uid    = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t           uid    = g_slurm_auth_get_uid(msg->auth_cred,
+						      slurm_get_auth_info());
 	int             nsteps = 0;
 	int		delay;
+//	slurm_ctl_conf_t *cf;
+//	struct stat	stat_buf;
 	job_env_t       job_env;
 
 	debug("_rpc_terminate_job, uid = %d", uid);
@@ -4271,7 +4720,7 @@ _rpc_terminate_job(slurm_msg_t *msg)
 			 */
 			debug("sent SUCCESS, waiting for step to start");
 			slurm_send_rc_msg (msg, SLURM_SUCCESS);
-			if (slurm_close_accepted_conn(msg->conn_fd) < 0)
+			if (slurm_close(msg->conn_fd) < 0)
 				error ( "rpc_kill_job: close(%d): %m",
 					msg->conn_fd);
 			msg->conn_fd = -1;
@@ -4296,7 +4745,6 @@ _rpc_terminate_job(slurm_msg_t *msg)
 	 * Tasks might be stopped (possibly by a debugger)
 	 * so send SIGCONT first.
 	 */
-	xcpu_signal(SIGCONT, req->nodes);
 	_kill_all_active_steps(req->job_id, SIGCONT, true);
 	if (errno == ESLURMD_STEP_SUSPENDED) {
 		/*
@@ -4304,11 +4752,9 @@ _rpc_terminate_job(slurm_msg_t *msg)
 		 * bother with a "nice" termination.
 		 */
 		debug2("Job is currently suspended, terminating");
-		nsteps = xcpu_signal(SIGKILL, req->nodes) +
-			_terminate_all_steps(req->job_id, true);
+		nsteps = _terminate_all_steps(req->job_id, true);
 	} else {
-		nsteps = xcpu_signal(SIGTERM, req->nodes) +
-			_kill_all_active_steps(req->job_id, SIGTERM, true);
+		nsteps = _kill_all_active_steps(req->job_id, SIGTERM, true);
 	}
 
 #ifndef HAVE_AIX
@@ -4361,7 +4807,7 @@ _rpc_terminate_job(slurm_msg_t *msg)
 	if (msg->conn_fd >= 0) {
 		debug4("sent SUCCESS");
 		slurm_send_rc_msg(msg, SLURM_SUCCESS);
-		if (slurm_close_accepted_conn(msg->conn_fd) < 0)
+		if (slurm_close(msg->conn_fd) < 0)
 			error ("rpc_kill_job: close(%d): %m", msg->conn_fd);
 		msg->conn_fd = -1;
 	}
@@ -4371,8 +4817,7 @@ _rpc_terminate_job(slurm_msg_t *msg)
 	 */
 	delay = MAX(conf->kill_wait, 5);
 	if ( !_pause_for_job_completion (req->job_id, req->nodes, delay) &&
-	     (xcpu_signal(SIGKILL, req->nodes) +
-	      _terminate_all_steps(req->job_id, true)) ) {
+	     _terminate_all_steps(req->job_id, true) ) {
 		/*
 		 *  Block until all user processes are complete.
 		 */
@@ -4405,8 +4850,8 @@ _rpc_terminate_job(slurm_msg_t *msg)
 				    SELECT_JOBDATA_BLOCK_ID,
 				    &job_env.resv_id);
 #elif defined(HAVE_ALPS_CRAY)
-	job_env.resv_id = select_g_select_jobinfo_xstrdup(req->select_jobinfo,
-							  SELECT_PRINT_RESV_ID);
+	job_env.resv_id = select_g_select_jobinfo_xstrdup(
+		req->select_jobinfo, SELECT_PRINT_RESV_ID);
 #endif
 	rc = _run_epilog(&job_env);
 	xfree(job_env.resv_id);
@@ -4433,6 +4878,7 @@ _rpc_terminate_job(slurm_msg_t *msg)
 	_wait_state_completed(req->job_id, 5);
 	_waiter_complete(req->job_id);
 	_sync_messages_kill(req);
+
 	_epilog_complete(req->job_id, rc);
 }
 
@@ -4576,12 +5022,10 @@ _pause_for_job_completion (uint32_t job_id, char *nodes, int max_time)
 	bool rc = false;
 
 	while ((sec < max_time) || (max_time == 0)) {
-		rc = (_job_still_running (job_id) ||
-			xcpu_signal(0, nodes));
+		rc = _job_still_running (job_id);
 		if (!rc)
 			break;
 		if ((max_time == 0) && (sec > 1)) {
-			xcpu_signal(SIGKILL, nodes);
 			_terminate_all_steps(job_id, true);
 		}
 		if (sec > 10) {
@@ -4610,7 +5054,8 @@ static void
 _rpc_update_time(slurm_msg_t *msg)
 {
 	int   rc      = SLURM_SUCCESS;
-	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	uid_t req_uid = g_slurm_auth_get_uid(msg->auth_cred,
+					     slurm_get_auth_info());
 
 	if ((req_uid != conf->slurm_user_id) && (req_uid != 0)) {
 		rc = ESLURM_USER_ID_MISSING;
@@ -4644,8 +5089,10 @@ _build_env(job_env_t *job_env)
 		job_env->spank_job_env_size = 0;
 		job_env->spank_job_env = (char **) NULL;
 	}
-	if (job_env->spank_job_env_size)
-		env_array_merge(&env, (const char **) job_env->spank_job_env);
+	if (job_env->spank_job_env_size) {
+		env_array_merge_spank(&env,
+				      (const char **) job_env->spank_job_env);
+	}
 
 	slurm_mutex_lock(&conf->config_mutex);
 	setenvf(&env, "SLURMD_NODENAME", "%s", conf->node_name);
@@ -4705,11 +5152,12 @@ _destroy_env(char **env)
 	return;
 }
 
+/* Trigger srun of spank prolog or epilog in slurmstepd */
 static int
 _run_spank_job_script (const char *mode, char **env, uint32_t job_id, uid_t uid)
 {
 	pid_t cpid;
-	int status = 0;
+	int status = 0, timeout;
 	int pfds[2];
 
 	if (pipe (pfds) < 0) {
@@ -4745,9 +5193,9 @@ _run_spank_job_script (const char *mode, char **env, uint32_t job_id, uid_t uid)
 		if (dup2 (pfds[0], STDIN_FILENO) < 0)
 			fatal ("dup2: %m");
 #ifdef SETPGRP_TWO_ARGS
-                setpgrp(0, 0);
+		setpgrp(0, 0);
 #else
-                setpgrp();
+		setpgrp();
 #endif
 		if (conf->chos_loc && !access(conf->chos_loc, X_OK))
 			execve(conf->chos_loc, argv, env);
@@ -4763,11 +5211,9 @@ _run_spank_job_script (const char *mode, char **env, uint32_t job_id, uid_t uid)
 		error ("Failed to send slurmd conf to slurmstepd\n");
 	close (pfds[1]);
 
-	/*
-	 *  Wait for up to 120s for all spank plugins to complete:
-	 */
-	if (waitpid_timeout (mode, cpid, &status, 120) < 0) {
-		error ("spank/%s timed out after 120s", mode);
+	timeout = MAX(slurm_get_prolog_timeout(), 120); /* 120 secs in v15.08 */
+	if (waitpid_timeout (mode, cpid, &status, timeout) < 0) {
+		error ("spank/%s timed out after %u secs", mode, timeout);
 		return (-1);
 	}
 
@@ -4785,7 +5231,6 @@ _run_spank_job_script (const char *mode, char **env, uint32_t job_id, uid_t uid)
 static int _run_job_script(const char *name, const char *path,
 			   uint32_t jobid, int timeout, char **env, uid_t uid)
 {
-	bool have_spank = false;
 	struct stat stat_buf;
 	int status = 0, rc;
 
@@ -4796,8 +5241,6 @@ static int _run_job_script(const char *name, const char *path,
 	 *   prolog/epilog status.
 	 */
 	if (conf->plugstack && (stat(conf->plugstack, &stat_buf) == 0))
-		have_spank = true;
-	if (have_spank)
 		status = _run_spank_job_script(name, env, jobid, uid);
 	if ((rc = run_script(name, path, jobid, timeout, env, uid)))
 		status = rc;
@@ -4813,7 +5256,7 @@ _run_prolog(job_env_t *job_env, slurm_cred_t *cred)
 	char *my_prolog;
 	char **my_env;
 
-	my_env = _build_env(job_env);//
+	my_env = _build_env(job_env);
 	setenvf(&my_env, "SLURM_STEP_ID", "%u", job_env->step_id);
 
 	slurm_mutex_lock(&conf->config_mutex);
@@ -4874,6 +5317,7 @@ _run_prolog(job_env_t *job_env, slurm_cred_t *cred)
 	char *my_prolog;
 	time_t start_time = time(NULL);
 	static uint16_t msg_timeout = 0;
+	static uint16_t timeout;
 	pthread_t       timer_id;
 	pthread_attr_t  timer_attr;
 	pthread_cond_t  timer_cond  = PTHREAD_COND_INITIALIZER;
@@ -4881,21 +5325,24 @@ _run_prolog(job_env_t *job_env, slurm_cred_t *cred)
 	timer_struct_t  timer_struct;
 	bool prolog_fini = false;
 	char **my_env;
-	List job_gres_list = NULL, step_gres_list = NULL;
 
 	my_env = _build_env(job_env);
 	setenvf(&my_env, "SLURM_STEP_ID", "%u", job_env->step_id);
 	if (cred) {
-		get_cred_gres(cred, conf->node_name, &job_gres_list,
-			      &step_gres_list);
-		gres_plugin_job_set_env(&my_env, job_gres_list);
-		FREE_NULL_LIST(job_gres_list);
-		FREE_NULL_LIST(step_gres_list);
+		slurm_cred_arg_t cred_arg;
+		slurm_cred_get_args(cred, &cred_arg);
+		setenvf(&my_env, "SLURM_JOB_CONSTRAINTS", "%s",
+			cred_arg.job_constraints);
+		gres_plugin_job_set_env(&my_env, cred_arg.job_gres_list);
+		slurm_cred_free_args(&cred_arg);
 	}
 
 	if (msg_timeout == 0)
 		msg_timeout = slurm_get_msg_timeout();
 
+	if (timeout == 0)
+		timeout = slurm_get_prolog_timeout();
+
 	slurm_mutex_lock(&conf->config_mutex);
 	my_prolog = xstrdup(conf->prolog);
 	slurm_mutex_unlock(&conf->config_mutex);
@@ -4908,8 +5355,14 @@ _run_prolog(job_env_t *job_env, slurm_cred_t *cred)
 	timer_struct.timer_mutex = &timer_mutex;
 	pthread_create(&timer_id, &timer_attr, &_prolog_timer, &timer_struct);
 	START_TIMER;
-	rc = _run_job_script("prolog", my_prolog, job_env->jobid,
-			     -1, my_env, job_env->uid);
+
+	if (timeout == (uint16_t)NO_VAL)
+		rc = _run_job_script("prolog", my_prolog, job_env->jobid,
+				     -1, my_env, job_env->uid);
+	else
+		rc = _run_job_script("prolog", my_prolog, job_env->jobid,
+				     timeout, my_env, job_env->uid);
+
 	END_TIMER;
 	info("%s: run job script took %s", __func__, TIME_STR);
 	slurm_mutex_lock(&timer_mutex);
@@ -4939,6 +5392,7 @@ _run_epilog(job_env_t *job_env)
 {
 	time_t start_time = time(NULL);
 	static uint16_t msg_timeout = 0;
+	static uint16_t timeout;
 	int error_code, diff_time;
 	char *my_epilog;
 	char **my_env = _build_env(job_env);
@@ -4946,13 +5400,22 @@ _run_epilog(job_env_t *job_env)
 	if (msg_timeout == 0)
 		msg_timeout = slurm_get_msg_timeout();
 
+	if (timeout == 0)
+		timeout = slurm_get_prolog_timeout();
+
 	slurm_mutex_lock(&conf->config_mutex);
 	my_epilog = xstrdup(conf->epilog);
 	slurm_mutex_unlock(&conf->config_mutex);
 
 	_wait_for_job_running_prolog(job_env->jobid);
-	error_code = _run_job_script("epilog", my_epilog, job_env->jobid,
-				     -1, my_env, job_env->uid);
+
+	if (timeout == (uint16_t)NO_VAL)
+		error_code = _run_job_script("epilog", my_epilog, job_env->jobid,
+					     -1, my_env, job_env->uid);
+	else
+		error_code = _run_job_script("epilog", my_epilog, job_env->jobid,
+					     timeout, my_env, job_env->uid);
+
 	xfree(my_epilog);
 	_destroy_env(my_env);
 
@@ -5179,7 +5642,7 @@ init_gids_cache(int cache)
 
 
 static int
-_add_starting_step(slurmd_step_type_t type, void *req)
+_add_starting_step(uint16_t type, void *req)
 {
 	starting_step_t *starting_step;
 	int rc = SLURM_SUCCESS;
@@ -5189,11 +5652,11 @@ _add_starting_step(slurmd_step_type_t type, void *req)
 	slurm_mutex_lock(&conf->starting_steps_lock);
 	starting_step = xmalloc(sizeof(starting_step_t));
 	if (!starting_step) {
-		error("_add_starting_step failed to allocate memory");
+		error("%s failed to allocate memory", __func__);
 		rc = SLURM_FAILURE;
 		goto fail;
 	}
-	switch(type) {
+	switch (type) {
 	case LAUNCH_BATCH_JOB:
 		starting_step->job_id =
 			((batch_job_launch_msg_t *)req)->job_id;
@@ -5206,14 +5669,19 @@ _add_starting_step(slurmd_step_type_t type, void *req)
 		starting_step->step_id =
 			((launch_tasks_request_msg_t *)req)->job_step_id;
 		break;
+	case REQUEST_LAUNCH_PROLOG:
+		starting_step->job_id  = ((prolog_launch_msg_t *)req)->job_id;
+		starting_step->step_id = SLURM_EXTERN_CONT;
+		break;
 	default:
-		error("_add_starting_step called with an invalid type");
+		error("%s called with an invalid type: %u", __func__, type);
 		rc = SLURM_FAILURE;
 		xfree(starting_step);
 		goto fail;
 	}
+
 	if (!list_append(conf->starting_steps, starting_step)) {
-		error("_add_starting_step failed to allocate memory for list");
+		error("%s failed to allocate memory for list", __func__);
 		rc = SLURM_FAILURE;
 		xfree(starting_step);
 		goto fail;
@@ -5226,7 +5694,7 @@ fail:
 
 
 static int
-_remove_starting_step(slurmd_step_type_t type, void *req)
+_remove_starting_step(uint16_t type, void *req)
 {
 	uint32_t job_id, step_id;
 	ListIterator iter;
@@ -5246,7 +5714,7 @@ _remove_starting_step(slurmd_step_type_t type, void *req)
 		step_id = ((launch_tasks_request_msg_t *)req)->job_step_id;
 		break;
 	default:
-		error("_remove_starting_step called with an invalid type");
+		error("%s called with an invalid type: %u", __func__, type);
 		rc = SLURM_FAILURE;
 		goto fail;
 	}
@@ -5264,7 +5732,7 @@ _remove_starting_step(slurmd_step_type_t type, void *req)
 		}
 	}
 	if (!found) {
-		error("_remove_starting_step: step not found");
+		error("%s: step %u.%u not found", __func__, job_id, step_id);
 		rc = SLURM_FAILURE;
 	}
 fail:
@@ -5401,7 +5869,7 @@ static void _remove_job_running_prolog(uint32_t job_id)
 	slurm_mutex_unlock(&conf->prolog_running_lock);
 }
 
-static int _compare_job_running_prolog(void *listentry, void *key)
+static int _match_jobid(void *listentry, void *key)
 {
 	uint32_t *job0 = (uint32_t *)listentry;
 	uint32_t *job1 = (uint32_t *)key;
@@ -5409,16 +5877,22 @@ static int _compare_job_running_prolog(void *listentry, void *key)
 	return (*job0 == *job1);
 }
 
+static int _prolog_is_running (uint32_t jobid)
+{
+	int rc = 0;
+	if (list_find_first (conf->prolog_running_jobs,
+	                     (ListFindF) _match_jobid, &jobid))
+		rc = 1;
+	return (rc);
+}
+
 /* Wait for the job's prolog to complete */
 static void _wait_for_job_running_prolog(uint32_t job_id)
 {
 	debug( "Waiting for job %d's prolog to complete", job_id);
 	slurm_mutex_lock(&conf->prolog_running_lock);
 
-	while (list_find_first( conf->prolog_running_jobs,
-				&_compare_job_running_prolog,
-				&job_id )) {
-
+	while (_prolog_is_running (job_id)) {
 		pthread_cond_wait(&conf->prolog_running_cond,
 				  &conf->prolog_running_lock);
 	}
@@ -5462,7 +5936,8 @@ _rpc_forward_data(slurm_msg_t *msg)
 		goto done;
 	}
 
-	req_uid = (uint32_t)g_slurm_auth_get_uid(msg->auth_cred, NULL);
+	req_uid = (uint32_t)g_slurm_auth_get_uid(msg->auth_cred,
+						 slurm_get_auth_info());
 	/*
 	 * although always in localhost, we still convert it to network
 	 * byte order, to make it consistent with pack/unpack.
diff --git a/src/slurmd/slurmd/slurmd.c b/src/slurmd/slurmd/slurmd.c
index efff0cdda..dfb416f0f 100644
--- a/src/slurmd/slurmd/slurmd.c
+++ b/src/slurmd/slurmd/slurmd.c
@@ -75,6 +75,7 @@
 #include "src/common/list.h"
 #include "src/common/log.h"
 #include "src/common/macros.h"
+#include "src/common/msg_aggr.h"
 #include "src/common/node_conf.h"
 #include "src/common/node_select.h"
 #include "src/common/pack.h"
@@ -90,6 +91,7 @@
 #include "src/common/slurm_jobacct_gather.h"
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_route.h"
+#include "src/common/slurm_strcasestr.h"
 #include "src/common/slurm_topology.h"
 #include "src/common/stepd_api.h"
 #include "src/common/switch.h"
@@ -103,6 +105,7 @@
 #include "src/slurmd/common/job_container_plugin.h"
 #include "src/slurmd/common/proctrack.h"
 #include "src/slurmd/common/slurmd_cgroup.h"
+#include "src/slurmd/common/xcpuinfo.h"
 #include "src/slurmd/slurmd/get_mach_stat.h"
 #include "src/slurmd/slurmd/req.h"
 #include "src/slurmd/slurmd/slurmd.h"
@@ -166,6 +169,7 @@ static void      _decrement_thd_count(void);
 static void      _destroy_conf(void);
 static int       _drain_node(char *reason);
 static void      _fill_registration_msg(slurm_node_registration_status_msg_t *);
+static uint64_t  _get_int(const char *my_str);
 static void      _handle_connection(slurm_fd_t fd, slurm_addr_t *client);
 static void      _hup_handler(int);
 static void      _increment_thd_count(void);
@@ -175,6 +179,7 @@ static bool      _is_core_spec_cray(void);
 static void      _kill_old_slurmd(void);
 static int       _memory_spec_init(void);
 static void      _msg_engine(void);
+static uint64_t  _parse_msg_aggr_params(int type, char *params);
 static void      _print_conf(void);
 static void      _print_config(void);
 static void      _process_cmdline(int ac, char **av);
@@ -186,6 +191,7 @@ static int       _resource_spec_init(void);
 static int       _restore_cred_state(slurm_cred_ctx_t ctx);
 static void      _select_spec_cores(void);
 static void     *_service_connection(void *);
+static void      _set_msg_aggr_params(void);
 static int       _set_slurmd_spooldir(void);
 static int       _set_topo_info(void);
 static int       _slurmd_init(void);
@@ -358,6 +364,9 @@ main (int argc, char *argv[])
 		fatal("failed to initialize slurmd_plugstack");
 
 	_spawn_registration_engine();
+	msg_aggr_sender_init(conf->hostname, conf->port,
+			     conf->msg_aggr_window_time,
+			     conf->msg_aggr_window_msgs);
 	_msg_engine();
 
 	/*
@@ -580,10 +589,12 @@ _service_connection(void *arg)
 		goto cleanup;
 	}
 	debug2("got this type of message %d", msg->msg_type);
-	slurmd_req(msg);
+
+	if (msg->msg_type != MESSAGE_COMPOSITE)
+		slurmd_req(msg);
 
 cleanup:
-	if ((msg->conn_fd >= 0) && slurm_close_accepted_conn(msg->conn_fd) < 0)
+	if ((msg->conn_fd >= 0) && slurm_close(msg->conn_fd) < 0)
 		error ("close(%d): %m", con->fd);
 
 	xfree(con->cli_addr);
@@ -597,26 +608,36 @@ extern int
 send_registration_msg(uint32_t status, bool startup)
 {
 	int rc, ret_val = SLURM_SUCCESS;
-	slurm_msg_t req;
 	slurm_node_registration_status_msg_t *msg =
 		xmalloc (sizeof (slurm_node_registration_status_msg_t));
 
-	slurm_msg_t_init(&req);
-
 	msg->startup = (uint16_t) startup;
 	_fill_registration_msg(msg);
 	msg->status  = status;
 
-	req.msg_type = MESSAGE_NODE_REGISTRATION_STATUS;
-	req.data     = msg;
+	if (conf->msg_aggr_window_msgs > 1) {
+		slurm_msg_t *req = xmalloc_nz(sizeof(slurm_msg_t));
+
+		slurm_msg_t_init(req);
+		req->msg_type = MESSAGE_NODE_REGISTRATION_STATUS;
+		req->data     = msg;
 
-	if (slurm_send_recv_controller_rc_msg(&req, &rc) < 0) {
-		error("Unable to register: %m");
-		ret_val = SLURM_FAILURE;
+		msg_aggr_add_msg(req, 1, NULL);
 	} else {
-		sent_reg_time = time(NULL);
+		slurm_msg_t req;
+		slurm_msg_t_init(&req);
+		req.msg_type = MESSAGE_NODE_REGISTRATION_STATUS;
+		req.data     = msg;
+
+		if (slurm_send_recv_controller_rc_msg(&req, &rc) < 0) {
+			error("Unable to register: %m");
+			ret_val = SLURM_FAILURE;
+		}
+		slurm_free_node_registration_status_msg(msg);
 	}
-	slurm_free_node_registration_status_msg (msg);
+
+	if (ret_val == SLURM_SUCCESS)
+		sent_reg_time = time(NULL);
 
 	return ret_val;
 }
@@ -650,6 +671,7 @@ _fill_registration_msg(slurm_node_registration_status_msg_t *msg)
 	msg->tmp_disk    = conf->tmp_disk_space;
 	msg->hash_val    = slurm_get_hash_val();
 	get_cpu_load(&msg->cpu_load);
+	get_free_mem(&msg->free_mem);
 
 	gres_info = init_buf(1024);
 	if (gres_plugin_node_config_pack(gres_info) != SLURM_SUCCESS)
@@ -722,21 +744,23 @@ _fill_registration_msg(slurm_node_registration_status_msg_t *msg)
 		}
 
 		close(fd);
-		if (stepd->stepid == NO_VAL)
-			debug("found apparently running job %u", stepd->jobid);
-		else
-			debug("found apparently running step %u.%u",
-			      stepd->jobid, stepd->stepid);
+		if (stepd->stepid == NO_VAL) {
+			debug("%s: found apparently running job %u",
+			      __func__, stepd->jobid);
+		} else {
+			debug("%s: found apparently running step %u.%u",
+			      __func__, stepd->jobid, stepd->stepid);
+		}
 		msg->job_id[n]  = stepd->jobid;
 		msg->step_id[n] = stepd->stepid;
 		n++;
 	}
 	list_iterator_destroy(i);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 
 	if (!msg->energy)
-		msg->energy = acct_gather_energy_alloc();
-	acct_gather_energy_g_get_data(ENERGY_DATA_STRUCT, msg->energy);
+		msg->energy = acct_gather_energy_alloc(1);
+	acct_gather_energy_g_get_data(ENERGY_DATA_NODE_ENERGY, msg->energy);
 
 	msg->timestamp = time(NULL);
 
@@ -959,6 +983,9 @@ _read_config(void)
 		      xstrdup(cf->acct_gather_profile_type));
 	_free_and_set(conf->job_acct_gather_type,
 		      xstrdup(cf->job_acct_gather_type));
+	_free_and_set(conf->msg_aggr_params,
+		      xstrdup(cf->msg_aggr_params));
+	_set_msg_aggr_params();
 
 	if ( (conf->node_name == NULL) ||
 	     (conf->node_name[0] == '\0') )
@@ -998,6 +1025,10 @@ _reconfigure(void)
 	slurm_topo_build_config();
 	_set_topo_info();
 	route_g_reconfigure();
+	cpu_freq_reconfig();
+
+	msg_aggr_sender_reconfig(conf->msg_aggr_window_time,
+				 conf->msg_aggr_window_msgs);
 
 	/*
 	 * In case the administrator changed the cpu frequency set capabilities
@@ -1048,14 +1079,15 @@ _reconfigure(void)
 		close(fd);
 	}
 	list_iterator_destroy(i);
-	list_destroy(steps);
+	FREE_NULL_LIST(steps);
 
 	gres_plugin_reconfig(&did_change);
 	(void) switch_g_reconfig();
 	container_g_reconfig();
 	if (did_change) {
 		uint32_t cpu_cnt = MAX(conf->conf_cpus, conf->block_map_size);
-		(void) gres_plugin_node_config_load(cpu_cnt, conf->node_name);
+		(void) gres_plugin_node_config_load(cpu_cnt, conf->node_name,
+						    NULL);
 		send_registration_msg(SLURM_SUCCESS, false);
 	}
 
@@ -1202,6 +1234,7 @@ _destroy_conf(void)
 		xfree(conf->job_acct_gather_freq);
 		xfree(conf->job_acct_gather_type);
 		xfree(conf->logfile);
+		xfree(conf->msg_aggr_params);
 		xfree(conf->node_name);
 		xfree(conf->node_addr);
 		xfree(conf->node_topo_addr);
@@ -1217,10 +1250,10 @@ _destroy_conf(void)
 		xfree(conf->task_epilog);
 		xfree(conf->tmpfs);
 		slurm_mutex_destroy(&conf->config_mutex);
-		list_destroy(conf->starting_steps);
+		FREE_NULL_LIST(conf->starting_steps);
 		slurm_mutex_destroy(&conf->starting_steps_lock);
 		pthread_cond_destroy(&conf->starting_steps_cond);
-		list_destroy(conf->prolog_running_jobs);
+		FREE_NULL_LIST(conf->prolog_running_jobs);
 		slurm_mutex_destroy(&conf->prolog_running_lock);
 		pthread_cond_destroy(&conf->prolog_running_cond);
 		slurm_cred_ctx_destroy(conf->vctx);
@@ -1444,7 +1477,7 @@ _slurmd_init(void)
 	cpu_cnt = MAX(conf->conf_cpus, conf->block_map_size);
 
 	if ((gres_plugin_init() != SLURM_SUCCESS) ||
-	    (gres_plugin_node_config_load(cpu_cnt, conf->node_name)
+	    (gres_plugin_node_config_load(cpu_cnt, conf->node_name, NULL)
 	     != SLURM_SUCCESS))
 		return SLURM_FAILURE;
 	if (slurm_topo_init() != SLURM_SUCCESS)
@@ -1761,6 +1794,7 @@ _term_handler(int signum)
 		_shutdown = 1;
 		if (msg_pthread && (pthread_self() != msg_pthread))
 			pthread_kill(msg_pthread, SIGTERM);
+		msg_aggr_sender_fini();
 	}
 }
 
@@ -1954,6 +1988,60 @@ static int _set_topo_info(void)
 	return rc;
 }
 
+static uint64_t _get_int(const char *my_str)
+{
+	char *end = NULL;
+	uint64_t value;
+
+	if (!my_str)
+		return NO_VAL;
+	value = strtol(my_str, &end, 10);
+	if (my_str == end)
+		return NO_VAL;
+	return value;
+}
+
+static uint64_t _parse_msg_aggr_params(int type, char *params)
+{
+	uint64_t value = NO_VAL;
+	char *sub_str = NULL;
+
+	if (!params)
+		return NO_VAL;
+
+	switch (type) {
+	case WINDOW_TIME:
+		if ((sub_str = slurm_strcasestr(params, "WindowTime=")))
+			value = _get_int(sub_str + 11);
+		break;
+	case WINDOW_MSGS:
+		if ((sub_str = slurm_strcasestr(params, "WindowMsgs=")))
+			value = _get_int(sub_str + 11);
+		break;
+	default:
+		fatal("invalid message aggregation parameters: %s", params);
+	}
+	return value;
+}
+
+static void _set_msg_aggr_params(void)
+{
+	conf->msg_aggr_window_time = _parse_msg_aggr_params(WINDOW_TIME,
+			       conf->msg_aggr_params);
+	conf->msg_aggr_window_msgs = _parse_msg_aggr_params(WINDOW_MSGS,
+			       conf->msg_aggr_params);
+
+	if (conf->msg_aggr_window_time == NO_VAL)
+		conf->msg_aggr_window_time = DEFAULT_MSG_AGGR_WINDOW_TIME;
+	if (conf->msg_aggr_window_msgs == NO_VAL)
+		conf->msg_aggr_window_msgs = DEFAULT_MSG_AGGR_WINDOW_MSGS;
+	if (conf->msg_aggr_window_msgs > 1) {
+		info("Message aggregation enabled: WindowMsgs=%"PRIu64", WindowTime=%"PRIu64,
+		     conf->msg_aggr_window_msgs, conf->msg_aggr_window_time);
+	} else
+		info("Message aggregation disabled");
+}
+
 /*
  * Initialize resource specialization
  */
@@ -1986,7 +2074,8 @@ static int _core_spec_init(void)
 	pid_t pid;
 
 	if ((conf->core_spec_cnt == 0) && (conf->cpu_spec_list == NULL)) {
-		info("No specialized cores configured by default on this node");
+		debug("Resource spec: No specialized cores configured by "
+		      "default on this node");
 		return SLURM_SUCCESS;
 	}
 	if (_is_core_spec_cray()) {	/* No need to use cgroups */
@@ -2080,6 +2169,11 @@ static int _memory_spec_init(void)
 		      "system memory cgroup");
 		return SLURM_ERROR;
 	}
+	if (disable_system_cgroup_mem_oom()) {
+		error("Resource spec: unable to disable OOM Killer in "
+		      "system memory cgroup");
+		return SLURM_ERROR;
+	}
 	pid = getpid();
 	if (attach_system_memory_pid(pid) != SLURM_SUCCESS) {
 		error("Resource spec: unable to attach slurmd to "
diff --git a/src/slurmd/slurmd/slurmd.h b/src/slurmd/slurmd/slurmd.h
index 385667d99..d25381578 100644
--- a/src/slurmd/slurmd/slurmd.h
+++ b/src/slurmd/slurmd/slurmd.h
@@ -61,8 +61,6 @@
 #include "src/common/slurm_protocol_api.h"
 #include "src/common/slurm_cred.h"
 
-#include "src/slurmd/common/xcpuinfo.h"
-
 #ifndef __USE_XOPEN_EXTENDED
 extern pid_t getsid(pid_t pid);		/* missing from <unistd.h> */
 extern pid_t getpgid(pid_t pid);
@@ -70,6 +68,14 @@ extern pid_t getpgid(pid_t pid);
 
 extern int devnull;
 
+/*
+ * Message aggregation types
+ */
+typedef enum {
+	WINDOW_TIME,
+	WINDOW_MSGS
+} msg_aggr_param_type_t;
+
 /*
  * Global config type
  */
@@ -153,8 +159,11 @@ typedef struct slurmd_config {
 	char           *acct_gather_filesystem_type; /*  */
 	char           *acct_gather_infiniband_type; /*  */
 	char           *acct_gather_profile_type; /*  */
+	char           *msg_aggr_params;      /* message aggregation params */
+	uint64_t        msg_aggr_window_msgs; /* msg aggr window size in msgs */
+	uint64_t        msg_aggr_window_time; /* msg aggr window size in time */
 	uint16_t	use_pam;
-	uint16_t	task_plugin_param; /* TaskPluginParams, expressed
+	uint32_t	task_plugin_param; /* TaskPluginParams, expressed
 					 * using cpu_bind_type_t flags */
 	uint16_t	propagate_prio;	/* PropagatePrioProcess flag       */
 
diff --git a/src/slurmd/slurmd/xcpu.c b/src/slurmd/slurmd/xcpu.c
deleted file mode 100644
index 2ecac5f8f..000000000
--- a/src/slurmd/slurmd/xcpu.c
+++ /dev/null
@@ -1,148 +0,0 @@
-/*****************************************************************************\
- *  src/slurmd/slurmd/xcpu.c - xcpu-based process management functions
- *****************************************************************************
- *  Copyright (C) 2006 The Regents of the University of California.
- *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
- *  Written by Morris Jette <jette1@llnl.gov>.
- *  CODE-OCEC-09-009. All rights reserved.
- *
- *  This file is part of SLURM, a resource management program.
- *  For details, see <http://slurm.schedmd.com/>.
- *  Please also read the included file: DISCLAIMER.
- *
- *  SLURM is free software; you can redistribute it and/or modify it under
- *  the terms of the GNU General Public License as published by the Free
- *  Software Foundation; either version 2 of the License, or (at your option)
- *  any later version.
- *
- *  In addition, as a special exception, the copyright holders give permission 
- *  to link the code of portions of this program with the OpenSSL library under 
- *  certain conditions as described in each individual source file, and 
- *  distribute linked combinations including the two. You must obey the GNU 
- *  General Public License in all respects for all of the code used other than 
- *  OpenSSL. If you modify file(s) with this exception, you may extend this 
- *  exception to your version of the file(s), but you are not obligated to do 
- *  so. If you do not wish to do so, delete this exception statement from your
- *  version.  If you delete this exception statement from all source files in 
- *  the program, then also delete it here.
- *
- *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
- *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
- *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
- *  details.
- *
- *  You should have received a copy of the GNU General Public License along
- *  with SLURM; if not, write to the Free Software Foundation, Inc.,
- *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-\*****************************************************************************/
-
-#if HAVE_CONFIG_H
-#  include "config.h"
-#endif
-
-#ifdef HAVE_XCPU
-
-#include <fcntl.h>
-#include <stdio.h>
-#include <signal.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <dirent.h>
-#include <stdlib.h>
-#include <unistd.h>
-
-#include "src/common/hostlist.h"
-#include "src/common/log.h"
-
-/* Write a message to a given file name, return 1 on success, 0 on failure */
-static int _send_sig(char *path, int sig, char *msg)
-{
-	int fd, len, rc = 0;
-
-	fd = open(path, O_WRONLY | O_APPEND);
-	if (fd == -1)
-		return 0;
-
-	if (sig == 0)
-		rc = 1;
-	else {
-		debug2("%s to %s", msg, path);
-		len = strlen(msg) + 1;
-		write(fd, msg, len);
-		rc = 1;
-	}
-
-	close(fd);
-	return rc;
-}
-
-static char *_sig_name(int sig)
-{
-	static char name[8];
-
-	switch(sig) {
-	case SIGCONT:
-		return "SIGCONT";
-	case SIGKILL:
-		return "SIGKILL";
-	case SIGTERM:
-		return "SIGTERM";
-	default:
-		snprintf(name, sizeof(name), "%d", sig);
-		return name;
-	}
-
-}
-
-/* Identify every XCPU process in a specific node and signal it.
- * Return the process count */
-extern int xcpu_signal(int sig, char *nodes)
-{
-	int procs = 0;
-	hostlist_t hl;
-	char *node, sig_msg[64], dir_path[128], ctl_path[200];
-	DIR *dir;
-	struct dirent *sub_dir;
-
-	/* Translate "nodes" to a hostlist */
-	hl = hostlist_create(nodes);
-	if (hl == NULL) {
-		error("hostlist_create: %m");
-		return 0;
-	}
-
-	/* Plan 9 only takes strings, so we map number to name */
-	snprintf(sig_msg, sizeof(sig_msg), "signal %s",
-		_sig_name(sig));
-
-	/* For each node, look for processes */
-	while ((node = hostlist_shift(hl))) {
-		snprintf(dir_path, sizeof(dir_path), 
-			"%s/%s/xcpu",
-			XCPU_DIR, node);
-		free(node);
-		if ((dir = opendir(dir_path)) == NULL) {
-			error("opendir(%s): %m", dir_path);
-			continue;
-		}
-		while ((sub_dir = readdir(dir))) {
-			snprintf(ctl_path, sizeof(ctl_path),
-				"%s/%s/ctl",dir_path, 
-				sub_dir->d_name);
-			procs += _send_sig(ctl_path, sig, sig_msg);
-		}
-		closedir(dir);
-	}
-
-	hostlist_destroy(hl);
-	return procs;
-}
-
-#else
-
-extern int xcpu_signal(int sig, char *nodes)
-{
-	return 0;
-}
-#endif
diff --git a/src/slurmd/slurmstepd/Makefile.in b/src/slurmd/slurmstepd/Makefile.in
index c80614e91..3c727a8c6 100644
--- a/src/slurmd/slurmstepd/Makefile.in
+++ b/src/slurmd/slurmstepd/Makefile.in
@@ -102,6 +102,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -110,10 +111,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -126,7 +129,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -256,6 +259,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -305,8 +310,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -325,6 +334,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -368,6 +380,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -391,6 +404,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/slurmd/slurmstepd/io.c b/src/slurmd/slurmstepd/io.c
index c844726cd..3d51c19b1 100644
--- a/src/slurmd/slurmstepd/io.c
+++ b/src/slurmd/slurmstepd/io.c
@@ -1932,6 +1932,7 @@ _user_managed_io_connect(srun_info_t *srun, uint32_t gtid)
 	slurm_msg_t msg;
 
 	slurm_msg_t_init(&msg);
+	msg.protocol_version = srun->protocol_version;
 	msg.msg_type = TASK_USER_MANAGED_IO_STREAM;
 	msg.data = &user_io_msg;
 	user_io_msg.task_id = gtid;
diff --git a/src/slurmd/slurmstepd/mgr.c b/src/slurmd/slurmstepd/mgr.c
index 81e3dc798..bd69c413f 100644
--- a/src/slurmd/slurmstepd/mgr.c
+++ b/src/slurmd/slurmstepd/mgr.c
@@ -90,6 +90,7 @@
 #include "src/common/env.h"
 #include "src/common/fd.h"
 #include "src/common/forward.h"
+#include "src/common/gres.h"
 #include "src/common/hostlist.h"
 #include "src/common/log.h"
 #include "src/common/mpi.h"
@@ -115,6 +116,7 @@
 #include "src/slurmd/common/run_script.h"
 #include "src/slurmd/common/reverse_tree.h"
 #include "src/slurmd/common/set_oomadj.h"
+#include "src/slurmd/common/xcpuinfo.h"
 
 #include "src/slurmd/slurmstepd/slurmstepd.h"
 #include "src/slurmd/slurmstepd/mgr.h"
@@ -177,7 +179,7 @@ typedef struct kill_thread {
  */
 static int  _access(const char *path, int modes, uid_t uid, gid_t gid);
 static void _send_launch_failure(launch_tasks_request_msg_t *,
-				 slurm_addr_t *, int);
+				 slurm_addr_t *, int, uint16_t);
 static int  _drain_node(char *reason);
 static int  _fork_all_tasks(stepd_step_rec_t *job, bool *io_initialized);
 static int  _become_user(stepd_step_rec_t *job, struct priv_state *ps);
@@ -228,17 +230,17 @@ static stepd_step_rec_t *reattach_job;
  */
 extern stepd_step_rec_t *
 mgr_launch_tasks_setup(launch_tasks_request_msg_t *msg, slurm_addr_t *cli,
-		       slurm_addr_t *self)
+		       slurm_addr_t *self, uint16_t protocol_version)
 {
 	stepd_step_rec_t *job = NULL;
 
-	if (!(job = stepd_step_rec_create(msg))) {
+	if (!(job = stepd_step_rec_create(msg, protocol_version))) {
 		/* We want to send back to the slurmd the reason we
 		   failed so keep track of it since errno could be
 		   reset in _send_launch_failure.
 		*/
 		int fail = errno;
-		_send_launch_failure (msg, cli, errno);
+		_send_launch_failure(msg, cli, errno, protocol_version);
 		errno = fail;
 		return NULL;
 	}
@@ -250,6 +252,7 @@ mgr_launch_tasks_setup(launch_tasks_request_msg_t *msg, slurm_addr_t *cli,
 	job->envtp->cli = cli;
 	job->envtp->self = self;
 	job->envtp->select_jobinfo = msg->select_jobinfo;
+	job->accel_bind_type = msg->accel_bind_type;
 
 	return job;
 }
@@ -348,7 +351,7 @@ static int _call_select_plugin_from_stepd(stepd_step_rec_t *job,
 	int rc;
 
 	fake_job_record.job_id		= job->jobid;
-	fake_job_record.job_state	= (uint16_t)NO_VAL;
+	fake_job_record.job_state	= NO_VAL;
 	fake_job_record.select_jobinfo	= select_g_select_jobinfo_alloc();
 	select_g_select_jobinfo_set(fake_job_record.select_jobinfo,
 				    SELECT_JOBDATA_RESV_ID, &job->resv_id);
@@ -553,8 +556,7 @@ _setup_normal_io(stepd_step_rec_t *job)
 						job->task[ii]->id,
 						same ? job->task[ii]->id : -2);
 					if (rc != SLURM_SUCCESS) {
-						error("Could not open output "
-						      "file %s: %m",
+						error("Could not open output file %s: %m",
 						      job->task[ii]->ofname);
 						rc = ESLURMD_IO_ERROR;
 						goto claim;
@@ -570,8 +572,7 @@ _setup_normal_io(stepd_step_rec_t *job)
 					file_flags, job, job->labelio,
 					-1, same ? -1 : -2);
 				if (rc != SLURM_SUCCESS) {
-					error("Could not open output "
-					      "file %s: %m",
+					error("Could not open output file %s: %m",
 					      job->task[0]->ofname);
 					rc = ESLURMD_IO_ERROR;
 					goto claim;
@@ -592,9 +593,7 @@ _setup_normal_io(stepd_step_rec_t *job)
 							job->labelio,
 							-2, job->task[ii]->id);
 						if (rc != SLURM_SUCCESS) {
-							error("Could not "
-							      "open error "
-							      "file %s: %m",
+							error("Could not open error file %s: %m",
 							      job->task[ii]->
 							      efname);
 							rc = ESLURMD_IO_ERROR;
@@ -609,8 +608,7 @@ _setup_normal_io(stepd_step_rec_t *job)
 						file_flags, job, job->labelio,
 						-2, -1);
 					if (rc != SLURM_SUCCESS) {
-						error("Could not open error "
-						      "file %s: %m",
+						error("Could not open error file %s: %m",
 						      job->task[0]->efname);
 						rc = ESLURMD_IO_ERROR;
 						goto claim;
@@ -712,6 +710,10 @@ _send_exit_msg(stepd_step_rec_t *job, uint32_t *tid, int n, int status)
 		    (resp.address.sin_addr.s_addr == 0))
 			continue;	/* no srun or sattach here */
 
+		/* This should always be set to something else we have a bug. */
+		xassert(srun->protocol_version);
+		resp.protocol_version = srun->protocol_version;
+
 		if (_send_srun_resp_msg(&resp, job->nnodes) != SLURM_SUCCESS)
 			error("Failed to send MESSAGE_TASK_EXIT: %m");
 	}
@@ -762,6 +764,18 @@ _wait_for_children_slurmstepd(stepd_step_rec_t *job)
 	pthread_mutex_unlock(&step_complete.lock);
 }
 
+/* If accounting by the job is minimal (i.e. just our "sleep"), then don't
+ * send accounting information to slurmctld */
+static bool _minimal_acctg(stepd_step_rec_t *job)
+{
+	if (!job->jobacct)			/* No accounting data */
+		return true;
+	if ((job->jobacct->sys_cpu_sec == 0) &&	/* No measurable usage */
+	    (job->jobacct->user_cpu_sec == 0))
+		return true;
+
+	return false;
+}
 
 /*
  * Send a single step completion message, which represents a single range
@@ -782,15 +796,18 @@ _one_step_complete_msg(stepd_step_rec_t *job, int first, int last)
 
 	debug2("_one_step_complete_msg: first=%d, last=%d", first, last);
 
-	memset(&msg, 0, sizeof(step_complete_msg_t));
-	msg.job_id = job->jobid;
-	msg.job_step_id = job->stepid;
+	if ((job->stepid == SLURM_EXTERN_CONT) && _minimal_acctg(job))
+		return;
+
 	if (job->batch) {	/* Nested batch step anomalies */
 		if (first == -1)
 			first = 0;
 		if (last == -1)
 			last = 0;
 	}
+	memset(&msg, 0, sizeof(step_complete_msg_t));
+	msg.job_id = job->jobid;
+	msg.job_step_id = job->stepid;
 	msg.range_first = first;
 	msg.range_last = last;
 	msg.step_rc = step_complete.step_rc;
@@ -832,7 +849,27 @@ _one_step_complete_msg(stepd_step_rec_t *job, int first, int last)
 		/* on error AGAIN, send to the slurmctld instead */
 		debug3("Rank %d sending complete to slurmctld instead, range "
 		       "%d to %d", step_complete.rank, first, last);
-	} else {
+	} else if (conf->msg_aggr_window_msgs > 1) {
+		/* this is the base of the tree, its parent is slurmctld */
+		debug3("Rank %d sending complete to slurmd for message aggr, "
+		       "range %d to %d",
+		       step_complete.rank, first, last);
+		/* this is the base of the tree, but we are doing
+		 * message aggr so send it to the slurmd to handle */
+		req.msg_type = REQUEST_STEP_COMPLETE_AGGR;
+		slurm_set_addr_char(&req.address, conf->port, "localhost");
+		for (i = 0; i <= REVERSE_TREE_PARENT_RETRY; i++) {
+			if (i)
+				sleep(1);
+			retcode = slurm_send_recv_rc_msg_only_one(&req, &rc, 0);
+			if ((retcode == 0) && (rc == 0))
+				goto finished;
+		}
+		req.msg_type = REQUEST_STEP_COMPLETE;
+		/* this is the base of the tree, its parent is slurmctld */
+		debug3("Rank %d sending complete to slurmctld instead, range "
+		       "%d to %d", step_complete.rank, first, last);
+	}  else {
 		/* this is the base of the tree, its parent is slurmctld */
 		debug3("Rank %d sending complete to slurmctld, range %d to %d",
 		       step_complete.rank, first, last);
@@ -953,6 +990,69 @@ extern void agent_queue_request(void *dummy)
 	      "checkpoint plugin");
 }
 
+static int _spawn_job_container(stepd_step_rec_t *job)
+{
+	jobacctinfo_t *jobacct = NULL;
+	struct rusage rusage;
+	jobacct_id_t jobacct_id;
+	int status = 0;
+	pid_t pid;
+
+	acct_gather_profile_g_task_start(0);
+	pid = fork();
+	if (pid == 0) {
+		setpgid(0, 0);
+		setsid();
+		acct_gather_profile_g_child_forked();
+		/* Need to exec() something for proctrack/linuxproc to work,
+		 * it will not keep a process named "slurmstepd" */
+		execl(SLEEP_CMD, "sleep", "1000000", NULL);
+		error("execl: %m");
+		sleep(1);
+		exit(0);
+	} else if (pid < 0) {
+		error("fork: %m");
+		return SLURM_ERROR;
+	}
+
+	job->pgid = pid;
+	proctrack_g_add(job, pid);
+
+	jobacct_id.nodeid = job->nodeid;
+	jobacct_id.taskid = job->nodeid;   /* Treat node ID as global task ID */
+	jobacct_id.job    = job;
+	jobacct_gather_set_proctrack_container_id(job->cont_id);
+	jobacct_gather_add_task(pid, &jobacct_id, 1);
+	container_g_add_cont(job->jobid, job->cont_id);
+
+	job->state = SLURMSTEPD_STEP_RUNNING;
+	if (!conf->job_acct_gather_freq)
+		jobacct_gather_stat_task(0);
+
+	while ((wait4(pid, &status, 0, &rusage) < 0) && (errno == EINTR)) {
+		;	       /* Wait until above processs exits from signal */
+	}
+
+	jobacct = jobacct_gather_remove_task(pid);
+	if (jobacct) {
+		jobacctinfo_setinfo(jobacct,
+				    JOBACCT_DATA_RUSAGE, &rusage,
+				    SLURM_PROTOCOL_VERSION);
+		job->jobacct->energy.consumed_energy = 0;
+		jobacctinfo_aggregate(job->jobacct, jobacct);
+		jobacctinfo_destroy(jobacct);
+	}
+	acct_gather_profile_g_task_end(pid);
+	step_complete.rank = job->nodeid;
+
+	acct_gather_profile_endpoll();
+	acct_gather_profile_g_node_step_end();
+	acct_gather_profile_fini();
+	_send_step_complete_msgs(job);
+
+	return SLURM_SUCCESS;
+}
+
 /*
  * Executes the functions of the slurmd job manager process,
  * which runs as root and performs shared memory and interconnect
@@ -996,7 +1096,7 @@ job_manager(stepd_step_rec_t *job)
 		goto fail1;
 	}
 
-	if (!job->batch &&
+	if (!job->batch && (job->stepid != SLURM_EXTERN_CONT) &&
 	    (switch_g_job_preinit(job->switch_job) < 0)) {
 		rc = ESLURM_INTERCONNECT_FAILURE;
 		goto fail1;
@@ -1009,6 +1109,14 @@ job_manager(stepd_step_rec_t *job)
 		goto fail1;
 	}
 
+	if (job->stepid == SLURM_EXTERN_CONT)
+		return _spawn_job_container(job);
+
+	if (!job->batch && job->accel_bind_type) {
+		(void) gres_plugin_node_config_load(conf->cpus, conf->node_name,
+						    (void *)&xcpuinfo_abs_to_mac);
+	}
+
 #ifdef HAVE_ALPS_CRAY
 	/*
 	 * Note that the previously called proctrack_g_create function is
@@ -1072,6 +1180,23 @@ job_manager(stepd_step_rec_t *job)
 		goto fail2;
 	}
 
+	if (!job->batch && job->accel_bind_type && (job->node_tasks <= 1))
+		job->accel_bind_type = 0;
+	if (!job->batch && job->accel_bind_type && (job->node_tasks > 1)) {
+		uint64_t gpu_cnt, mic_cnt, nic_cnt;
+		gpu_cnt = gres_plugin_step_count(job->step_gres_list, "gpu");
+		mic_cnt = gres_plugin_step_count(job->step_gres_list, "mic");
+		nic_cnt = gres_plugin_step_count(job->step_gres_list, "nic");
+		if ((gpu_cnt <= 1) || (gpu_cnt == NO_VAL64))
+			job->accel_bind_type &= (~ACCEL_BIND_CLOSEST_GPU);
+		if ((mic_cnt <= 1) || (mic_cnt == NO_VAL64))
+			job->accel_bind_type &= (~ACCEL_BIND_CLOSEST_MIC);
+		if ((nic_cnt <= 1) || (nic_cnt == NO_VAL64))
+			job->accel_bind_type &= (~ACCEL_BIND_CLOSEST_NIC);
+		if (job->accel_bind_type == ACCEL_BIND_VERBOSE)
+			job->accel_bind_type = 0;
+	}
+
 	/* Calls pam_setup() and requires pam_finish() if
 	 * successful.  Only check for < 0 here since other slurm
 	 * error codes could come that are more descriptive. */
@@ -1155,7 +1280,11 @@ fail2:
 	 */
 	task_g_post_step(job);
 
-	if (job->cpu_freq != NO_VAL)
+	/*
+	 * Reset cpu frequency if it was changed
+	 */
+	if (job->cpu_freq_min != NO_VAL || job->cpu_freq_max != NO_VAL ||
+	    job->cpu_freq_gov != NO_VAL)
 		cpu_freq_reset(job);
 
 	/* Notify srun of completion AFTER frequency reset to avoid race
@@ -1224,13 +1353,13 @@ struct exec_wait_info {
 	int childfd;
 };
 
-static struct exec_wait_info * exec_wait_info_create (int i)
+static struct exec_wait_info * _exec_wait_info_create (int i)
 {
 	int fdpair[2];
 	struct exec_wait_info * e;
 
 	if (pipe (fdpair) < 0) {
-		error ("exec_wait_info_create: pipe: %m");
+		error ("_exec_wait_info_create: pipe: %m");
 		return NULL;
 	}
 
@@ -1246,36 +1375,40 @@ static struct exec_wait_info * exec_wait_info_create (int i)
 	return (e);
 }
 
-static void exec_wait_info_destroy (struct exec_wait_info *e)
+static void _exec_wait_info_destroy (struct exec_wait_info *e)
 {
 	if (e == NULL)
 		return;
 
-	if (e->parentfd >= 0)
+	if (e->parentfd >= 0) {
 		close (e->parentfd);
-	if (e->childfd >= 0)
+		e->parentfd = -1;
+	}
+	if (e->childfd >= 0) {
 		close (e->childfd);
+		e->childfd = -1;
+	}
 	e->id = -1;
 	e->pid = -1;
 	xfree(e);
 }
 
-static pid_t exec_wait_get_pid (struct exec_wait_info *e)
+static pid_t _exec_wait_get_pid (struct exec_wait_info *e)
 {
 	if (e == NULL)
 		return (-1);
 	return (e->pid);
 }
 
-static struct exec_wait_info * fork_child_with_wait_info (int id)
+static struct exec_wait_info * _fork_child_with_wait_info (int id)
 {
 	struct exec_wait_info *e;
 
-	if (!(e = exec_wait_info_create (id)))
+	if (!(e = _exec_wait_info_create (id)))
 		return (NULL);
 
 	if ((e->pid = fork ()) < 0) {
-		exec_wait_info_destroy (e);
+		_exec_wait_info_destroy (e);
 		return (NULL);
 	}
 	/*
@@ -1284,20 +1417,19 @@ static struct exec_wait_info * fork_child_with_wait_info (int id)
 	if (e->pid == 0) {
 		close (e->parentfd);
 		e->parentfd = -1;
-	}
-	else {
+	} else {
 		close (e->childfd);
 		e->childfd = -1;
 	}
 	return (e);
 }
 
-static int exec_wait_child_wait_for_parent (struct exec_wait_info *e)
+static int _exec_wait_child_wait_for_parent (struct exec_wait_info *e)
 {
 	char c;
 
 	if (read (e->childfd, &c, sizeof (c)) != 1)
-		return error ("wait_for_parent: failed: %m");
+		return error ("_exec_wait_child_wait_for_parent: failed: %m");
 
 	return (0);
 }
@@ -1407,7 +1539,7 @@ _fork_all_tasks(stepd_step_rec_t *job, bool *io_initialized)
 
 	set_oom_adj(0);	/* the tasks may be killed by OOM */
 	if (task_g_pre_setuid(job)) {
-		error("Failed task affinity setup");
+		error("Failed to invoke task plugins: one of task_p_pre_setuid functions returned error");
 		return SLURM_ERROR;
 	}
 
@@ -1486,7 +1618,7 @@ _fork_all_tasks(stepd_step_rec_t *job, bool *io_initialized)
 		goto fail4;
 	}
 
-	exec_wait_list = list_create ((ListDelF) exec_wait_info_destroy);
+	exec_wait_list = list_create ((ListDelF) _exec_wait_info_destroy);
 	if (!exec_wait_list) {
 		error ("Unable to create exec_wait_list");
 		rc = SLURM_ERROR;
@@ -1502,19 +1634,19 @@ _fork_all_tasks(stepd_step_rec_t *job, bool *io_initialized)
 		struct exec_wait_info *ei;
 
 		acct_gather_profile_g_task_start(i);
-		if ((ei = fork_child_with_wait_info (i)) == NULL) {
+		if ((ei = _fork_child_with_wait_info (i)) == NULL) {
 			error("child fork: %m");
 			exec_wait_kill_children (exec_wait_list);
 			rc = SLURM_ERROR;
 			goto fail4;
-		} else if ((pid = exec_wait_get_pid (ei)) == 0)  { /* child */
+		} else if ((pid = _exec_wait_get_pid (ei)) == 0)  { /* child */
 			/*
 			 *  Destroy exec_wait_list in the child.
 			 *   Only exec_wait_info for previous tasks have been
 			 *   added to the list so far, so everything else
 			 *   can be discarded.
 			 */
-			list_destroy (exec_wait_list);
+			FREE_NULL_LIST (exec_wait_list);
 
 #ifdef HAVE_AIX
 			(void) mkcrid(0);
@@ -1561,7 +1693,7 @@ _fork_all_tasks(stepd_step_rec_t *job, bool *io_initialized)
 			 *   children in any process groups or containers
 			 *   before they make a call to exec(2).
 			 */
-			if (exec_wait_child_wait_for_parent (ei) < 0)
+			if (_exec_wait_child_wait_for_parent (ei) < 0)
 				exit (1);
 
 			exec_task(job, i);
@@ -1657,7 +1789,7 @@ _fork_all_tasks(stepd_step_rec_t *job, bool *io_initialized)
 	 * Now it's ok to unblock the tasks, so they may call exec.
 	 */
 	list_for_each (exec_wait_list, (ListForF) exec_wait_signal, job);
-	list_destroy (exec_wait_list);
+	FREE_NULL_LIST (exec_wait_list);
 
 	for (i = 0; i < job->node_tasks; i++) {
 		/*
@@ -1679,8 +1811,7 @@ fail4:
 	}
 fail3:
 	_reclaim_privileges (&sprivs);
-	if (exec_wait_list)
-		list_destroy (exec_wait_list);
+	FREE_NULL_LIST (exec_wait_list);
 fail2:
 	io_close_task_fds(job);
 fail1:
@@ -2083,7 +2214,8 @@ static int _drain_node(char *reason)
 }
 
 static void
-_send_launch_failure (launch_tasks_request_msg_t *msg, slurm_addr_t *cli, int rc)
+_send_launch_failure(launch_tasks_request_msg_t *msg, slurm_addr_t *cli, int rc,
+		     uint16_t protocol_version)
 {
 	slurm_msg_t resp_msg;
 	launch_tasks_response_msg_t resp;
@@ -2106,6 +2238,7 @@ _send_launch_failure (launch_tasks_request_msg_t *msg, slurm_addr_t *cli, int rc
 		       NULL);
 	resp_msg.data = &resp;
 	resp_msg.msg_type = RESPONSE_LAUNCH_TASKS;
+	resp_msg.protocol_version = protocol_version;
 
 	resp.node_name     = name;
 	resp.return_code   = rc ? rc : -1;
@@ -2132,6 +2265,7 @@ _send_launch_resp(stepd_step_rec_t *job, int rc)
 
 	slurm_msg_t_init(&resp_msg);
 	resp_msg.address	= srun->resp_addr;
+	resp_msg.protocol_version = srun->protocol_version;
 	resp_msg.data		= &resp;
 	resp_msg.msg_type	= RESPONSE_LAUNCH_TASKS;
 
@@ -2164,9 +2298,13 @@ _send_complete_batch_script_msg(stepd_step_rec_t *job, int err, int status)
 	char *		select_type;
 	bool		msg_to_ctld;
 
-	select_type = slurm_get_select_type();
-	msg_to_ctld = strcmp(select_type, "select/serial");
-	xfree(select_type);
+	if (conf->msg_aggr_window_msgs > 1)
+		msg_to_ctld = 0;
+	else {
+		select_type = slurm_get_select_type();
+		msg_to_ctld = strcmp(select_type, "select/serial");
+		xfree(select_type);
+	}
 
 	req.job_id	= job->jobid;
 	req.job_rc      = status;
@@ -2230,6 +2368,15 @@ _drop_privileges(stepd_step_rec_t *job, bool do_setuid,
 		strncpy (ps->saved_cwd, "/tmp", sizeof (ps->saved_cwd));
 	}
 
+#ifdef HAVE_NATIVE_CRAY
+	/* LDAP on Native Cray is not sufficiently scalable to support the
+	 * getgroups() call */
+	ps->ngids = 1;
+	if (get_list) {
+		ps->gid_list = (gid_t *) xmalloc(sizeof(gid_t));
+		ps->gid_list[0] = job->gid;
+	}
+#else
 	ps->ngids = getgroups(0, NULL);
 	if (get_list) {
 		ps->gid_list = (gid_t *) xmalloc(ps->ngids * sizeof(gid_t));
@@ -2241,6 +2388,7 @@ _drop_privileges(stepd_step_rec_t *job, bool do_setuid,
 			return -1;
 		}
 	}
+#endif
 
 	/*
 	 * No need to drop privileges if we're not running as root
@@ -2430,10 +2578,34 @@ _become_user(stepd_step_rec_t *job, struct priv_state *ps)
 	return SLURM_SUCCESS;
 }
 
+#ifndef HAVE_NATIVE_CRAY
+/* _get_primary_group()
+ */
+static int
+_get_primary_group(const char *user, gid_t *gid)
+{
+	struct passwd pwd;
+	struct passwd *pwd0 = NULL;
+	char buf[256];
+	int cc;
+
+	cc = getpwnam_r(user, &pwd, buf, sizeof(buf), &pwd0);
+	if (cc != 0) {
+		error("%s: getpwnam_r() failed: %m", __func__);
+		return -1;
+	}
+
+	*gid = pwd0->pw_gid;
+	return 0;
+}
+#endif
 
 static int
 _initgroups(stepd_step_rec_t *job)
 {
+#ifndef HAVE_NATIVE_CRAY
+	gid_t primary_gid = 0;
+#endif
 	int rc;
 
 	if (job->ngids > 0) {
@@ -2453,6 +2625,48 @@ _initgroups(stepd_step_rec_t *job)
 		}
 		return -1;
 	}
+
+#ifndef HAVE_NATIVE_CRAY
+	/* LDAP on Native Cray is not sufficiently scalable to support the
+	 * getpwnam_r() call */
+	rc = _get_primary_group(job->user_name, &primary_gid);
+	if (rc < 0) {
+		error("%s: _get_primary_group() failed", __func__);
+		return -1;
+	}
+	/* If job->gid is not the primary group for the
+	 * user job->user_name then add the primary group
+	 * in the list of user groups.
+	 */
+	if (primary_gid != job->gid) {
+		int ngroups_max = sysconf(_SC_NGROUPS_MAX);
+		gid_t grps[ngroups_max];
+		int size, max;
+
+		max = ngroups_max;
+
+		size = getgrouplist(job->user_name,
+				    job->gid,
+				    grps,
+				    &ngroups_max);
+		if (size < 0) {
+			error("%s: getgrouplist() failed: %m", __func__);
+			return -1;
+		}
+		if (size > max - 1) {
+			error("\
+%s: too many groups %d max %d for user %s groups %d %d\n",
+			      __func__, size, max, job->user_name,
+			      primary_gid, job->gid);
+		}
+		grps[size++] = primary_gid;
+
+		if (setgroups(size, grps)) {
+			error("%s: setgroups() failed: %m", __func__);
+			return -1;
+		}
+	}
+#endif
 	return 0;
 }
 
@@ -2516,11 +2730,11 @@ _run_script_as_user(const char *name, const char *path, stepd_step_rec_t *job,
 		return -1;
 	}
 
-	if ((ei = fork_child_with_wait_info(0)) == NULL) {
+	if ((ei = _fork_child_with_wait_info(0)) == NULL) {
 		error ("executing %s: fork: %m", name);
 		return -1;
 	}
-	if ((cpid = exec_wait_get_pid (ei)) == 0) {
+	if ((cpid = _exec_wait_get_pid (ei)) == 0) {
 		struct priv_state sprivs;
 		char *argv[2];
 
@@ -2562,16 +2776,25 @@ _run_script_as_user(const char *name, const char *path, stepd_step_rec_t *job,
 		/*
 		 *  Wait for signal from parent
 		 */
-		exec_wait_child_wait_for_parent (ei);
-
-		execve(path, argv, env);
-		error("execve(%s): %m", path);
+		_exec_wait_child_wait_for_parent (ei);
+
+		while (1) {
+			execve(path, argv, env);
+			error("execve(%s): %m", path);
+			if ((errno == ENFILE) || (errno = ENOMEM)) {
+				/* System limit on open files or memory reached,
+				 * retry after short delay */
+				sleep(1);
+			} else {
+				break;
+			}
+		}
 		exit(127);
 	}
 
 	if (exec_wait_signal_child (ei) < 0)
 		error ("run_script_as_user: Failed to wakeup %s", name);
-	exec_wait_info_destroy (ei);
+	_exec_wait_info_destroy (ei);
 
 	if (max_wait < 0)
 		opt = 0;
diff --git a/src/slurmd/slurmstepd/mgr.h b/src/slurmd/slurmstepd/mgr.h
index a4c2982e0..f47a2f327 100644
--- a/src/slurmd/slurmstepd/mgr.h
+++ b/src/slurmd/slurmstepd/mgr.h
@@ -56,13 +56,15 @@ void batch_finish(stepd_step_rec_t *job, int rc);
  * Initialize a stepd_step_rec_t structure for a launch tasks
  */
 stepd_step_rec_t *mgr_launch_tasks_setup(launch_tasks_request_msg_t *msg,
-				     slurm_addr_t *client, slurm_addr_t *self);
+					 slurm_addr_t *client,
+					 slurm_addr_t *self,
+					 uint16_t protocol_version);
 
 /*
  * Initialize a stepd_step_rec_t structure for a batch job
  */
 stepd_step_rec_t *mgr_launch_batch_job_setup(batch_job_launch_msg_t *msg,
-					 slurm_addr_t *client);
+					     slurm_addr_t *client);
 
 /*
  * Finalize a batch job.
diff --git a/src/slurmd/slurmstepd/pam_ses.c b/src/slurmd/slurmstepd/pam_ses.c
index 84da0f7ab..06ef22151 100644
--- a/src/slurmd/slurmstepd/pam_ses.c
+++ b/src/slurmd/slurmstepd/pam_ses.c
@@ -74,9 +74,9 @@ int
 pam_setup (char *user, char *host)
 {
 	/*
-	 * Any application using PAM must provide a conversion function, which
+	 * Any application using PAM must provide a conversation function, which
 	 * is used for direct communication between a loaded module and the
-	 * application. In this case, SLURM does need a communication mechanism,
+	 * application. In this case, SLURM does not need a communication mechanism,
 	 * so the default (or null) conversation function may be used.
 	 */
 	struct pam_conv conv = {misc_conv, NULL};
@@ -91,36 +91,45 @@ pam_setup (char *user, char *host)
 	 * a user, the limits imposed by the sys admin are picked up. Opening
 	 * a PAM session requires a PAM handle, which is obtained when the PAM
 	 * interface is initialized. (PAM handles are required with essentially
-	 * all PAM calls.) It's also necessary to have the users PAM credentials
+	 * all PAM calls.) It's also necessary to have the user's PAM credentials
 	 * to open a user session.
  	 */
         if ((rc = pam_start (SLURM_SERVICE_PAM, user, &conv, &pam_h))
 			!= PAM_SUCCESS) {
-                error ("pam_start: %s", pam_strerror(pam_h, rc));
-                return SLURM_ERROR;
+                error ("pam_start: %s", pam_strerror(NULL, rc));
+                goto fail1;
         } else if ((rc = pam_set_item (pam_h, PAM_USER, user))
 			!= PAM_SUCCESS) {
                 error ("pam_set_item USER: %s", pam_strerror(pam_h, rc));
-                return SLURM_ERROR;
+                goto fail2;
         } else if ((rc = pam_set_item (pam_h, PAM_RUSER, user))
 			!= PAM_SUCCESS) {
                 error ("pam_set_item RUSER: %s", pam_strerror(pam_h, rc));
-                return SLURM_ERROR;
+                goto fail2;
         } else if ((rc = pam_set_item (pam_h, PAM_RHOST, host))
 			!= PAM_SUCCESS) {
                 error ("pam_set_item HOST: %s", pam_strerror(pam_h, rc));
-              return SLURM_ERROR;
+                goto fail2;
         } else if ((rc = pam_setcred (pam_h, PAM_ESTABLISH_CRED))
 			!= PAM_SUCCESS) {
-                error ("pam_setcred: %s", pam_strerror(pam_h, rc));
-                return SLURM_ERROR;
+                error ("pam_setcred ESTABLISH: %s", pam_strerror(pam_h, rc));
+                goto fail2;
         } else if ((rc = pam_open_session (pam_h, 0)) != PAM_SUCCESS) {
                 error("pam_open_session: %s", pam_strerror(pam_h, rc));
-                return SLURM_ERROR;
+                goto fail3;
         }
 
 	return SLURM_SUCCESS;
 
+fail3:
+        pam_setcred (pam_h, PAM_DELETE_CRED);
+
+fail2:
+        pam_end (pam_h, rc);
+
+fail1:
+        pam_h = NULL;
+        return SLURM_ERROR;
 }
 
 
@@ -143,9 +152,14 @@ pam_finish ()
 		 */
                 if ((rc = pam_close_session (pam_h, 0)) != PAM_SUCCESS) {
                         error("pam_close_session: %s", pam_strerror(pam_h, rc));
-                } else if (pam_end (pam_h, rc) != PAM_SUCCESS) {
-                        error("pam_end: %s", pam_strerror(pam_h, rc));
                 }
+                if ((rc = pam_setcred (pam_h, PAM_DELETE_CRED)) != PAM_SUCCESS){
+                        error("pam_setcred DELETE: %s", pam_strerror(pam_h,rc));
+                }
+                if ((rc = pam_end (pam_h, rc)) != PAM_SUCCESS) {
+                        error("pam_end: %s", pam_strerror(NULL, rc));
+                }
+                pam_h = NULL;
         }
 }
 
diff --git a/src/slurmd/slurmstepd/req.c b/src/slurmd/slurmstepd/req.c
index ea8e42a67..31945cbd6 100644
--- a/src/slurmd/slurmstepd/req.c
+++ b/src/slurmd/slurmstepd/req.c
@@ -248,7 +248,7 @@ msg_thr_create(stepd_step_rec_t *job)
 	fd_set_nonblocking(fd);
 
 	eio_obj = eio_obj_create(fd, &msg_socket_ops, (void *)job);
-	job->msg_handle = eio_handle_create();
+	job->msg_handle = eio_handle_create(0);
 	eio_new_initial_obj(job->msg_handle, eio_obj);
 
 	slurm_attr_init(&attr);
@@ -419,8 +419,8 @@ _handle_accept(void *arg)
 	}
 
 	/* Get the uid & gid from the credential, then destroy it. */
-	uid = g_slurm_auth_get_uid(auth_cred, NULL);
-	gid = g_slurm_auth_get_gid(auth_cred, NULL);
+	uid = g_slurm_auth_get_uid(auth_cred, slurm_get_auth_info());
+	gid = g_slurm_auth_get_gid(auth_cred, slurm_get_auth_info());
 	debug3("  Identity: uid=%d, gid=%d", uid, gid);
 	g_slurm_auth_destroy(auth_cred);
 	free_buf(buffer);
@@ -1101,7 +1101,10 @@ _handle_attach(int fd, stepd_step_rec_t *job, uid_t uid)
 	safe_read(fd, &srun->ioaddr, sizeof(slurm_addr_t));
 	safe_read(fd, &srun->resp_addr, sizeof(slurm_addr_t));
 	safe_read(fd, srun->key, SLURM_IO_KEY_SIZE);
+	safe_read(fd, &srun->protocol_version, sizeof(int));
 
+	if (!srun->protocol_version)
+		srun->protocol_version = (uint16_t)NO_VAL;
 	/*
 	 * Check if jobstep is actually running.
 	 */
@@ -1353,8 +1356,10 @@ _handle_resume(int fd, stepd_step_rec_t *job, uid_t uid)
 	if (!job->batch && switch_g_job_step_post_resume(job))
 		error("switch_g_job_step_post_resume: %m");
 	/* set the cpu frequencies if cpu_freq option used */
-	if (job->cpu_freq != NO_VAL)
+	if (job->cpu_freq_min != NO_VAL || job->cpu_freq_max != NO_VAL ||
+	    job->cpu_freq_gov != NO_VAL) {
 		cpu_freq_set(job);
+	}
 
 	pthread_mutex_unlock(&suspend_mutex);
 
diff --git a/src/slurmd/slurmstepd/slurmstepd.c b/src/slurmd/slurmstepd/slurmstepd.c
index 7c970bfd7..3539b1119 100644
--- a/src/slurmd/slurmstepd/slurmstepd.c
+++ b/src/slurmd/slurmstepd/slurmstepd.c
@@ -77,11 +77,11 @@ static void _dump_user_env(void);
 static void _send_ok_to_slurmd(int sock);
 static void _send_fail_to_slurmd(int sock);
 static stepd_step_rec_t *_step_setup(slurm_addr_t *cli, slurm_addr_t *self,
-				 slurm_msg_t *msg);
+				     slurm_msg_t *msg);
 #ifdef MEMORY_LEAK_DEBUG
 static void _step_cleanup(stepd_step_rec_t *job, slurm_msg_t *msg, int rc);
 #endif
-static int process_cmdline (int argc, char *argv[]);
+static int _process_cmdline (int argc, char *argv[]);
 
 int slurmstepd_blocked_signals[] = {
 	SIGPIPE, 0
@@ -102,7 +102,7 @@ main (int argc, char *argv[])
 	gid_t *gids;
 	int rc = 0;
 
-	if (process_cmdline (argc, argv) < 0)
+	if (_process_cmdline (argc, argv) < 0)
 		fatal ("Error in slurmstepd command line");
 
 	xsignal_block(slurmstepd_blocked_signals);
@@ -172,14 +172,14 @@ ending:
 
 	xfree(cli);
 	xfree(self);
-	xfree(conf->hostname);
 	xfree(conf->block_map);
 	xfree(conf->block_map_inv);
-	xfree(conf->spooldir);
+	xfree(conf->hostname);
+	xfree(conf->logfile);
 	xfree(conf->node_name);
 	xfree(conf->node_topo_addr);
 	xfree(conf->node_topo_pattern);
-	xfree(conf->logfile);
+	xfree(conf->spooldir);
 	xfree(conf);
 #endif
 	info("done with job");
@@ -318,7 +318,7 @@ static int _handle_spank_mode (int argc, char *argv[])
 /*
  *  Process special "modes" of slurmstepd passed as cmdline arguments.
  */
-static int process_cmdline (int argc, char *argv[])
+static int _process_cmdline (int argc, char *argv[])
 {
 	if ((argc == 2) && (strcmp(argv[1], "getenv") == 0)) {
 		print_rlimits();
@@ -378,7 +378,7 @@ _init_from_slurmd(int sock, char **argv,
 	char *incoming_buffer = NULL;
 	Buf buffer;
 	int step_type;
-	int len;
+	int len, proto;
 	slurm_addr_t *cli = NULL;
 	slurm_addr_t *self = NULL;
 	slurm_msg_t *msg = NULL;
@@ -456,6 +456,9 @@ _init_from_slurmd(int sock, char **argv,
 	/* Receive cpu_frequency info from slurmd */
 	cpu_freq_recv_info(sock);
 
+	/* get the protocol version of the srun */
+	safe_read(sock, &proto, sizeof(int));
+
 	/* receive req from slurmd */
 	safe_read(sock, &len, sizeof(int));
 	incoming_buffer = xmalloc(sizeof(char) * len);
@@ -464,9 +467,9 @@ _init_from_slurmd(int sock, char **argv,
 
 	msg = xmalloc(sizeof(slurm_msg_t));
 	slurm_msg_t_init(msg);
-	msg->protocol_version = SLURM_PROTOCOL_VERSION;
+	msg->protocol_version = (uint16_t)proto;
 
-	switch(step_type) {
+	switch (step_type) {
 	case LAUNCH_BATCH_JOB:
 		msg->msg_type = REQUEST_BATCH_JOB_LAUNCH;
 		break;
@@ -474,7 +477,7 @@ _init_from_slurmd(int sock, char **argv,
 		msg->msg_type = REQUEST_LAUNCH_TASKS;
 		break;
 	default:
-		fatal("Unrecognized launch RPC");
+		fatal("%s: Unrecognized launch RPC (%d)", __func__, step_type);
 		break;
 	}
 	if (unpack_msg(msg, buffer) == SLURM_ERROR)
@@ -513,14 +516,15 @@ _step_setup(slurm_addr_t *cli, slurm_addr_t *self, slurm_msg_t *msg)
 {
 	stepd_step_rec_t *job = NULL;
 
-	switch(msg->msg_type) {
+	switch (msg->msg_type) {
 	case REQUEST_BATCH_JOB_LAUNCH:
 		debug2("setup for a batch_job");
 		job = mgr_launch_batch_job_setup(msg->data, cli);
 		break;
 	case REQUEST_LAUNCH_TASKS:
 		debug2("setup for a launch_task");
-		job = mgr_launch_tasks_setup(msg->data, cli, self);
+		job = mgr_launch_tasks_setup(msg->data, cli, self,
+					     msg->protocol_version);
 		break;
 	default:
 		fatal("handle_launch_message: Unrecognized launch RPC");
@@ -544,7 +548,7 @@ _step_setup(slurm_addr_t *cli, slurm_addr_t *self, slurm_msg_t *msg)
 	if (msg->msg_type == REQUEST_BATCH_JOB_LAUNCH)
 		gres_plugin_job_set_env(&job->env, job->job_gres_list);
 	else if (msg->msg_type == REQUEST_LAUNCH_TASKS)
-		gres_plugin_step_set_env(&job->env, job->step_gres_list);
+		gres_plugin_step_set_env(&job->env, job->step_gres_list, 0);
 
 	/*
 	 * Add slurmd node topology informations to job env array
diff --git a/src/slurmd/slurmstepd/slurmstepd_job.c b/src/slurmd/slurmstepd/slurmstepd_job.c
index 1de80d768..f55318c4c 100644
--- a/src/slurmd/slurmstepd/slurmstepd_job.c
+++ b/src/slurmd/slurmstepd/slurmstepd_job.c
@@ -298,7 +298,7 @@ _task_info_destroy(stepd_step_task_info_t *t, uint16_t multi_prog)
 
 /* create a slurmd job structure from a launch tasks message */
 extern stepd_step_rec_t *
-stepd_step_rec_create(launch_tasks_request_msg_t *msg)
+stepd_step_rec_create(launch_tasks_request_msg_t *msg, uint16_t protocol_version)
 {
 	stepd_step_rec_t  *job = NULL;
 	srun_info_t   *srun = NULL;
@@ -351,7 +351,9 @@ stepd_step_rec_create(launch_tasks_request_msg_t *msg)
 	job->cpu_bind = xstrdup(msg->cpu_bind);
 	job->mem_bind_type = msg->mem_bind_type;
 	job->mem_bind = xstrdup(msg->mem_bind);
-	job->cpu_freq = msg->cpu_freq;
+	job->cpu_freq_min = msg->cpu_freq_min;
+	job->cpu_freq_max = msg->cpu_freq_max;
+	job->cpu_freq_gov = msg->cpu_freq_gov;
 	job->ckpt_dir = xstrdup(msg->ckpt_dir);
 	job->restart_dir = xstrdup(msg->restart_dir);
 	job->cpus_per_task = msg->cpus_per_task;
@@ -368,7 +370,7 @@ stepd_step_rec_create(launch_tasks_request_msg_t *msg)
 			job->array_task_id = atoi(msg->env[i] + 20);
 	}
 
-	job->eio     = eio_handle_create();
+	job->eio     = eio_handle_create(0);
 	job->sruns   = list_create((ListDelF) _srun_info_destructor);
 	job->clients = list_create(NULL); /* FIXME! Needs destructor */
 	job->stdout_eio_objs = list_create(NULL); /* FIXME! Needs destructor */
@@ -392,13 +394,19 @@ stepd_step_rec_create(launch_tasks_request_msg_t *msg)
 	job->envtp->mem_bind_type = 0;
 	job->envtp->mem_bind = NULL;
 	job->envtp->ckpt_dir = NULL;
-	job->envtp->comm_port = msg->resp_port[nodeid % msg->num_resp_port];
-
-	memcpy(&resp_addr, &msg->orig_addr, sizeof(slurm_addr_t));
-	slurm_set_addr(&resp_addr,
-		       msg->resp_port[nodeid % msg->num_resp_port],
-		       NULL);
+	if (!msg->resp_port)
+		msg->num_resp_port = 0;
+	if (msg->num_resp_port) {
+		job->envtp->comm_port =
+			msg->resp_port[nodeid % msg->num_resp_port];
+		memcpy(&resp_addr, &msg->orig_addr, sizeof(slurm_addr_t));
+		slurm_set_addr(&resp_addr,
+			       msg->resp_port[nodeid % msg->num_resp_port],
+			       NULL);
+	}
 	job->user_managed_io = msg->user_managed_io;
+	if (!msg->io_port)
+		msg->user_managed_io = 1;
 	if (!msg->user_managed_io) {
 		memcpy(&io_addr,   &msg->orig_addr, sizeof(slurm_addr_t));
 		slurm_set_addr(&io_addr,
@@ -406,7 +414,8 @@ stepd_step_rec_create(launch_tasks_request_msg_t *msg)
 			       NULL);
 	}
 
-	srun = srun_info_create(msg->cred, &resp_addr, &io_addr);
+	srun = srun_info_create(msg->cred, &resp_addr, &io_addr,
+				protocol_version);
 
 	job->buffered_stdio = msg->buffered_stdio;
 	job->labelio = msg->labelio;
@@ -531,7 +540,7 @@ batch_stepd_step_rec_create(batch_job_launch_msg_t *msg)
 	job->restart_dir = xstrdup(msg->restart_dir);
 
 	job->env     = _array_copy(msg->envc, msg->environment);
-	job->eio     = eio_handle_create();
+	job->eio     = eio_handle_create(0);
 	job->sruns   = list_create((ListDelF) _srun_info_destructor);
 	job->envtp   = xmalloc(sizeof(env_t));
 	job->envtp->jobid = -1;
@@ -564,7 +573,7 @@ batch_stepd_step_rec_create(batch_job_launch_msg_t *msg)
 	get_cred_gres(msg->cred, conf->node_name,
 		      &job->job_gres_list, &job->step_gres_list);
 
-	srun = srun_info_create(NULL, NULL, NULL);
+	srun = srun_info_create(NULL, NULL, NULL, (uint16_t)NO_VAL);
 
 	list_append(job->sruns, (void *) srun);
 
@@ -613,7 +622,7 @@ stepd_step_rec_destroy(stepd_step_rec_t *job)
 
 	for (i = 0; i < job->node_tasks; i++)
 		_task_info_destroy(job->task[i], job->multi_prog);
-	list_destroy(job->sruns);
+	FREE_NULL_LIST(job->sruns);
 	xfree(job->envtp);
 	xfree(job->node_name);
 	mpmd_free(job);
@@ -627,7 +636,8 @@ stepd_step_rec_destroy(stepd_step_rec_t *job)
 }
 
 extern srun_info_t *
-srun_info_create(slurm_cred_t *cred, slurm_addr_t *resp_addr, slurm_addr_t *ioaddr)
+srun_info_create(slurm_cred_t *cred, slurm_addr_t *resp_addr,
+		 slurm_addr_t *ioaddr, uint16_t protocol_version)
 {
 	char             *data = NULL;
 	uint32_t          len  = 0;
@@ -635,7 +645,9 @@ srun_info_create(slurm_cred_t *cred, slurm_addr_t *resp_addr, slurm_addr_t *ioad
 	srun_key_t       *key  = xmalloc(sizeof(srun_key_t));
 
 	srun->key    = key;
-
+	if (!protocol_version || (protocol_version == (uint16_t)NO_VAL))
+		protocol_version = SLURM_PROTOCOL_VERSION;
+	srun->protocol_version = protocol_version;
 	/*
 	 * If no credential was provided, return the empty
 	 * srun info object. (This is used, for example, when
diff --git a/src/slurmd/slurmstepd/slurmstepd_job.h b/src/slurmd/slurmstepd/slurmstepd_job.h
index 121ef36a4..cdf0698bb 100644
--- a/src/slurmd/slurmstepd/slurmstepd_job.h
+++ b/src/slurmd/slurmstepd/slurmstepd_job.h
@@ -72,6 +72,7 @@ typedef struct {
 	slurm_addr_t ioaddr;       /* Address to connect on for normal I/O.
 				      Spawn IO uses messages to the normal
 				      resp_addr. */
+	uint16_t protocol_version; /* protocol_version of the srun */
 } srun_info_t;
 
 typedef enum {
@@ -152,7 +153,10 @@ typedef struct {
 	char          *cpu_bind;       /* binding map for map/mask_cpu      */
 	mem_bind_type_t mem_bind_type; /* --mem_bind=                       */
 	char          *mem_bind;       /* binding map for tasks to memory   */
-	uint32_t       cpu_freq;       /* requested cpu frequency           */
+	uint16_t accel_bind_type;  /* --accel_bind= */
+	uint32_t cpu_freq_min; /* Minimum cpu frequency  */
+	uint32_t cpu_freq_max; /* Maximum cpu frequency  */
+	uint32_t cpu_freq_gov; /* cpu frequency governor */
 	switch_jobinfo_t *switch_job; /* switch-specific job information     */
 	uid_t         uid;     /* user id for job                           */
 	char          *user_name;
@@ -238,13 +242,14 @@ typedef struct {
 } stepd_step_rec_t;
 
 
-stepd_step_rec_t * stepd_step_rec_create(launch_tasks_request_msg_t *msg);
+stepd_step_rec_t * stepd_step_rec_create(launch_tasks_request_msg_t *msg,
+					 uint16_t protocol_version);
 stepd_step_rec_t * batch_stepd_step_rec_create(batch_job_launch_msg_t *msg);
 
 void stepd_step_rec_destroy(stepd_step_rec_t *job);
 
 srun_info_t * srun_info_create(slurm_cred_t *cred, slurm_addr_t *respaddr,
-				    slurm_addr_t *ioaddr);
+			       slurm_addr_t *ioaddr, uint16_t protocol_version);
 
 void  srun_info_destroy(srun_info_t *srun);
 
diff --git a/src/slurmd/slurmstepd/task.c b/src/slurmd/slurmstepd/task.c
index 3ece7b9dd..72a56eb5f 100644
--- a/src/slurmd/slurmstepd/task.c
+++ b/src/slurmd/slurmstepd/task.c
@@ -75,6 +75,7 @@
 
 #include "src/common/checkpoint.h"
 #include "src/common/env.h"
+#include "src/common/gres.h"
 #include "src/common/fd.h"
 #include "src/common/log.h"
 #include "src/common/mpi.h"
@@ -355,7 +356,7 @@ _setup_mpi(stepd_step_rec_t *job, int ltaskid)
 
 	return mpi_hook_slurmstepd_task(info, &job->env);
 }
-
+extern void block_daemon(void);
 
 /*
  *  Current process is running as the user when this is called.
@@ -388,7 +389,9 @@ exec_task(stepd_step_rec_t *job, int i)
 	job->envtp->distribution = job->task_dist;
 	job->envtp->cpu_bind = xstrdup(job->cpu_bind);
 	job->envtp->cpu_bind_type = job->cpu_bind_type;
-	job->envtp->cpu_freq = job->cpu_freq;
+	job->envtp->cpu_freq_min = job->cpu_freq_min;
+	job->envtp->cpu_freq_max = job->cpu_freq_max;
+	job->envtp->cpu_freq_gov = job->cpu_freq_gov;
 	job->envtp->mem_bind = xstrdup(job->mem_bind);
 	job->envtp->mem_bind_type = job->mem_bind_type;
 	job->envtp->distribution = -1;
@@ -440,12 +443,23 @@ exec_task(stepd_step_rec_t *job, int i)
 
 	/* task plugin hook */
 	if (task_g_pre_launch(job)) {
-		error ("Failed task affinity setup");
-		exit (1);
+		error("Failed to invoke task plugins: one of task_p_pre_launch functions returned error");
+		exit(1);
+	}
+	if (!job->batch && job->accel_bind_type) {
+		/* Modify copy of job's environment. Do not alter in place or
+		 * concurrent searches of the environment can generate invalid
+		 * memory references. */
+		job->envtp->env = env_array_copy((const char **) job->env);
+		gres_plugin_step_set_env(&job->envtp->env, job->step_gres_list,
+					 job->accel_bind_type);
+		tmp_env = job->env;
+		job->env = job->envtp->env;
+		env_array_free(tmp_env);
 	}
 
 	if (spank_user_task (job, i) < 0) {
-		error ("Failed to invoke task plugin stack");
+		error ("Failed to invoke spank plugin stack");
 		exit (1);
 	}
 
diff --git a/src/slurmd/slurmstepd/ulimits.c b/src/slurmd/slurmstepd/ulimits.c
index 15671f67e..da3a69818 100644
--- a/src/slurmd/slurmstepd/ulimits.c
+++ b/src/slurmd/slurmstepd/ulimits.c
@@ -186,7 +186,8 @@ set_umask(stepd_step_rec_t *job)
 	char *val;
 
 	if (!(val = getenvp(job->env, "SLURM_UMASK"))) {
-		debug("Couldn't find SLURM_UMASK in environment");
+		if (job->stepid != SLURM_EXTERN_CONT)
+			debug("Couldn't find SLURM_UMASK in environment");
 		return SLURM_ERROR;
 	}
 
diff --git a/src/slurmdbd/Makefile.in b/src/slurmdbd/Makefile.in
index 3f9f6c79e..1d90a2543 100644
--- a/src/slurmdbd/Makefile.in
+++ b/src/slurmdbd/Makefile.in
@@ -102,6 +102,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -110,10 +111,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -126,7 +129,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/slurmdbd/proc_req.c b/src/slurmdbd/proc_req.c
index 4bffdba12..40308d1c9 100644
--- a/src/slurmdbd/proc_req.c
+++ b/src/slurmdbd/proc_req.c
@@ -57,6 +57,8 @@ static int   _add_accounts(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _add_account_coords(slurmdbd_conn_t *slurmdbd_conn,
 				 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
+static int   _add_tres(slurmdbd_conn_t *slurmdbd_conn,
+			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _add_assocs(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _add_clusters(slurmdbd_conn_t *slurmdbd_conn,
@@ -75,10 +77,12 @@ static int   _archive_dump(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _archive_load(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
-static int   _cluster_cpus(slurmdbd_conn_t *slurmdbd_conn,
+static int   _cluster_tres(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _get_accounts(slurmdbd_conn_t *slurmdbd_conn,
 			   Buf in_buffer, Buf *out_buffer, uint32_t *uid);
+static int   _get_tres(slurmdbd_conn_t *slurmdbd_conn,
+			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _get_assocs(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid);
 static int   _get_clusters(slurmdbd_conn_t *slurmdbd_conn,
@@ -217,6 +221,10 @@ proc_req(slurmdbd_conn_t *slurmdbd_conn,
 			rc = _add_account_coords(slurmdbd_conn,
 						 in_buffer, out_buffer, uid);
 			break;
+		case DBD_ADD_TRES:
+			rc = _add_tres(slurmdbd_conn,
+					 in_buffer, out_buffer, uid);
+			break;
 		case DBD_ADD_ASSOCS:
 			rc = _add_assocs(slurmdbd_conn,
 					 in_buffer, out_buffer, uid);
@@ -253,14 +261,18 @@ proc_req(slurmdbd_conn_t *slurmdbd_conn,
 			rc = _archive_load(slurmdbd_conn,
 					   in_buffer, out_buffer, uid);
 			break;
-		case DBD_CLUSTER_CPUS:
-			rc = _cluster_cpus(slurmdbd_conn,
+		case DBD_CLUSTER_TRES:
+			rc = _cluster_tres(slurmdbd_conn,
 					   in_buffer, out_buffer, uid);
 			break;
 		case DBD_GET_ACCOUNTS:
 			rc = _get_accounts(slurmdbd_conn,
 					   in_buffer, out_buffer, uid);
 			break;
+		case DBD_GET_TRES:
+			rc = _get_tres(slurmdbd_conn,
+					 in_buffer, out_buffer, uid);
+			break;
 		case DBD_GET_ASSOCS:
 			rc = _get_assocs(slurmdbd_conn,
 					 in_buffer, out_buffer, uid);
@@ -530,33 +542,6 @@ static int _add_accounts(slurmdbd_conn_t *slurmdbd_conn,
 	char *comment = NULL;
 
 	debug2("DBD_ADD_ACCOUNTS: called");
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_OPERATOR) {
-		slurmdb_user_rec_t user;
-
-		memset(&user, 0, sizeof(slurmdb_user_rec_t));
-		user.uid = *uid;
-		if (assoc_mgr_fill_in_user(
-			    slurmdbd_conn->db_conn, &user, 1, NULL)
-		    != SLURM_SUCCESS) {
-			comment = "Your user has not been added to the accounting system yet.";
-			error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-			rc = SLURM_ERROR;
-			goto end_it;
-		}
-		if (!user.coord_accts || !list_count(user.coord_accts)) {
-			comment = "Your user doesn't have privilege to perform this action";
-			error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-			rc = ESLURM_ACCESS_DENIED;
-			goto end_it;
-		}
-		/* If the user is a coord of any acct they can add
-		 * accounts they are only able to make associations to
-		 * these accounts if they are coordinators of the
-		 * parent they are trying to add to
-		 */
-	}
 
 	if (slurmdbd_unpack_list_msg(&get_msg, slurmdbd_conn->rpc_version,
 				     DBD_ADD_ACCOUNTS, in_buffer) !=
@@ -569,6 +554,8 @@ static int _add_accounts(slurmdbd_conn_t *slurmdbd_conn,
 
 	rc = acct_storage_g_add_accounts(slurmdbd_conn->db_conn, *uid,
 					 get_msg->my_list);
+	if (rc == ESLURM_ACCESS_DENIED)
+		comment = "Your user doesn't have privilege to perform this action";
 end_it:
 	slurmdbd_free_list_msg(get_msg);
 	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
@@ -592,58 +579,14 @@ static int _add_account_coords(slurmdbd_conn_t *slurmdbd_conn,
 	}
 
 	debug2("DBD_ADD_ACCOUNT_COORDS: called");
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_OPERATOR) {
-		ListIterator itr = NULL;
-		ListIterator itr2 = NULL;
-		slurmdb_user_rec_t user;
-		slurmdb_coord_rec_t *coord = NULL;
-		char *acct = NULL;
-		int bad = 0;
-
-		memset(&user, 0, sizeof(slurmdb_user_rec_t));
-		user.uid = *uid;
-		if (assoc_mgr_fill_in_user(
-			    slurmdbd_conn->db_conn, &user, 1, NULL)
-		    != SLURM_SUCCESS) {
-			comment = "Your user has not been added to the accounting system yet.";
-			error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-			rc = SLURM_ERROR;
-			goto end_it;
-		}
-		if (!user.coord_accts || !list_count(user.coord_accts)) {
-			comment = "Your user doesn't have privilege to perform this action";
-			error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-			rc = ESLURM_ACCESS_DENIED;
-			goto end_it;
-		}
-		itr = list_iterator_create(get_msg->acct_list);
-		itr2 = list_iterator_create(user.coord_accts);
-		while ((acct = list_next(itr))) {
-			while ((coord = list_next(itr2))) {
-				if (!strcasecmp(coord->name, acct))
-					break;
-			}
-			if (!coord)  {
-				bad = 1;
-				break;
-			}
-			list_iterator_reset(itr2);
-		}
-		list_iterator_destroy(itr2);
-		list_iterator_destroy(itr);
-
-		if (bad)  {
-			comment = "Your user doesn't have privilege to perform this action";
-			error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-			rc = ESLURM_ACCESS_DENIED;
-			goto end_it;
-		}
-	}
 
 	rc = acct_storage_g_add_coord(slurmdbd_conn->db_conn, *uid,
 				      get_msg->acct_list, get_msg->cond);
+
+	if (rc == ESLURM_ACCESS_DENIED) {
+		comment = "Your user doesn't have privilege to perform this action";
+		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
+	}
 end_it:
 	slurmdbd_free_acct_coord_msg(get_msg);
 	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
@@ -651,6 +594,40 @@ end_it:
 	return rc;
 }
 
+static int _add_tres(slurmdbd_conn_t *slurmdbd_conn,
+		       Buf in_buffer, Buf *out_buffer, uint32_t *uid)
+{
+	int rc = SLURM_SUCCESS;
+	dbd_list_msg_t *get_msg = NULL;
+	char *comment = NULL;
+
+	debug2("DBD_ADD_TRES: called");
+
+	if (slurmdbd_unpack_list_msg(&get_msg, slurmdbd_conn->rpc_version,
+				     DBD_ADD_TRES, in_buffer) !=
+	    SLURM_SUCCESS) {
+		comment = "Failed to unpack DBD_ADD_TRES message";
+		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
+		rc = SLURM_ERROR;
+		goto end_it;
+	}
+
+	rc = acct_storage_g_add_tres(slurmdbd_conn->db_conn, *uid,
+				       get_msg->my_list);
+end_it:
+	slurmdbd_free_list_msg(get_msg);
+	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
+				      rc, comment, DBD_ADD_TRES);
+
+	/* This happens before the slurmctld registers and only when
+	   the slurmctld starts up.  So always commit, success or not.
+	   (don't ever use autocommit with innodb)
+	*/
+	acct_storage_g_commit(slurmdbd_conn->db_conn, 1);
+
+	return rc;
+}
+
 static int _add_assocs(slurmdbd_conn_t *slurmdbd_conn,
 		       Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
@@ -676,7 +653,7 @@ static int _add_assocs(slurmdbd_conn_t *slurmdbd_conn,
 		ListIterator itr2 = NULL;
 		slurmdb_user_rec_t user;
 		slurmdb_coord_rec_t *coord = NULL;
-		slurmdb_association_rec_t *object = NULL;
+		slurmdb_assoc_rec_t *object = NULL;
 
 		memset(&user, 0, sizeof(slurmdb_user_rec_t));
 		user.uid = *uid;
@@ -720,7 +697,7 @@ static int _add_assocs(slurmdbd_conn_t *slurmdbd_conn,
 		}
 	}
 
-	rc = acct_storage_g_add_associations(slurmdbd_conn->db_conn, *uid,
+	rc = acct_storage_g_add_assocs(slurmdbd_conn->db_conn, *uid,
 					     get_msg->my_list);
 end_it:
 	slurmdbd_free_list_msg(get_msg);
@@ -737,14 +714,6 @@ static int _add_clusters(slurmdbd_conn_t *slurmdbd_conn,
 	char *comment = NULL;
 
 	debug2("DBD_ADD_CLUSTERS: called");
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_SUPER_USER) {
-		comment = "Your user doesn't have privilege to perform this action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		rc = ESLURM_ACCESS_DENIED;
-		goto end_it;
-	}
 
 	if (slurmdbd_unpack_list_msg(&get_msg, slurmdbd_conn->rpc_version,
 				     DBD_ADD_CLUSTERS, in_buffer) !=
@@ -757,7 +726,9 @@ static int _add_clusters(slurmdbd_conn_t *slurmdbd_conn,
 
 	rc = acct_storage_g_add_clusters(slurmdbd_conn->db_conn, *uid,
 					 get_msg->my_list);
-	if (rc != SLURM_SUCCESS)
+	if (rc == ESLURM_ACCESS_DENIED)
+		comment = "Your user doesn't have privilege to perform this action";
+	else if (rc != SLURM_SUCCESS)
 		comment = "Failed to add cluster.";
 
 end_it:
@@ -775,14 +746,6 @@ static int _add_qos(slurmdbd_conn_t *slurmdbd_conn,
 	char *comment = NULL;
 
 	debug2("DBD_ADD_QOS: called");
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && (assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-		< SLURMDB_ADMIN_SUPER_USER)) {
-		comment = "Your user doesn't have privilege to perform this action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		rc = ESLURM_ACCESS_DENIED;
-		goto end_it;
-	}
 
 	if (slurmdbd_unpack_list_msg(&get_msg, slurmdbd_conn->rpc_version,
 				     DBD_ADD_QOS, in_buffer) !=
@@ -795,7 +758,9 @@ static int _add_qos(slurmdbd_conn_t *slurmdbd_conn,
 
 	rc = acct_storage_g_add_qos(slurmdbd_conn->db_conn, *uid,
 				    get_msg->my_list);
-	if (rc != SLURM_SUCCESS)
+	if (rc == ESLURM_ACCESS_DENIED)
+		comment = "Your user doesn't have privilege to perform this action";
+	else if (rc != SLURM_SUCCESS)
 		comment = "Failed to add qos.";
 
 end_it:
@@ -813,15 +778,6 @@ static int _add_res(slurmdbd_conn_t *slurmdbd_conn,
 	char *comment = NULL;
 
 	debug2("DBD_ADD_RES: called");
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_SUPER_USER) {
-		comment = "Your user doesn't have privilege to perform this "
-			  "action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		rc = ESLURM_ACCESS_DENIED;
-		goto end_it;
-	}
 
 	if (slurmdbd_unpack_list_msg(&get_msg, slurmdbd_conn->rpc_version,
 				     DBD_ADD_RES, in_buffer) !=
@@ -834,7 +790,9 @@ static int _add_res(slurmdbd_conn_t *slurmdbd_conn,
 
 	rc = acct_storage_g_add_res(slurmdbd_conn->db_conn, *uid,
 					 get_msg->my_list);
-	if (rc != SLURM_SUCCESS)
+	if (rc == ESLURM_ACCESS_DENIED)
+		comment = "Your user doesn't have privilege to perform this action";
+	else if (rc != SLURM_SUCCESS)
 		comment = "Failed to add system resource.";
 
 end_it:
@@ -851,33 +809,6 @@ static int _add_users(slurmdbd_conn_t *slurmdbd_conn,
 	dbd_list_msg_t *get_msg = NULL;
 	char *comment = NULL;
 	debug2("DBD_ADD_USERS: called");
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_OPERATOR) {
-		slurmdb_user_rec_t user;
-
-		memset(&user, 0, sizeof(slurmdb_user_rec_t));
-		user.uid = *uid;
-		if (assoc_mgr_fill_in_user(
-			    slurmdbd_conn->db_conn, &user, 1, NULL)
-		    != SLURM_SUCCESS) {
-			comment = "Your user has not been added to the accounting system yet.";
-			error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-			rc = SLURM_ERROR;
-			goto end_it;
-		}
-		if (!user.coord_accts || !list_count(user.coord_accts)) {
-			comment = "Your user doesn't have privilege to perform this action";
-			error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-			rc = ESLURM_ACCESS_DENIED;
-			goto end_it;
-		}
-		/* If the user is a coord of any acct they can add
-		 * users they are only able to make associations to
-		 * these users if they are coordinators of the
-		 * account they are trying to add to
-		 */
-	}
 
 	if (slurmdbd_unpack_list_msg(&get_msg, slurmdbd_conn->rpc_version,
 				     DBD_ADD_USERS, in_buffer) !=
@@ -891,6 +822,9 @@ static int _add_users(slurmdbd_conn_t *slurmdbd_conn,
 	rc = acct_storage_g_add_users(slurmdbd_conn->db_conn, *uid,
 				      get_msg->my_list);
 
+	if (rc == ESLURM_ACCESS_DENIED)
+		comment = "Your user doesn't have privilege to perform this action";
+
 end_it:
 	slurmdbd_free_list_msg(get_msg);
 	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
@@ -906,14 +840,6 @@ static int _add_wckeys(slurmdbd_conn_t *slurmdbd_conn,
 	char *comment = NULL;
 
 	debug2("DBD_ADD_WCKEYS: called");
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_SUPER_USER) {
-		comment = "Your user doesn't have privilege to perform this action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		rc = ESLURM_ACCESS_DENIED;
-		goto end_it;
-	}
 
 	if (slurmdbd_unpack_list_msg(&get_msg, slurmdbd_conn->rpc_version,
 				     DBD_ADD_WCKEYS, in_buffer) !=
@@ -1065,46 +991,49 @@ end_it:
 	return rc;
 }
 
-static int _cluster_cpus(slurmdbd_conn_t *slurmdbd_conn,
+static int _cluster_tres(slurmdbd_conn_t *slurmdbd_conn,
 			 Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
-	dbd_cluster_cpus_msg_t *cluster_cpus_msg = NULL;
+	dbd_cluster_tres_msg_t *cluster_tres_msg = NULL;
 	int rc = SLURM_SUCCESS;
 	char *comment = NULL;
 
 	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)) {
-		comment = "DBD_CLUSTER_CPUS message from invalid uid";
-		error("DBD_CLUSTER_CPUS message from invalid uid %u", *uid);
+		comment = "DBD_CLUSTER_TRES message from invalid uid";
+		error("DBD_CLUSTER_TRES message from invalid uid %u", *uid);
 		rc = ESLURM_ACCESS_DENIED;
 		goto end_it;
 	}
-	if (slurmdbd_unpack_cluster_cpus_msg(&cluster_cpus_msg,
+	if (slurmdbd_unpack_cluster_tres_msg(&cluster_tres_msg,
 					     slurmdbd_conn->rpc_version,
 					     in_buffer) !=
 	    SLURM_SUCCESS) {
-		comment = "Failed to unpack DBD_CLUSTER_CPUS message";
+		comment = "Failed to unpack DBD_CLUSTER_TRES message";
 		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
 		rc = SLURM_ERROR;
 		goto end_it;
 	}
-	debug2("DBD_CLUSTER_CPUS: called for %s(%u)",
+	debug2("DBD_CLUSTER_TRES: called for %s(%s)",
 	       slurmdbd_conn->cluster_name,
-	       cluster_cpus_msg->cpu_count);
+	       cluster_tres_msg->tres_str);
 
-	rc = clusteracct_storage_g_cluster_cpus(
+	rc = clusteracct_storage_g_cluster_tres(
 		slurmdbd_conn->db_conn,
-		cluster_cpus_msg->cluster_nodes,
-		cluster_cpus_msg->cpu_count,
-		cluster_cpus_msg->event_time);
+		cluster_tres_msg->cluster_nodes,
+		cluster_tres_msg->tres_str,
+		cluster_tres_msg->event_time);
 	if (rc == ESLURM_ACCESS_DENIED) {
 		comment = "This cluster hasn't been added to accounting yet";
 		rc = SLURM_ERROR;
 	}
 end_it:
-	if (rc == SLURM_SUCCESS)
-		slurmdbd_conn->cluster_cpus = cluster_cpus_msg->cpu_count;
+	if (rc == SLURM_SUCCESS) {
+		xfree(slurmdbd_conn->tres_str);
+		slurmdbd_conn->tres_str = cluster_tres_msg->tres_str;
+		cluster_tres_msg->tres_str = NULL;
+	}
 	if (!slurmdbd_conn->ctld_port) {
-		info("DBD_CLUSTER_CPUS: cluster not registered");
+		info("DBD_CLUSTER_TRES: cluster not registered");
 		slurmdbd_conn->ctld_port =
 			clusteracct_storage_g_register_disconn_ctld(
 				slurmdbd_conn->db_conn, slurmdbd_conn->ip);
@@ -1112,9 +1041,9 @@ end_it:
 		_add_registered_cluster(slurmdbd_conn);
 	}
 
-	slurmdbd_free_cluster_cpus_msg(cluster_cpus_msg);
+	slurmdbd_free_cluster_tres_msg(cluster_tres_msg);
 	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
-				      rc, comment, DBD_CLUSTER_CPUS);
+				      rc, comment, DBD_CLUSTER_TRES);
 	return rc;
 }
 
@@ -1156,8 +1085,50 @@ static int _get_accounts(slurmdbd_conn_t *slurmdbd_conn,
 	}
 
 	slurmdbd_free_cond_msg(get_msg, DBD_GET_ACCOUNTS);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
+
+	return rc;
+}
+
+static int _get_tres(slurmdbd_conn_t *slurmdbd_conn,
+		       Buf in_buffer, Buf *out_buffer, uint32_t *uid)
+{
+	dbd_cond_msg_t *get_msg = NULL;
+	dbd_list_msg_t list_msg;
+	char *comment = NULL;
+	int rc = SLURM_SUCCESS;
+
+	debug2("DBD_GET_TRES: called");
+	if (slurmdbd_unpack_cond_msg(&get_msg, slurmdbd_conn->rpc_version,
+				     DBD_GET_TRES, in_buffer) !=
+	    SLURM_SUCCESS) {
+		comment = "Failed to unpack DBD_GET_TRES message";
+		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
+					      SLURM_ERROR, comment,
+					      DBD_GET_TRES);
+		return SLURM_ERROR;
+	}
+
+	list_msg.my_list = acct_storage_g_get_tres(
+		slurmdbd_conn->db_conn, *uid, get_msg->cond);
+
+	if (!errno) {
+		if (!list_msg.my_list)
+			list_msg.my_list = list_create(NULL);
+		*out_buffer = init_buf(1024);
+		pack16((uint16_t) DBD_GOT_TRES, *out_buffer);
+		slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
+				       DBD_GOT_TRES, *out_buffer);
+	} else {
+		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
+					      errno, slurm_strerror(errno),
+					      DBD_GET_TRES);
+		rc = SLURM_ERROR;
+	}
+
+	slurmdbd_free_cond_msg(get_msg, DBD_GET_TRES);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -1182,7 +1153,7 @@ static int _get_assocs(slurmdbd_conn_t *slurmdbd_conn,
 		return SLURM_ERROR;
 	}
 
-	list_msg.my_list = acct_storage_g_get_associations(
+	list_msg.my_list = acct_storage_g_get_assocs(
 		slurmdbd_conn->db_conn, *uid, get_msg->cond);
 
 	if (!errno) {
@@ -1200,9 +1171,7 @@ static int _get_assocs(slurmdbd_conn_t *slurmdbd_conn,
 	}
 
 	slurmdbd_free_cond_msg(get_msg, DBD_GET_ASSOCS);
-
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -1246,9 +1215,7 @@ static int _get_clusters(slurmdbd_conn_t *slurmdbd_conn,
 	}
 
 	slurmdbd_free_cond_msg(get_msg, DBD_GET_CLUSTERS);
-
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -1300,8 +1267,7 @@ static int _get_config(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_CONFIG, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_CONFIG, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 	xfree(config_name);
 
 	return SLURM_SUCCESS;
@@ -1346,9 +1312,7 @@ static int _get_events(slurmdbd_conn_t *slurmdbd_conn,
 	}
 
 	slurmdbd_free_cond_msg(get_msg, DBD_GET_EVENTS);
-
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -1391,8 +1355,7 @@ static int _get_jobs_cond(slurmdbd_conn_t *slurmdbd_conn,
 	}
 
 	slurmdbd_free_cond_msg(cond_msg, DBD_GET_JOBS_COND);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -1407,18 +1370,6 @@ static int _get_probs(slurmdbd_conn_t *slurmdbd_conn,
 
 	debug2("DBD_GET_PROBS: called");
 
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_OPERATOR) {
-		comment = "Your user doesn't have privilege to perform this action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
-					      ESLURM_ACCESS_DENIED,
-					      comment, DBD_GET_PROBS);
-
-		return ESLURM_ACCESS_DENIED;
-	}
-
 	if (slurmdbd_unpack_cond_msg(&get_msg, slurmdbd_conn->rpc_version,
 				     DBD_GET_PROBS, in_buffer) !=
 	    SLURM_SUCCESS) {
@@ -1448,9 +1399,7 @@ static int _get_probs(slurmdbd_conn_t *slurmdbd_conn,
 	}
 
 	slurmdbd_free_cond_msg(get_msg, DBD_GET_PROBS);
-
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -1496,8 +1445,7 @@ static int _get_qos(slurmdbd_conn_t *slurmdbd_conn,
 	}
 
 	slurmdbd_free_cond_msg(cond_msg, DBD_GET_QOS);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -1540,9 +1488,7 @@ static int _get_res(slurmdbd_conn_t *slurmdbd_conn,
 	}
 
 	slurmdbd_free_cond_msg(get_msg, DBD_GET_RES);
-
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 	return rc;
 }
 
@@ -1584,8 +1530,7 @@ static int _get_txn(slurmdbd_conn_t *slurmdbd_conn,
 	}
 
 	slurmdbd_free_cond_msg(cond_msg, DBD_GET_TXN);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -1708,8 +1653,7 @@ static int _get_users(slurmdbd_conn_t *slurmdbd_conn,
 	}
 
 	slurmdbd_free_cond_msg(get_msg, DBD_GET_USERS);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -1724,6 +1668,9 @@ static int _get_wckeys(slurmdbd_conn_t *slurmdbd_conn,
 
 	debug2("DBD_GET_WCKEYS: called");
 
+	/* We have to check this here, and not in the plugin.  There
+	 * are places in the plugin that a non-admin can call this and
+	 * it be ok. */
 	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
 	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
 	    < SLURMDB_ADMIN_OPERATOR) {
@@ -1764,8 +1711,7 @@ static int _get_wckeys(slurmdbd_conn_t *slurmdbd_conn,
 	}
 
 	slurmdbd_free_cond_msg(get_msg, DBD_GET_WCKEYS);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -1809,8 +1755,7 @@ static int _get_reservations(slurmdbd_conn_t *slurmdbd_conn,
 	}
 
 	slurmdbd_free_cond_msg(get_msg, DBD_GET_RESVS);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -1818,7 +1763,7 @@ static int _get_reservations(slurmdbd_conn_t *slurmdbd_conn,
 static int _flush_jobs(slurmdbd_conn_t *slurmdbd_conn,
 		       Buf in_buffer, Buf *out_buffer, uint32_t *uid)
 {
-	dbd_cluster_cpus_msg_t *cluster_cpus_msg = NULL;
+	dbd_cluster_tres_msg_t *cluster_tres_msg = NULL;
 	int rc = SLURM_SUCCESS;
 	char *comment = NULL;
 
@@ -1828,8 +1773,8 @@ static int _flush_jobs(slurmdbd_conn_t *slurmdbd_conn,
 		rc = ESLURM_ACCESS_DENIED;
 		goto end_it;
 	}
-	if (slurmdbd_unpack_cluster_cpus_msg(
-		    &cluster_cpus_msg, slurmdbd_conn->rpc_version, in_buffer)
+	if (slurmdbd_unpack_cluster_tres_msg(
+		    &cluster_tres_msg, slurmdbd_conn->rpc_version, in_buffer)
 	    != SLURM_SUCCESS) {
 		comment = "Failed to unpack DBD_FLUSH_JOBS message";
 		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
@@ -1841,9 +1786,9 @@ static int _flush_jobs(slurmdbd_conn_t *slurmdbd_conn,
 
 	rc = acct_storage_g_flush_jobs_on_cluster(
 		slurmdbd_conn->db_conn,
-		cluster_cpus_msg->event_time);
+		cluster_tres_msg->event_time);
 end_it:
-	slurmdbd_free_cluster_cpus_msg(cluster_cpus_msg);
+	slurmdbd_free_cluster_tres_msg(cluster_tres_msg);
 	*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
 				      rc, comment, DBD_FLUSH_JOBS);
 	return rc;
@@ -1863,13 +1808,13 @@ static int _init_conn(slurmdbd_conn_t *slurmdbd_conn,
 		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
 		goto end_it;
 	}
-	if ((init_msg->version < SLURMDBD_MIN_VERSION) ||
+	if ((init_msg->version < SLURM_MIN_PROTOCOL_VERSION) ||
 	    (init_msg->version > SLURM_PROTOCOL_VERSION)) {
 		comment = "Incompatible RPC version";
 		error("Incompatible RPC version received "
 		      "(%u not between %d and %d)",
 		      init_msg->version,
-		      SLURMDBD_MIN_VERSION, SLURM_PROTOCOL_VERSION);
+		      SLURM_MIN_PROTOCOL_VERSION, SLURM_PROTOCOL_VERSION);
 		rc = SLURM_PROTOCOL_VERSION_ERROR;
 		goto end_it;
 	}
@@ -2066,7 +2011,7 @@ static int  _job_suspend(slurmdbd_conn_t *slurmdbd_conn,
 
 	debug2("DBD_JOB_SUSPEND: ID:%u STATE:%s",
 	       job_suspend_msg->job_id,
-	       job_state_string((enum job_states) job_suspend_msg->job_state));
+	       job_state_string(job_suspend_msg->job_state));
 
 	memset(&job, 0, sizeof(struct job_record));
 	memset(&details, 0, sizeof(struct job_details));
@@ -2104,18 +2049,6 @@ static int   _modify_accounts(slurmdbd_conn_t *slurmdbd_conn,
 
 	debug2("DBD_MODIFY_ACCOUNTS: called");
 
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_OPERATOR) {
-		comment = "Your user doesn't have privilege to perform this action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
-					      ESLURM_ACCESS_DENIED,
-					      comment, DBD_MODIFY_ACCOUNTS);
-
-		return ESLURM_ACCESS_DENIED;
-	}
-
 	if (slurmdbd_unpack_modify_msg(&get_msg, slurmdbd_conn->rpc_version,
 				       DBD_MODIFY_ACCOUNTS,
 				       in_buffer) != SLURM_SUCCESS) {
@@ -2159,8 +2092,7 @@ static int   _modify_accounts(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_LIST, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -2192,7 +2124,7 @@ static int   _modify_assocs(slurmdbd_conn_t *slurmdbd_conn,
 	 * until we process it through the database.
 	 */
 
-	if (!(list_msg.my_list = acct_storage_g_modify_associations(
+	if (!(list_msg.my_list = acct_storage_g_modify_assocs(
 		      slurmdbd_conn->db_conn, *uid, get_msg->cond,
 		      get_msg->rec))) {
 		if (errno == ESLURM_ACCESS_DENIED) {
@@ -2224,8 +2156,7 @@ static int   _modify_assocs(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_LIST, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -2240,18 +2171,6 @@ static int   _modify_clusters(slurmdbd_conn_t *slurmdbd_conn,
 
 	debug2("DBD_MODIFY_CLUSTERS: called");
 
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_SUPER_USER) {
-		comment = "Your user doesn't have privilege to perform this action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
-					      ESLURM_ACCESS_DENIED,
-					      comment, DBD_MODIFY_CLUSTERS);
-
-		return ESLURM_ACCESS_DENIED;
-	}
-
 	if (slurmdbd_unpack_modify_msg(&get_msg, slurmdbd_conn->rpc_version,
 				       DBD_MODIFY_CLUSTERS,
 				       in_buffer) != SLURM_SUCCESS) {
@@ -2295,8 +2214,7 @@ static int   _modify_clusters(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_LIST, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -2354,8 +2272,7 @@ static int   _modify_job(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_LIST, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -2370,18 +2287,6 @@ static int   _modify_qos(slurmdbd_conn_t *slurmdbd_conn,
 
 	debug2("DBD_MODIFY_QOS: called");
 
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_SUPER_USER) {
-		comment = "Your user doesn't have privilege to perform this action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
-					      ESLURM_ACCESS_DENIED,
-					      comment, DBD_MODIFY_QOS);
-
-		return ESLURM_ACCESS_DENIED;
-	}
-
 	if (slurmdbd_unpack_modify_msg(&get_msg, slurmdbd_conn->rpc_version,
 				       DBD_MODIFY_QOS,
 				       in_buffer) != SLURM_SUCCESS) {
@@ -2428,8 +2333,7 @@ static int   _modify_qos(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_LIST, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -2444,18 +2348,6 @@ static int   _modify_res(slurmdbd_conn_t *slurmdbd_conn,
 
 	debug2("DBD_MODIFY_RES: called");
 
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_SUPER_USER) {
-		comment = "Your user doesn't have privilege to perform this "
-			  "action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
-					      ESLURM_ACCESS_DENIED,
-					      comment, DBD_MODIFY_RES);
-
-		return ESLURM_ACCESS_DENIED;
-	}
 	if (slurmdbd_unpack_modify_msg(&get_msg, slurmdbd_conn->rpc_version,
 				       DBD_MODIFY_RES,
 				       in_buffer) != SLURM_SUCCESS) {
@@ -2499,8 +2391,7 @@ static int   _modify_res(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_LIST, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 	return rc;
 }
 
@@ -2621,8 +2512,7 @@ is_same_user:
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_LIST, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -2637,18 +2527,6 @@ static int   _modify_wckeys(slurmdbd_conn_t *slurmdbd_conn,
 
 	debug2("DBD_MODIFY_WCKEYS: called");
 
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_SUPER_USER) {
-		comment = "Your user doesn't have privilege to perform this action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
-					      ESLURM_ACCESS_DENIED,
-					      comment, DBD_MODIFY_WCKEYS);
-
-		return ESLURM_ACCESS_DENIED;
-	}
-
 	if (slurmdbd_unpack_modify_msg(&get_msg, slurmdbd_conn->rpc_version,
 				       DBD_MODIFY_WCKEYS,
 				       in_buffer) != SLURM_SUCCESS) {
@@ -2692,8 +2570,7 @@ static int   _modify_wckeys(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_LIST, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -2760,7 +2637,7 @@ static int _node_state(slurmdbd_conn_t *slurmdbd_conn,
 
 	memset(&node_ptr, 0, sizeof(struct node_record));
 	node_ptr.name = node_state_msg->hostlist;
-	node_ptr.cpus = node_state_msg->cpu_count;
+	node_ptr.tres_str = node_state_msg->tres_str;
 	node_ptr.node_state = node_state_msg->state;
 	node_ptr.reason = node_state_msg->reason;
 	node_ptr.reason_time = node_state_msg->event_time;
@@ -2768,7 +2645,7 @@ static int _node_state(slurmdbd_conn_t *slurmdbd_conn,
 
 	slurmctld_conf.fast_schedule = 0;
 
-	if (!node_ptr.cpus)
+	if (!node_ptr.tres_str)
 		node_state_msg->new_state = DBD_NODE_STATE_UP;
 
 	if (node_state_msg->new_state == DBD_NODE_STATE_UP) {
@@ -2777,6 +2654,7 @@ static int _node_state(slurmdbd_conn_t *slurmdbd_conn,
 		       _node_state_string(node_state_msg->new_state),
 		       node_state_msg->reason,
 		       (long)node_state_msg->event_time);
+
 		/* clusteracct_storage_g_node_up can change the reason
 		 * field so copy it to avoid memory issues.
 		 */
@@ -2832,7 +2710,6 @@ static void _process_job_start(slurmdbd_conn_t *slurmdbd_conn,
 	memset(&array_recs, 0, sizeof(job_array_struct_t));
 	memset(id_rc_msg, 0, sizeof(dbd_id_rc_msg_t));
 
-	job.total_cpus = job_start_msg->alloc_cpus;
 	job.total_nodes = job_start_msg->alloc_nodes;
 	job.account = _replace_double_quotes(job_start_msg->account);
 	job.array_job_id = job_start_msg->array_job_id;
@@ -2860,6 +2737,8 @@ static void _process_job_start(slurmdbd_conn_t *slurmdbd_conn,
 	job.priority = job_start_msg->priority;
 	job.start_time = job_start_msg->start_time;
 	job.time_limit = job_start_msg->timelimit;
+	job.tres_alloc_str = job_start_msg->tres_alloc_str;
+	job.tres_req_str = job_start_msg->tres_req_str;
 	job.gres_alloc = job_start_msg->gres_alloc;
 	job.gres_req = job_start_msg->gres_req;
 	job.gres_used = job_start_msg->gres_used;
@@ -3000,11 +2879,8 @@ static int   _register_ctld(slurmdbd_conn_t *slurmdbd_conn,
 		rc = SLURM_ERROR;
 	}
 
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
-
-
-	list_destroy(cluster_q.cluster_list);
+	FREE_NULL_LIST(list_msg.my_list);
+	FREE_NULL_LIST(cluster_q.cluster_list);
 	/*
 	 * Outgoing message header must have flag set:
 	 * out_msg.flags = SLURM_GLOBAL_AUTH_KEY;
@@ -3028,7 +2904,7 @@ static int   _register_ctld(slurmdbd_conn_t *slurmdbd_conn,
 			/* We probably need to add matching recv_msg function
 			 * for an arbitray fd or should these be fire
 			 * and forget? */
-			slurm_close_stream(fd);
+			slurm_close(fd);
 		}
 	}
 #endif
@@ -3057,18 +2933,6 @@ static int   _remove_accounts(slurmdbd_conn_t *slurmdbd_conn,
 
 	debug2("DBD_REMOVE_ACCOUNTS: called");
 
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_OPERATOR) {
-		comment = "Your user doesn't have privilege to perform this action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
-					      ESLURM_ACCESS_DENIED,
-					      comment, DBD_REMOVE_ACCOUNTS);
-
-		return ESLURM_ACCESS_DENIED;
-	}
-
 	if (slurmdbd_unpack_cond_msg(&get_msg, slurmdbd_conn->rpc_version,
 				     DBD_REMOVE_ACCOUNTS,
 				     in_buffer) != SLURM_SUCCESS) {
@@ -3112,8 +2976,7 @@ static int   _remove_accounts(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_LIST, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -3179,8 +3042,7 @@ static int   _remove_account_coords(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_LIST, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -3210,7 +3072,7 @@ static int   _remove_assocs(slurmdbd_conn_t *slurmdbd_conn,
 	 * until we process it through the database.
 	 */
 
-	if (!(list_msg.my_list = acct_storage_g_remove_associations(
+	if (!(list_msg.my_list = acct_storage_g_remove_assocs(
 		      slurmdbd_conn->db_conn, *uid, get_msg->cond))) {
 		if (errno == ESLURM_ACCESS_DENIED) {
 			comment = "Your user doesn't have privilege to perform this action";
@@ -3242,8 +3104,7 @@ static int   _remove_assocs(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_LIST, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 
@@ -3259,18 +3120,6 @@ static int   _remove_clusters(slurmdbd_conn_t *slurmdbd_conn,
 
 	debug2("DBD_REMOVE_CLUSTERS: called");
 
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_SUPER_USER) {
-		comment = "Your user doesn't have privilege to perform this action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
-					      ESLURM_ACCESS_DENIED,
-					      comment, DBD_REMOVE_CLUSTERS);
-
-		return ESLURM_ACCESS_DENIED;
-	}
-
 	if (slurmdbd_unpack_cond_msg(&get_msg, slurmdbd_conn->rpc_version,
 				     DBD_REMOVE_CLUSTERS,
 				     in_buffer) != SLURM_SUCCESS) {
@@ -3314,8 +3163,7 @@ static int   _remove_clusters(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_LIST, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -3330,18 +3178,6 @@ static int   _remove_qos(slurmdbd_conn_t *slurmdbd_conn,
 
 	debug2("DBD_REMOVE_QOS: called");
 
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_SUPER_USER) {
-		comment = "Your user doesn't have privilege to perform this action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
-					      ESLURM_ACCESS_DENIED,
-					      comment, DBD_REMOVE_QOS);
-
-		return ESLURM_ACCESS_DENIED;
-	}
-
 	if (slurmdbd_unpack_cond_msg(&get_msg, slurmdbd_conn->rpc_version,
 				     DBD_REMOVE_QOS,
 				     in_buffer) != SLURM_SUCCESS) {
@@ -3385,8 +3221,7 @@ static int   _remove_qos(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_LIST, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -3401,18 +3236,6 @@ static int _remove_res(slurmdbd_conn_t *slurmdbd_conn,
 
 	debug2("DBD_REMOVE_RES: called");
 
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_SUPER_USER) {
-		comment = "Your user doesn't have privilege to perform this "
-			  "action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
-					      ESLURM_ACCESS_DENIED,
-					      comment, DBD_REMOVE_RES);
-
-		return ESLURM_ACCESS_DENIED;
-	}
 	if (slurmdbd_unpack_cond_msg(&get_msg, slurmdbd_conn->rpc_version,
 				     DBD_REMOVE_RES,
 				     in_buffer) != SLURM_SUCCESS) {
@@ -3470,18 +3293,6 @@ static int   _remove_users(slurmdbd_conn_t *slurmdbd_conn,
 
 	debug2("DBD_REMOVE_USERS: called");
 
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_OPERATOR) {
-		comment = "Your user doesn't have privilege to perform this action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
-					      ESLURM_ACCESS_DENIED,
-					      comment, DBD_REMOVE_USERS);
-
-		return ESLURM_ACCESS_DENIED;
-	}
-
 	if (slurmdbd_unpack_cond_msg(&get_msg, slurmdbd_conn->rpc_version,
 				     DBD_REMOVE_USERS, in_buffer) !=
 	    SLURM_SUCCESS) {
@@ -3525,8 +3336,7 @@ static int   _remove_users(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_LIST, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -3541,18 +3351,6 @@ static int   _remove_wckeys(slurmdbd_conn_t *slurmdbd_conn,
 
 	debug2("DBD_REMOVE_WCKEYS: called");
 
-	if ((*uid != slurmdbd_conf->slurm_user_id && *uid != 0)
-	    && assoc_mgr_get_admin_level(slurmdbd_conn->db_conn, *uid)
-	    < SLURMDB_ADMIN_SUPER_USER) {
-		comment = "Your user doesn't have privilege to perform this action";
-		error("CONN:%u %s", slurmdbd_conn->newsockfd, comment);
-		*out_buffer = make_dbd_rc_msg(slurmdbd_conn->rpc_version,
-					      ESLURM_ACCESS_DENIED,
-					      comment, DBD_REMOVE_WCKEYS);
-
-		return ESLURM_ACCESS_DENIED;
-	}
-
 	if (slurmdbd_unpack_cond_msg(&get_msg, slurmdbd_conn->rpc_version,
 				     DBD_REMOVE_WCKEYS,
 				     in_buffer) != SLURM_SUCCESS) {
@@ -3596,8 +3394,7 @@ static int   _remove_wckeys(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_LIST, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_LIST, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return rc;
 }
@@ -3725,8 +3522,7 @@ static int   _send_mult_job_start(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_MULT_JOB_START, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_MULT_JOB_START, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return SLURM_SUCCESS;
 }
@@ -3785,8 +3581,7 @@ static int   _send_mult_msg(slurmdbd_conn_t *slurmdbd_conn,
 	pack16((uint16_t) DBD_GOT_MULT_MSG, *out_buffer);
 	slurmdbd_pack_list_msg(&list_msg, slurmdbd_conn->rpc_version,
 			       DBD_GOT_MULT_MSG, *out_buffer);
-	if (list_msg.my_list)
-		list_destroy(list_msg.my_list);
+	FREE_NULL_LIST(list_msg.my_list);
 
 	return SLURM_SUCCESS;
 }
@@ -3911,9 +3706,11 @@ static int  _step_start(slurmdbd_conn_t *slurmdbd_conn,
 	step.start_time = step_start_msg->start_time;
 	details.submit_time = step_start_msg->job_submit_time;
 	step.step_id = step_start_msg->step_id;
-	step.cpu_count = step_start_msg->total_cpus;
 	details.num_tasks = step_start_msg->total_tasks;
-	step.cpu_freq = step_start_msg->req_cpufreq;
+	step.cpu_freq_min = step_start_msg->req_cpufreq_min;
+	step.cpu_freq_max = step_start_msg->req_cpufreq_max;
+	step.cpu_freq_gov = step_start_msg->req_cpufreq_gov;
+	step.tres_alloc_str = step_start_msg->tres_alloc_str;
 
 	layout.node_cnt = step_start_msg->node_cnt;
 	layout.task_dist = step_start_msg->task_dist;
diff --git a/src/slurmdbd/proc_req.h b/src/slurmdbd/proc_req.h
index ddd1e76e6..76a7a6cb5 100644
--- a/src/slurmdbd/proc_req.h
+++ b/src/slurmdbd/proc_req.h
@@ -45,13 +45,13 @@
 
 typedef struct {
 	char *cluster_name;
-	uint32_t cluster_cpus;
 	uint16_t ctld_port; /* slurmctld_port */
 	void *db_conn; /* database connection */
 	char ip[32];
 	slurm_fd_t newsockfd; /* socket connection descriptor */
 	uint16_t orig_port;
 	uint16_t rpc_version; /* version of rpc */
+	char *tres_str;
 } slurmdbd_conn_t;
 
 /* Process an incoming RPC
diff --git a/src/slurmdbd/read_config.c b/src/slurmdbd/read_config.c
index 624b9218f..22a76b05e 100644
--- a/src/slurmdbd/read_config.c
+++ b/src/slurmdbd/read_config.c
@@ -640,23 +640,23 @@ extern List dump_config(void)
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("ArchiveEvents");
-	key_pair->value = xstrdup_printf(
-		"%u",
-		SLURMDB_PURGE_ARCHIVE_SET(slurmdbd_conf->purge_event) ? 1 : 0);
+	key_pair->value = xstrdup(
+		SLURMDB_PURGE_ARCHIVE_SET(
+			slurmdbd_conf->purge_event) ? "Yes" : "No");
 	list_append(my_list, key_pair);
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("ArchiveJobs");
-	key_pair->value = xstrdup_printf(
-		"%u",
-		SLURMDB_PURGE_ARCHIVE_SET(slurmdbd_conf->purge_job) ? 1 : 0);
+	key_pair->value = xstrdup(
+		SLURMDB_PURGE_ARCHIVE_SET(
+			slurmdbd_conf->purge_job) ? "Yes" : "No");
 	list_append(my_list, key_pair);
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("ArchiveResvs");
-	key_pair->value = xstrdup_printf(
-		"%u",
-		SLURMDB_PURGE_ARCHIVE_SET(slurmdbd_conf->purge_resv) ? 1 : 0);
+	key_pair->value = xstrdup(
+		SLURMDB_PURGE_ARCHIVE_SET(
+			slurmdbd_conf->purge_resv) ? "Yes" : "No");
 	list_append(my_list, key_pair);
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
@@ -666,16 +666,16 @@ extern List dump_config(void)
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("ArchiveSteps");
-	key_pair->value = xstrdup_printf(
-		"%u",
-		SLURMDB_PURGE_ARCHIVE_SET(slurmdbd_conf->purge_step) ? 1 : 0);
+	key_pair->value = xstrdup(
+		SLURMDB_PURGE_ARCHIVE_SET(
+			slurmdbd_conf->purge_step) ? "Yes" : "No");
 	list_append(my_list, key_pair);
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("ArchiveSuspend");
-	key_pair->value = xstrdup_printf(
-		"%u", SLURMDB_PURGE_ARCHIVE_SET(
-			slurmdbd_conf->purge_suspend) ? 1 : 0);
+	key_pair->value = xstrdup(
+		SLURMDB_PURGE_ARCHIVE_SET(
+			slurmdbd_conf->purge_suspend) ? "Yes" : "No");
 	list_append(my_list, key_pair);
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
@@ -696,7 +696,7 @@ extern List dump_config(void)
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("CommitDelay");
-	key_pair->value = xstrdup_printf("%u", slurmdbd_conf->commit_delay);
+	key_pair->value = xstrdup(slurmdbd_conf->commit_delay ? "Yes" : "No");
 	list_append(my_list, key_pair);
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
@@ -726,8 +726,7 @@ extern List dump_config(void)
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("DebugLevel");
-	key_pair->value = xstrdup_printf("%s",
-		 log_num2string(slurmdbd_conf->debug_level));
+	key_pair->value = xstrdup(log_num2string(slurmdbd_conf->debug_level));
 	list_append(my_list, key_pair);
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
@@ -868,7 +867,7 @@ extern List dump_config(void)
 
 	key_pair = xmalloc(sizeof(config_key_pair_t));
 	key_pair->name = xstrdup("TrackSlurmctldDown");
-	key_pair->value = xstrdup_printf("%u", slurmdbd_conf->track_ctld);
+	key_pair->value = xstrdup(slurmdbd_conf->track_ctld ? "Yes" : "No");
 	list_append(my_list, key_pair);
 
 	return my_list;
diff --git a/src/slurmdbd/rpc_mgr.c b/src/slurmdbd/rpc_mgr.c
index 162cde41d..063b2387c 100644
--- a/src/slurmdbd/rpc_mgr.c
+++ b/src/slurmdbd/rpc_mgr.c
@@ -259,7 +259,7 @@ static void * _service_connection(void *arg)
 			cluster_rec.name = conn->cluster_name;
 			cluster_rec.control_host = conn->ip;
 			cluster_rec.control_port = conn->ctld_port;
-			cluster_rec.cpu_count = conn->cluster_cpus;
+			cluster_rec.tres_str = conn->tres_str;
 			debug("cluster %s has disconnected",
 			      conn->cluster_name);
 
@@ -282,11 +282,12 @@ static void * _service_connection(void *arg)
 	}
 
 	acct_storage_g_close_connection(&conn->db_conn);
-	if (slurm_close_accepted_conn(conn->newsockfd) < 0)
+	if (slurm_close(conn->newsockfd) < 0)
 		error("close(%d): %m(%s)",  conn->newsockfd, conn->ip);
 	else
 		debug2("Closed connection %d uid(%d)", conn->newsockfd, uid);
 
+	xfree(conn->tres_str);
 	xfree(conn->cluster_name);
 	xfree(conn);
 	_free_server_thread(pthread_self());
diff --git a/src/slurmdbd/slurmdbd.c b/src/slurmdbd/slurmdbd.c
index 2a2e5e60b..3f1933761 100644
--- a/src/slurmdbd/slurmdbd.c
+++ b/src/slurmdbd/slurmdbd.c
@@ -56,14 +56,15 @@
 #include "src/common/daemonize.h"
 #include "src/common/fd.h"
 #include "src/common/log.h"
+#include "src/common/proc_args.h"
 #include "src/common/read_config.h"
 #include "src/common/slurm_accounting_storage.h"
 #include "src/common/slurm_auth.h"
+#include "src/common/slurm_time.h"
 #include "src/common/uid.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xsignal.h"
 #include "src/common/xstring.h"
-#include "src/common/proc_args.h"
 
 #include "src/slurmdbd/read_config.h"
 #include "src/slurmdbd/rpc_mgr.h"
@@ -182,7 +183,8 @@ int main(int argc, char *argv[])
 
 	/* If we are tacking wckey we need to cache
 	   wckeys, if we aren't only cache the users, qos */
-	assoc_init_arg.cache_level = ASSOC_MGR_CACHE_USER | ASSOC_MGR_CACHE_QOS;
+	assoc_init_arg.cache_level = ASSOC_MGR_CACHE_USER |
+		ASSOC_MGR_CACHE_QOS | ASSOC_MGR_CACHE_TRES;
 	if (slurmdbd_conf->track_wckey)
 		assoc_init_arg.cache_level |= ASSOC_MGR_CACHE_WCKEY;
 
@@ -207,7 +209,7 @@ int main(int argc, char *argv[])
 			acct_storage_g_commit(db_conn, 1);
 			run_dbd_backup();
 			if (!shutdown_time)
-				assoc_mgr_refresh_lists(db_conn);
+				assoc_mgr_refresh_lists(db_conn, 0);
 		} else if (slurmdbd_conf->dbd_host &&
 			   (!strcmp(slurmdbd_conf->dbd_host, node_name) ||
 			    !strcmp(slurmdbd_conf->dbd_host, "localhost"))) {
@@ -550,7 +552,7 @@ static void _request_registrations(void *db_conn)
 			clusteracct_storage_g_fini_ctld(db_conn, cluster_rec);
 	}
 	list_iterator_destroy(itr);
-	list_destroy(cluster_list);
+	FREE_NULL_LIST(cluster_list);
 }
 
 static void _rollup_handler_cancel()
@@ -574,7 +576,7 @@ static void *_rollup_handler(void *db_conn)
 	(void) pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, NULL);
 	(void) pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
 
-	if (!localtime_r(&start_time, &tm)) {
+	if (!slurm_localtime_r(&start_time, &tm)) {
 		fatal("Couldn't get localtime for rollup handler %ld",
 		      (long)start_time);
 		return NULL;
@@ -586,7 +588,7 @@ static void *_rollup_handler(void *db_conn)
 		/* run the roll up */
 		slurm_mutex_lock(&rollup_lock);
 		running_rollup = 1;
-		debug2("running rollup at %s", slurm_ctime(&start_time));
+		debug2("running rollup at %s", slurm_ctime2(&start_time));
 		acct_storage_g_roll_usage(db_conn, 0, 0, 1);
 		acct_storage_g_commit(db_conn, 1);
 		running_rollup = 0;
@@ -597,7 +599,7 @@ static void *_rollup_handler(void *db_conn)
 		tm.tm_min = 0;
 		tm.tm_hour++;
 		tm.tm_isdst = -1;
-		next_time = mktime(&tm);
+		next_time = slurm_mktime(&tm);
 
 		/* get the time now we have rolled usage */
 		start_time = time(NULL);
@@ -605,7 +607,7 @@ static void *_rollup_handler(void *db_conn)
 		sleep((next_time-start_time));
 
 		start_time = time(NULL);
-		if (!localtime_r(&start_time, &tm)) {
+		if (!slurm_localtime_r(&start_time, &tm)) {
 			fatal("Couldn't get localtime for rollup handler %ld",
 			      (long)start_time);
 			return NULL;
@@ -689,14 +691,13 @@ static int _send_slurmctld_register_req(slurmdb_cluster_rec_t *cluster_rec)
 		slurm_msg_t_init(&out_msg);
 		out_msg.msg_type = ACCOUNTING_REGISTER_CTLD;
 		out_msg.flags = SLURM_GLOBAL_AUTH_KEY;
-		out_msg.protocol_version
-			= slurmdbd_translate_rpc(cluster_rec->rpc_version);
+		out_msg.protocol_version = cluster_rec->rpc_version;
 		slurm_send_node_msg(fd, &out_msg);
 		/* We probably need to add matching recv_msg function
 		 * for an arbitray fd or should these be fire
 		 * and forget?  For this, that we can probably
 		 * forget about it */
-		slurm_close_stream(fd);
+		slurm_close(fd);
 	}
 	return rc;
 }
diff --git a/src/smap/Makefile.in b/src/smap/Makefile.in
index 0e552b332..0c874f343 100644
--- a/src/smap/Makefile.in
+++ b/src/smap/Makefile.in
@@ -108,6 +108,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -116,10 +117,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -132,7 +135,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -270,6 +273,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -319,8 +324,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = $(NCURSES)
 LIBTOOL = @LIBTOOL@
@@ -339,6 +348,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -382,6 +394,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -405,6 +418,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/smap/configure_functions.c b/src/smap/configure_functions.c
index 7f1a217ad..c01e432dd 100644
--- a/src/smap/configure_functions.c
+++ b/src/smap/configure_functions.c
@@ -109,7 +109,7 @@ static void _destroy_allocated_block(void *object)
 			_set_nodes(allocated_block->nodes, 0, '.');
 			bg_configure_remove_block(
 				allocated_block->nodes, is_small);
-			list_destroy(allocated_block->nodes);
+			FREE_NULL_LIST(allocated_block->nodes);
 		}
 		destroy_select_ba_request(allocated_block->request);
 		xfree(allocated_block);
@@ -148,8 +148,7 @@ static allocated_block_t *_make_request(select_ba_request_t *request)
 		results = NULL;
 	}
 
-	if (results)
-		list_destroy(results);
+	FREE_NULL_LIST(results);
 	return allocated_block;
 
 }
@@ -1482,8 +1481,7 @@ void get_command(void)
 
 		if (!strcmp(com, "exit")) {
 			endwin();
-			if (allocated_blocks)
-				list_destroy(allocated_blocks);
+			FREE_NULL_LIST(allocated_blocks);
 			bg_configure_ba_fini();
 			exit(0);
 		}
@@ -1544,8 +1542,7 @@ void get_command(void)
 			exit(1);
 		}
 	}
-	if (allocated_blocks)
-		list_destroy(allocated_blocks);
+	FREE_NULL_LIST(allocated_blocks);
 	params.display = 0;
 	noecho();
 
diff --git a/src/smap/job_functions.c b/src/smap/job_functions.c
index d8672429a..d78606c74 100644
--- a/src/smap/job_functions.c
+++ b/src/smap/job_functions.c
@@ -43,9 +43,6 @@
 #include "src/common/parse_time.h"
 #include "src/smap/smap.h"
 
-static int  _get_node_cnt(job_info_t * job);
-static int  _max_cpus_per_node(void);
-static int  _nodes_in_list(char *node_list);
 static void _print_header_job(void);
 static int  _print_text_job(job_info_t * job_ptr);
 
@@ -308,11 +305,12 @@ static int _print_text_job(job_info_t * job_ptr)
 		node_cnt = job_ptr->num_nodes;
 
 	if ((node_cnt  == 0) || (node_cnt == NO_VAL))
-		node_cnt = _get_node_cnt(job_ptr);
+		node_cnt = job_ptr->num_nodes;
 
 	if (params.cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)node_cnt, tmp_cnt,
-				 sizeof(tmp_cnt), UNIT_NONE);
+				 sizeof(tmp_cnt), UNIT_NONE,
+				 CONVERT_NUM_UNIT_EXACT);
 	else
 		snprintf(tmp_cnt, sizeof(tmp_cnt), "%d", node_cnt);
 
@@ -462,53 +460,3 @@ static int _print_text_job(job_info_t * job_ptr)
 
 	return printed;
 }
-
-static int _get_node_cnt(job_info_t * job)
-{
-	int node_cnt = 0, round;
-	bool completing = job->job_state & JOB_COMPLETING;
-	uint16_t base_job_state = job->job_state & (~JOB_COMPLETING);
-	static int max_cpus = 0;
-
-	if (base_job_state == JOB_PENDING || completing) {
-		if (max_cpus == 0)
-			max_cpus = _max_cpus_per_node();
-
-		node_cnt = _nodes_in_list(job->req_nodes);
-		node_cnt = MAX(node_cnt, job->num_nodes);
-		round  = job->num_cpus + max_cpus - 1;
-		round /= max_cpus;      /* round up */
-		node_cnt = MAX(node_cnt, round);
-	} else
-		node_cnt = _nodes_in_list(job->nodes);
-	return node_cnt;
-}
-
-static int _nodes_in_list(char *node_list)
-{
-	hostset_t host_set = hostset_create(node_list);
-	int count = hostset_count(host_set);
-	hostset_destroy(host_set);
-	return count;
-}
-
-/* Return the maximum number of processors for any node in the cluster */
-static int   _max_cpus_per_node(void)
-{
-	int error_code, max_cpus = 1;
-	node_info_msg_t *node_info_ptr = NULL;
-
-	error_code = slurm_load_node ((time_t) NULL, &node_info_ptr,
-				      params.all_flag ? 1 : 0);
-	if (error_code == SLURM_SUCCESS) {
-		int i;
-		node_info_t *node_ptr = node_info_ptr->node_array;
-		for (i=0; i<node_info_ptr->record_count; i++) {
-			max_cpus = MAX(max_cpus, node_ptr[i].cpus);
-		}
-		slurm_free_node_info_msg (node_info_ptr);
-	}
-
-	return max_cpus;
-}
-
diff --git a/src/smap/opts.c b/src/smap/opts.c
index 4b1bfb671..cf7f8a3d5 100644
--- a/src/smap/opts.c
+++ b/src/smap/opts.c
@@ -39,6 +39,7 @@
 
 #include "src/smap/smap.h"
 #include "src/common/proc_args.h"
+#include "src/common/slurm_time.h"
 #include "src/common/xstring.h"
 
 /* FUNCTIONS */
@@ -133,10 +134,9 @@ extern void parse_command_line(int argc, char *argv[])
 			}
 			break;
 		case 'M':
-			if (params.clusters)
-				list_destroy(params.clusters);
+			FREE_NULL_LIST(params.clusters);
 			if (!(params.clusters =
-			     slurmdb_get_info_cluster(optarg))) {
+			      slurmdb_get_info_cluster(optarg))) {
 				print_db_notok(optarg, 0);
 				exit(1);
 			}
@@ -187,11 +187,11 @@ extern void print_date(void)
 	time_t now_time = time(NULL);
 
 	if (params.commandline) {
-		printf("%s", ctime(&now_time));
+		printf("%s", slurm_ctime(&now_time));
 	} else {
 		mvwprintw(text_win, main_ycord,
 			  main_xcord, "%s",
-			  slurm_ctime(&now_time));
+			  slurm_ctime2(&now_time));
 		main_ycord++;
 	}
 }
diff --git a/src/smap/partition_functions.c b/src/smap/partition_functions.c
index c04ef606a..5cd16e044 100644
--- a/src/smap/partition_functions.c
+++ b/src/smap/partition_functions.c
@@ -370,7 +370,7 @@ extern void get_bg_part(void)
 			}
 			list_iterator_destroy(itr);
 		}
-		list_destroy(nodelist);
+		FREE_NULL_LIST(nodelist);
 	}
 
 	/* Report the BG Blocks */
@@ -557,7 +557,8 @@ static int _print_text_part(partition_info_t *part_ptr,
 
 	if (params.cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)part_ptr->total_nodes, tmp_cnt,
-				 sizeof(tmp_cnt), UNIT_NONE);
+				 sizeof(tmp_cnt), UNIT_NONE,
+				 CONVERT_NUM_UNIT_EXACT);
 	else
 		snprintf(tmp_cnt, sizeof(tmp_cnt), "%u", part_ptr->total_nodes);
 
@@ -810,12 +811,8 @@ static void _block_list_del(void *object)
 		xfree(block_ptr->slurm_part_name);
 		xfree(block_ptr->mp_str);
 		xfree(block_ptr->ionode_str);
-		if (block_ptr->nodelist)
-			list_destroy(block_ptr->nodelist);
-		if (block_ptr->job_list) {
-			list_destroy(block_ptr->job_list);
-			block_ptr->job_list = NULL;
-		}
+		FREE_NULL_LIST(block_ptr->nodelist);
+		FREE_NULL_LIST(block_ptr->job_list);
 		xfree(block_ptr);
 
 	}
diff --git a/src/smd/Makefile.in b/src/smd/Makefile.in
index 3f1476583..8ba886b23 100644
--- a/src/smd/Makefile.in
+++ b/src/smd/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -189,6 +192,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -238,8 +243,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -258,6 +267,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -301,6 +313,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -324,6 +337,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/sprio/Makefile.in b/src/sprio/Makefile.in
index 495458806..b8b006f01 100644
--- a/src/sprio/Makefile.in
+++ b/src/sprio/Makefile.in
@@ -103,6 +103,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -111,10 +112,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -127,7 +130,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/sprio/opts.c b/src/sprio/opts.c
index 3163bafca..07579b756 100644
--- a/src/sprio/opts.c
+++ b/src/sprio/opts.c
@@ -138,10 +138,9 @@ parse_command_line( int argc, char* argv[] )
 			params.long_list = true;
 			break;
 		case (int) 'M':
-			if (params.clusters)
-				list_destroy(params.clusters);
+			FREE_NULL_LIST(params.clusters);
 			if (!(params.clusters =
-			     slurmdb_get_info_cluster(optarg))) {
+			      slurmdb_get_info_cluster(optarg))) {
 				print_db_notok(optarg, 0);
 				exit(1);
 			}
@@ -309,6 +308,14 @@ extern int parse_format( char* format )
 							     field_size,
 							     right_justify,
 							     suffix );
+		else if (field[0] == 't')
+			job_format_add_tres_normalized(params.format_list,
+						     field_size, right_justify,
+						     suffix);
+		else if (field[0] == 'T')
+			job_format_add_tres_weighted(params.format_list,
+						     field_size, right_justify,
+						     suffix);
 		else
 			error( "Invalid job format specification: %c",
 			       field[0] );
diff --git a/src/sprio/print.c b/src/sprio/print.c
index 4b988fdf0..8a144c95c 100644
--- a/src/sprio/print.c
+++ b/src/sprio/print.c
@@ -80,6 +80,7 @@ int print_jobs_array(List jobs, List format)
 
 static double _get_priority(priority_factors_object_t *prio_factors)
 {
+	int i = 0;
 	double priority = prio_factors->priority_age
 		+ prio_factors->priority_fs
 		+ prio_factors->priority_js
@@ -87,6 +88,12 @@ static double _get_priority(priority_factors_object_t *prio_factors)
 		+ prio_factors->priority_qos
 		- (double)(prio_factors->nice - NICE_OFFSET);
 
+	for (i = 0; i < prio_factors->tres_cnt; i++) {
+		if (!prio_factors->priority_tres[i])
+			continue;
+		priority += prio_factors->priority_tres[i];
+	}
+
 	return priority;
 }
 
@@ -442,3 +449,59 @@ int _print_job_user_name(priority_factors_object_t * job, int width,
 	return SLURM_SUCCESS;
 }
 
+int _print_tres_normalized(priority_factors_object_t * job, int width,
+			   bool right, char* suffix)
+{
+	if (job == NULL) {	/* Print the Header instead */
+		_print_str("TRES", width, right, true);
+	} else if (job == (priority_factors_object_t *) -1)
+		_print_str("", width, right, true);
+	else {
+		char *values = xstrdup("");
+		int i = 0;
+
+		for (i = 0; i < job->tres_cnt; i++) {
+			if (!job->priority_tres[i])
+				continue;
+			if (values[0])
+				xstrcat(values, ",");
+			xstrfmtcat(values, "%s=%.2f", job->tres_names[i],
+				   job->priority_tres[i]/job->tres_weights[i]);
+		}
+
+		_print_str(values, width, right, true);
+		xfree(values);
+	}
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
+int _print_tres_weighted(priority_factors_object_t * job, int width,
+			 bool right, char* suffix)
+{
+	if (job == NULL) {	/* Print the Header instead */
+		_print_str("TRES", width, right, true);
+	} else if (job == (priority_factors_object_t *) -1)
+		_print_str(weight_tres, width, right, true);
+	else {
+		char *values = xstrdup("");
+		int i = 0;
+
+		for (i = 0; i < job->tres_cnt; i++) {
+			if (!job->priority_tres[i])
+				continue;
+			if (values[0])
+				xstrcat(values, ",");
+			xstrfmtcat(values, "%s=%.0f", job->tres_names[i],
+				   job->priority_tres[i]);
+		}
+
+		_print_str(values, width, right, true);
+		xfree(values);
+	}
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
diff --git a/src/sprio/print.h b/src/sprio/print.h
index 558bad14a..27c8c1811 100644
--- a/src/sprio/print.h
+++ b/src/sprio/print.h
@@ -98,6 +98,10 @@ int job_format_add_function(List list, int width, bool right_justify,
 	job_format_add_function(list,wid,right,suffix,_print_job_nice)
 #define job_format_add_user_name(list,wid,right,suffix) \
 	job_format_add_function(list,wid,right,suffix,_print_job_user_name)
+#define job_format_add_tres_normalized(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_tres_normalized)
+#define job_format_add_tres_weighted(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_tres_weighted)
 
 /*****************************************************************************
  * Job Line Print Functions
@@ -134,5 +138,9 @@ int _print_job_nice(priority_factors_object_t * job, int width,
 		    bool right_justify, char* suffix);
 int _print_job_user_name(priority_factors_object_t * job, int width,
 			 bool right_justify, char* suffix);
+int _print_tres_normalized(priority_factors_object_t * job, int width,
+			   bool right_justify, char* suffix);
+int _print_tres_weighted(priority_factors_object_t * job, int width,
+			 bool right_justify, char* suffix);
 
 #endif
diff --git a/src/sprio/sprio.c b/src/sprio/sprio.c
index 04e1d4216..3ffc5ff39 100644
--- a/src/sprio/sprio.c
+++ b/src/sprio/sprio.c
@@ -69,6 +69,7 @@ uint32_t weight_fs; /* weight for Fairshare factor */
 uint32_t weight_js; /* weight for Job Size factor */
 uint32_t weight_part; /* weight for Partition factor */
 uint32_t weight_qos; /* weight for QOS factor */
+char    *weight_tres; /* weights for TRES factors */
 
 static int _get_info(priority_factors_request_msg_t *factors_req,
 		     priority_factors_response_msg_t **factors_resp);
@@ -104,6 +105,7 @@ int main (int argc, char *argv[])
 		weight_js   = slurm_ctl_conf_ptr->priority_weight_js;
 		weight_part = slurm_ctl_conf_ptr->priority_weight_part;
 		weight_qos  = slurm_ctl_conf_ptr->priority_weight_qos;
+		weight_tres = slurm_ctl_conf_ptr->priority_weight_tres;
 		prio_type   = xstrdup(slurm_ctl_conf_ptr->priority_type);
 		slurm_free_ctl_conf(slurm_ctl_conf_ptr);
 	} else {
@@ -112,6 +114,7 @@ int main (int argc, char *argv[])
 		weight_js   = slurm_get_priority_weight_job_size();
 		weight_part = slurm_get_priority_weight_partition();
 		weight_qos  = slurm_get_priority_weight_qos();
+		weight_tres = slurm_get_priority_weight_tres();
 		prio_type   = slurm_get_priority_type();
 	}
 
@@ -119,8 +122,7 @@ int main (int argc, char *argv[])
 	if (strcasecmp(prio_type, "priority/basic") == 0) {
 		fprintf (stderr, "You are not running a supported "
 			 "priority plugin\n(%s).\n"
-			 "Only 'priority/multifactor' and "
-			 "'priority/multifactor2' are supported.\n",
+			 "Only 'priority/multifactor' is supported.\n",
 			 prio_type);
 		exit(1);
 	}
@@ -150,7 +152,7 @@ int main (int argc, char *argv[])
 		if (params.normalized) {
 			if (params.long_list)
 				params.format = "%.15i %.8u %10y %10a %10f "
-					"%10j %10p %10q";
+					"%10j %10p %10q %20t";
 			else{
 				params.format = xstrdup("%.15i");
 				if (params.users)
@@ -166,11 +168,13 @@ int main (int argc, char *argv[])
 					xstrcat(params.format, " %10p");
 				if (weight_qos)
 					xstrcat(params.format, " %10q");
+				if (weight_tres)
+					xstrcat(params.format, " %20t");
 			}
 		} else {
 			if (params.long_list)
 				params.format = "%.15i %.8u %.10Y %.10A %.10F "
-					"%.10J %.10P %.10Q %.6N";
+					"%.10J %.10P %.10Q %.6N %.20T";
 			else{
 				params.format = xstrdup("%.15i");
 				if (params.users)
@@ -186,6 +190,8 @@ int main (int argc, char *argv[])
 					xstrcat(params.format, " %.10P");
 				if (weight_qos)
 					xstrcat(params.format, " %.10Q");
+				if (weight_tres)
+					xstrcat(params.format, " %.20T");
 			}
 		}
 	}
@@ -203,7 +209,7 @@ int main (int argc, char *argv[])
 #if 0
 	/* Free storage here if we want to verify that logic.
 	 * Since we exit next, this is not important */
- 	list_destroy(params.format_list);
+	FREE_NULL_LIST(params.format_list);
 	slurm_free_priority_factors_response_msg(resp_msg);
 #endif
 
diff --git a/src/sprio/sprio.h b/src/sprio/sprio.h
index be2d66b54..8d35e13b5 100644
--- a/src/sprio/sprio.h
+++ b/src/sprio/sprio.h
@@ -99,6 +99,7 @@ extern uint32_t weight_fs; /* weight for Fairshare factor */
 extern uint32_t weight_js; /* weight for Job Size factor */
 extern uint32_t weight_part; /* weight for Partition factor */
 extern uint32_t weight_qos; /* weight for QOS factor */
+extern char    *weight_tres; /* weight str TRES factors */
 
 extern void parse_command_line( int argc, char* argv[] );
 extern int  parse_format( char* format );
diff --git a/src/squeue/Makefile.in b/src/squeue/Makefile.in
index cfc017760..b41256663 100644
--- a/src/squeue/Makefile.in
+++ b/src/squeue/Makefile.in
@@ -103,6 +103,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -111,10 +112,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -127,7 +130,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -252,6 +255,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -301,8 +306,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -321,6 +330,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -364,6 +376,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -387,6 +400,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/squeue/opts.c b/src/squeue/opts.c
index cd142b73e..f9e6d2622 100644
--- a/src/squeue/opts.c
+++ b/src/squeue/opts.c
@@ -66,10 +66,11 @@
 #include "src/squeue/squeue.h"
 
 /* getopt_long options, integers but not characters */
-#define OPT_LONG_HELP  0x100
-#define OPT_LONG_USAGE 0x101
-#define OPT_LONG_HIDE  0x102
-#define OPT_LONG_START 0x103
+#define OPT_LONG_HELP      0x100
+#define OPT_LONG_USAGE     0x101
+#define OPT_LONG_HIDE      0x102
+#define OPT_LONG_START     0x103
+#define OPT_LONG_NOCONVERT 0x104
 
 /* FUNCTIONS */
 static List  _build_job_list( char* str );
@@ -80,7 +81,7 @@ static List  _build_step_list( char* str );
 static List  _build_user_list( char* str );
 static char *_get_prefix(char *token);
 static void  _help( void );
-static int   _parse_state( char* str, uint16_t* states );
+static int   _parse_state( char* str, uint32_t* states );
 static void  _parse_token( char *token, char *field, int *field_size,
 			   bool *right_justify, char **suffix);
 static void _parse_long_token( char *token, char *sep, int *field_size,
@@ -115,6 +116,7 @@ parse_command_line( int argc, char* argv[] )
 		{"cluster",    required_argument, 0, 'M'},
 		{"clusters",   required_argument, 0, 'M'},
 		{"name",       required_argument, 0, 'n'},
+                {"noconvert",  no_argument,       0, OPT_LONG_NOCONVERT},
 		{"node",       required_argument, 0, 'w'},
 		{"nodes",      required_argument, 0, 'w'},
 		{"nodelist",   required_argument, 0, 'w'},
@@ -135,6 +137,8 @@ parse_command_line( int argc, char* argv[] )
 		{NULL,         0,                 0, 0}
 	};
 
+	params.convert_flags = CONVERT_NUM_UNIT_EXACT;
+
 	if (getenv("SQUEUE_ALL"))
 		params.all_flag = true;
 	if (getenv("SQUEUE_ARRAY"))
@@ -197,7 +201,7 @@ parse_command_line( int argc, char* argv[] )
 			break;
 		case (int) 'M':
 			if (params.clusters)
-				list_destroy(params.clusters);
+				FREE_NULL_LIST(params.clusters);
 			if (!(params.clusters =
 			      slurmdb_get_info_cluster(optarg))) {
 				print_db_notok(optarg, 0);
@@ -308,6 +312,9 @@ parse_command_line( int argc, char* argv[] )
 			params.start_flag = true;
 			override_format_env = true;
 			break;
+		case OPT_LONG_NOCONVERT:
+			params.convert_flags |= CONVERT_NUM_UNIT_NO;
+			break;
 		case OPT_LONG_USAGE:
 			_usage();
 			exit(0);
@@ -468,13 +475,13 @@ parse_command_line( int argc, char* argv[] )
  * RET 0 or error code
  */
 static int
-_parse_state( char* str, uint16_t* states )
+_parse_state( char* str, uint32_t* states )
 {
-	int i;
+	uint32_t i;
 	char *state_names;
 
 	if ((i = job_state_num(str)) >= 0) {
-		*states = (uint16_t) i;
+		*states = i;
 		return SLURM_SUCCESS;
 	}
 
@@ -1059,6 +1066,11 @@ extern int parse_long_format( char* format_long )
 							  field_size,
 							  right_justify,
 							  suffix );
+			else if (!strcasecmp(token,"burstbuffer"))
+				job_format_add_burst_buffer(params.format_list,
+							    field_size,
+							    right_justify,
+							    suffix );
 			else if (!strcasecmp(token,"mincpus"))
 				job_format_add_min_cpus( params.format_list,
 							 field_size,
@@ -1451,6 +1463,11 @@ extern int parse_long_format( char* format_long )
 							   field_size,
 							   right_justify,
 							   suffix );
+			else if (!strcasecmp(token, "tres"))
+				job_format_add_tres(params.format_list,
+						    field_size,
+						    right_justify,
+						    suffix );
 			else {
 				job_format_add_invalid( params.format_list,
 							field_size,
@@ -1556,7 +1573,7 @@ _print_options(void)
 	int i;
 	char *license, *name, *part;
 	uint32_t *user;
-	enum job_states *state_id;
+	uint32_t *state_id;
 	squeue_job_step_t *job_step_id;
 	char hostlist[8192];
 
@@ -1747,7 +1764,7 @@ _build_state_list( char* str )
 {
 	List my_list;
 	char *state = NULL, *tmp_char = NULL, *my_state_list = NULL;
-	uint16_t *state_id = NULL;
+	uint32_t *state_id = NULL;
 
 	if ( str == NULL)
 		return NULL;
@@ -1759,7 +1776,7 @@ _build_state_list( char* str )
 	state = strtok_r( my_state_list, ",", &tmp_char );
 	while (state)
 	{
-		state_id = xmalloc( sizeof( uint16_t ) );
+		state_id = xmalloc( sizeof( uint32_t ) );
 		if ( _parse_state( state, state_id ) != SLURM_SUCCESS )
 			exit( 1 );
 		list_append( my_list, state_id );
@@ -1769,6 +1786,15 @@ _build_state_list( char* str )
 
 }
 
+static void _append_state_list(List my_list, uint32_t state_id)
+{
+	uint16_t *state_rec;
+
+	state_rec = xmalloc(sizeof(uint32_t));
+	*state_rec = state_id;
+	list_append(my_list, state_rec);
+}
+
 /*
  * _build_all_states_list - build a list containing all possible job states
  * RET List of uint16_t values
@@ -1777,21 +1803,16 @@ static List
 _build_all_states_list( void )
 {
 	List my_list;
-	int i;
-	uint16_t * state_id;
+	uint32_t i;
 
 	my_list = list_create( NULL );
-	for (i = 0; i<JOB_END; i++) {
-		state_id = xmalloc( sizeof(uint16_t) );
-		*state_id = (uint16_t) i;
-		list_append( my_list, state_id );
-	}
-	state_id = xmalloc( sizeof(uint16_t) );
-	*state_id = (uint16_t) JOB_COMPLETING;
-	list_append( my_list, state_id );
-	state_id = xmalloc( sizeof(uint16_t) );
-	*state_id = (uint16_t) JOB_CONFIGURING;
-	list_append( my_list, state_id );
+	for (i = 0; i < JOB_END; i++)
+		_append_state_list(my_list, i);
+
+	_append_state_list(my_list, JOB_COMPLETING);
+	_append_state_list(my_list, JOB_CONFIGURING);
+	_append_state_list(my_list, JOB_SPECIAL_EXIT);
+
 	return my_list;
 
 }
@@ -1904,6 +1925,8 @@ Usage: squeue [OPTIONS]\n\
                                   current cluster.  cluster with no name will\n\
                                   reset to default.\n\
   -n, --name=job_name(s)          comma separated list of job names to view\n\
+  --noconvert                     don't convert units from their original type\n\
+				  (e.g. 2048M won't be converted to 2G).\n\
   -o, --format=format             format specification\n\
   -p, --partition=partition(s)    comma separated list of partitions\n\
 				  to view, default is all partitions\n\
diff --git a/src/squeue/print.c b/src/squeue/print.c
index 67fd9feaf..6bbcf0e46 100644
--- a/src/squeue/print.c
+++ b/src/squeue/print.c
@@ -45,6 +45,7 @@
 #include <time.h>
 #include <sys/types.h>
 
+#include "src/common/cpu_frequency.h"
 #include "src/common/hostlist.h"
 #include "src/common/list.h"
 #include "src/common/macros.h"
@@ -61,9 +62,7 @@
 static int	_filter_job(job_info_t * job);
 static int	_filter_job_part(char *part_name);
 static int	_filter_step(job_step_info_t * step);
-static int	_get_node_cnt(job_info_t * job);
 static void	_job_list_del(void *x);
-static int	_nodes_in_list(char *node_list);
 static uint32_t	_part_get_prio(char *part_name);
 static void	_part_state_free(void);
 static void	_part_state_load(void);
@@ -137,7 +136,7 @@ int print_jobs_array(job_info_t * jobs, int size, List format)
 
 	/* Print the jobs of interest */
 	list_for_each (l, (ListForF) print_job_from_format, (void *) format);
-	list_destroy (l);
+	FREE_NULL_LIST (l);
 
 	return SLURM_SUCCESS;
 }
@@ -170,7 +169,7 @@ int print_steps_array(job_step_info_t * steps, int size, List format)
 			print_step_from_format(step_ptr, format);
 		}
 		list_iterator_destroy(step_iterator);
-		list_destroy(step_list);
+		FREE_NULL_LIST(step_list);
 	}
 
 	return SLURM_SUCCESS;
@@ -454,15 +453,35 @@ int _print_job_batch_host(job_info_t * job, int width, bool right, char* suffix)
 	return SLURM_SUCCESS;
 }
 
+int _print_job_burst_buffer(job_info_t * job, int width, bool right, char* suffix)
+{
+	if (job == NULL)	/* Print the Header instead */
+		_print_str("BURST_BUFFER", width, right, true);
+	else {
+		char id[FORMAT_STRING_SIZE];
+		snprintf(id, FORMAT_STRING_SIZE, "%s", job->burst_buffer);
+		_print_str(id, width, right, true);
+	}
+	if (suffix)
+		printf("%s", suffix);
+	return SLURM_SUCCESS;
+}
+
 int _print_job_core_spec(job_info_t * job, int width, bool right, char* suffix)
 {
-	if (job == NULL) 	/* Print the Header instead */
+	char spec[FORMAT_STRING_SIZE];
+
+	if (job == NULL) {	/* Print the Header instead */
 		_print_str("CORE_SPEC", width, right, true);
-	else
-		if (job->core_spec == (uint16_t) NO_VAL)
-			_print_str("*", width, right, true);
-		else
-			_print_int(job->core_spec, width, right, true);
+	} else if (job->core_spec == (uint16_t) NO_VAL) {
+		_print_str("N/A", width, right, true);
+	} else if (job->core_spec & CORE_SPEC_THREAD) {
+		snprintf(spec, FORMAT_STRING_SIZE, "%d Threads",
+			 (job->core_spec & (~CORE_SPEC_THREAD)));
+		_print_str(spec, width, right, true);
+	} else {
+		_print_int(job->core_spec, width, right, true);
+	}
 	if (suffix)
 		printf("%s", suffix);
 	return SLURM_SUCCESS;
@@ -480,7 +499,7 @@ int _print_job_job_id(job_info_t * job, int width, bool right, char* suffix)
 		if (getenv("SLURM_BITSTR_LEN")) {
 			len = strlen(job->array_task_str) + 64;
 			buf = xmalloc(len);
-			sprintf(buf, "%u_[%s]\n", job->array_job_id,
+			sprintf(buf, "%u_[%s]", job->array_job_id,
 				job->array_task_str);
 			_print_str(buf, width, right, false);
 			xfree(buf);
@@ -875,12 +894,13 @@ int _print_job_reason_list(job_info_t * job, int width, bool right,
 		   && (IS_JOB_PENDING(job)
 		       || IS_JOB_TIMEOUT(job)
 		       || IS_JOB_FAILED(job))) {
-		char id[FORMAT_STRING_SIZE], *reason;
+		int len = width ? width : FORMAT_STRING_SIZE;
+		char id[len], *reason;
 		if (job->state_desc)
 			reason = job->state_desc;
 		else
 			reason = job_reason_string(job->state_reason);
-		snprintf(id, FORMAT_STRING_SIZE, "(%s)", reason);
+		snprintf(id, len, "(%s)", reason);
 		_print_str(id, width, right, true);
 	} else {
 		char *nodes = xstrdup(job->nodes);
@@ -931,7 +951,8 @@ int _print_job_num_cpus(job_info_t * job, int width, bool right, char* suffix)
 	else {
 		if (params.cluster_flags & CLUSTER_FLAG_BG)
 			convert_num_unit((float)job->num_cpus, tmp_char,
-					 sizeof(tmp_char), UNIT_NONE);
+					 sizeof(tmp_char), UNIT_NONE,
+					 params.convert_flags);
 		else
 			snprintf(tmp_char, sizeof(tmp_char),
 				 "%u", job->num_cpus);
@@ -957,11 +978,12 @@ int _print_job_num_nodes(job_info_t * job, int width, bool right_justify,
 						    &node_cnt);
 
 		if ((node_cnt == 0) || (node_cnt == NO_VAL))
-			node_cnt = _get_node_cnt(job);
+			node_cnt = job->num_nodes;
 
 		if (params.cluster_flags & CLUSTER_FLAG_BG)
 			convert_num_unit((float)node_cnt, tmp_char,
-					 sizeof(tmp_char), UNIT_NONE);
+					 sizeof(tmp_char), UNIT_NONE,
+					 params.convert_flags);
 		else
 			snprintf(tmp_char, sizeof(tmp_char), "%d", node_cnt);
 
@@ -972,40 +994,6 @@ int _print_job_num_nodes(job_info_t * job, int width, bool right_justify,
 	return SLURM_SUCCESS;
 }
 
-static int _get_node_cnt(job_info_t * job)
-{
-	int node_cnt = 0;
-
-	/*  For PENDING jobs, return the maximum of the requested nodelist,
-	 *   requested maximum number of nodes, or requested CPUs rounded
-	 *   to nearest node.
-	 *
-	 *  For COMPLETING jobs, the job->nodes nodelist has already been
-	 *   altered to list only the nodes still in the comp state, and
-	 *   thus we count only those nodes toward the total nodes still
-	 *   allocated to this job.
-	 */
-
-	if (IS_JOB_PENDING(job)) {
-		node_cnt = _nodes_in_list(job->req_nodes);
-		node_cnt = MAX(node_cnt, job->num_nodes);
-		if ((node_cnt == 1) && (job->num_cpus > 1)
-		    && job->ntasks_per_node
-		    && (job->ntasks_per_node != (uint16_t) NO_VAL)) {
-			int num_tasks = job->num_cpus;
-			if (job->cpus_per_task != (uint16_t) NO_VAL)
-				num_tasks /= job->cpus_per_task;
-			node_cnt = (num_tasks + 1) / job->ntasks_per_node;
-			if (node_cnt > num_tasks)
-				node_cnt = num_tasks;
-			else if (!node_cnt)
-				node_cnt = 1;
-		}
-	} else
-		node_cnt = _nodes_in_list(job->nodes);
-	return node_cnt;
-}
-
 int _print_job_num_sct(job_info_t * job, int width, bool right_justify,
 			 char* suffix)
 {
@@ -1018,17 +1006,20 @@ int _print_job_num_sct(job_info_t * job, int width, bool right_justify,
 			strcpy(sockets, "*");
 		else
 			convert_num_unit((float)job->sockets_per_node, sockets,
-					sizeof(sockets), UNIT_NONE);
+					sizeof(sockets), UNIT_NONE,
+					params.convert_flags);
 		if (job->cores_per_socket == (uint16_t) NO_VAL)
 			strcpy(cores, "*");
 		else
 			convert_num_unit((float)job->cores_per_socket, cores,
-					sizeof(cores), UNIT_NONE);
+					sizeof(cores), UNIT_NONE,
+					params.convert_flags);
 		if (job->threads_per_core == (uint16_t) NO_VAL)
 			strcpy(threads, "*");
 		else
 			convert_num_unit((float)job->threads_per_core, threads,
-					sizeof(threads), UNIT_NONE);
+					sizeof(threads), UNIT_NONE,
+					params.convert_flags);
 		sct[0] = '\0';
 		strcat(sct, sockets);
 		strcat(sct, ":");
@@ -1045,14 +1036,6 @@ int _print_job_num_sct(job_info_t * job, int width, bool right_justify,
 	return SLURM_SUCCESS;
 }
 
-static int _nodes_in_list(char *node_list)
-{
-	hostset_t host_set = hostset_create(node_list);
-	int count = hostset_count(host_set);
-	hostset_destroy(host_set);
-	return count;
-}
-
 int _print_job_shared(job_info_t * job, int width, bool right_justify,
 		      char* suffix)
 {
@@ -1064,9 +1047,11 @@ int _print_job_shared(job_info_t * job, int width, bool right_justify,
 			_print_str("no", width, right_justify, true);
 			break;
 		case 1:
-		case 2:
 			_print_str("yes", width, right_justify, true);
 			break;
+		case 2:
+			_print_str("user", width, right_justify, true);
+			break;
 		case (uint16_t)NO_VAL:
 		default:
 			_print_str("unknwn", width, right_justify, true);
@@ -1101,7 +1086,8 @@ int _print_pn_min_cpus(job_info_t * job, int width, bool right_justify,
 		_print_str("MIN_CPUS", width, right_justify, true);
 	else {
 		convert_num_unit((float)job->pn_min_cpus, tmp_char,
-				 sizeof(tmp_char), UNIT_NONE);
+				 sizeof(tmp_char), UNIT_NONE,
+				 params.convert_flags);
 		_print_str(tmp_char, width, right_justify, true);
 	}
 	if (suffix)
@@ -1121,7 +1107,8 @@ int _print_sockets(job_info_t * job, int width, bool right_justify,
 			strcpy(tmp_char, "*");
 		else
 			convert_num_unit((float)job->sockets_per_node, tmp_char,
-				 sizeof(tmp_char), UNIT_NONE);
+				 sizeof(tmp_char), UNIT_NONE,
+				 params.convert_flags);
 		_print_str(tmp_char, width, right_justify, true);
 	}
 	if (suffix)
@@ -1141,7 +1128,8 @@ int _print_cores(job_info_t * job, int width, bool right_justify,
 			strcpy(tmp_char, "*");
 		else
 			convert_num_unit((float)job->cores_per_socket, tmp_char,
-					sizeof(tmp_char), UNIT_NONE);
+					sizeof(tmp_char), UNIT_NONE,
+					params.convert_flags);
 		_print_str(tmp_char, width, right_justify, true);
 	}
 	if (suffix)
@@ -1161,7 +1149,8 @@ int _print_threads(job_info_t * job, int width, bool right_justify,
 			strcpy(tmp_char, "*");
 		else
 			convert_num_unit((float)job->threads_per_core, tmp_char,
-					sizeof(tmp_char), UNIT_NONE);
+					sizeof(tmp_char), UNIT_NONE,
+					params.convert_flags);
 		_print_str(tmp_char, width, right_justify, true);
 	}
 	if (suffix)
@@ -1181,7 +1170,8 @@ int _print_pn_min_memory(job_info_t * job, int width, bool right_justify,
 	    	tmp_char[0] = '\0';
 		job->pn_min_memory &= (~MEM_PER_CPU);
 		convert_num_unit((float)job->pn_min_memory, min_mem,
-				 sizeof(min_mem), UNIT_MEGA);
+				 sizeof(min_mem), UNIT_MEGA,
+				 params.convert_flags);
 		strcat(tmp_char, min_mem);
 		_print_str(tmp_char, width, right_justify, true);
 	}
@@ -1201,7 +1191,8 @@ _print_pn_min_tmp_disk(job_info_t * job, int width, bool right_justify,
 		_print_str("MIN_TMP_DISK", width, right_justify, true);
 	else {
 		convert_num_unit((float)job->pn_min_tmp_disk,
-				 tmp_char, sizeof(tmp_char), UNIT_MEGA);
+				 tmp_char, sizeof(tmp_char), UNIT_MEGA,
+				 params.convert_flags);
 		_print_str(tmp_char, width, right_justify, true);
 	}
 
@@ -1558,7 +1549,7 @@ int _print_job_max_nodes(job_info_t * job, int width, bool right_justify,
 	else if (job->max_nodes != 0)
 		_print_int(job->max_nodes, width, right_justify, true);
 	else
-		_print_int(_get_node_cnt(job), width, right_justify, true);
+		_print_int(job->num_nodes, width, right_justify, true);
 
 	if (suffix)
 		printf("%s",suffix);
@@ -1843,6 +1834,23 @@ int _print_job_wait4switch(job_info_t * job, int width,
 	return SLURM_SUCCESS;
 }
 
+int _print_job_tres(job_info_t *job, int width,
+		    bool right_justify, char *suffix)
+{
+	if (job == NULL) {
+		_print_str("TRES", width, right_justify, true);
+	} else {
+		if (job->tres_alloc_str)
+			_print_str(job->tres_alloc_str, width,
+				   right_justify, true);
+		else
+			_print_str("N/A", width,
+				   right_justify, true);
+
+	}
+	return SLURM_SUCCESS;
+}
+
 
 /*****************************************************************************
  * Job Step Print Functions
@@ -2208,12 +2216,19 @@ int _print_step_num_cpus(job_step_info_t * step, int width, bool right,
 int _print_step_cpu_freq(job_step_info_t * step, int width, bool right,
 			 char* suffix)
 {
-	if (step == NULL)
+	char bfm[16], bfx[16], bfg[16], bfall[48];
+
+	if (step == NULL) {
 		_print_str("CPU_FREQ", width, right, true);
-	else if (step->cpu_freq != NO_VAL)
-		_print_int(step->cpu_freq, width, right, true);
-	else
-		_print_str("N/A", width, right, true);
+		if (suffix)
+			printf("%s", suffix);
+		return SLURM_SUCCESS;
+	}
+	cpu_freq_to_string(bfm, sizeof(bfm), step->cpu_freq_min);
+	cpu_freq_to_string(bfx, sizeof(bfx), step->cpu_freq_max);
+	cpu_freq_to_string(bfg, sizeof(bfg), step->cpu_freq_gov);
+	snprintf(bfall, sizeof(bfall), "%s-%s:%s", bfm, bfx, bfg);
+	_print_str(bfall, width, right, true);
 
 	if (suffix)
 		printf("%s", suffix);
@@ -2253,7 +2268,7 @@ static int _filter_job(job_info_t * job)
 	int filter;
 	ListIterator iterator;
 	uint32_t *user;
-	uint16_t *state_id;
+	uint32_t *state_id;
 	char *account, *license, *qos, *name;
 	squeue_job_step_t *job_step_id;
 
@@ -2333,15 +2348,14 @@ static int _filter_job(job_info_t * job)
 		filter = 1;
 		iterator = list_iterator_create(params.state_list);
 		while ((state_id = list_next(iterator))) {
-			if ((*state_id == job->job_state) ||
-			    ((*state_id == JOB_COMPLETING) &&
-			     (*state_id & job->job_state)) ||
-			    ((*state_id == JOB_CONFIGURING) &&
-			     (*state_id & job->job_state)) ||
-			    ((*state_id == JOB_RESIZING) &&
-			     (*state_id & job->job_state))||
-			    ((*state_id == JOB_SPECIAL_EXIT) &&
-			     (*state_id & job->job_state))) {
+			bool match = false;
+			job->job_state &= ~JOB_UPDATE_DB;
+			if (*state_id &  JOB_STATE_FLAGS) {
+				if (*state_id &  job->job_state)
+					match = true;
+			} else if (*state_id == job->job_state)
+				match = true;
+			if (match) {
 				filter = 0;
 				break;
 			}
diff --git a/src/squeue/print.h b/src/squeue/print.h
index 58ef521c4..687fe5403 100644
--- a/src/squeue/print.h
+++ b/src/squeue/print.h
@@ -92,6 +92,8 @@ int job_format_add_function(List list, int width, bool right_justify,
 	job_format_add_function(list,wid,right,suffix,_print_job_array_task_id)
 #define job_format_add_batch_host(list,wid,right,suffix) \
 	job_format_add_function(list,wid,right,suffix,_print_job_batch_host)
+#define job_format_add_burst_buffer(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_job_burst_buffer)
 #define job_format_add_core_spec(list,wid,right,suffix) \
 	job_format_add_function(list,wid,right,suffix,_print_job_core_spec)
 #define job_format_add_job_id(list,wid,right,suffix) \
@@ -269,6 +271,8 @@ int job_format_add_function(List list, int width, bool right_justify,
 	job_format_add_function(list,wid,right,suffix,_print_job_min_time)
 #define job_format_add_wait4switch(list,wid,right,suffix) \
 	job_format_add_function(list,wid,right,suffix,_print_job_wait4switch)
+#define job_format_add_tres(list,wid,right,suffix) \
+	job_format_add_function(list,wid,right,suffix,_print_job_tres)
 
 
 /*****************************************************************************
@@ -280,6 +284,8 @@ int _print_job_array_task_id(job_info_t * job, int width, bool right_justify,
 			     char* suffix);
 int _print_job_batch_host(job_info_t * job, int width, bool right_justify,
 			char* suffix);
+int _print_job_burst_buffer(job_info_t * job, int width, bool right_justify,
+			    char* suffix);
 int _print_job_core_spec(job_info_t * job, int width, bool right_justify,
 			 char* suffix);
 int _print_job_job_id(job_info_t * job, int width, bool right_justify,
@@ -449,6 +455,8 @@ int _print_job_min_time(job_info_t * job, int width,
 			bool right_justify, char* suffix);
 int _print_job_wait4switch(job_info_t * job, int width,
 			   bool right_justify, char* suffix);
+int _print_job_tres(job_info_t * job, int width,
+		    bool right_justify, char *suffix);
 
 /*****************************************************************************
  * Step Print Format Functions
diff --git a/src/squeue/squeue.c b/src/squeue/squeue.c
index c30584917..4b08787e9 100644
--- a/src/squeue/squeue.c
+++ b/src/squeue/squeue.c
@@ -50,6 +50,7 @@
 #include <termios.h>
 
 #include "src/common/read_config.h"
+#include "src/common/slurm_time.h"
 #include "src/common/xstring.h"
 #include "src/squeue/squeue.h"
 
@@ -274,10 +275,10 @@ _print_job_steps( bool clear_old )
 			error_code = SLURM_SUCCESS;
 			new_step_ptr = old_step_ptr;
 		}
-	}
-	else
+	} else {
 		error_code = slurm_get_job_steps((time_t) 0, NO_VAL, NO_VAL,
 						 &new_step_ptr, show_flags);
+	}
 	if (error_code) {
 		slurm_perror ("slurm_get_job_steps error");
 		return SLURM_ERROR;
@@ -313,5 +314,5 @@ _print_date( void )
 	time_t now;
 
 	now = time( NULL );
-	printf("%s", ctime( &now ));
+	printf("%s", slurm_ctime( &now ));
 }
diff --git a/src/squeue/squeue.h b/src/squeue/squeue.h
index 2c8cd0afd..f88420624 100644
--- a/src/squeue/squeue.h
+++ b/src/squeue/squeue.h
@@ -109,6 +109,8 @@ struct squeue_parameters {
 	uint32_t job_id;	/* set if request for a single job ID */
 	uint32_t user_id;	/* set if request for a single user ID */
 
+	uint32_t convert_flags;
+
 	List  account_list;
 	List  format_list;
 	List  job_list;
diff --git a/src/sreport/Makefile.in b/src/sreport/Makefile.in
index 9d6a5feb5..c88329a9b 100644
--- a/src/sreport/Makefile.in
+++ b/src/sreport/Makefile.in
@@ -101,6 +101,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -109,10 +110,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -125,7 +128,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -250,6 +253,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -299,8 +304,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -319,6 +328,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -362,6 +374,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -385,6 +398,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/sreport/cluster_reports.c b/src/sreport/cluster_reports.c
index c1217511e..2fbd63a29 100644
--- a/src/sreport/cluster_reports.c
+++ b/src/sreport/cluster_reports.c
@@ -2,7 +2,7 @@
  *  cluster_reports.c - functions for generating cluster reports
  *                       from accounting infrastructure.
  *****************************************************************************
- *
+ *  Copyright (C) 2010-2015 SchedMD LLC.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
@@ -43,20 +43,21 @@ bool tree_display = 0;
 
 enum {
 	PRINT_CLUSTER_NAME,
-	PRINT_CLUSTER_CPUS,
-	PRINT_CLUSTER_ACPU,
-	PRINT_CLUSTER_DCPU,
-	PRINT_CLUSTER_ICPU,
-	PRINT_CLUSTER_PDCPU,
-	PRINT_CLUSTER_OCPU,
-	PRINT_CLUSTER_RCPU,
-	PRINT_CLUSTER_TOTAL,
+	PRINT_CLUSTER_TRES_CNT,
+	PRINT_CLUSTER_TRES_ALLOC,
+	PRINT_CLUSTER_TRES_DOWN,
+	PRINT_CLUSTER_TRES_IDLE,
+	PRINT_CLUSTER_TRES_PLAN_DOWN,
+	PRINT_CLUSTER_TRES_OVER,
+	PRINT_CLUSTER_TRES_RESV,
+	PRINT_CLUSTER_TRES_REPORTED,
 	PRINT_CLUSTER_ACCT,
 	PRINT_CLUSTER_USER_LOGIN,
 	PRINT_CLUSTER_USER_PROPER,
 	PRINT_CLUSTER_AMOUNT_USED,
 	PRINT_CLUSTER_WCKEY,
 	PRINT_CLUSTER_ENERGY,
+	PRINT_CLUSTER_TRES_NAME,
 };
 
 typedef enum {
@@ -93,12 +94,12 @@ static int _set_wckey_cond(int *start, int argc, char *argv[],
 	if (!wckey_cond->cluster_list)
 		wckey_cond->cluster_list = list_create(slurm_destroy_char);
 
-	for (i=(*start); i<argc; i++) {
+	for (i = (*start); i < argc; i++) {
 		end = parse_option_end(argv[i]);
 		if (!end)
 			command_len=strlen(argv[i]);
 		else {
-			command_len=end-1;
+			command_len = end - 1;
 			if (argv[i][end] == '=') {
 				end++;
 			}
@@ -130,6 +131,7 @@ static int _set_wckey_cond(int *start, int argc, char *argv[],
 				set = 1;
 		} else if (!strncasecmp (argv[i], "End", MAX(command_len, 1))) {
 			wckey_cond->usage_end = parse_time(argv[i]+end, 1);
+			wckey_cond->usage_end = sanity_check_endtime(wckey_cond->usage_end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Format",
 					 MAX(command_len, 1))) {
@@ -175,7 +177,7 @@ static int _set_wckey_cond(int *start, int argc, char *argv[],
 }
 
 static int _set_assoc_cond(int *start, int argc, char *argv[],
-			   slurmdb_association_cond_t *assoc_cond,
+			   slurmdb_assoc_cond_t *assoc_cond,
 			   List format_list)
 {
 	int i;
@@ -186,7 +188,7 @@ static int _set_assoc_cond(int *start, int argc, char *argv[],
 	int command_len = 0;
 
 	if (!assoc_cond) {
-		error("We need an slurmdb_association_cond to call this");
+		error("We need an slurmdb_assoc_cond to call this");
 		return SLURM_ERROR;
 	}
 
@@ -195,12 +197,12 @@ static int _set_assoc_cond(int *start, int argc, char *argv[],
 
 	if (!assoc_cond->cluster_list)
 		assoc_cond->cluster_list = list_create(slurm_destroy_char);
-	for (i=(*start); i<argc; i++) {
+	for (i = (*start); i < argc; i++) {
 		end = parse_option_end(argv[i]);
 		if (!end)
-			command_len=strlen(argv[i]);
+			command_len = strlen(argv[i]);
 		else {
-			command_len=end-1;
+			command_len = end - 1;
 			if (argv[i][end] == '=') {
 				end++;
 			}
@@ -238,6 +240,7 @@ static int _set_assoc_cond(int *start, int argc, char *argv[],
 			set = 1;
 		} else if (!strncasecmp (argv[i], "End", MAX(command_len, 1))) {
 			assoc_cond->usage_end = parse_time(argv[i]+end, 1);
+			assoc_cond->usage_end = sanity_check_endtime(assoc_cond->usage_end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Format",
 					 MAX(command_len, 1))) {
@@ -249,7 +252,7 @@ static int _set_assoc_cond(int *start, int argc, char *argv[],
 			assoc_cond->usage_start = parse_time(argv[i]+end, 1);
 			set = 1;
 		} else {
-			exit_code=1;
+			exit_code = 1;
 			fprintf(stderr, " Unknown condition: %s\n"
 			       "Use keyword set to modify value\n", argv[i]);
 		}
@@ -271,7 +274,6 @@ static int _set_assoc_cond(int *start, int argc, char *argv[],
 	slurmdb_report_set_start_end_time(&start_time, &end_time);
 	assoc_cond->usage_start = start_time;
 	assoc_cond->usage_end = end_time;
-
 	return set;
 }
 
@@ -296,12 +298,12 @@ static int _set_cluster_cond(int *start, int argc, char *argv[],
 
 	if (!cluster_cond->cluster_list)
 		cluster_cond->cluster_list = list_create(slurm_destroy_char);
-	for (i=(*start); i<argc; i++) {
+	for (i = (*start); i < argc; i++) {
 		end = parse_option_end(argv[i]);
 		if (!end)
 			command_len=strlen(argv[i]);
 		else {
-			command_len=end-1;
+			command_len = end - 1;
 			if (argv[i][end] == '=') {
 				end++;
 			}
@@ -318,6 +320,7 @@ static int _set_cluster_cond(int *start, int argc, char *argv[],
 			set = 1;
 		} else if (!strncasecmp (argv[i], "End", MAX(command_len, 1))) {
 			cluster_cond->usage_end = parse_time(argv[i]+end, 1);
+			cluster_cond->usage_end = sanity_check_endtime(cluster_cond->usage_end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Format",
 					 MAX(command_len, 1))) {
@@ -329,7 +332,7 @@ static int _set_cluster_cond(int *start, int argc, char *argv[],
 			cluster_cond->usage_start = parse_time(argv[i]+end, 1);
 			set = 1;
 		} else {
-			exit_code=1;
+			exit_code = 1;
 			fprintf(stderr," Unknown condition: %s\n"
 			       "Use keyword set to modify value\n", argv[i]);
 		}
@@ -362,7 +365,7 @@ static int _setup_print_fields_list(List format_list)
 	char *object = NULL;
 
 	if (!format_list || !list_count(format_list)) {
-		exit_code=1;
+		exit_code = 1;
 			fprintf(stderr, " we need a format list "
 				"to set up the print.\n");
 		return SLURM_ERROR;
@@ -372,7 +375,7 @@ static int _setup_print_fields_list(List format_list)
 		print_fields_list = list_create(destroy_print_field);
 
 	itr = list_iterator_create(format_list);
-	while((object = list_next(itr))) {
+	while ((object = list_next(itr))) {
 		char *tmp_char = NULL;
 		int command_len = 0;
 		int newlen = 0;
@@ -395,7 +398,7 @@ static int _setup_print_fields_list(List format_list)
 			field->print_routine = print_fields_str;
 		} else if (!strncasecmp("allocated", object,
 				       MAX(command_len, 2))) {
-			field->type = PRINT_CLUSTER_ACPU;
+			field->type = PRINT_CLUSTER_TRES_ALLOC;
 			field->name = xstrdup("Allocated");
 			if (time_format == SLURMDB_REPORT_TIME_SECS_PER
 			   || time_format == SLURMDB_REPORT_TIME_MINS_PER
@@ -410,14 +413,8 @@ static int _setup_print_fields_list(List format_list)
 			field->name = xstrdup("Cluster");
 			field->len = 9;
 			field->print_routine = print_fields_str;
-		} else if (!strncasecmp("cpucount", object,
-				       MAX(command_len, 2))) {
-			field->type = PRINT_CLUSTER_CPUS;
-			field->name = xstrdup("CPU count");
-			field->len = 9;
-			field->print_routine = print_fields_uint;
 		} else if (!strncasecmp("down", object, MAX(command_len, 1))) {
-			field->type = PRINT_CLUSTER_DCPU;
+			field->type = PRINT_CLUSTER_TRES_DOWN;
 			field->name = xstrdup("Down");
 			if (time_format == SLURMDB_REPORT_TIME_SECS_PER
 			   || time_format == SLURMDB_REPORT_TIME_MINS_PER
@@ -427,7 +424,7 @@ static int _setup_print_fields_list(List format_list)
 				field->len = 10;
 			field->print_routine = slurmdb_report_print_time;
 		} else if (!strncasecmp("idle", object, MAX(command_len, 1))) {
-			field->type = PRINT_CLUSTER_ICPU;
+			field->type = PRINT_CLUSTER_TRES_IDLE;
 			field->name = xstrdup("Idle");
 			if (time_format == SLURMDB_REPORT_TIME_SECS_PER
 			   || time_format == SLURMDB_REPORT_TIME_MINS_PER
@@ -443,7 +440,7 @@ static int _setup_print_fields_list(List format_list)
 			field->print_routine = print_fields_str;
 		} else if (!strncasecmp("overcommited", object,
 				       MAX(command_len, 1))) {
-			field->type = PRINT_CLUSTER_OCPU;
+			field->type = PRINT_CLUSTER_TRES_OVER;
 			field->name = xstrdup("Over Comm");
 			if (time_format == SLURMDB_REPORT_TIME_SECS_PER
 			   || time_format == SLURMDB_REPORT_TIME_MINS_PER
@@ -454,7 +451,7 @@ static int _setup_print_fields_list(List format_list)
 			field->print_routine = slurmdb_report_print_time;
 		} else if (!strncasecmp("PlannedDown", object,
 				       MAX(command_len, 2))) {
-			field->type = PRINT_CLUSTER_PDCPU;
+			field->type = PRINT_CLUSTER_TRES_PLAN_DOWN;
 			field->name = xstrdup("PLND Down");
 			if (time_format == SLURMDB_REPORT_TIME_SECS_PER
 			   || time_format == SLURMDB_REPORT_TIME_MINS_PER
@@ -463,14 +460,15 @@ static int _setup_print_fields_list(List format_list)
 			else
 				field->len = 10;
 			field->print_routine = slurmdb_report_print_time;
-		} else if (!strncasecmp("Proper", object, MAX(command_len, 2))) {
+		} else if (!strncasecmp("Proper", object,
+					MAX(command_len, 2))) {
 			field->type = PRINT_CLUSTER_USER_PROPER;
 			field->name = xstrdup("Proper Name");
 			field->len = 15;
 			field->print_routine = print_fields_str;
 		} else if (!strncasecmp("reported", object,
 				       MAX(command_len, 3))) {
-			field->type = PRINT_CLUSTER_TOTAL;
+			field->type = PRINT_CLUSTER_TRES_REPORTED;
 			field->name = xstrdup("Reported");
 			if (time_format == SLURMDB_REPORT_TIME_SECS_PER
 			   || time_format == SLURMDB_REPORT_TIME_MINS_PER
@@ -481,7 +479,7 @@ static int _setup_print_fields_list(List format_list)
 			field->print_routine = slurmdb_report_print_time;
 		} else if (!strncasecmp("reserved", object,
 				       MAX(command_len, 3))) {
-			field->type = PRINT_CLUSTER_RCPU;
+			field->type = PRINT_CLUSTER_TRES_RESV;
 			field->name = xstrdup("Reserved");
 			if (time_format == SLURMDB_REPORT_TIME_SECS_PER
 			   || time_format == SLURMDB_REPORT_TIME_MINS_PER
@@ -490,6 +488,21 @@ static int _setup_print_fields_list(List format_list)
 			else
 				field->len = 9;
 			field->print_routine = slurmdb_report_print_time;
+		} else if (!strncasecmp("TresCount", object,
+					MAX(command_len, 5)) ||
+			   !strncasecmp("cpucount", object,
+					MAX(command_len, 2)) ||
+			   !strncasecmp("count", object, MAX(command_len, 2))) {
+			field->type = PRINT_CLUSTER_TRES_CNT;
+			field->name = xstrdup("TRES Count");
+			field->len = 10;
+			field->print_routine = print_fields_uint;
+		} else if (!strncasecmp("TresName", object,
+				       MAX(command_len, 5))) {
+			field->type = PRINT_CLUSTER_TRES_NAME;
+			field->name = xstrdup("TRES Name");
+			field->len = 14;
+			field->print_routine = print_fields_str;
 		} else if (!strncasecmp("Used", object, MAX(command_len, 1))) {
 			field->type = PRINT_CLUSTER_AMOUNT_USED;
 			field->name = xstrdup("Used");
@@ -520,7 +533,7 @@ static int _setup_print_fields_list(List format_list)
 				field->len = 10;
 			field->print_routine = slurmdb_report_print_time;
 		} else {
-			exit_code=1;
+			exit_code = 1;
 			fprintf(stderr, " Unknown field '%s'\n", object);
 			xfree(field);
 			continue;
@@ -541,7 +554,7 @@ static List _get_cluster_list(int argc, char *argv[], uint32_t *total_time,
 {
 	slurmdb_cluster_cond_t *cluster_cond =
 		xmalloc(sizeof(slurmdb_cluster_cond_t));
-	int i=0;
+	int i = 0;
 	List cluster_list = NULL;
 
 	slurmdb_init_cluster_cond(cluster_cond, 0);
@@ -552,7 +565,7 @@ static List _get_cluster_list(int argc, char *argv[], uint32_t *total_time,
 
 	cluster_list = slurmdb_clusters_get(db_conn, cluster_cond);
 	if (!cluster_list) {
-		exit_code=1;
+		exit_code = 1;
 		fprintf(stderr, " Problem with cluster query.\n");
 		return NULL;
 	}
@@ -569,16 +582,14 @@ static List _get_cluster_list(int argc, char *argv[], uint32_t *total_time,
 				    end_char, sizeof(end_char));
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
-		printf("%s %s - %s (%d*cpus secs)\n",
-		       report_name, start_char, end_char,
-		       (int)(cluster_cond->usage_end
-			     - cluster_cond->usage_start));
+		printf("%s %s - %s\n",
+		       report_name, start_char, end_char);
 		switch(time_format) {
 		case SLURMDB_REPORT_TIME_PERCENT:
-			printf("Time reported in %s\n", time_format_string);
+			printf("Use reported in %s\n", time_format_string);
 			break;
 		default:
-			printf("Time reported in CPU %s\n", time_format_string);
+			printf("Use reported in TRES %s\n", time_format_string);
 			break;
 		}
 		printf("----------------------------------------"
@@ -591,24 +602,150 @@ static List _get_cluster_list(int argc, char *argv[], uint32_t *total_time,
 	return cluster_list;
 }
 
+static void _cluster_account_by_user_tres_report(slurmdb_tres_rec_t *tres,
+		slurmdb_report_cluster_rec_t *slurmdb_report_cluster,
+		slurmdb_report_assoc_rec_t *slurmdb_report_assoc)
+{
+	slurmdb_tres_rec_t *cluster_tres_rec, *tres_rec, *total_energy;
+	char *tmp_char = NULL;
+	int curr_inx = 1, field_count;
+	ListIterator iter = NULL;
+	print_field_t *field;
+	uint64_t cluster_energy_cnt = 0, assoc_energy_cnt = 0;
+	uint32_t tres_energy;
+	char *tres_tmp = NULL;
+	char *print_acct = NULL;
+
+	if (!(cluster_tres_rec = list_find_first(
+				slurmdb_report_cluster->tres_list,
+				slurmdb_find_tres_in_list,
+				&tres->id))) {
+		info("error, no %s(%d) TRES!", tres->type, tres->id);
+		return;
+	}
+	if (!(tres_rec = list_find_first(slurmdb_report_assoc->tres_list,
+				slurmdb_find_tres_in_list,
+				&tres->id))) {
+		info("error, no %s(%d) TRES!", tres->type, tres->id);
+		return;
+	}
+	if (!tres_rec->alloc_secs) {
+		debug2("error, no %s(%d) TRES usage", tres->type, tres->id);
+		return;
+	}
+
+	field_count = list_count(print_fields_list);
+	iter = list_iterator_create(print_fields_list);
+	while ((field = list_next(iter))) {
+		struct passwd *pwd = NULL;
+		switch (field->type) {
+		case PRINT_CLUSTER_ACCT:
+			if (tree_display) {
+				List tree_list = NULL;
+				char *local_acct = NULL;
+				char *parent_acct = NULL;
+				if (slurmdb_report_assoc->user) {
+					local_acct = xstrdup_printf("|%s",
+						     slurmdb_report_assoc->acct);
+					parent_acct = slurmdb_report_assoc->acct;
+				} else {
+					local_acct = xstrdup(
+						     slurmdb_report_assoc->acct);
+					parent_acct = slurmdb_report_assoc->
+						      parent_acct;
+				}
+				tree_list = list_create(slurmdb_destroy_print_tree);
+				print_acct = slurmdb_tree_name_get(local_acct,
+								   parent_acct,
+								   tree_list);
+				FREE_NULL_LIST(tree_list);
+				xfree(local_acct);
+			} else {
+				print_acct = slurmdb_report_assoc->acct;
+			}
+			field->print_routine(field, print_acct,
+					     (curr_inx == field_count));
+
+			break;
+		case PRINT_CLUSTER_NAME:
+			field->print_routine(field,
+					     slurmdb_report_cluster->name,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_USER_LOGIN:
+			field->print_routine(field,
+					     slurmdb_report_assoc->user,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_USER_PROPER:
+			if (slurmdb_report_assoc->user)
+				pwd = getpwnam(slurmdb_report_assoc->user);
+			if (pwd) {
+				tmp_char = strtok(pwd->pw_gecos, ",");
+				if (!tmp_char)
+					tmp_char = pwd->pw_gecos;
+			}
+			field->print_routine(field, tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_AMOUNT_USED:
+			field->print_routine(field, tres_rec->alloc_secs,
+					     cluster_tres_rec->alloc_secs,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_ENERGY:
+			/* For backward compatibility with pre-TRES logic,
+			 * get energy_cnt here */
+			tres_energy = TRES_ENERGY;
+			if ((total_energy = list_find_first(
+				     slurmdb_report_cluster->tres_list,
+				     slurmdb_find_tres_in_list,
+				     &tres_energy)))
+				cluster_energy_cnt = total_energy->alloc_secs;
+			if ((total_energy = list_find_first(
+					slurmdb_report_assoc->tres_list,
+					slurmdb_find_tres_in_list,
+					&tres_energy)))
+				assoc_energy_cnt = total_energy->alloc_secs;
+			field->print_routine(field, assoc_energy_cnt,
+					     cluster_energy_cnt,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_TRES_NAME:
+			xstrfmtcat(tres_tmp, "%s%s%s",
+				   tres->type,
+				   tres->name ? "/" : "",
+				   tres->name ? tres->name : "");
+
+			field->print_routine(field, tres_tmp,
+					     (curr_inx == field_count));
+			xfree(tres_tmp);
+			break;
+		default:
+			field->print_routine(field, NULL,
+					     (curr_inx == field_count));
+			break;
+		}
+		curr_inx++;
+	}
+	list_iterator_destroy(iter);
+	printf("\n");
+}
+
 extern int cluster_account_by_user(int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
-	slurmdb_association_cond_t *assoc_cond =
-		xmalloc(sizeof(slurmdb_association_cond_t));
+	slurmdb_assoc_cond_t *assoc_cond =
+		xmalloc(sizeof(slurmdb_assoc_cond_t));
 	slurmdb_cluster_cond_t cluster_cond;
 	ListIterator itr = NULL;
 	ListIterator itr2 = NULL;
 	ListIterator cluster_itr = NULL;
 	List format_list = list_create(slurm_destroy_char);
 	List slurmdb_report_cluster_list = NULL;
-	List tree_list = NULL;
-	int i=0;
+	int i = 0;
 	slurmdb_report_assoc_rec_t *slurmdb_report_assoc = NULL;
 	slurmdb_report_cluster_rec_t *slurmdb_report_cluster = NULL;
-	print_field_t *field = NULL;
-	int field_count = 0;
-	char *print_acct = NULL;
 
 	print_fields_list = list_create(destroy_print_field);
 
@@ -617,13 +754,18 @@ extern int cluster_account_by_user(int argc, char *argv[])
 	assoc_cond->with_sub_accts = 1;
 
 	_set_assoc_cond(&i, argc, argv, assoc_cond, format_list);
-
-	if (!list_count(format_list))
-		slurm_addto_char_list(format_list,
-				      "Cluster,Ac,Login,Proper,Used,Energy");
+	if (!list_count(format_list)) {
+		if (tres_str) {
+			slurm_addto_char_list(format_list,
+				"Cluster,Ac,Login,Proper,TresName,Used");
+		} else {
+			slurm_addto_char_list(format_list,
+				"Cluster,Ac,Login,Proper,Used,Energy");
+		}
+	}
 
 	_setup_print_fields_list(format_list);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	if (!(slurmdb_report_cluster_list =
 	     slurmdb_report_cluster_account_by_user(db_conn, assoc_cond))) {
@@ -635,7 +777,7 @@ extern int cluster_account_by_user(int argc, char *argv[])
 		char start_char[20];
 		char end_char[20];
 		time_t my_start = assoc_cond->usage_start;
-		time_t my_end = assoc_cond->usage_end-1;
+		time_t my_end = assoc_cond->usage_end - 1;
 
 		slurm_make_time_str(&my_start, start_char, sizeof(start_char));
 		slurm_make_time_str(&my_end, end_char, sizeof(end_char));
@@ -647,10 +789,10 @@ extern int cluster_account_by_user(int argc, char *argv[])
 
 		switch(time_format) {
 		case SLURMDB_REPORT_TIME_PERCENT:
-			printf("Time reported in %s\n", time_format_string);
+			printf("Use reported in %s\n", time_format_string);
 			break;
 		default:
-			printf("Time reported in CPU %s\n",
+			printf("Use reported in TRES %s\n",
 			       time_format_string);
 			break;
 		}
@@ -658,155 +800,159 @@ extern int cluster_account_by_user(int argc, char *argv[])
 		       "----------------------------------------\n");
 	}
 
-	itr2 = list_iterator_create(print_fields_list);
 	print_fields_header(print_fields_list);
 
-	field_count = list_count(print_fields_list);
 	list_sort(slurmdb_report_cluster_list, (ListCmpF)sort_cluster_dec);
 
 	cluster_itr = list_iterator_create(slurmdb_report_cluster_list);
-	while((slurmdb_report_cluster = list_next(cluster_itr))) {
+	while ((slurmdb_report_cluster = list_next(cluster_itr))) {
 		//list_sort(slurmdb_report_cluster->assoc_list,
 		//  (ListCmpF)sort_assoc_dec);
-		if (tree_list)
-			list_flush(tree_list);
-		else
-			tree_list = list_create(slurmdb_destroy_print_tree);
 
 		itr = list_iterator_create(slurmdb_report_cluster->assoc_list);
-		while((slurmdb_report_assoc = list_next(itr))) {
-			int curr_inx = 1;
-			if (!slurmdb_report_assoc->cpu_secs)
-				continue;
-			while((field = list_next(itr2))) {
-				char *tmp_char = NULL;
-				struct passwd *pwd = NULL;
-				switch(field->type) {
-				case PRINT_CLUSTER_ACCT:
-					if (tree_display) {
-						char *local_acct = NULL;
-						char *parent_acct = NULL;
-						if (slurmdb_report_assoc->user) {
-							local_acct =
-								xstrdup_printf(
-									"|%s",
-									slurmdb_report_assoc->acct);
-							parent_acct =
-								slurmdb_report_assoc->acct;
-						} else {
-							local_acct = xstrdup(
-								slurmdb_report_assoc->acct);
-							parent_acct = slurmdb_report_assoc->
-								parent_acct;
-						}
-						print_acct =
-							slurmdb_tree_name_get(
-								local_acct,
-								parent_acct,
-								tree_list);
-						xfree(local_acct);
-					} else {
-						print_acct =
-							slurmdb_report_assoc->acct;
-					}
-					field->print_routine(
-						field,
-						print_acct,
-						(curr_inx == field_count));
-
-					break;
-				case PRINT_CLUSTER_NAME:
-					field->print_routine(
-						field,
-						slurmdb_report_cluster->name,
-						(curr_inx == field_count));
-					break;
-				case PRINT_CLUSTER_USER_LOGIN:
-					field->print_routine(
-						field,
-						slurmdb_report_assoc->user,
-						(curr_inx == field_count));
-					break;
-				case PRINT_CLUSTER_USER_PROPER:
-					if (slurmdb_report_assoc->user)
-						pwd = getpwnam(
-							slurmdb_report_assoc->user);
-					if (pwd) {
-						tmp_char =
-							strtok(pwd->pw_gecos,
-							       ",");
-						if (!tmp_char)
-							tmp_char =
-								pwd->pw_gecos;
-					}
-					field->print_routine(field,
-							     tmp_char,
-							     (curr_inx ==
-							      field_count));
-					break;
-				case PRINT_CLUSTER_AMOUNT_USED:
-					field->print_routine(
-						field,
-						slurmdb_report_assoc->cpu_secs,
-						slurmdb_report_cluster->
-						cpu_secs,
-						(curr_inx == field_count));
-					break;
-                                case PRINT_CLUSTER_ENERGY:
-                                        field->print_routine(
-                                                field,
-                                                slurmdb_report_assoc->
-						consumed_energy,
-                                                slurmdb_report_cluster->
-						consumed_energy,
-                                                (curr_inx == field_count));
-                                        break;
-				default:
-					field->print_routine(
-						field, NULL,
-						(curr_inx == field_count));
-					break;
-				}
-				curr_inx++;
+		while ((slurmdb_report_assoc = list_next(itr))) {
+			slurmdb_tres_rec_t *tres;
+			itr2 = list_iterator_create(tres_list);
+			while ((tres = list_next(itr2))) {
+				if (tres->id == NO_VAL)
+					continue;
+				_cluster_account_by_user_tres_report(tres,
+					slurmdb_report_cluster,
+					slurmdb_report_assoc);
 			}
-			list_iterator_reset(itr2);
-			printf("\n");
+			list_iterator_destroy(itr2);
 		}
 		list_iterator_destroy(itr);
 	}
 	list_iterator_destroy(cluster_itr);
+
 end_it:
-	slurmdb_destroy_association_cond(assoc_cond);
+	slurmdb_destroy_assoc_cond(assoc_cond);
+	FREE_NULL_LIST(slurmdb_report_cluster_list);
+	FREE_NULL_LIST(print_fields_list);
 
-	if (slurmdb_report_cluster_list) {
-		list_destroy(slurmdb_report_cluster_list);
-		slurmdb_report_cluster_list = NULL;
-	}
+	return rc;
+}
 
-	if (print_fields_list) {
-		list_destroy(print_fields_list);
-		print_fields_list = NULL;
+static void _cluster_user_by_account_tres_report(slurmdb_tres_rec_t *tres,
+		slurmdb_report_cluster_rec_t *slurmdb_report_cluster,
+		slurmdb_report_user_rec_t *slurmdb_report_user)
+{
+	slurmdb_tres_rec_t *cluster_tres_rec, *tres_rec, *total_energy;
+	char *tmp_char = NULL;
+	struct passwd *pwd = NULL;
+	int curr_inx = 1, field_count;
+	ListIterator iter = NULL;
+	print_field_t *field;
+	uint64_t cluster_energy_cnt = 0, user_energy_cnt = 0;
+	uint32_t tres_energy;
+	char *tres_tmp = NULL;
+
+	if (!(cluster_tres_rec = list_find_first(
+				slurmdb_report_cluster->tres_list,
+				slurmdb_find_tres_in_list,
+				&tres->id))) {
+		info("error, no %s(%d) TRES!", tres->type, tres->id);
+		return;
+	}
+	if (!(tres_rec = list_find_first(slurmdb_report_user->tres_list,
+				slurmdb_find_tres_in_list,
+				&tres->id))) {
+		info("error, no %s(%d) TRES!", tres->type, tres->id);
+		return;
+	}
+	if (!tres_rec->alloc_secs) {
+		debug2("error, no %s(%d) TRES usage", tres->type, tres->id);
+		return;
 	}
 
-	return rc;
+	field_count = list_count(print_fields_list);
+	iter = list_iterator_create(print_fields_list);
+	while ((field = list_next(iter))) {
+		switch (field->type) {
+		case PRINT_CLUSTER_ACCT:
+			field->print_routine(field,
+					     slurmdb_report_user->acct,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_NAME:
+			field->print_routine(field,
+					     slurmdb_report_cluster->name,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_USER_LOGIN:
+			field->print_routine(field,
+					     slurmdb_report_user->name,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_USER_PROPER:
+			pwd = getpwnam(slurmdb_report_user->name);
+			if (pwd) {
+				tmp_char = strtok(pwd->pw_gecos, ",");
+				if (!tmp_char)
+					tmp_char = pwd->pw_gecos;
+			}
+			field->print_routine(field, tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_AMOUNT_USED:
+			field->print_routine(field, tres_rec->alloc_secs,
+					     cluster_tres_rec->alloc_secs,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_ENERGY:
+			/* For backward compatibility with pre-TRES logic,
+			 * get energy_cnt here */
+			tres_energy = TRES_ENERGY;
+			if ((total_energy = list_find_first(
+				     slurmdb_report_cluster->tres_list,
+				     slurmdb_find_tres_in_list,
+				     &tres_energy)))
+				cluster_energy_cnt = total_energy->alloc_secs;
+			if ((total_energy = list_find_first(
+					slurmdb_report_user->tres_list,
+					slurmdb_find_tres_in_list,
+					&tres_energy)))
+				user_energy_cnt = total_energy->alloc_secs;
+			field->print_routine(field, user_energy_cnt,
+					     cluster_energy_cnt,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_TRES_NAME:
+			xstrfmtcat(tres_tmp, "%s%s%s",
+				   tres->type,
+				   tres->name ? "/" : "",
+				   tres->name ? tres->name : "");
+
+			field->print_routine(field, tres_tmp,
+					     (curr_inx == field_count));
+			xfree(tres_tmp);
+			break;
+		default:
+			field->print_routine(field, NULL,
+					     (curr_inx == field_count));
+			break;
+		}
+		curr_inx++;
+	}
+	list_iterator_destroy(iter);
+	printf("\n");
 }
 
 extern int cluster_user_by_account(int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
-	slurmdb_association_cond_t *assoc_cond =
-		xmalloc(sizeof(slurmdb_association_cond_t));
+	slurmdb_assoc_cond_t *assoc_cond =
+		xmalloc(sizeof(slurmdb_assoc_cond_t));
 	slurmdb_cluster_cond_t cluster_cond;
 	ListIterator itr = NULL;
 	ListIterator itr2 = NULL;
 	ListIterator cluster_itr = NULL;
 	List format_list = list_create(slurm_destroy_char);
 	List slurmdb_report_cluster_list = NULL;
-	int i=0;
+	int i = 0;
 	slurmdb_report_user_rec_t *slurmdb_report_user = NULL;
 	slurmdb_report_cluster_rec_t *slurmdb_report_cluster = NULL;
-	print_field_t *field = NULL;
-	int field_count = 0;
 
 	print_fields_list = list_create(destroy_print_field);
 
@@ -814,12 +960,18 @@ extern int cluster_user_by_account(int argc, char *argv[])
 
 	_set_assoc_cond(&i, argc, argv, assoc_cond, format_list);
 
-	if (!list_count(format_list))
-		slurm_addto_char_list(format_list,
-				      "Cluster,Login,Proper,Ac,Used,Energy");
+	if (!list_count(format_list)) {
+		if (tres_str) {
+			slurm_addto_char_list(format_list,
+				"Cluster,Login,Proper,Ac,TresName,Used");
+		} else {
+			slurm_addto_char_list(format_list,
+				"Cluster,Login,Proper,Ac,Used,Energy");
+		}
+	}
 
 	_setup_print_fields_list(format_list);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	if (!(slurmdb_report_cluster_list =
 	     slurmdb_report_cluster_user_by_account(db_conn, assoc_cond))) {
@@ -841,12 +993,12 @@ extern int cluster_user_by_account(int argc, char *argv[])
 		       start_char, end_char,
 		       (int)(assoc_cond->usage_end - assoc_cond->usage_start));
 
-		switch(time_format) {
+		switch (time_format) {
 		case SLURMDB_REPORT_TIME_PERCENT:
-			printf("Time reported in %s\n", time_format_string);
+			printf("Use reported in %s\n", time_format_string);
 			break;
 		default:
-			printf("Time reported in CPU %s\n",
+			printf("Use reported in TRES %s\n",
 			       time_format_string);
 			break;
 		}
@@ -854,105 +1006,140 @@ extern int cluster_user_by_account(int argc, char *argv[])
 		       "----------------------------------------\n");
 	}
 
-	itr2 = list_iterator_create(print_fields_list);
 	print_fields_header(print_fields_list);
 
-	field_count = list_count(print_fields_list);
 	cluster_itr = list_iterator_create(slurmdb_report_cluster_list);
-	while((slurmdb_report_cluster = list_next(cluster_itr))) {
+	while ((slurmdb_report_cluster = list_next(cluster_itr))) {
 		list_sort(slurmdb_report_cluster->user_list,
 			  (ListCmpF)sort_user_dec);
-
 		itr = list_iterator_create(slurmdb_report_cluster->user_list);
-		while((slurmdb_report_user = list_next(itr))) {
-			int curr_inx = 1;
-
-			/* we don't care if they didn't use any time */
-			if (!slurmdb_report_user->cpu_secs)
-				continue;
-
-			while((field = list_next(itr2))) {
-				char *tmp_char = NULL;
-				struct passwd *pwd = NULL;
-				switch(field->type) {
-				case PRINT_CLUSTER_ACCT:
-					field->print_routine(
-						field,
-						slurmdb_report_user->acct,
-						(curr_inx == field_count));
-					break;
-				case PRINT_CLUSTER_NAME:
-					field->print_routine(
-						field,
-						slurmdb_report_cluster->name,
-						(curr_inx == field_count));
-					break;
-				case PRINT_CLUSTER_USER_LOGIN:
-					field->print_routine(
-						field,
-						slurmdb_report_user->name,
-						(curr_inx == field_count));
-					break;
-				case PRINT_CLUSTER_USER_PROPER:
-					pwd = getpwnam(slurmdb_report_user->name);
-					if (pwd) {
-						tmp_char =
-							strtok(pwd->pw_gecos,
-							       ",");
-						if (!tmp_char)
-							tmp_char =
-								pwd->pw_gecos;
-					}
-					field->print_routine(field,
-							     tmp_char,
-							     (curr_inx ==
-							      field_count));
-					break;
-				case PRINT_CLUSTER_AMOUNT_USED:
-					field->print_routine(
-						field,
-						slurmdb_report_user->cpu_secs,
-						slurmdb_report_cluster->
-						cpu_secs,
-						(curr_inx == field_count));
-					break;
-                                case PRINT_CLUSTER_ENERGY:
-                                        field->print_routine(
-                                                field,
-                                                slurmdb_report_user->
-						consumed_energy,
-                                                slurmdb_report_cluster->
-						consumed_energy,
-                                                (curr_inx == field_count));
-                                        break;
-				default:
-					field->print_routine(
-						field, NULL,
-						(curr_inx == field_count));
-					break;
-				}
-				curr_inx++;
+		while ((slurmdb_report_user = list_next(itr))) {
+			slurmdb_tres_rec_t *tres;
+			itr2 = list_iterator_create(tres_list);
+			while ((tres = list_next(itr2))) {
+				if (tres->id == NO_VAL)
+					continue;
+				_cluster_user_by_account_tres_report(tres,
+					slurmdb_report_cluster,
+					slurmdb_report_user);
 			}
-			list_iterator_reset(itr2);
-			printf("\n");
+			list_iterator_destroy(itr2);
+
 		}
 		list_iterator_destroy(itr);
 	}
 	list_iterator_destroy(cluster_itr);
 end_it:
-	slurmdb_destroy_association_cond(assoc_cond);
+	slurmdb_destroy_assoc_cond(assoc_cond);
+	FREE_NULL_LIST(slurmdb_report_cluster_list);
+	FREE_NULL_LIST(print_fields_list);
 
-	if (slurmdb_report_cluster_list) {
-		list_destroy(slurmdb_report_cluster_list);
-		slurmdb_report_cluster_list = NULL;
-	}
+	return rc;
+}
 
-	if (print_fields_list) {
-		list_destroy(print_fields_list);
-		print_fields_list = NULL;
+static void _cluster_user_by_wckey_tres_report(slurmdb_tres_rec_t *tres,
+		slurmdb_report_cluster_rec_t *slurmdb_report_cluster,
+		slurmdb_report_user_rec_t *slurmdb_report_user)
+{
+	slurmdb_tres_rec_t *cluster_tres_rec, *tres_rec, *total_energy;
+	char *tmp_char = NULL;
+	struct passwd *pwd = NULL;
+	int curr_inx = 1, field_count;
+	ListIterator iter = NULL;
+	print_field_t *field;
+	uint64_t cluster_energy_cnt = 0, user_energy_cnt = 0;
+	uint32_t tres_energy;
+	char *tres_tmp = NULL;
+
+	if (!(cluster_tres_rec = list_find_first(
+				slurmdb_report_cluster->tres_list,
+				slurmdb_find_tres_in_list,
+				&tres->id))) {
+		info("error, no %s(%d) TRES!", tres->type, tres->id);
+		return;
+	}
+	if (!(tres_rec = list_find_first(slurmdb_report_user->tres_list,
+				slurmdb_find_tres_in_list,
+				&tres->id))) {
+		info("error, no %s(%d) TRES!", tres->type, tres->id);
+		return;
+	}
+	if (!tres_rec->alloc_secs) {
+		debug2("error, no %s(%d) TRES usage", tres->type, tres->id);
+		return;
 	}
 
-	return rc;
+	field_count = list_count(print_fields_list);
+	iter = list_iterator_create(print_fields_list);
+	while ((field = list_next(iter))) {
+		switch (field->type) {
+		case PRINT_CLUSTER_WCKEY:
+			field->print_routine(field,
+					     slurmdb_report_user->acct,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_NAME:
+			field->print_routine(field,
+					     slurmdb_report_cluster->name,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_USER_LOGIN:
+			field->print_routine(field,
+					     slurmdb_report_user->name,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_USER_PROPER:
+			pwd = getpwnam(slurmdb_report_user->name);
+			if (pwd) {
+				tmp_char = strtok(pwd->pw_gecos, ",");
+				if (!tmp_char)
+					tmp_char = pwd->pw_gecos;
+			}
+			field->print_routine(field, tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_AMOUNT_USED:
+			field->print_routine(field, tres_rec->alloc_secs,
+					     cluster_tres_rec->alloc_secs,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_ENERGY:
+			/* For backward compatibility with pre-TRES logic,
+			 * get energy_cnt here */
+			tres_energy = TRES_ENERGY;
+			if ((total_energy = list_find_first(
+				     slurmdb_report_cluster->tres_list,
+				     slurmdb_find_tres_in_list,
+				     &tres_energy)))
+				cluster_energy_cnt = total_energy->alloc_secs;
+			if ((total_energy = list_find_first(
+					slurmdb_report_user->tres_list,
+					slurmdb_find_tres_in_list,
+					&tres_energy)))
+				user_energy_cnt = total_energy->alloc_secs;
+			field->print_routine(field, user_energy_cnt,
+					     cluster_energy_cnt,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_TRES_NAME:
+			xstrfmtcat(tres_tmp, "%s%s%s",
+				   tres->type,
+				   tres->name ? "/" : "",
+				   tres->name ? tres->name : "");
+
+			field->print_routine(field, tres_tmp,
+					     (curr_inx == field_count));
+			xfree(tres_tmp);
+			break;
+		default:
+			field->print_routine(field, NULL,
+					     (curr_inx == field_count));
+			break;
+		}
+		curr_inx++;
+	}
+	list_iterator_destroy(iter);
+	printf("\n");
 }
 
 extern int cluster_user_by_wckey(int argc, char *argv[])
@@ -966,11 +1153,9 @@ extern int cluster_user_by_wckey(int argc, char *argv[])
 	ListIterator cluster_itr = NULL;
 	List format_list = list_create(slurm_destroy_char);
 	List slurmdb_report_cluster_list = NULL;
-	int i=0;
+	int i = 0;
 	slurmdb_report_user_rec_t *slurmdb_report_user = NULL;
 	slurmdb_report_cluster_rec_t *slurmdb_report_cluster = NULL;
-	print_field_t *field = NULL;
-	int field_count = 0;
 
 	print_fields_list = list_create(destroy_print_field);
 
@@ -978,12 +1163,18 @@ extern int cluster_user_by_wckey(int argc, char *argv[])
 
 	_set_wckey_cond(&i, argc, argv, wckey_cond, format_list);
 
-	if (!list_count(format_list))
-		slurm_addto_char_list(format_list,
-				      "Cluster,Login,Proper,WCkey,Used");
+	if (!list_count(format_list)) {
+		if (tres_str) {
+			slurm_addto_char_list(format_list,
+				"Cluster,Login,Proper,WCkey,TresName,Used");
+		} else {
+			slurm_addto_char_list(format_list,
+				"Cluster,Login,Proper,WCkey,Used");
+		}
+	}
 
 	_setup_print_fields_list(format_list);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	if (!(slurmdb_report_cluster_list =
 	     slurmdb_report_cluster_user_by_wckey(db_conn, wckey_cond))) {
@@ -1005,12 +1196,12 @@ extern int cluster_user_by_wckey(int argc, char *argv[])
 		       start_char, end_char,
 		       (int)(wckey_cond->usage_end - wckey_cond->usage_start));
 
-		switch(time_format) {
+		switch (time_format) {
 		case SLURMDB_REPORT_TIME_PERCENT:
-			printf("Time reported in %s\n", time_format_string);
+			printf("Use reported in %s\n", time_format_string);
 			break;
 		default:
-			printf("Time reported in CPU %s\n",
+			printf("Use reported in TRES %s\n",
 			       time_format_string);
 			break;
 		}
@@ -1018,106 +1209,142 @@ extern int cluster_user_by_wckey(int argc, char *argv[])
 		       "----------------------------------------\n");
 	}
 
-	itr2 = list_iterator_create(print_fields_list);
 	print_fields_header(print_fields_list);
 
-	field_count = list_count(print_fields_list);
 	cluster_itr = list_iterator_create(slurmdb_report_cluster_list);
-	while((slurmdb_report_cluster = list_next(cluster_itr))) {
+	while ((slurmdb_report_cluster = list_next(cluster_itr))) {
 		list_sort(slurmdb_report_cluster->user_list,
 			  (ListCmpF)sort_user_dec);
-
 		itr = list_iterator_create(slurmdb_report_cluster->user_list);
-		while((slurmdb_report_user = list_next(itr))) {
-			int curr_inx = 1;
-
-			/* we don't care if they didn't use any time */
-			if (!slurmdb_report_user->cpu_secs)
-				continue;
-
-			while((field = list_next(itr2))) {
-				char *tmp_char = NULL;
-				struct passwd *pwd = NULL;
-				switch(field->type) {
-				case PRINT_CLUSTER_WCKEY:
-					field->print_routine(
-						field,
-						slurmdb_report_user->acct,
-						(curr_inx == field_count));
-					break;
-				case PRINT_CLUSTER_NAME:
-					field->print_routine(
-						field,
-						slurmdb_report_cluster->name,
-						(curr_inx == field_count));
-					break;
-				case PRINT_CLUSTER_USER_LOGIN:
-					field->print_routine(
-						field,
-						slurmdb_report_user->name,
-						(curr_inx == field_count));
-					break;
-				case PRINT_CLUSTER_USER_PROPER:
-					pwd = getpwnam(slurmdb_report_user->name);
-					if (pwd) {
-						tmp_char =
-							strtok(pwd->pw_gecos,
-							       ",");
-						if (!tmp_char)
-							tmp_char =
-								pwd->pw_gecos;
-					}
-					field->print_routine(field,
-							     tmp_char,
-							     (curr_inx ==
-							      field_count));
-					break;
-				case PRINT_CLUSTER_AMOUNT_USED:
-					field->print_routine(
-						field,
-						slurmdb_report_user->cpu_secs,
-						slurmdb_report_cluster->
-						cpu_secs,
-						(curr_inx == field_count));
-					break;
-                                case PRINT_CLUSTER_ENERGY:
-                                        field->print_routine(
-                                                field,
-                                                slurmdb_report_user->
-						consumed_energy,
-                                                slurmdb_report_cluster->
-						consumed_energy,
-                                                (curr_inx == field_count));
-                                        break;
-
-				default:
-					field->print_routine(
-						field, NULL,
-						(curr_inx == field_count));
-					break;
-				}
-				curr_inx++;
+		while ((slurmdb_report_user = list_next(itr))) {
+			slurmdb_tres_rec_t *tres;
+			itr2 = list_iterator_create(tres_list);
+			while ((tres = list_next(itr2))) {
+				if (tres->id == NO_VAL)
+					continue;
+				_cluster_user_by_wckey_tres_report(tres,
+					slurmdb_report_cluster,
+					slurmdb_report_user);
 			}
-			list_iterator_reset(itr2);
-			printf("\n");
+			list_iterator_destroy(itr2);
 		}
 		list_iterator_destroy(itr);
 	}
 	list_iterator_destroy(cluster_itr);
 end_it:
 	slurmdb_destroy_wckey_cond(wckey_cond);
+	FREE_NULL_LIST(slurmdb_report_cluster_list);
+	FREE_NULL_LIST(print_fields_list);
 
-	if (slurmdb_report_cluster_list) {
-		list_destroy(slurmdb_report_cluster_list);
-		slurmdb_report_cluster_list = NULL;
-	}
+	return rc;
+}
 
-	if (print_fields_list) {
-		list_destroy(print_fields_list);
-		print_fields_list = NULL;
+static void _cluster_util_tres_report(slurmdb_tres_rec_t *tres,
+				slurmdb_cluster_rec_t *cluster,
+				uint32_t total_time, List total_tres_acct)
+{
+	slurmdb_cluster_accounting_rec_t *total_acct;
+	slurmdb_cluster_accounting_rec_t *total_energy;
+	uint64_t total_reported = 0;
+	uint64_t local_total_time = 0;
+	int curr_inx = 1, field_count;
+	ListIterator iter;
+	char *tres_tmp = NULL;
+	print_field_t *field;
+	uint32_t tres_energy;
+	uint64_t energy_cnt = 0;
+
+	if (!(total_acct = list_find_first(total_tres_acct,
+				slurmdb_find_cluster_accting_tres_in_list,
+				&tres->id))) {
+		info("error, no %s(%d) TRES!", tres->type, tres->id);
+		return;
 	}
 
-	return rc;
+	local_total_time = (uint64_t)total_time *
+			   (uint64_t)total_acct->tres_rec.count;
+	total_reported = total_acct->alloc_secs + total_acct->down_secs
+			 + total_acct->pdown_secs + total_acct->idle_secs
+			 + total_acct->resv_secs;
+
+	field_count = list_count(print_fields_list);
+	iter = list_iterator_create(print_fields_list);
+	while ((field = list_next(iter))) {
+		switch (field->type) {
+		case PRINT_CLUSTER_NAME:
+			field->print_routine(field, cluster->name,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_TRES_CNT:
+			field->print_routine(field,
+					     total_acct->tres_rec.count,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_TRES_ALLOC:
+			field->print_routine(field, total_acct->alloc_secs,
+					     total_reported,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_TRES_DOWN:
+			field->print_routine(field, total_acct->down_secs,
+					     total_reported,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_TRES_IDLE:
+			field->print_routine(field, total_acct->idle_secs,
+					     total_reported,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_TRES_RESV:
+			field->print_routine(field, total_acct->resv_secs,
+					     total_reported,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_TRES_OVER:
+			field->print_routine(field, total_acct->over_secs,
+					     total_reported,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_TRES_PLAN_DOWN:
+			field->print_routine(field, total_acct->pdown_secs,
+					     total_reported,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_TRES_REPORTED:
+			field->print_routine(field, total_reported,
+					     local_total_time,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_ENERGY:
+			/* For backward compatibility with pre-TRES logic,
+			 * get energy_cnt here */
+			tres_energy = TRES_ENERGY;
+			if ((total_energy = list_find_first(total_tres_acct,
+					slurmdb_find_cluster_accting_tres_in_list,
+					&tres_energy)))
+				energy_cnt = total_energy->tres_rec.count;
+			field->print_routine(field, energy_cnt, energy_cnt,
+			                     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_TRES_NAME:
+			xstrfmtcat(tres_tmp, "%s%s%s",
+				   tres->type,
+				   tres->name ? "/" : "",
+				   tres->name ? tres->name : "");
+
+			field->print_routine(field, tres_tmp,
+					     (curr_inx == field_count));
+			xfree(tres_tmp);
+			break;
+		default:
+			field->print_routine(field, NULL,
+					     (curr_inx == field_count));
+			break;
+		}
+		curr_inx++;
+	}
+	list_iterator_destroy(iter);
+	printf("\n");
 }
 
 extern int cluster_utilization(int argc, char *argv[])
@@ -1127,168 +1354,158 @@ extern int cluster_utilization(int argc, char *argv[])
 	ListIterator itr2 = NULL;
 	ListIterator itr3 = NULL;
 	slurmdb_cluster_rec_t *cluster = NULL;
-
-	print_field_t *field = NULL;
 	uint32_t total_time = 0;
-
 	List cluster_list = NULL;
-
 	List format_list = list_create(slurm_destroy_char);
-	int field_count = 0;
 
 	print_fields_list = list_create(destroy_print_field);
 
 
 	if (!(cluster_list = _get_cluster_list(argc, argv, &total_time,
-					      "Cluster Utilization",
-					      format_list)))
+					       "Cluster Utilization",
+					       format_list)))
 		goto end_it;
 
-	if (!list_count(format_list))
-		slurm_addto_char_list(format_list, "Cl,al,d,planned,i,res,rep");
+	if (!list_count(format_list)) {
+		if (tres_str) {
+			slurm_addto_char_list(format_list,
+					      "Cl,TresName,al,d,planned,i,res,rep");
+		} else {
+			slurm_addto_char_list(format_list,
+					      "Cl,al,d,planned,i,res,rep");
+		}
+	}
 
 	_setup_print_fields_list(format_list);
-	list_destroy(format_list);
-
-	itr = list_iterator_create(cluster_list);
-	itr2 = list_iterator_create(print_fields_list);
+	FREE_NULL_LIST(format_list);
 
 	print_fields_header(print_fields_list);
 
-	field_count = list_count(print_fields_list);
-
-	while((cluster = list_next(itr))) {
+	itr = list_iterator_create(cluster_list);
+	while ((cluster = list_next(itr))) {
 		slurmdb_cluster_accounting_rec_t *accting = NULL;
-		slurmdb_cluster_accounting_rec_t total_acct;
-		uint64_t total_reported = 0;
-		uint64_t local_total_time = 0;
-		int curr_inx = 1;
+		slurmdb_tres_rec_t *tres;
+		List total_tres_acct = NULL;
 
 		if (!cluster->accounting_list
 		   || !list_count(cluster->accounting_list))
 			continue;
 
-		memset(&total_acct, 0,
-		       sizeof(slurmdb_cluster_accounting_rec_t));
-
 		itr3 = list_iterator_create(cluster->accounting_list);
-		while((accting = list_next(itr3))) {
-			total_acct.alloc_secs += accting->alloc_secs;
-			total_acct.down_secs += accting->down_secs;
-			total_acct.pdown_secs += accting->pdown_secs;
-			total_acct.idle_secs += accting->idle_secs;
-			total_acct.resv_secs += accting->resv_secs;
-			total_acct.over_secs += accting->over_secs;
-			total_acct.cpu_count += accting->cpu_count;
-			total_acct.consumed_energy += accting->consumed_energy;
+		while ((accting = list_next(itr3))) {
+			slurmdb_sum_accounting_list(
+				accting, &total_tres_acct);
 		}
 		list_iterator_destroy(itr3);
 
-		total_acct.cpu_count /= list_count(cluster->accounting_list);
-
-		local_total_time =
-			(uint64_t)total_time * (uint64_t)total_acct.cpu_count;
-		total_reported = total_acct.alloc_secs + total_acct.down_secs
-			+ total_acct.pdown_secs + total_acct.idle_secs
-			+ total_acct.resv_secs;
-
-		while((field = list_next(itr2))) {
-			switch(field->type) {
-			case PRINT_CLUSTER_NAME:
-				field->print_routine(field,
-						     cluster->name,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_CLUSTER_CPUS:
-				field->print_routine(field,
-						     total_acct.cpu_count,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_CLUSTER_ACPU:
-				field->print_routine(field,
-						     total_acct.alloc_secs,
-						     total_reported,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_CLUSTER_DCPU:
-				field->print_routine(field,
-						     total_acct.down_secs,
-						     total_reported,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_CLUSTER_ICPU:
-				field->print_routine(field,
-						     total_acct.idle_secs,
-						     total_reported,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_CLUSTER_RCPU:
-				field->print_routine(field,
-						     total_acct.resv_secs,
-						     total_reported,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_CLUSTER_OCPU:
-					field->print_routine(field,
-						     total_acct.over_secs,
-						     total_reported,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_CLUSTER_PDCPU:
-					field->print_routine(field,
-						     total_acct.pdown_secs,
-						     total_reported,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_CLUSTER_ENERGY:
-				field->print_routine(field,
-				                     total_acct.consumed_energy,
-				                     total_acct.consumed_energy,
-				                     (curr_inx ==
-				                      field_count));
-				break;
-			case PRINT_CLUSTER_TOTAL:
-				field->print_routine(field,
-						     total_reported,
-						     local_total_time,
-						     (curr_inx ==
-						      field_count));
-				break;
-			default:
-				field->print_routine(
-					field, NULL,
-					(curr_inx == field_count));
-				break;
-			}
-			curr_inx++;
+		itr3 = list_iterator_create(total_tres_acct);
+		while ((accting = list_next(itr3))) {
+			accting->tres_rec.count /=
+				accting->tres_rec.rec_count;
 		}
-		list_iterator_reset(itr2);
-		printf("\n");
-	}
+		list_iterator_destroy(itr3);
 
-	list_iterator_destroy(itr2);
+		itr2 = list_iterator_create(tres_list);
+		while ((tres = list_next(itr2))) {
+			if (tres->id == NO_VAL)
+				continue;
+			_cluster_util_tres_report(tres, cluster, total_time,
+						  total_tres_acct);
+		}
+		list_iterator_destroy(itr2);
+	}
 	list_iterator_destroy(itr);
 
 end_it:
-	if (cluster_list) {
-		list_destroy(cluster_list);
-		cluster_list = NULL;
-	}
+	FREE_NULL_LIST(cluster_list);
+	FREE_NULL_LIST(print_fields_list);
+
+	return rc;
+}
 
-	if (print_fields_list) {
-		list_destroy(print_fields_list);
-		print_fields_list = NULL;
+static void _cluster_wckey_by_user_tres_report(slurmdb_tres_rec_t *tres,
+		slurmdb_report_cluster_rec_t *slurmdb_report_cluster,
+		slurmdb_report_assoc_rec_t *slurmdb_report_assoc)
+{
+	slurmdb_tres_rec_t *cluster_tres_rec, *tres_rec;
+	int curr_inx = 1, field_count;
+	ListIterator iter = NULL;
+	print_field_t *field;
+	char *tres_tmp = NULL;
+
+	if (!(cluster_tres_rec = list_find_first(
+				slurmdb_report_cluster->tres_list,
+				slurmdb_find_tres_in_list,
+				&tres->id))) {
+		info("error, no %s(%d) TRES!", tres->type, tres->id);
+		return;
+	}
+	if (!(tres_rec = list_find_first(slurmdb_report_assoc->tres_list,
+				slurmdb_find_tres_in_list,
+				&tres->id))) {
+		info("error, no %s(%d) TRES!", tres->type, tres->id);
+		return;
+	}
+	if (!tres_rec->alloc_secs) {
+		debug2("error, no %s(%d) TRES usage", tres->type, tres->id);
+		return;
 	}
 
-	return rc;
+	field_count = list_count(print_fields_list);
+	iter = list_iterator_create(print_fields_list);
+	while ((field = list_next(iter))) {
+		char *tmp_char = NULL;
+		struct passwd *pwd = NULL;
+		switch (field->type) {
+		case PRINT_CLUSTER_WCKEY:
+			field->print_routine(field,
+					     slurmdb_report_assoc->acct,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_NAME:
+			field->print_routine(field,
+					     slurmdb_report_cluster->name,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_USER_LOGIN:
+			field->print_routine(field, slurmdb_report_assoc->user,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_USER_PROPER:
+			if (slurmdb_report_assoc->user)
+				pwd = getpwnam(slurmdb_report_assoc->user);
+			if (pwd) {
+				tmp_char = strtok(pwd->pw_gecos, ",");
+				if (!tmp_char)
+					tmp_char = pwd->pw_gecos;
+			}
+			field->print_routine(field, tmp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_AMOUNT_USED:
+			field->print_routine(field, tres_rec->alloc_secs,
+					     cluster_tres_rec->alloc_secs,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_CLUSTER_TRES_NAME:
+			xstrfmtcat(tres_tmp, "%s%s%s",
+				   tres->type,
+				   tres->name ? "/" : "",
+				   tres->name ? tres->name : "");
+
+			field->print_routine(field, tres_tmp,
+					     (curr_inx == field_count));
+			xfree(tres_tmp);
+			break;
+		default:
+			field->print_routine(field, NULL,
+					     (curr_inx == field_count));
+			break;
+		}
+		curr_inx++;
+	}
+	list_iterator_destroy(iter);
+	printf("\n");
 }
 
 extern int cluster_wckey_by_user(int argc, char *argv[])
@@ -1302,12 +1519,9 @@ extern int cluster_wckey_by_user(int argc, char *argv[])
 	ListIterator cluster_itr = NULL;
 	List format_list = list_create(slurm_destroy_char);
 	List slurmdb_report_cluster_list = NULL;
-	List tree_list = NULL;
-	int i=0;
+	int i = 0;
 	slurmdb_report_assoc_rec_t *slurmdb_report_assoc = NULL;
 	slurmdb_report_cluster_rec_t *slurmdb_report_cluster = NULL;
-	print_field_t *field = NULL;
-	int field_count = 0;
 
 	print_fields_list = list_create(destroy_print_field);
 
@@ -1315,12 +1529,18 @@ extern int cluster_wckey_by_user(int argc, char *argv[])
 
 	_set_wckey_cond(&i, argc, argv, wckey_cond, format_list);
 
-	if (!list_count(format_list))
-		slurm_addto_char_list(format_list,
-				      "Cluster,WCKey,Login,Proper,Used");
+	if (!list_count(format_list)) {
+		if (tres_str) {
+			slurm_addto_char_list(format_list,
+				"Cluster,WCKey,Login,Proper,TresName,Used");
+		} else {
+			slurm_addto_char_list(format_list,
+				"Cluster,WCKey,Login,Proper,Used");
+		}
+	}
 
 	_setup_print_fields_list(format_list);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	if (!(slurmdb_report_cluster_list =
 	     slurmdb_report_cluster_wckey_by_user(db_conn, wckey_cond))) {
@@ -1342,12 +1562,12 @@ extern int cluster_wckey_by_user(int argc, char *argv[])
 		       start_char, end_char,
 		       (int)(wckey_cond->usage_end - wckey_cond->usage_start));
 
-		switch(time_format) {
+		switch (time_format) {
 		case SLURMDB_REPORT_TIME_PERCENT:
-			printf("Time reported in %s\n", time_format_string);
+			printf("Use reported in %s\n", time_format_string);
 			break;
 		default:
-			printf("Time reported in CPU %s\n",
+			printf("Use reported in TRES %s\n",
 			       time_format_string);
 			break;
 		}
@@ -1355,100 +1575,41 @@ extern int cluster_wckey_by_user(int argc, char *argv[])
 		       "----------------------------------------\n");
 	}
 
-	itr2 = list_iterator_create(print_fields_list);
 	print_fields_header(print_fields_list);
 
-	field_count = list_count(print_fields_list);
 	list_sort(slurmdb_report_cluster_list, (ListCmpF)sort_cluster_dec);
 
 	cluster_itr = list_iterator_create(slurmdb_report_cluster_list);
-	while((slurmdb_report_cluster = list_next(cluster_itr))) {
-		//list_sort(slurmdb_report_cluster->wckey_list,
-		//  (ListCmpF)sort_wckey_dec);
-		if (tree_list)
-			list_flush(tree_list);
-		else
-			tree_list = list_create(slurmdb_destroy_print_tree);
+	while ((slurmdb_report_cluster = list_next(cluster_itr))) {
+		slurmdb_tres_rec_t *tres;
+
+		if (!slurmdb_report_cluster->tres_list ||
+		    !list_count(slurmdb_report_cluster->tres_list)) {
+			error("No TRES given for cluster %s",
+			      slurmdb_report_cluster->name);
+			continue;
+		}
 
 		itr = list_iterator_create(slurmdb_report_cluster->assoc_list);
-		while((slurmdb_report_assoc = list_next(itr))) {
-			int curr_inx = 1;
-			if (!slurmdb_report_assoc->cpu_secs)
-				continue;
-			while((field = list_next(itr2))) {
-				char *tmp_char = NULL;
-				struct passwd *pwd = NULL;
-				switch(field->type) {
-				case PRINT_CLUSTER_WCKEY:
-					field->print_routine(
-						field,
-						slurmdb_report_assoc->acct,
-						(curr_inx == field_count));
-
-					break;
-				case PRINT_CLUSTER_NAME:
-					field->print_routine(
-						field,
-						slurmdb_report_cluster->name,
-						(curr_inx == field_count));
-					break;
-				case PRINT_CLUSTER_USER_LOGIN:
-					field->print_routine(
-						field,
-						slurmdb_report_assoc->user,
-						(curr_inx == field_count));
-					break;
-				case PRINT_CLUSTER_USER_PROPER:
-					if (slurmdb_report_assoc->user)
-						pwd = getpwnam(
-							slurmdb_report_assoc->user);
-					if (pwd) {
-						tmp_char =
-							strtok(pwd->pw_gecos,
-							       ",");
-						if (!tmp_char)
-							tmp_char =
-								pwd->pw_gecos;
-					}
-					field->print_routine(field,
-							     tmp_char,
-							     (curr_inx ==
-							      field_count));
-					break;
-				case PRINT_CLUSTER_AMOUNT_USED:
-					field->print_routine(
-						field,
-						slurmdb_report_assoc->cpu_secs,
-						slurmdb_report_cluster->cpu_secs,
-						(curr_inx == field_count));
-					break;
-				default:
-					field->print_routine(
-						field, NULL,
-						(curr_inx == field_count));
-					break;
-				}
-				curr_inx++;
+		while ((slurmdb_report_assoc = list_next(itr))) {
+			itr2 = list_iterator_create(tres_list);
+			while ((tres = list_next(itr2))) {
+				if (tres->id == NO_VAL)
+					continue;
+				_cluster_wckey_by_user_tres_report(tres,
+					slurmdb_report_cluster,
+					slurmdb_report_assoc);
 			}
-			list_iterator_reset(itr2);
-			printf("\n");
+			list_iterator_destroy(itr2);
 		}
 		list_iterator_destroy(itr);
 	}
 	list_iterator_destroy(cluster_itr);
+
 end_it:
 	slurmdb_destroy_wckey_cond(wckey_cond);
-
-	if (slurmdb_report_cluster_list) {
-		list_destroy(slurmdb_report_cluster_list);
-		slurmdb_report_cluster_list = NULL;
-	}
-
-	if (print_fields_list) {
-		list_destroy(print_fields_list);
-		print_fields_list = NULL;
-	}
+	FREE_NULL_LIST(slurmdb_report_cluster_list);
+	FREE_NULL_LIST(print_fields_list);
 
 	return rc;
 }
-
diff --git a/src/sreport/common.c b/src/sreport/common.c
index ec6ffca19..eee40e079 100644
--- a/src/sreport/common.c
+++ b/src/sreport/common.c
@@ -137,6 +137,18 @@ extern int parse_option_end(char *option)
 	return end;
 }
 
+
+/* Do not allow the endtime request for sreport to exceed 'now'. */
+extern time_t sanity_check_endtime(time_t endtime)
+{
+	time_t now = time(NULL);
+
+	if (endtime > now)
+		endtime = now;
+
+	return endtime;
+}
+
 /* you need to xfree whatever is sent from here */
 extern char *strip_quotes(char *option, int *increased)
 {
@@ -238,14 +250,32 @@ extern int sort_user_dec(void *v1, void *v2)
 	slurmdb_report_user_rec_t *user_a;
 	slurmdb_report_user_rec_t *user_b;
 	int diff;
+	/* FIXME : this only works for CPUs now */
+	int tres_id = TRES_CPU;
 
 	user_a = *(slurmdb_report_user_rec_t **)v1;
 	user_b = *(slurmdb_report_user_rec_t **)v2;
 
 	if (sort_flag == SLURMDB_REPORT_SORT_TIME) {
-		if (user_a->cpu_secs > user_b->cpu_secs)
+		slurmdb_tres_rec_t *tres_a, *tres_b;
+
+		if (!user_a->tres_list || !user_b->tres_list)
+			return 0;
+
+		if (!(tres_a = list_find_first(user_a->tres_list,
+					       slurmdb_find_tres_in_list,
+					       &tres_id)))
+			return 1;
+
+		if (!(tres_b = list_find_first(user_b->tres_list,
+					       slurmdb_find_tres_in_list,
+					       &tres_id)))
+			return -1;
+
+
+		if (tres_a->alloc_secs > tres_b->alloc_secs)
 			return -1;
-		else if (user_a->cpu_secs < user_b->cpu_secs)
+		else if (tres_a->alloc_secs < tres_b->alloc_secs)
 			return 1;
 	}
 
diff --git a/src/sreport/job_reports.c b/src/sreport/job_reports.c
index 239849849..353808913 100644
--- a/src/sreport/job_reports.c
+++ b/src/sreport/job_reports.c
@@ -1,8 +1,8 @@
 /*****************************************************************************\
  *  job_reports.c - functions for generating job reports
- *                     from accounting infrastructure.
+ *                  from accounting infrastructure.
  *****************************************************************************
- *
+ *  Copyright (C) 2010-2015 SchedMD LLC.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
@@ -45,14 +45,20 @@ enum {
 	PRINT_JOB_ACCOUNT,
 	PRINT_JOB_CLUSTER,
 	PRINT_JOB_COUNT,
-	PRINT_JOB_CPUS,
 	PRINT_JOB_DUR,
 	PRINT_JOB_NODES,
 	PRINT_JOB_SIZE,
+	PRINT_JOB_TRES_COUNT,
 	PRINT_JOB_USER,
 	PRINT_JOB_WCKEY
 };
 
+enum {
+	GROUPED_TOP_ACCT,
+	GROUPED_WCKEY,
+	GROUPED_TOP_ACCT_AND_WCKEY,
+};
+
 static List print_fields_list = NULL; /* types are of print_field_t */
 static List grouping_print_fields_list = NULL; /* types are of print_field_t */
 static int print_job_count = 0;
@@ -156,7 +162,7 @@ static char *_string_to_uid( char *name )
 /* returns number of objects added to list */
 static int _addto_uid_char_list(List char_list, char *names)
 {
-	int i=0, start=0;
+	int i = 0, start = 0;
 	char *name = NULL, *tmp_char = NULL;
 	ListIterator itr = NULL;
 	char quote_c = '\0';
@@ -176,7 +182,7 @@ static int _addto_uid_char_list(List char_list, char *names)
 			i++;
 		}
 		start = i;
-		while(names[i]) {
+		while (names[i]) {
 			//info("got %d - %d = %d", i, start, i-start);
 			if (quote && names[i] == quote_c)
 				break;
@@ -189,7 +195,7 @@ static int _addto_uid_char_list(List char_list, char *names)
 					//info("got %s %d", name, i-start);
 					name = _string_to_uid( name );
 
-					while((tmp_char = list_next(itr))) {
+					while ((tmp_char = list_next(itr))) {
 						if (!strcasecmp(tmp_char, name))
 							break;
 					}
@@ -217,7 +223,7 @@ static int _addto_uid_char_list(List char_list, char *names)
 			memcpy(name, names+start, (i-start));
 			name = _string_to_uid( name );
 
-			while((tmp_char = list_next(itr))) {
+			while ((tmp_char = list_next(itr))) {
 				if (!strcasecmp(tmp_char, name))
 					break;
 			}
@@ -247,12 +253,12 @@ static int _set_cond(int *start, int argc, char *argv[],
 	if (!job_cond->cluster_list)
 		job_cond->cluster_list = list_create(slurm_destroy_char);
 
-	for (i=(*start); i<argc; i++) {
+	for (i = (*start); i < argc; i++) {
 		end = parse_option_end(argv[i]);
 		if (!end)
-			command_len=strlen(argv[i]);
+			command_len = strlen(argv[i]);
 		else
-			command_len=end-1;
+			command_len = end-1;
 
 		if (!end && !strncasecmp(argv[i], "all_clusters",
 					       MAX(command_len, 1))) {
@@ -292,6 +298,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 			set = 1;
 		} else if (!strncasecmp (argv[i], "End", MAX(command_len, 1))) {
 			job_cond->usage_end = parse_time(argv[i]+end, 1);
+			job_cond->usage_end = sanity_check_endtime(job_cond->usage_end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Format",
 					 MAX(command_len, 2))) {
@@ -384,7 +391,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 					      argv[i]+end);
 			set = 1;
 		} else {
-			exit_code=1;
+			exit_code = 1;
 			fprintf(stderr, " Unknown condition: %s\n"
 				"Use keyword set to modify value\n", argv[i]);
 		}
@@ -418,7 +425,7 @@ static int _setup_print_fields_list(List format_list)
 	char *object = NULL;
 
 	if (!format_list || !list_count(format_list)) {
-		exit_code=1;
+		exit_code = 1;
 		fprintf(stderr,
 			" We need a format list to set up the print.\n");
 		return SLURM_ERROR;
@@ -428,7 +435,7 @@ static int _setup_print_fields_list(List format_list)
 		print_fields_list = list_create(destroy_print_field);
 
 	itr = list_iterator_create(format_list);
-	while((object = list_next(itr))) {
+	while ((object = list_next(itr))) {
 		char *tmp_char = NULL;
 		int command_len = 0;
 		int newlen = 0;
@@ -453,12 +460,6 @@ static int _setup_print_fields_list(List format_list)
 			field->name = xstrdup("Cluster");
 			field->len = 9;
 			field->print_routine = print_fields_str;
-		} else if (!strncasecmp("CpuCount", object,
-				       MAX(command_len, 2))) {
-			field->type = PRINT_JOB_CPUS;
-			field->name = xstrdup("CPU Count");
-			field->len = 9;
-			field->print_routine = print_fields_uint;
 		} else if (!strncasecmp("Duration", object,
 				       MAX(command_len, 1))) {
 			field->type = PRINT_JOB_DUR;
@@ -477,6 +478,15 @@ static int _setup_print_fields_list(List format_list)
 			field->name = xstrdup("Node Count");
 			field->len = 9;
 			field->print_routine = print_fields_uint;
+		} else if (!strncasecmp("TresCount", object,
+					MAX(command_len, 5)) ||
+			   !strncasecmp("CpuCount", object,
+					MAX(command_len, 2)) ||
+			   !strncasecmp("count", object, MAX(command_len, 2))) {
+			field->type = PRINT_JOB_TRES_COUNT;
+			field->name = xstrdup("TRES Count");
+			field->len = 10;
+			field->print_routine = print_fields_uint;
 		} else if (!strncasecmp("User", object,
 				       MAX(command_len, 1))) {
 			field->type = PRINT_JOB_USER;
@@ -490,7 +500,7 @@ static int _setup_print_fields_list(List format_list)
 			field->len = 9;
 			field->print_routine = print_fields_str;
 		} else {
-			exit_code=1;
+			exit_code = 1;
 			fprintf(stderr, " Unknown field '%s'\n", object);
 			xfree(field);
 			continue;
@@ -514,12 +524,17 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 	char *last_object = NULL;
 	uint32_t last_size = 0;
 	uint32_t size = 0;
-	char *tmp_char = NULL;
+	char *tmp_char = NULL, *tres_type;
+
+	if (!tres_str || !strcasecmp(tres_str, "cpu"))
+		tres_type = "CPUs";
+	else
+		tres_type = "TRES";
 
 	if (!grouping_list || !list_count(grouping_list)) {
-		exit_code=1;
-		fprintf(stderr, " We need a grouping list to "
-			"set up the print.\n");
+		exit_code = 1;
+		fprintf(stderr,
+			" We need a grouping list to set up the print.\n");
 		return SLURM_ERROR;
 	}
 
@@ -527,7 +542,7 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 		grouping_print_fields_list = list_create(destroy_print_field);
 
 	itr = list_iterator_create(grouping_list);
-	while((object = list_next(itr))) {
+	while ((object = list_next(itr))) {
 		field = xmalloc(sizeof(print_field_t));
 		size = atoi(object);
 		if (print_job_count)
@@ -535,10 +550,10 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 		else
 			field->type = PRINT_JOB_SIZE;
 		if (individual_grouping)
-			field->name = xstrdup_printf("%u cpus", size);
+			field->name = xstrdup_printf("%u %s", size, tres_type);
 		else
-			field->name = xstrdup_printf("%u-%u cpus",
-						     last_size, size-1);
+			field->name = xstrdup_printf("%u-%u %s", last_size,
+						     size-1, tres_type);
 		if (time_format == SLURMDB_REPORT_TIME_SECS_PER
 		   || time_format == SLURMDB_REPORT_TIME_MINS_PER
 		   || time_format == SLURMDB_REPORT_TIME_HOURS_PER)
@@ -553,7 +568,7 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 		last_size = size;
 		last_object = object;
 		if ((tmp_char = strstr(object, "\%"))) {
-			int newlen = atoi(tmp_char+1);
+			int newlen = atoi(tmp_char + 1);
 			if (newlen)
 				field->len = newlen;
 		}
@@ -568,7 +583,7 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 		else
 			field->type = PRINT_JOB_SIZE;
 
-		field->name = xstrdup_printf(">= %u cpus", last_size);
+		field->name = xstrdup_printf(">= %u %s", last_size, tres_type);
 		if (time_format == SLURMDB_REPORT_TIME_SECS_PER
 		   || time_format == SLURMDB_REPORT_TIME_MINS_PER
 		   || time_format == SLURMDB_REPORT_TIME_HOURS_PER)
@@ -590,284 +605,116 @@ static int _setup_grouping_print_fields_list(List grouping_list)
 	return SLURM_SUCCESS;
 }
 
-extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
+static int _run_report(int type, int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
 	slurmdb_job_cond_t *job_cond = xmalloc(sizeof(slurmdb_job_cond_t));
-
-	int i=0;
-
+	uint32_t tres_id = TRES_CPU;
+	int i = 0, tres_cnt = 0;
+	slurmdb_tres_rec_t *tres;
 	uint64_t count1, count2;
-
 	ListIterator itr = NULL;
 	ListIterator itr2 = NULL;
 	ListIterator cluster_itr = NULL;
 	ListIterator local_itr = NULL;
 	ListIterator acct_itr = NULL;
-
 	slurmdb_report_cluster_grouping_t *cluster_group = NULL;
 	slurmdb_report_acct_grouping_t *acct_group = NULL;
 	slurmdb_report_job_grouping_t *job_group = NULL;
-
 	print_field_t *field = NULL;
 	print_field_t total_field;
 	slurmdb_report_time_format_t temp_format;
-
 	List slurmdb_report_cluster_grouping_list = NULL;
 	List assoc_list = NULL;
-
 	List format_list = list_create(slurm_destroy_char);
 	List grouping_list = list_create(slurm_destroy_char);
-
 	List header_list = NULL;
+	char *object_str = "";
 
-//	slurmdb_report_time_format_t temp_time_format = time_format;
-
+	/* init memory before chance of going to end_it before being init'ed. */
+	memset(&total_field, 0, sizeof(print_field_t));
 	print_fields_list = list_create(destroy_print_field);
 
 	_set_cond(&i, argc, argv, job_cond, format_list, grouping_list);
 
-	if (!list_count(format_list))
-		slurm_addto_char_list(format_list, "Cl,a");
-
 	if (!individual_grouping && !list_count(grouping_list))
 		slurm_addto_char_list(grouping_list, "50,250,500,1000");
 
-	_setup_print_fields_list(format_list);
-	list_destroy(format_list);
-
-	if (!(slurmdb_report_cluster_grouping_list =
-	     slurmdb_report_job_sizes_grouped_by_top_account(db_conn,
-		     job_cond, grouping_list, flat_view))) {
+	switch (type) {
+	case GROUPED_TOP_ACCT:
+		if (!(slurmdb_report_cluster_grouping_list =
+		      slurmdb_report_job_sizes_grouped_by_top_account(
+			      db_conn, job_cond, grouping_list, flat_view))) {
+			exit_code = 1;
+			goto end_it;
+		}
+		if (!list_count(format_list))
+			slurm_addto_char_list(format_list, "Cl,a");
+		break;
+	case GROUPED_WCKEY:
+		if (!(slurmdb_report_cluster_grouping_list =
+		      slurmdb_report_job_sizes_grouped_by_wckey(
+			      db_conn, job_cond, grouping_list))) {
+			exit_code = 1;
+			goto end_it;
+		}
+		if (!list_count(format_list))
+			slurm_addto_char_list(format_list, "Cl,wc");
+		object_str = "by Wckey ";
+		break;
+	case GROUPED_TOP_ACCT_AND_WCKEY:
+		if (!(slurmdb_report_cluster_grouping_list =
+		      slurmdb_report_job_sizes_grouped_by_top_account_then_wckey(
+			      db_conn, job_cond, grouping_list, flat_view))) {
+			exit_code = 1;
+			goto end_it;
+		}
+		if (!list_count(format_list))
+			slurm_addto_char_list(format_list, "Cl,a%-20");
+		break;
+	default:
 		exit_code = 1;
 		goto end_it;
+		break;
 	}
 
-	if (_setup_grouping_print_fields_list(grouping_list) != SLURM_SUCCESS) {
-		goto end_it;
-	}
-
-	if (print_fields_have_header) {
-		char start_char[20];
-		char end_char[20];
-		time_t my_start = job_cond->usage_start;
-		time_t my_end = job_cond->usage_end-1;
-
-		slurm_make_time_str(&my_start, start_char, sizeof(start_char));
-		slurm_make_time_str(&my_end, end_char, sizeof(end_char));
-		printf("----------------------------------------"
-		       "----------------------------------------\n");
-		printf("Job Sizes %s - %s (%d secs)\n",
-		       start_char, end_char,
-		       (int)(job_cond->usage_end - job_cond->usage_start));
-		if (print_job_count)
-			printf("Units are in number of jobs ran\n");
-		else
-			printf("Time reported in %s\n", time_format_string);
-		printf("----------------------------------------"
-		       "----------------------------------------\n");
-	}
-
-	header_list = list_create(NULL);
-	list_append_list(header_list, print_fields_list);
-	list_append_list(header_list, grouping_print_fields_list);
-
-	memset(&total_field, 0, sizeof(print_field_t));
-	total_field.type = PRINT_JOB_SIZE;
-	total_field.name = xstrdup("% of cluster");
-	total_field.len = 12;
-	total_field.print_routine = slurmdb_report_print_time;
-	list_append(header_list, &total_field);
-
-	print_fields_header(header_list);
-	list_destroy(header_list);
-
-//	time_format = SLURMDB_REPORT_TIME_PERCENT;
-
-	itr = list_iterator_create(print_fields_list);
-	itr2 = list_iterator_create(grouping_print_fields_list);
-	list_sort(slurmdb_report_cluster_grouping_list,
-	          (ListCmpF)_sort_cluster_grouping_dec);
-	cluster_itr =
-		list_iterator_create(slurmdb_report_cluster_grouping_list);
-	while((cluster_group = list_next(cluster_itr))) {
-		list_sort(cluster_group->acct_list,
-		          (ListCmpF)_sort_acct_grouping_dec);
-		acct_itr = list_iterator_create(cluster_group->acct_list);
-		while((acct_group = list_next(acct_itr))) {
-
-			while((field = list_next(itr))) {
-				switch(field->type) {
-				case PRINT_JOB_CLUSTER:
-					field->print_routine(
-						field,
-						cluster_group->cluster, 0);
-					break;
-				case PRINT_JOB_ACCOUNT:
-					field->print_routine(field,
-							     acct_group->acct,
-							     0);
-					break;
-				default:
-					field->print_routine(field,
-							     NULL,
-							     0);
-					break;
-				}
-			}
-			list_iterator_reset(itr);
-			local_itr = list_iterator_create(acct_group->groups);
-			while((job_group = list_next(local_itr))) {
-				field = list_next(itr2);
-				switch(field->type) {
-				case PRINT_JOB_SIZE:
-					field->print_routine(
-						field,
-						job_group->cpu_secs,
-						acct_group->cpu_secs,
-						0);
-					break;
-				case PRINT_JOB_COUNT:
-					field->print_routine(
-						field,
-						job_group->count,
-						0);
-					break;
-				default:
-					field->print_routine(field,
-							     NULL,
-							     0);
-					break;
-				}
-			}
-			list_iterator_reset(itr2);
-			list_iterator_destroy(local_itr);
-
-			temp_format = time_format;
-			time_format = SLURMDB_REPORT_TIME_PERCENT;
-			if (!print_job_count) {
-				count1 = acct_group->cpu_secs;
-				count2 = cluster_group->cpu_secs;
-			} else {
-				count1 = acct_group->count;
-				count2 = cluster_group->count;
-			}
-			total_field.print_routine(&total_field,
-						  count1, count2, 1);
-			time_format = temp_format;
-			printf("\n");
-		}
-		list_iterator_destroy(acct_itr);
-	}
-	list_iterator_destroy(itr);
-
-//	time_format = temp_time_format;
-
-end_it:
-	xfree(total_field.name);
-	if (print_job_count)
-		print_job_count = 0;
-
-	if (individual_grouping)
-		individual_grouping = 0;
-
-	slurmdb_destroy_job_cond(job_cond);
-
-	if (grouping_list) {
-		list_destroy(grouping_list);
-		grouping_list = NULL;
-	}
-
-	if (assoc_list) {
-		list_destroy(assoc_list);
-		assoc_list = NULL;
-	}
-
-	if (slurmdb_report_cluster_grouping_list) {
-		list_destroy(slurmdb_report_cluster_grouping_list);
-		slurmdb_report_cluster_grouping_list = NULL;
-	}
-
-	if (print_fields_list) {
-		list_destroy(print_fields_list);
-		print_fields_list = NULL;
+	itr2 = list_iterator_create(tres_list);
+	while ((tres = list_next(itr2))) {
+		if (tres->id == NO_VAL)
+			continue;
+		tres_id = tres->id;
+		tres_cnt++;
 	}
-
-	if (grouping_print_fields_list) {
-		list_destroy(grouping_print_fields_list);
-		grouping_print_fields_list = NULL;
+	list_iterator_destroy(itr2);
+	if (tres_cnt > 1) {
+		fprintf(stderr,
+		        " Job report only support a single --tres type.\n"
+			" Generate a separate report for each TRES type.\n");
+		exit_code = 1;
+		goto end_it;
 	}
 
-	return rc;
-}
-
-extern int job_sizes_grouped_by_wckey(int argc, char *argv[])
-{
-	int rc = SLURM_SUCCESS;
-	slurmdb_job_cond_t *job_cond = xmalloc(sizeof(slurmdb_job_cond_t));
-	int i=0;
-
-	uint64_t count1, count2;
-
-	ListIterator itr = NULL;
-	ListIterator itr2 = NULL;
-	ListIterator cluster_itr = NULL;
-	ListIterator local_itr = NULL;
-	ListIterator acct_itr = NULL;
-
-	slurmdb_report_cluster_grouping_t *cluster_group = NULL;
-	slurmdb_report_acct_grouping_t *acct_group = NULL;
-	slurmdb_report_job_grouping_t *job_group = NULL;
-
-	print_field_t *field = NULL;
-	print_field_t total_field;
-	slurmdb_report_time_format_t temp_format;
-
-	List slurmdb_report_cluster_grouping_list = NULL;
-	List wckey_list = NULL;
-
-	List format_list = list_create(slurm_destroy_char);
-	List grouping_list = list_create(slurm_destroy_char);
-
-	List header_list = NULL;
-
-//	slurmdb_report_time_format_t temp_time_format = time_format;
-
-	print_fields_list = list_create(destroy_print_field);
-
-	_set_cond(&i, argc, argv, job_cond, format_list, grouping_list);
-
-	if (!list_count(format_list))
-		slurm_addto_char_list(format_list, "Cl,wc");
-
-	if (!individual_grouping && !list_count(grouping_list))
-		slurm_addto_char_list(grouping_list, "50,250,500,1000");
-
 	_setup_print_fields_list(format_list);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
-	if (!(slurmdb_report_cluster_grouping_list =
-	     slurmdb_report_job_sizes_grouped_by_wckey(db_conn,
-		     job_cond, grouping_list))) {
-		exit_code = 1;
+	if (_setup_grouping_print_fields_list(grouping_list) != SLURM_SUCCESS)
 		goto end_it;
-	}
-
-	_setup_grouping_print_fields_list(grouping_list);
 
 	if (print_fields_have_header) {
 		char start_char[20];
 		char end_char[20];
 		time_t my_start = job_cond->usage_start;
-		time_t my_end = job_cond->usage_end-1;
+		time_t my_end = job_cond->usage_end - 1;
 
 		slurm_make_time_str(&my_start, start_char, sizeof(start_char));
 		slurm_make_time_str(&my_end, end_char, sizeof(end_char));
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
-		printf("Job Sizes by Wckey %s - %s (%d secs)\n",
-		       start_char, end_char,
+		printf("Job Sizes %s%s - %s (%d secs)\n",
+		       object_str, start_char, end_char,
 		       (int)(job_cond->usage_end - job_cond->usage_start));
+		if (tres_str)
+			printf("TRES type is %s\n", tres_str);
 		if (print_job_count)
 			printf("Units are in number of jobs ran\n");
 		else
@@ -880,7 +727,6 @@ extern int job_sizes_grouped_by_wckey(int argc, char *argv[])
 	list_append_list(header_list, print_fields_list);
 	list_append_list(header_list, grouping_print_fields_list);
 
-	memset(&total_field, 0, sizeof(print_field_t));
 	total_field.type = PRINT_JOB_SIZE;
 	total_field.name = xstrdup("% of cluster");
 	total_field.len = 12;
@@ -888,7 +734,7 @@ extern int job_sizes_grouped_by_wckey(int argc, char *argv[])
 	list_append(header_list, &total_field);
 
 	print_fields_header(header_list);
-	list_destroy(header_list);
+	FREE_NULL_LIST(header_list);
 
 //	time_format = SLURMDB_REPORT_TIME_PERCENT;
 
@@ -898,20 +744,39 @@ extern int job_sizes_grouped_by_wckey(int argc, char *argv[])
 	          (ListCmpF)_sort_cluster_grouping_dec);
 	cluster_itr = list_iterator_create(
 		slurmdb_report_cluster_grouping_list);
-	while((cluster_group = list_next(cluster_itr))) {
+	while ((cluster_group = list_next(cluster_itr))) {
+		slurmdb_tres_rec_t *tres_rec;
+		uint64_t cluster_tres_alloc_secs = 0;
+
+		if (cluster_group->tres_list &&
+		    (tres_rec = list_find_first(
+			    cluster_group->tres_list,
+			    slurmdb_find_tres_in_list,
+			    &tres_id)))
+			cluster_tres_alloc_secs = tres_rec->alloc_secs;
+
 		list_sort(cluster_group->acct_list,
 		          (ListCmpF)_sort_acct_grouping_dec);
 		acct_itr = list_iterator_create(cluster_group->acct_list);
-		while((acct_group = list_next(acct_itr))) {
-
-			while((field = list_next(itr))) {
-				switch(field->type) {
+		while ((acct_group = list_next(acct_itr))) {
+			uint64_t acct_tres_alloc_secs = 0;
+
+			if (acct_group->tres_list &&
+			    (tres_rec = list_find_first(
+				    acct_group->tres_list,
+				    slurmdb_find_tres_in_list,
+				    &tres_id)))
+				acct_tres_alloc_secs = tres_rec->alloc_secs;
+
+			while ((field = list_next(itr))) {
+				switch (field->type) {
 				case PRINT_JOB_CLUSTER:
 					field->print_routine(
 						field,
 						cluster_group->cluster, 0);
 					break;
 				case PRINT_JOB_WCKEY:
+				case PRINT_JOB_ACCOUNT:
 					field->print_routine(field,
 							     acct_group->acct,
 							     0);
@@ -925,14 +790,24 @@ extern int job_sizes_grouped_by_wckey(int argc, char *argv[])
 			}
 			list_iterator_reset(itr);
 			local_itr = list_iterator_create(acct_group->groups);
-			while((job_group = list_next(local_itr))) {
+			while ((job_group = list_next(local_itr))) {
+				uint64_t job_cpu_alloc_secs = 0;
+
+				if (job_group->tres_list &&
+				    (tres_rec = list_find_first(
+					    job_group->tres_list,
+					    slurmdb_find_tres_in_list,
+					    &tres_id)))
+					job_cpu_alloc_secs =
+						tres_rec->alloc_secs;
+
 				field = list_next(itr2);
-				switch(field->type) {
+				switch (field->type) {
 				case PRINT_JOB_SIZE:
 					field->print_routine(
 						field,
-						job_group->cpu_secs,
-						acct_group->cpu_secs,
+						job_cpu_alloc_secs,
+						acct_tres_alloc_secs,
 						0);
 					break;
 				case PRINT_JOB_COUNT:
@@ -954,8 +829,8 @@ extern int job_sizes_grouped_by_wckey(int argc, char *argv[])
 			temp_format = time_format;
 			time_format = SLURMDB_REPORT_TIME_PERCENT;
 			if (!print_job_count) {
-				count1 = acct_group->cpu_secs;
-				count2 = cluster_group->cpu_secs;
+				count1 = acct_tres_alloc_secs;
+				count2 = cluster_tres_alloc_secs;
 			} else {
 				count1 = acct_group->count;
 				count2 = cluster_group->count;
@@ -981,240 +856,27 @@ end_it:
 
 	slurmdb_destroy_job_cond(job_cond);
 
-	if (grouping_list) {
-		list_destroy(grouping_list);
-		grouping_list = NULL;
-	}
-
-	if (wckey_list) {
-		list_destroy(wckey_list);
-		wckey_list = NULL;
-	}
-
-	if (slurmdb_report_cluster_grouping_list) {
-		list_destroy(slurmdb_report_cluster_grouping_list);
-		slurmdb_report_cluster_grouping_list = NULL;
-	}
-
-	if (print_fields_list) {
-		list_destroy(print_fields_list);
-		print_fields_list = NULL;
-	}
-
-	if (grouping_print_fields_list) {
-		list_destroy(grouping_print_fields_list);
-		grouping_print_fields_list = NULL;
-	}
+	FREE_NULL_LIST(grouping_list);
+	FREE_NULL_LIST(assoc_list);
+	FREE_NULL_LIST(slurmdb_report_cluster_grouping_list);
+	FREE_NULL_LIST(print_fields_list);
+	FREE_NULL_LIST(grouping_print_fields_list);
 
 	return rc;
 }
 
-extern int job_sizes_grouped_by_top_acct_and_wckey(int argc, char *argv[])
+extern int job_sizes_grouped_by_top_acct(int argc, char *argv[])
 {
-	int rc = SLURM_SUCCESS;
-	slurmdb_job_cond_t *job_cond = xmalloc(sizeof(slurmdb_job_cond_t));
-
-	int i=0;
-
-	uint64_t count1, count2;
-
-	ListIterator itr = NULL;
-	ListIterator itr2 = NULL;
-	ListIterator cluster_itr = NULL;
-	ListIterator local_itr = NULL;
-	ListIterator acct_itr = NULL;
-
-	slurmdb_report_cluster_grouping_t *cluster_group = NULL;
-	slurmdb_report_acct_grouping_t *acct_group = NULL;
-	slurmdb_report_job_grouping_t *job_group = NULL;
-
-	print_field_t *field = NULL;
-	print_field_t total_field;
-	slurmdb_report_time_format_t temp_format;
-
-	List slurmdb_report_cluster_grouping_list = NULL;
-	List assoc_list = NULL;
-
-	List format_list = list_create(slurm_destroy_char);
-	List grouping_list = list_create(slurm_destroy_char);
-
-	List header_list = NULL;
-
-//	slurmdb_report_time_format_t temp_time_format = time_format;
-
-	print_fields_list = list_create(destroy_print_field);
-
-	_set_cond(&i, argc, argv, job_cond, format_list, grouping_list);
-
-	if (!list_count(format_list))
-		slurm_addto_char_list(format_list, "Cl,a%-20");
-
-	if (!individual_grouping && !list_count(grouping_list))
-		slurm_addto_char_list(grouping_list, "50,250,500,1000");
-
-	_setup_print_fields_list(format_list);
-	list_destroy(format_list);
-
-	if (!(slurmdb_report_cluster_grouping_list =
-	     slurmdb_report_job_sizes_grouped_by_top_account_then_wckey(
-		     db_conn, job_cond, grouping_list, flat_view))) {
-		exit_code = 1;
-		goto end_it;
-	}
-
-	_setup_grouping_print_fields_list(grouping_list);
-
-	if (print_fields_have_header) {
-		char start_char[20];
-		char end_char[20];
-		time_t my_start = job_cond->usage_start;
-		time_t my_end = job_cond->usage_end-1;
-
-		slurm_make_time_str(&my_start, start_char, sizeof(start_char));
-		slurm_make_time_str(&my_end, end_char, sizeof(end_char));
-		printf("----------------------------------------"
-		       "----------------------------------------\n");
-		printf("Job Sizes %s - %s (%d secs)\n",
-		       start_char, end_char,
-		       (int)(job_cond->usage_end - job_cond->usage_start));
-		if (print_job_count)
-			printf("Units are in number of jobs ran\n");
-		else
-			printf("Time reported in %s\n", time_format_string);
-		printf("----------------------------------------"
-		       "----------------------------------------\n");
-	}
-
-	header_list = list_create(NULL);
-	list_append_list(header_list, print_fields_list);
-	list_append_list(header_list, grouping_print_fields_list);
-
-	memset(&total_field, 0, sizeof(print_field_t));
-	total_field.type = PRINT_JOB_SIZE;
-	total_field.name = xstrdup("% of cluster");
-	total_field.len = 12;
-	total_field.print_routine = slurmdb_report_print_time;
-	list_append(header_list, &total_field);
-
-	print_fields_header(header_list);
-	list_destroy(header_list);
-
-//	time_format = SLURMDB_REPORT_TIME_PERCENT;
-
-	itr = list_iterator_create(print_fields_list);
-	itr2 = list_iterator_create(grouping_print_fields_list);
-	list_sort(slurmdb_report_cluster_grouping_list,
-		  (ListCmpF)_sort_cluster_grouping_dec);
-	cluster_itr =
-		list_iterator_create(slurmdb_report_cluster_grouping_list);
-	while((cluster_group = list_next(cluster_itr))) {
-		list_sort(cluster_group->acct_list,
-			  (ListCmpF)_sort_acct_grouping_dec);
-		acct_itr = list_iterator_create(cluster_group->acct_list);
-		while((acct_group = list_next(acct_itr))) {
-
-			while((field = list_next(itr))) {
-				switch(field->type) {
-				case PRINT_JOB_CLUSTER:
-					field->print_routine(
-						field,
-						cluster_group->cluster, 0);
-					break;
-				case PRINT_JOB_ACCOUNT:
-					field->print_routine(field,
-							     acct_group->acct,
-							     0);
-					break;
-				default:
-					field->print_routine(field,
-							     NULL,
-							     0);
-					break;
-				}
-			}
-			list_iterator_reset(itr);
-			local_itr = list_iterator_create(acct_group->groups);
-			while((job_group = list_next(local_itr))) {
-				field = list_next(itr2);
-				switch(field->type) {
-				case PRINT_JOB_SIZE:
-					field->print_routine(
-						field,
-						job_group->cpu_secs,
-						acct_group->cpu_secs,
-						0);
-					break;
-				case PRINT_JOB_COUNT:
-					field->print_routine(
-						field,
-						job_group->count,
-						0);
-					break;
-				default:
-					field->print_routine(field,
-							     NULL,
-							     0);
-					break;
-				}
-			}
-			list_iterator_reset(itr2);
-			list_iterator_destroy(local_itr);
-
-			temp_format = time_format;
-			time_format = SLURMDB_REPORT_TIME_PERCENT;
-			if (!print_job_count) {
-				count1 = acct_group->cpu_secs;
-				count2 = cluster_group->cpu_secs;
-			} else {
-				count1 = acct_group->count;
-				count2 = cluster_group->count;
-			}
-			total_field.print_routine(&total_field,
-						  count1, count2, 1);
-			time_format = temp_format;
-			printf("\n");
-		}
-		list_iterator_destroy(acct_itr);
-	}
-	list_iterator_destroy(itr);
-
-//	time_format = temp_time_format;
-
-end_it:
-	xfree(total_field.name);
-	if (print_job_count)
-		print_job_count = 0;
-
-	if (individual_grouping)
-		individual_grouping = 0;
-
-	slurmdb_destroy_job_cond(job_cond);
-
-	if (grouping_list) {
-		list_destroy(grouping_list);
-		grouping_list = NULL;
-	}
-
-	if (assoc_list) {
-		list_destroy(assoc_list);
-		assoc_list = NULL;
-	}
-
-	if (slurmdb_report_cluster_grouping_list) {
-		list_destroy(slurmdb_report_cluster_grouping_list);
-		slurmdb_report_cluster_grouping_list = NULL;
-	}
-
-	if (print_fields_list) {
-		list_destroy(print_fields_list);
-		print_fields_list = NULL;
-	}
+	return _run_report(GROUPED_TOP_ACCT, argc, argv);
+}
 
-	if (grouping_print_fields_list) {
-		list_destroy(grouping_print_fields_list);
-		grouping_print_fields_list = NULL;
-	}
+extern int job_sizes_grouped_by_wckey(int argc, char *argv[])
+{
+	return _run_report(GROUPED_WCKEY, argc, argv);
+}
 
-	return rc;
+extern int job_sizes_grouped_by_top_acct_and_wckey(int argc, char *argv[])
+{
+	return _run_report(GROUPED_TOP_ACCT_AND_WCKEY, argc, argv);
 }
 
diff --git a/src/sreport/resv_reports.c b/src/sreport/resv_reports.c
index 33737df55..7f49bac77 100644
--- a/src/sreport/resv_reports.c
+++ b/src/sreport/resv_reports.c
@@ -2,7 +2,7 @@
  *  resv_reports.c - functions for generating reservation reports
  *                       from accounting infrastructure.
  *****************************************************************************
- *
+ *  Copyright (C) 2010-2015 SchedMD LLC.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
@@ -41,20 +41,20 @@
 #include "cluster_reports.h"
 #include "resv_reports.h"
 enum {
-	PRINT_RESV_NAME,
-	PRINT_RESV_CLUSTER,
-	PRINT_RESV_CPUS,
-	PRINT_RESV_ACPU,
-	PRINT_RESV_DCPU,
-	PRINT_RESV_ICPU,
-	PRINT_RESV_NODES,
 	PRINT_RESV_ASSOCS,
-	PRINT_RESV_START,
+	PRINT_RESV_CLUSTER,
 	PRINT_RESV_END,
 	PRINT_RESV_FLAGS,
-	PRINT_RESV_TIME,
-	PRINT_RESV_CPUTIME,
 	PRINT_RESV_ID,
+	PRINT_RESV_NAME,
+	PRINT_RESV_NODES,
+	PRINT_RESV_START,
+	PRINT_RESV_TIME,
+	PRINT_RESV_TRES_ALLOC,
+	PRINT_RESV_TRES_CNT,
+	PRINT_RESV_TRES_IDLE,
+	PRINT_RESV_TRES_NAME,
+	PRINT_RESV_TRES_USAGE,
 };
 
 typedef enum {
@@ -69,6 +69,17 @@ typedef enum {
 
 static List print_fields_list = NULL; /* types are of print_field_t */
 
+static int _find_resv(void *x, void *key)
+{
+	slurmdb_reservation_rec_t *rec = (slurmdb_reservation_rec_t *)x;
+	uint32_t id = *(uint32_t *)key;
+
+	if (rec->id == id)
+		return 1;
+
+	return 0;
+}
+
 static int _set_resv_cond(int *start, int argc, char *argv[],
 			  slurmdb_reservation_cond_t *resv_cond,
 			  List format_list)
@@ -119,6 +130,7 @@ static int _set_resv_cond(int *start, int argc, char *argv[],
 			set = 1;
 		} else if (!strncasecmp (argv[i], "End", MAX(command_len, 1))) {
 			resv_cond->time_end = parse_time(argv[i]+end, 1);
+			resv_cond->time_end = sanity_check_endtime(resv_cond->time_end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Flags",
 					 MAX(command_len, 2))) {
@@ -153,7 +165,7 @@ static int _set_resv_cond(int *start, int argc, char *argv[],
 			resv_cond->time_start = parse_time(argv[i]+end, 1);
 			set = 1;
 		} else {
-			exit_code=1;
+			exit_code = 1;
 			fprintf(stderr," Unknown condition: %s\n"
 			       "Use keyword set to modify value\n", argv[i]);
 		}
@@ -186,9 +198,9 @@ static int _setup_print_fields_list(List format_list)
 	char *object = NULL;
 
 	if (!format_list || !list_count(format_list)) {
-		exit_code=1;
-			fprintf(stderr, " we need a format list "
-				"to set up the print.\n");
+		exit_code = 1;
+		fprintf(stderr,
+			" we need a format list to set up the print.\n");
 		return SLURM_ERROR;
 	}
 
@@ -196,13 +208,13 @@ static int _setup_print_fields_list(List format_list)
 		print_fields_list = list_create(destroy_print_field);
 
 	itr = list_iterator_create(format_list);
-	while((object = list_next(itr))) {
+	while ((object = list_next(itr))) {
 		char *tmp_char = NULL;
 		int command_len = 0;
 		int newlen = 0;
 
 		if ((tmp_char = strstr(object, "\%"))) {
-			newlen = atoi(tmp_char+1);
+			newlen = atoi(tmp_char + 1);
 			tmp_char[0] = '\0';
 		}
 
@@ -211,7 +223,7 @@ static int _setup_print_fields_list(List format_list)
 		field = xmalloc(sizeof(print_field_t));
 		if (!strncasecmp("allocated", object,
 				MAX(command_len, 2))) {
-			field->type = PRINT_RESV_ACPU;
+			field->type = PRINT_RESV_TRES_ALLOC;
 			field->name = xstrdup("Allocated");
 			if (time_format == SLURMDB_REPORT_TIME_SECS_PER
 			   || time_format == SLURMDB_REPORT_TIME_MINS_PER
@@ -232,30 +244,19 @@ static int _setup_print_fields_list(List format_list)
 			field->name = xstrdup("Cluster");
 			field->len = 9;
 			field->print_routine = print_fields_str;
-		} else if (!strncasecmp("cpucount", object,
+		} else if (!strncasecmp("End", object,
 				       MAX(command_len, 2))) {
-			field->type = PRINT_RESV_CPUS;
-			field->name = xstrdup("CPU count");
-			field->len = 9;
-			field->print_routine = print_fields_uint;
-		} else if (!strncasecmp("ReservationId", object,
-					MAX(command_len, 2))) {
-			field->type = PRINT_RESV_ID;
-			field->name = xstrdup("Id");
-			field->len = 8;
-			field->print_routine = print_fields_uint;
-        } else if (!strncasecmp("down", object, MAX(command_len, 1))) {
-			field->type = PRINT_RESV_DCPU;
-			field->name = xstrdup("Down");
-			if (time_format == SLURMDB_REPORT_TIME_SECS_PER
-			   || time_format == SLURMDB_REPORT_TIME_MINS_PER
-			   || time_format == SLURMDB_REPORT_TIME_HOURS_PER)
-				field->len = 20;
-			else
-				field->len = 9;
-			field->print_routine = slurmdb_report_print_time;
-		} else if (!strncasecmp("idle", object, MAX(command_len, 1))) {
-			field->type = PRINT_RESV_ICPU;
+			field->type = PRINT_RESV_END;
+			field->name = xstrdup("End");
+			field->len = 19;
+			field->print_routine = print_fields_date;
+		} else if (!strncasecmp("Flags", object, MAX(command_len, 2))) {
+			field->type = PRINT_RESV_FLAGS;
+			field->name = xstrdup("Flags");
+			field->len = 20;
+			field->print_routine = print_fields_str;
+		} else if (!strncasecmp("Idle", object, MAX(command_len, 1))) {
+			field->type = PRINT_RESV_TRES_IDLE;
 			field->name = xstrdup("Idle");
 			if (time_format == SLURMDB_REPORT_TIME_SECS_PER
 			   || time_format == SLURMDB_REPORT_TIME_MINS_PER
@@ -264,48 +265,60 @@ static int _setup_print_fields_list(List format_list)
 			else
 				field->len = 9;
 			field->print_routine = slurmdb_report_print_time;
-		} else if (!strncasecmp("Nodes", object, MAX(command_len, 2))) {
-			field->type = PRINT_RESV_NODES;
-			field->name = xstrdup("Nodes");
-			field->len = 15;
-			field->print_routine = print_fields_str;
 		} else if (!strncasecmp("Name", object,
 				       MAX(command_len, 2))) {
 			field->type = PRINT_RESV_NAME;
 			field->name = xstrdup("Name");
 			field->len = 9;
 			field->print_routine = print_fields_str;
+		} else if (!strncasecmp("Nodes", object, MAX(command_len, 2))) {
+			field->type = PRINT_RESV_NODES;
+			field->name = xstrdup("Nodes");
+			field->len = 15;
+			field->print_routine = print_fields_str;
+		} else if (!strncasecmp("ReservationId", object,
+					MAX(command_len, 2))) {
+			field->type = PRINT_RESV_ID;
+			field->name = xstrdup("Id");
+			field->len = 8;
+			field->print_routine = print_fields_uint;
 		} else if (!strncasecmp("Start", object,
 				       MAX(command_len, 2))) {
 			field->type = PRINT_RESV_START;
 			field->name = xstrdup("Start");
 			field->len = 19;
 			field->print_routine = print_fields_date;
-		} else if (!strncasecmp("End", object,
-				       MAX(command_len, 2))) {
-			field->type = PRINT_RESV_END;
-			field->name = xstrdup("End");
-			field->len = 19;
-			field->print_routine = print_fields_date;
-		} else if (!strncasecmp("Flags", object, MAX(command_len, 2))) {
-			field->type = PRINT_RESV_FLAGS;
-			field->name = xstrdup("Flags");
-			field->len = 20;
-			field->print_routine = print_fields_str;
 		} else if (!strncasecmp("TotalTime", object,
 				       MAX(command_len, 2))) {
 			field->type = PRINT_RESV_TIME;
 			field->name = xstrdup("TotalTime");
 			field->len = 9;
 			field->print_routine = print_fields_time_from_secs;
-		} else if (!strncasecmp("CPUTime", object,
-				       MAX(command_len, 2))) {
-			field->type = PRINT_RESV_CPUTIME;
-			field->name = xstrdup("CPUTime");
+		} else if (!strncasecmp("TresCount", object,
+					MAX(command_len, 5)) ||
+			   !strncasecmp("CpuCount", object,
+					MAX(command_len, 2)) ||
+			   !strncasecmp("count", object, MAX(command_len, 2))) {
+			field->type = PRINT_RESV_TRES_CNT;
+			field->name = xstrdup("TRES count");
+			field->len = 9;
+			field->print_routine = print_fields_uint;
+		} else if (!strncasecmp("TresName", object,
+					MAX(command_len, 5))) {
+			field->type = PRINT_RESV_TRES_NAME;
+			field->name = xstrdup("TRES Name");
+			field->len = 14;
+			field->print_routine = print_fields_str;
+		} else if (!strncasecmp("TresTime", object,
+					MAX(command_len, 2)) ||
+			   !strncasecmp("CpuTime", object,
+					 MAX(command_len, 5))) {
+			field->type = PRINT_RESV_TRES_USAGE;
+			field->name = xstrdup("TRES Time");
 			field->len = 9;
 			field->print_routine = print_fields_time_from_secs;
 		} else {
-			exit_code=1;
+			exit_code = 1;
 			fprintf(stderr, " Unknown field '%s'\n", object);
 			xfree(field);
 			continue;
@@ -326,7 +339,7 @@ static List _get_resv_list(int argc, char *argv[],
 {
 	slurmdb_reservation_cond_t *resv_cond =
 		xmalloc(sizeof(slurmdb_reservation_cond_t));
-	int i=0;
+	int i = 0;
 	List resv_list = NULL;
 
 	resv_cond->with_usage = 1;
@@ -335,8 +348,8 @@ static List _get_resv_list(int argc, char *argv[],
 
 	resv_list = slurmdb_reservations_get(db_conn, resv_cond);
 	if (!resv_list) {
-		exit_code=1;
-		fprintf(stderr, " Problem with resv query.\n");
+		exit_code = 1;
+		fprintf(stderr, " Problem with reservation query.\n");
 		return NULL;
 	}
 
@@ -354,12 +367,12 @@ static List _get_resv_list(int argc, char *argv[],
 		       "----------------------------------------\n");
 		printf("%s %s - %s\n",
 		       report_name, start_char, end_char);
-		switch(time_format) {
+		switch (time_format) {
 		case SLURMDB_REPORT_TIME_PERCENT:
-			printf("Time reported in %s\n", time_format_string);
+			printf("Use reported in %s\n", time_format_string);
 			break;
 		default:
-			printf("Time reported in CPU %s\n", time_format_string);
+			printf("Use reported in TRES %s\n", time_format_string);
 			break;
 		}
 		printf("----------------------------------------"
@@ -371,6 +384,116 @@ static List _get_resv_list(int argc, char *argv[],
 	return resv_list;
 }
 
+static void _resv_tres_report(slurmdb_tres_rec_t *tres,
+			      slurmdb_reservation_rec_t *tot_resv)
+{
+	uint64_t idle_secs = 0, total_reported = 0;
+	uint64_t tres_alloc = 0, tres_alloc_secs = 0;
+	int curr_inx = 1;
+	char *temp_char = NULL, *tres_tmp = NULL;
+	slurmdb_tres_rec_t *tres_rec;
+	print_field_t *field;
+	int field_count = 0;
+	ListIterator iter = NULL;
+	int32_t total_time = 0;
+
+	total_time = tot_resv->time_end - tot_resv->time_start;
+	if (total_time <= 0)
+		return;
+
+	if (!tot_resv->tres_list ||
+	    !(tres_rec = list_find_first(tot_resv->tres_list,
+					 slurmdb_find_tres_in_list,
+					 &tres->id))) {
+		debug("error, no %s(%d) TRES in reservation %s",
+		      tres->type, tres->id, tot_resv->name);
+	} else {
+		tres_alloc = tres_rec->count;
+		tres_alloc_secs = tres_rec->alloc_secs;
+		total_reported = (uint64_t)(total_time * tres_rec->alloc_secs);
+		idle_secs = total_reported - tres_rec->alloc_secs;
+	}
+
+	field_count = list_count(print_fields_list);
+	iter = list_iterator_create(print_fields_list);
+	while ((field = list_next(iter))) {
+		switch (field->type) {
+		case PRINT_RESV_NAME:
+			field->print_routine(field, tot_resv->name,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_RESV_CLUSTER:
+			field->print_routine(field, tot_resv->cluster,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_RESV_TRES_CNT:
+			field->print_routine(field, tres_alloc,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_RESV_ID:
+			field->print_routine(field, tot_resv->id,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_RESV_TRES_ALLOC:
+			field->print_routine(field, tres_alloc_secs,
+					     total_reported,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_RESV_TRES_IDLE:
+			field->print_routine(field, idle_secs, total_reported,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_RESV_NODES:
+			field->print_routine(field, tot_resv->nodes,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_RESV_ASSOCS:
+			field->print_routine(field, tot_resv->assocs,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_RESV_START:
+			field->print_routine(field, tot_resv->time_start,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_RESV_END:
+			field->print_routine(field, tot_resv->time_end,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_RESV_FLAGS:
+			temp_char = reservation_flags_string(tot_resv->flags);
+			field->print_routine(field, temp_char,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_RESV_TIME:
+			field->print_routine(field, (uint32_t)total_time,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_RESV_TRES_NAME:
+			xstrfmtcat(tres_tmp, "%s%s%s",
+				   tres->type,
+				   tres->name ? "/" : "",
+				   tres->name ? tres->name : "");
+
+			field->print_routine(field, tres_tmp,
+					     (curr_inx == field_count));
+			xfree(tres_tmp);
+			break;
+		case PRINT_RESV_TRES_USAGE:
+			field->print_routine(field, total_reported,
+					     (curr_inx == field_count));
+			break;
+		default:
+			field->print_routine(field, NULL,
+					     (curr_inx == field_count));
+			break;
+		}
+		curr_inx++;
+		xfree(temp_char);
+	}
+	list_iterator_reset(iter);
+	printf("\n");
+}
+
 extern int resv_utilization(int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
@@ -379,208 +502,102 @@ extern int resv_utilization(int argc, char *argv[])
 	ListIterator itr2 = NULL;
 	slurmdb_reservation_rec_t *resv = NULL;
 	slurmdb_reservation_rec_t *tot_resv = NULL;
-
-	print_field_t *field = NULL;
-	int32_t total_time = 0;
-
 	List resv_list = NULL;
 	List tot_resv_list = NULL;
 
 	List format_list = list_create(slurm_destroy_char);
-	int field_count = 0;
 
 	print_fields_list = list_create(destroy_print_field);
 
-
 	if (!(resv_list = _get_resv_list(argc, argv,
-					"Reservation Utilization",
-					format_list)))
+					 "Reservation Utilization",
+					 format_list)))
 		goto end_it;
 
-	if (!list_count(format_list))
-		slurm_addto_char_list(format_list,
-				      "Cl,name,start,end,al,i");
+	if (!list_count(format_list)) {
+		if (tres_str) {
+			slurm_addto_char_list(format_list,
+					      "Cl,name,start,end,TresName,al,i");
+		} else {
+			slurm_addto_char_list(format_list,
+					      "Cl,name,start,end,al,i");
+		}
+	}
 
 	_setup_print_fields_list(format_list);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	/* we will just use the pointers returned from the
-	   get_resv_list here, so don't remove them
-	*/
+	 * get_resv_list here, so don't remove them */
 	tot_resv_list = list_create(NULL);
 
-	itr = list_iterator_create(resv_list);
-	tot_itr = list_iterator_create(tot_resv_list);
-	itr2 = list_iterator_create(print_fields_list);
-
 	print_fields_header(print_fields_list);
 
-	field_count = list_count(print_fields_list);
+	/* Compress duplicate reservations into a single record. Reservations
+	 * can have multiple entries if there are changes after starting (e.g.
+	 * changing node count). Compressed reservations will have their
+	 * resource usage averaged. */
+	itr = list_iterator_create(resv_list);
+	while ((resv = list_next(itr))) {
+		if (!(tot_resv = list_find_first(
+			      tot_resv_list, _find_resv, &resv->id))) {
+			list_append(tot_resv_list, resv);
+			continue;
+		}
 
-	/* compress all the reservations into a single reservation.
-	   Since reservations can have multiple entries like if the
-	   node count changes or something after the reservation
-	   starts.  Here we colapse them into 1 record.
-	*/
-	while((resv = list_next(itr))) {
-		while((tot_resv = list_next(tot_itr))) {
-			if (tot_resv->id == resv->id) {
-				/* get an average of cpus if the
-				   reservation changes we will just
-				   get an average.
-				*/
-				tot_resv->cpus += resv->cpus;
-				tot_resv->cpus /= 2;
-				tot_resv->alloc_secs += resv->alloc_secs;
-				tot_resv->down_secs += resv->down_secs;
-				if (resv->time_start < tot_resv->time_start)
-					tot_resv->time_start = resv->time_start;
-				if (resv->time_end > tot_resv->time_end)
-					tot_resv->time_end = resv->time_end;
-				break;
+		if (resv->tres_list && list_count(resv->tres_list)) {
+			if (!tot_resv->tres_list) {
+				tot_resv->tres_list = slurmdb_copy_tres_list(
+					resv->tres_list);
+			} else {
+				slurmdb_tres_rec_t *tres_rec, *loc_tres_rec;
+				ListIterator tres_itr = list_iterator_create(
+					resv->tres_list);
+				while ((tres_rec = list_next(tres_itr))) {
+					if (!(loc_tres_rec = list_find_first(
+						      tot_resv->tres_list,
+						      slurmdb_find_tres_in_list,
+						      &tres_rec->id))) {
+						loc_tres_rec =
+							slurmdb_copy_tres_rec(
+								tres_rec);
+						list_append(tot_resv->tres_list,
+							    loc_tres_rec);
+						continue;
+					}
+					loc_tres_rec->count += tres_rec->count;
+					loc_tres_rec->count /= 2;
+					loc_tres_rec->alloc_secs +=
+						tres_rec->alloc_secs;
+				}
+				list_iterator_destroy(tres_itr);
 			}
 		}
-		if (!tot_resv)
-			list_append(tot_resv_list, resv);
-
-		list_iterator_reset(tot_itr);
+		if (resv->time_start < tot_resv->time_start)
+			tot_resv->time_start = resv->time_start;
+		if (resv->time_end > tot_resv->time_end)
+			tot_resv->time_end = resv->time_end;
 	}
+	list_iterator_destroy(itr);
 
 	list_sort(tot_resv_list, (ListCmpF)sort_reservations_dec);
-	list_iterator_reset(tot_itr);
-	while((tot_resv = list_next(tot_itr))) {
-		uint64_t idle_secs = 0, total_reported = 0;
-		int curr_inx = 1;
-		char *temp_char = NULL;
-
-		total_time = tot_resv->time_end - tot_resv->time_start;
-		if (total_time <= 0)
-			continue;
-		total_reported = (uint64_t)(total_time * tot_resv->cpus);
-
-		idle_secs = total_reported
-			- tot_resv->alloc_secs - tot_resv->down_secs;
-
-		while((field = list_next(itr2))) {
-			switch(field->type) {
-			case PRINT_RESV_NAME:
-				field->print_routine(field,
-						     tot_resv->name,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_RESV_CLUSTER:
-				field->print_routine(field,
-						     tot_resv->cluster,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_RESV_CPUS:
-				field->print_routine(field,
-						     tot_resv->cpus,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_RESV_ID:
-				field->print_routine(field,
-						     tot_resv->id,
-						     (curr_inx ==
-						     field_count));
-				break;
-			case PRINT_RESV_ACPU:
-				field->print_routine(field,
-						     tot_resv->alloc_secs,
-						     total_reported,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_RESV_DCPU:
-				field->print_routine(field,
-						     tot_resv->down_secs,
-						     total_reported,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_RESV_ICPU:
-				field->print_routine(field,
-						     idle_secs,
-						     total_reported,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_RESV_NODES:
-				field->print_routine(field,
-						     tot_resv->nodes,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_RESV_ASSOCS:
-				field->print_routine(field,
-						     tot_resv->assocs,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_RESV_START:
-				field->print_routine(field,
-						     tot_resv->time_start,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_RESV_END:
-				field->print_routine(field,
-						     tot_resv->time_end,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_RESV_FLAGS:
-				temp_char = reservation_flags_string(tot_resv->flags);
-				field->print_routine(field,
-						     temp_char,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_RESV_TIME:
-				field->print_routine(field,
-						     (uint64_t)total_time,
-						     (curr_inx ==
-						      field_count));
-				break;
-			case PRINT_RESV_CPUTIME:
-				field->print_routine(field,
-						     total_reported,
-						     (curr_inx ==
-						      field_count));
-				break;
-			default:
-				field->print_routine(
-					field, NULL,
-					(curr_inx == field_count));
-				break;
-			}
-			curr_inx++;
-			xfree(temp_char);
+	tot_itr = list_iterator_create(tot_resv_list);
+	while ((tot_resv = list_next(tot_itr))) {
+		slurmdb_tres_rec_t *tres;
+		itr2 = list_iterator_create(tres_list);
+		while ((tres = list_next(itr2))) {
+			if (tres->id == NO_VAL)
+				continue;
+			_resv_tres_report(tres, tot_resv);
 		}
-		list_iterator_reset(itr2);
-		printf("\n");
+		list_iterator_destroy(itr2);
 	}
-
 	list_iterator_destroy(tot_itr);
-	list_iterator_destroy(itr2);
-	list_iterator_destroy(itr);
 
 end_it:
-	if (resv_list) {
-		list_destroy(resv_list);
-		resv_list = NULL;
-	}
-	if (tot_resv_list) {
-		list_destroy(tot_resv_list);
-		tot_resv_list = NULL;
-	}
-	if (print_fields_list) {
-		list_destroy(print_fields_list);
-		print_fields_list = NULL;
-	}
+	FREE_NULL_LIST(resv_list);
+	FREE_NULL_LIST(tot_resv_list);
+	FREE_NULL_LIST(print_fields_list);
 
 	return rc;
 }
diff --git a/src/sreport/sreport.c b/src/sreport/sreport.c
index c3dcc12dc..e9090545c 100644
--- a/src/sreport/sreport.c
+++ b/src/sreport/sreport.c
@@ -1,6 +1,7 @@
 /*****************************************************************************\
  *  sreport.c - report generating tool for slurm accounting.
  *****************************************************************************
+ *  Copyright (C) 2010-2015 SchedMD LLC.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -54,6 +55,8 @@ int exit_code;		/* sreport's exit code, =1 on any error at any time */
 int exit_flag;		/* program to terminate if =1 */
 int input_words;	/* number of words of input permitted */
 int quiet_flag;		/* quiet=1, verbose=-1, normal=0 */
+char *tres_str = NULL;	/* --tres= value */
+List tres_list;		/* TRES to report, built from --tres= value */
 int all_clusters_flag = 0;
 slurmdb_report_time_format_t time_format = SLURMDB_REPORT_TIME_MINS;
 char *time_format_string = "Minutes";
@@ -61,17 +64,18 @@ void *db_conn = NULL;
 uint32_t my_uid = 0;
 slurmdb_report_sort_t sort_flag = SLURMDB_REPORT_SORT_TIME;
 
-static void	_job_rep (int argc, char *argv[]);
-static void	_user_rep (int argc, char *argv[]);
-static void	_resv_rep (int argc, char *argv[]);
-static void	_cluster_rep (int argc, char *argv[]);
 static void	_assoc_rep (int argc, char *argv[]);
+static List	_build_tres_list(char *tres_str);
+static void	_cluster_rep (int argc, char *argv[]);
 static int	_get_command (int *argc, char *argv[]);
+static void	_job_rep (int argc, char *argv[]);
 static void     _print_version( void );
 static int	_process_command (int argc, char *argv[]);
-static int      _set_time_format(char *format);
+static void	_resv_rep (int argc, char *argv[]);
 static int      _set_sort(char *format);
-static void	_usage ();
+static int      _set_time_format(char *format);
+static void	_usage ( void );
+static void	_user_rep (int argc, char *argv[]);
 
 int
 main (int argc, char *argv[])
@@ -90,6 +94,7 @@ main (int argc, char *argv[])
 		{"parsable2",0, 0, 'P'},
 		{"quiet",    0, 0, 'Q'},
 		{"sort",     0, 0, 's'},
+		{"tres",     1, 0, 'T'},
 		{"usage",    0, 0, 'h'},
 		{"verbose",  0, 0, 'v'},
 		{"version",  0, 0, 'V'},
@@ -118,7 +123,11 @@ main (int argc, char *argv[])
 	}
 	xfree(temp);
 
-	while((opt_char = getopt_long(argc, argv, "ahnpPQs:t:vV",
+	temp = getenv("SREPORT_TRES");
+	if (temp)
+		tres_str = xstrdup(temp);
+
+	while ((opt_char = getopt_long(argc, argv, "ahnpPQs:t:T:vV",
 			long_options, &option_index)) != -1) {
 		switch (opt_char) {
 		case (int)'?':
@@ -138,11 +147,11 @@ main (int argc, char *argv[])
 			break;
 		case (int)'p':
 			print_fields_parsable_print =
-			PRINT_FIELDS_PARSABLE_ENDING;
+				PRINT_FIELDS_PARSABLE_ENDING;
 			break;
 		case (int)'P':
 			print_fields_parsable_print =
-			PRINT_FIELDS_PARSABLE_NO_ENDING;
+				PRINT_FIELDS_PARSABLE_NO_ENDING;
 			break;
 		case (int)'Q':
 			quiet_flag = 1;
@@ -153,6 +162,10 @@ main (int argc, char *argv[])
 		case (int)'t':
 			_set_time_format(optarg);
 			break;
+		case (int)'T':
+			xfree(tres_str);
+			tres_str = xstrdup(optarg);
+			break;
 		case (int)'v':
 			quiet_flag = -1;
 			break;
@@ -179,13 +192,13 @@ main (int argc, char *argv[])
 		}
 	}
 
+	my_uid = getuid();
 	db_conn = slurmdb_connection_get();
-
 	if (errno) {
-		error("Problem talking to the database: %m");
+		fatal("Problem connecting to the database: %m");
 		exit(1);
 	}
-	my_uid = getuid();
+	tres_list = _build_tres_list(tres_str);
 
 	if (input_field_count)
 		exit_flag = 1;
@@ -205,6 +218,47 @@ main (int argc, char *argv[])
 	exit(exit_code);
 }
 
+static List _build_tres_list(char *tres_str)
+{
+	List tres_list = NULL;
+	ListIterator iter;
+	slurmdb_tres_rec_t *tres;
+	slurmdb_tres_cond_t cond;
+	char *tres_tmp = NULL, *tres_tmp2 = NULL, *save_ptr = NULL, *tok;
+
+	memset(&cond, 0, sizeof(slurmdb_tres_cond_t));
+	tres_list = acct_storage_g_get_tres(db_conn, my_uid, &cond);
+	if (!tres_list) {
+		fatal("Problem getting TRES data: %m");
+		exit(1);
+	}
+
+	iter = list_iterator_create(tres_list);
+	while ((tres = list_next(iter))) {
+		if (tres_str) {
+			tres_tmp = xstrdup(tres_str);
+			xstrfmtcat(tres_tmp2, "%s%s%s",
+				   tres->type,
+				   tres->name ? "/" : "",
+				   tres->name ? tres->name : "");
+			tok = strtok_r(tres_tmp, ",", &save_ptr);
+			while (tok) {
+				if (!strcasecmp(tres_tmp2, tok))
+					break;
+				tok = strtok_r(NULL, ",", &save_ptr);
+			}
+			if (!tok) /* Not found */
+				tres->id = NO_VAL;	/* Skip this TRES */
+			xfree(tres_tmp2);
+			xfree(tres_tmp);
+		} else if (tres->id != TRES_CPU) {
+			tres->id = NO_VAL;		/* Skip this TRES */
+		}
+	}
+	list_iterator_destroy(iter);
+	return tres_list;
+}
+
 #if !HAVE_READLINE
 /*
  * Alternative to readline if readline is not available
@@ -803,12 +857,12 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
        Cluster                                                             \n\
        - AccountUtilizationByUser                                          \n\
        - UserUtilizationByAccount                                          \n\
-             - Accounts, Cluster, CPUCount, Login, Proper, Used            \n\
+             - Accounts, Cluster, Count, Login, Proper, Used               \n\
        - UserUtilizationByWckey                                            \n\
        - WCKeyUtilizationByUser                                            \n\
-             - Cluster, CPUCount, Login, Proper, Used, Wckey               \n\
+             - Cluster, Count, Login, Proper, Used, Wckey                  \n\
        - Utilization                                                       \n\
-             - Allocated, Cluster, CPUCount, Down, Idle, Overcommited,     \n\
+             - Allocated, Cluster, Count, Down, Idle, Overcommited,        \n\
                PlannedDown, Reported, Reserved                             \n\
                                                                            \n\
        Job                                                                 \n\
@@ -817,7 +871,7 @@ sreport [<OPTION>] [<COMMAND>]                                             \n\
                                                                            \n\
        Reservation                                                         \n\
        - Utilization                                                       \n\
-             - Allocated, Associations, Cluster, CPUCount, CPUTime,        \n\
+             - Allocated, Associations, Cluster, Count, CPUTime,           \n\
                End, Flags, Idle, Name, Nodes, ReservationId, Start, TotalTime \n\
                                                                            \n\
        User                                                                \n\
diff --git a/src/sreport/sreport.h b/src/sreport/sreport.h
index d50f5e7bc..83bcaa6ac 100644
--- a/src/sreport/sreport.h
+++ b/src/sreport/sreport.h
@@ -1,6 +1,7 @@
 /*****************************************************************************\
  *  sreport.h - report generating tool for slurm accounting header file.
  *****************************************************************************
+ *  Copyright (C) 2010-2015 SchedMD LLC.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
@@ -96,6 +97,8 @@ extern int exit_code;	/* sacctmgr's exit code, =1 on any error at any time */
 extern int exit_flag;	/* program to terminate if =1 */
 extern int input_words;	/* number of words of input permitted */
 extern int quiet_flag;	/* quiet=1, verbose=-1, normal=0 */
+extern char *tres_str;	/* --tres= value */
+List tres_list;		/* TRES to report, built from --tres= value */
 extern void *db_conn;
 extern uint32_t my_uid;
 extern int all_clusters_flag;
@@ -104,6 +107,7 @@ extern slurmdb_report_sort_t sort_flag;
 extern void slurmdb_report_print_time(print_field_t *field,
 			       uint64_t value, uint64_t total_time, int last);
 extern int parse_option_end(char *option);
+extern time_t sanity_check_endtime(time_t endtime);
 extern char *strip_quotes(char *option, int *increased);
 extern int sort_user_dec(void *, void *);
 extern int sort_cluster_dec(void *, void *);
diff --git a/src/sreport/user_reports.c b/src/sreport/user_reports.c
index 3c09c3da4..3554a5999 100644
--- a/src/sreport/user_reports.c
+++ b/src/sreport/user_reports.c
@@ -2,7 +2,7 @@
  *  user_reports.c - functions for generating user reports
  *                     from accounting infrastructure.
  *****************************************************************************
- *
+ *  Copyright (C) 2010-2015 SchedMD LLC.
  *  Copyright (C) 2008 Lawrence Livermore National Security.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
@@ -47,6 +47,7 @@ enum {
 	PRINT_USER_PROPER,
 	PRINT_USER_USED,
 	PRINT_USER_ENERGY,
+	PRINT_USER_TRES_NAME,
 };
 
 static List print_fields_list = NULL; /* types are of print_field_t */
@@ -60,7 +61,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 	int set = 0;
 	int end = 0;
 	int local_cluster_flag = all_clusters_flag;
-	slurmdb_association_cond_t *assoc_cond = NULL;
+	slurmdb_assoc_cond_t *assoc_cond = NULL;
 	time_t start_time, end_time;
 	int command_len = 0;
 
@@ -73,19 +74,19 @@ static int _set_cond(int *start, int argc, char *argv[],
 	user_cond->with_assocs = 1;
 	if (!user_cond->assoc_cond) {
 		user_cond->assoc_cond =
-			xmalloc(sizeof(slurmdb_association_cond_t));
+			xmalloc(sizeof(slurmdb_assoc_cond_t));
 		user_cond->assoc_cond->with_usage = 1;
 	}
 	assoc_cond = user_cond->assoc_cond;
 
 	if (!assoc_cond->cluster_list)
 		assoc_cond->cluster_list = list_create(slurm_destroy_char);
-	for (i=(*start); i<argc; i++) {
+	for (i = (*start); i < argc; i++) {
 		end = parse_option_end(argv[i]);
 		if (!end)
-			command_len=strlen(argv[i]);
+			command_len = strlen(argv[i]);
 		else
-			command_len=end-1;
+			command_len = end - 1;
 
 		if (!end && !strncasecmp(argv[i], "all_clusters",
 					       MAX(command_len, 1))) {
@@ -120,6 +121,7 @@ static int _set_cond(int *start, int argc, char *argv[],
 			set = 1;
 		} else if (!strncasecmp (argv[i], "End", MAX(command_len, 1))) {
 			assoc_cond->usage_end = parse_time(argv[i]+end, 1);
+			assoc_cond->usage_end = sanity_check_endtime(assoc_cond->usage_end);
 			set = 1;
 		} else if (!strncasecmp (argv[i], "Format",
 					 MAX(command_len, 1))) {
@@ -168,7 +170,7 @@ static int _setup_print_fields_list(List format_list)
 	char *object = NULL;
 
 	if (!format_list || !list_count(format_list)) {
-		exit_code=1;
+		exit_code = 1;
 		fprintf(stderr,
 			" We need a format list to set up the print.\n");
 		return SLURM_ERROR;
@@ -178,7 +180,7 @@ static int _setup_print_fields_list(List format_list)
 		print_fields_list = list_create(destroy_print_field);
 
 	itr = list_iterator_create(format_list);
-	while((object = list_next(itr))) {
+	while ((object = list_next(itr))) {
 		char *tmp_char = NULL;
 		int command_len = 0;
 		int newlen = 0;
@@ -202,6 +204,16 @@ static int _setup_print_fields_list(List format_list)
 			field->name = xstrdup("Cluster");
 			field->len = 9;
 			field->print_routine = print_fields_str;
+		} else if (!strncasecmp("Energy", object, MAX(command_len, 1))){
+			field->type = PRINT_USER_ENERGY;
+			field->name = xstrdup("Energy");
+			if (time_format == SLURMDB_REPORT_TIME_SECS_PER
+			   || time_format == SLURMDB_REPORT_TIME_MINS_PER
+			   || time_format == SLURMDB_REPORT_TIME_HOURS_PER)
+				field->len = 18;
+			else
+				field->len = 10;
+			field->print_routine = slurmdb_report_print_time;
 		} else if (!strncasecmp("Login", object, MAX(command_len, 1))) {
 			field->type = PRINT_USER_LOGIN;
 			field->name = xstrdup("Login");
@@ -212,6 +224,12 @@ static int _setup_print_fields_list(List format_list)
 			field->name = xstrdup("Proper Name");
 			field->len = 15;
 			field->print_routine = print_fields_str;
+		} else if (!strncasecmp("TresName", object,
+				        MAX(command_len, 5))) {
+			field->type = PRINT_USER_TRES_NAME;
+			field->name = xstrdup("TRES Name");
+			field->len = 14;
+			field->print_routine = print_fields_str;
 		} else if (!strncasecmp("Used", object, MAX(command_len, 1))) {
 			field->type = PRINT_USER_USED;
 			field->name = xstrdup("Used");
@@ -222,18 +240,8 @@ static int _setup_print_fields_list(List format_list)
 			else
 				field->len = 10;
 			field->print_routine = slurmdb_report_print_time;
-		}else if (!strncasecmp("Energy", object, MAX(command_len, 1))) {
-			field->type = PRINT_USER_ENERGY;
-			field->name = xstrdup("Energy");
-			if (time_format == SLURMDB_REPORT_TIME_SECS_PER
-			   || time_format == SLURMDB_REPORT_TIME_MINS_PER
-			   || time_format == SLURMDB_REPORT_TIME_HOURS_PER)
-				field->len = 18;
-			else
-				field->len = 10;
-			field->print_routine = slurmdb_report_print_time;
 		} else {
-			exit_code=1;
+			exit_code = 1;
 			fprintf(stderr, " Unknown field '%s'\n", object);
 			xfree(field);
 			continue;
@@ -249,33 +257,147 @@ static int _setup_print_fields_list(List format_list)
 	return SLURM_SUCCESS;
 }
 
+static void _user_top_tres_report(slurmdb_tres_rec_t *tres,
+			slurmdb_report_cluster_rec_t *slurmdb_report_cluster,
+			slurmdb_report_user_rec_t *slurmdb_report_user)
+{
+	slurmdb_tres_rec_t *cluster_tres_rec, *tres_rec, *total_energy;
+	ListIterator iter = NULL;
+	ListIterator itr2 = NULL;
+	print_field_t *field;
+	char *object = NULL, *tres_tmp = NULL, *tmp_char = NULL;
+	struct passwd *pwd = NULL;
+	int curr_inx = 1, field_count;
+	uint32_t tres_energy;
+	uint64_t cluster_energy_cnt = 0, user_energy_cnt = 0;
+
+	if (!(cluster_tres_rec = list_find_first(
+				slurmdb_report_cluster->tres_list,
+				slurmdb_find_tres_in_list,
+				&tres->id))) {
+		info("error, no %s(%d) TRES!", tres->type, tres->id);
+		return;
+	}
+	if (!(tres_rec = list_find_first(slurmdb_report_user->tres_list,
+				slurmdb_find_tres_in_list,
+				&tres->id))) {
+		info("error, no %s(%d) TRES!", tres->type, tres->id);
+		return;
+	}
+	if (!tres_rec->alloc_secs) {
+		debug2("error, no %s(%d) TRES usage", tres->type, tres->id);
+		return;
+	}
+
+	field_count = list_count(print_fields_list);
+	iter = list_iterator_create(print_fields_list);
+	while ((field = list_next(iter))) {
+		switch (field->type) {
+		case PRINT_USER_ACCT:
+			itr2 = list_iterator_create(
+				slurmdb_report_user->acct_list);
+			while ((object = list_next(itr2))) {
+				if (tmp_char)
+					xstrfmtcat(tmp_char, ", %s", object);
+				else
+					xstrcat(tmp_char, object);
+			}
+			list_iterator_destroy(itr2);
+			field->print_routine(field, tmp_char,
+					     (curr_inx == field_count));
+			xfree(tmp_char);
+			break;
+		case PRINT_USER_CLUSTER:
+			field->print_routine(field,
+					     slurmdb_report_cluster->name,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_USER_LOGIN:
+			field->print_routine(field, slurmdb_report_user->name,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_USER_PROPER:
+			pwd = getpwnam(slurmdb_report_user->name);
+			if (pwd) {
+				tmp_char = strtok(pwd->pw_gecos, ",");
+				if (!tmp_char)
+					tmp_char = pwd->pw_gecos;
+			}
+			field->print_routine(field, tmp_char,
+					     (curr_inx == field_count));
+			tmp_char = NULL;	/* Not xmalloced */
+			break;
+		case PRINT_USER_USED:
+			field->print_routine(field, tres_rec->alloc_secs,
+					     cluster_tres_rec->alloc_secs,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_USER_ENERGY:
+			/* For backward compatibility with pre-TRES logic,
+			 * get energy_cnt here */
+			tres_energy = TRES_ENERGY;
+			if ((total_energy = list_find_first(
+				     slurmdb_report_cluster->tres_list,
+				     slurmdb_find_tres_in_list,
+				     &tres_energy)))
+				cluster_energy_cnt = total_energy->alloc_secs;
+			if ((total_energy = list_find_first(
+					slurmdb_report_user->tres_list,
+					slurmdb_find_tres_in_list,
+					&tres_energy)))
+				user_energy_cnt = total_energy->alloc_secs;
+			field->print_routine(field, user_energy_cnt,
+					     cluster_energy_cnt,
+					     (curr_inx ==field_count));
+			break;
+		case PRINT_USER_TRES_NAME:
+			xstrfmtcat(tres_tmp, "%s%s%s",
+				   tres->type,
+				   tres->name ? "/" : "",
+				   tres->name ? tres->name : "");
+
+			field->print_routine(field, tres_tmp,
+					     (curr_inx == field_count));
+			xfree(tres_tmp);
+			break;
+		default:
+			field->print_routine(field, NULL,
+					     (curr_inx == field_count));
+			break;
+		}
+		curr_inx++;
+	}
+	list_iterator_destroy(iter);
+	printf("\n");
+}
+
 extern int user_top(int argc, char *argv[])
 {
 	int rc = SLURM_SUCCESS;
 	slurmdb_user_cond_t *user_cond = xmalloc(sizeof(slurmdb_user_cond_t));
-	ListIterator itr = NULL;
-	ListIterator itr2 = NULL;
-	ListIterator itr3 = NULL;
+	ListIterator itr = NULL, itr2 = NULL;
 	ListIterator cluster_itr = NULL;
 	List format_list = list_create(slurm_destroy_char);
 	List slurmdb_report_cluster_list = NULL;
-	char *object = NULL;
-
-	int i=0;
+	int i = 0;
 	slurmdb_report_user_rec_t *slurmdb_report_user = NULL;
 	slurmdb_report_cluster_rec_t *slurmdb_report_cluster = NULL;
-	print_field_t *field = NULL;
-	int field_count = 0;
 
 	print_fields_list = list_create(destroy_print_field);
 
 	_set_cond(&i, argc, argv, user_cond, format_list);
 
-	if (!list_count(format_list))
-		slurm_addto_char_list(format_list, "Cl,L,P,A,U,Energy");
+	if (!list_count(format_list)) {
+		if (tres_str) {
+			slurm_addto_char_list(format_list,
+					      "Cl,L,P,A,TresName,Used");
+		} else {
+			slurm_addto_char_list(format_list, "Cl,L,P,A,U,Energy");
+		}
+	}
 
 	_setup_print_fields_list(format_list);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	if (!(slurmdb_report_cluster_list =
 	     slurmdb_report_user_top_usage(db_conn, user_cond, group_accts))) {
@@ -300,106 +422,37 @@ extern int user_top(int argc, char *argv[])
 
 		switch(time_format) {
 		case SLURMDB_REPORT_TIME_PERCENT:
-			printf("Time reported in %s\n", time_format_string);
+			printf("Use reported in %s\n", time_format_string);
 			break;
 		default:
-			printf("Time reported in CPU %s\n", time_format_string);
+			printf("Use reported in TRES %s\n", time_format_string);
 			break;
 		}
 		printf("----------------------------------------"
 		       "----------------------------------------\n");
 	}
 
-	itr2 = list_iterator_create(print_fields_list);
 	print_fields_header(print_fields_list);
 
-	field_count = list_count(print_fields_list);
-
 	cluster_itr = list_iterator_create(slurmdb_report_cluster_list);
-	while((slurmdb_report_cluster = list_next(cluster_itr))) {
+	while ((slurmdb_report_cluster = list_next(cluster_itr))) {
 		int count = 0;
+
 		list_sort(slurmdb_report_cluster->user_list,
 			  (ListCmpF)sort_user_dec);
 
 		itr = list_iterator_create(slurmdb_report_cluster->user_list);
-		while((slurmdb_report_user = list_next(itr))) {
-			int curr_inx = 1;
-			while((field = list_next(itr2))) {
-				char *tmp_char = NULL;
-				struct passwd *pwd = NULL;
-				switch(field->type) {
-				case PRINT_USER_ACCT:
-					itr3 = list_iterator_create(
-						slurmdb_report_user->acct_list);
-					while((object = list_next(itr3))) {
-						if (tmp_char)
-							xstrfmtcat(tmp_char,
-								   ", %s",
-								   object);
-						else
-							xstrcat(tmp_char,
-								object);
-					}
-					list_iterator_destroy(itr3);
-					field->print_routine(
-						field,
-						tmp_char,
-						(curr_inx == field_count));
-					xfree(tmp_char);
-					break;
-				case PRINT_USER_CLUSTER:
-					field->print_routine(
-						field,
-						slurmdb_report_cluster->name,
-						(curr_inx == field_count));
-					break;
-				case PRINT_USER_LOGIN:
-					field->print_routine(field,
-							     slurmdb_report_user->name,
-							     (curr_inx ==
-							      field_count));
-					break;
-				case PRINT_USER_PROPER:
-					pwd = getpwnam(slurmdb_report_user->name);
-					if (pwd) {
-						tmp_char = strtok(pwd->pw_gecos,
-								  ",");
-						if (!tmp_char)
-							tmp_char =
-								pwd->pw_gecos;
-					}
-					field->print_routine(field,
-							     tmp_char,
-							     (curr_inx ==
-							      field_count));
-					break;
-				case PRINT_USER_USED:
-					field->print_routine(
-						field,
-						slurmdb_report_user->cpu_secs,
-						slurmdb_report_cluster->
-						cpu_secs,
-						(curr_inx == field_count));
-					break;
-				case PRINT_USER_ENERGY:
-					field->print_routine(
-						field,
-						slurmdb_report_user->
-						consumed_energy,
-						slurmdb_report_cluster->
-						consumed_energy,
-						(curr_inx ==field_count));
-					break;
-				default:
-					field->print_routine(
-						field, NULL,
-						(curr_inx == field_count));
-					break;
-				}
-				curr_inx++;
+		while ((slurmdb_report_user = list_next(itr))) {
+			slurmdb_tres_rec_t *tres;
+			itr2 = list_iterator_create(tres_list);
+			while ((tres = list_next(itr2))) {
+				if (tres->id == NO_VAL)
+					continue;
+				_user_top_tres_report(tres,
+					slurmdb_report_cluster,
+					slurmdb_report_user);
 			}
-			list_iterator_reset(itr2);
-			printf("\n");
+			list_iterator_destroy(itr2);
 			count++;
 			if (count >= top_limit)
 				break;
@@ -407,22 +460,15 @@ extern int user_top(int argc, char *argv[])
 		list_iterator_destroy(itr);
 	}
 	list_iterator_destroy(cluster_itr);
+
 end_it:
 	/* group_accts could be set in the set_cond function and needs
 	 * to be cleared here, or anytime _set_cond is called.
 	 */
 	group_accts = 0;
 	slurmdb_destroy_user_cond(user_cond);
-
-	if (slurmdb_report_cluster_list) {
-		list_destroy(slurmdb_report_cluster_list);
-		slurmdb_report_cluster_list = NULL;
-	}
-
-	if (print_fields_list) {
-		list_destroy(print_fields_list);
-		print_fields_list = NULL;
-	}
+	FREE_NULL_LIST(slurmdb_report_cluster_list);
+	FREE_NULL_LIST(print_fields_list);
 
 	return rc;
 }
diff --git a/src/srun/Makefile.in b/src/srun/Makefile.in
index ef70e06c8..6cf563a3d 100644
--- a/src/srun/Makefile.in
+++ b/src/srun/Makefile.in
@@ -105,6 +105,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -113,10 +114,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -129,7 +132,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -294,6 +297,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -343,8 +348,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -363,6 +372,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -406,6 +418,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -429,6 +442,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/srun/libsrun/Makefile.am b/src/srun/libsrun/Makefile.am
index 4011cf4eb..910637ccf 100644
--- a/src/srun/libsrun/Makefile.am
+++ b/src/srun/libsrun/Makefile.am
@@ -3,7 +3,8 @@
 AUTOMAKE_OPTIONS = foreign
 CLEANFILES = core.*
 
-AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/srun/libsrun
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/srun/libsrun \
+	 $(CRAY_JOB_CPPFLAGS)
 
 noinst_LTLIBRARIES = libsrun.la
 
@@ -16,7 +17,8 @@ libsrun_la_SOURCES = \
 	opt.c opt.h \
 	srun_job.c srun_job.h
 libsrun_la_LIBADD = $(DL_LIBS)
-libsrun_la_LDFLAGS  = $(LIB_LDFLAGS) -module --export-dynamic
+libsrun_la_LDFLAGS  = $(LIB_LDFLAGS) -module --export-dynamic \
+	$(CRAY_JOB_LDFLAGS)
 
 force:
 $(convenience_libs) : force
diff --git a/src/srun/libsrun/Makefile.in b/src/srun/libsrun/Makefile.in
index 6baf530fa..1f4016ac3 100644
--- a/src/srun/libsrun/Makefile.in
+++ b/src/srun/libsrun/Makefile.in
@@ -100,6 +100,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -108,10 +109,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -124,7 +127,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -246,6 +249,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -295,8 +300,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -315,6 +324,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -358,6 +370,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -381,6 +394,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -444,7 +458,9 @@ top_builddir = @top_builddir@
 top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 CLEANFILES = core.*
-AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/srun/libsrun
+AM_CPPFLAGS = -I$(top_srcdir) -I$(top_srcdir)/src/srun/libsrun \
+	 $(CRAY_JOB_CPPFLAGS)
+
 noinst_LTLIBRARIES = libsrun.la
 libsrun_la_SOURCES = \
 	allocate.c allocate.h \
@@ -456,7 +472,9 @@ libsrun_la_SOURCES = \
 	srun_job.c srun_job.h
 
 libsrun_la_LIBADD = $(DL_LIBS)
-libsrun_la_LDFLAGS = $(LIB_LDFLAGS) -module --export-dynamic
+libsrun_la_LDFLAGS = $(LIB_LDFLAGS) -module --export-dynamic \
+	$(CRAY_JOB_LDFLAGS)
+
 all: all-am
 
 .SUFFIXES:
diff --git a/src/srun/libsrun/allocate.c b/src/srun/libsrun/allocate.c
index b4d7b3600..f5318afda 100644
--- a/src/srun/libsrun/allocate.c
+++ b/src/srun/libsrun/allocate.c
@@ -47,16 +47,17 @@
 #include <sys/types.h>
 #include <pwd.h>
 
+#include "src/common/env.h"
+#include "src/common/fd.h"
+#include "src/common/forward.h"
 #include "src/common/log.h"
 #include "src/common/macros.h"
 #include "src/common/slurm_auth.h"
 #include "src/common/slurm_protocol_api.h"
+#include "src/common/slurm_time.h"
 #include "src/common/xmalloc.h"
 #include "src/common/xsignal.h"
 #include "src/common/xstring.h"
-#include "src/common/forward.h"
-#include "src/common/env.h"
-#include "src/common/fd.h"
 
 #include "allocate.h"
 #include "opt.h"
@@ -79,7 +80,7 @@ extern uint64_t job_getjid(pid_t pid);
 #define MAX_ALLOC_WAIT	60	/* seconds */
 #define MIN_ALLOC_WAIT	5	/* seconds */
 #define MAX_RETRIES	10
-#define POLL_SLEEP	3	/* retry interval in seconds  */
+#define POLL_SLEEP	0.1	/* retry interval in seconds  */
 
 pthread_mutex_t msg_lock = PTHREAD_MUTEX_INITIALIZER;
 pthread_cond_t msg_cond = PTHREAD_COND_INITIALIZER;
@@ -181,7 +182,7 @@ static void _timeout_handler(srun_timeout_msg_t *msg)
 	if (msg->timeout != last_timeout) {
 		last_timeout = msg->timeout;
 		verbose("job time limit to be reached at %s",
-			slurm_ctime(&msg->timeout));
+			slurm_ctime2(&msg->timeout));
 	}
 }
 
@@ -256,7 +257,8 @@ static int _wait_bluegene_block_ready(resource_allocation_response_msg_t *alloc)
 {
 	int is_ready = 0, i, rc;
 	char *block_id = NULL;
-	int cur_delay = 0;
+	double cur_delay = 0;
+	double cur_sleep = 0;
 	int max_delay = BG_FREE_PREVIOUS_BLOCK + BG_MIN_BLOCK_BOOT +
 		(BG_INCR_BLOCK_BOOT * alloc->node_cnt);
 
@@ -264,15 +266,17 @@ static int _wait_bluegene_block_ready(resource_allocation_response_msg_t *alloc)
 				    SELECT_JOBDATA_BLOCK_ID,
 				    &block_id);
 
-	for (i=0; (cur_delay < max_delay); i++) {
-		if (i == 1)
+	for (i = 0; cur_delay < max_delay; i++) {
+		cur_sleep = POLL_SLEEP * i;
+		if (i == 1) {
 			debug("Waiting for block %s to become ready for job",
 			      block_id);
+		}
 		if (i) {
-			sleep(POLL_SLEEP);
+			usleep(1000000 * cur_sleep);
 			rc = _blocks_dealloc();
 			if ((rc == 0) || (rc == -1))
-				cur_delay += POLL_SLEEP;
+				cur_delay += cur_sleep;
 			debug2("still waiting");
 		}
 
@@ -348,7 +352,8 @@ static int _blocks_dealloc(void)
 static int _wait_nodes_ready(resource_allocation_response_msg_t *alloc)
 {
 	int is_ready = 0, i, rc;
-	int cur_delay = 0;
+	double cur_delay = 0;
+	double cur_sleep = 0;
 	int suspend_time, resume_time, max_delay;
 
 	suspend_time = slurm_get_suspend_timeout();
@@ -360,14 +365,18 @@ static int _wait_nodes_ready(resource_allocation_response_msg_t *alloc)
 
 	pending_job_id = alloc->job_id;
 
-	for (i = 0; (cur_delay < max_delay); i++) {
+	for (i = 0; cur_delay < max_delay; i++) {
 		if (i) {
-			if (i == 1)
-				verbose("Waiting for nodes to boot");
-			else
-				debug("still waiting");
-			sleep(POLL_SLEEP);
-			cur_delay += POLL_SLEEP;
+			cur_sleep = POLL_SLEEP * i;
+			if (i == 1) {
+				verbose("Waiting for nodes to boot (delay looping %d times @ %f secs x index)",
+					max_delay, POLL_SLEEP);
+			} else {
+				debug("Waited %f sec and still waiting: next sleep for %f sec",
+				      cur_delay, cur_sleep);
+			}
+			usleep(1000000 * cur_sleep);
+			cur_delay += cur_sleep;
 		}
 
 		rc = slurm_job_node_ready(alloc->job_id);
@@ -689,7 +698,7 @@ job_desc_msg_create_from_opts (void)
 
 	}
 
-	if (opt.distribution == SLURM_DIST_ARBITRARY
+	if (((opt.distribution & SLURM_DIST_STATE_BASE) == SLURM_DIST_ARBITRARY)
 	   && !j->req_nodes) {
 		error("With Arbitrary distribution you need to "
 		      "specify a nodelist or hostfile with the -w option");
@@ -739,6 +748,8 @@ job_desc_msg_create_from_opts (void)
 
 	if (opt.mail_user)
 		j->mail_user = opt.mail_user;
+	if (opt.burst_buffer)
+		j->burst_buffer = opt.burst_buffer;
 	if (opt.begin)
 		j->begin_time = opt.begin;
 	if (opt.licenses)
@@ -819,13 +830,21 @@ job_desc_msg_create_from_opts (void)
 		j->time_limit          = opt.time_limit;
 	if (opt.time_min != NO_VAL)
 		j->time_min            = opt.time_min;
-	j->shared = opt.shared;
+	if (opt.shared != (uint16_t) NO_VAL)
+		j->shared = opt.shared;
 
 	if (opt.warn_signal)
 		j->warn_signal = opt.warn_signal;
 	if (opt.warn_time)
 		j->warn_time = opt.warn_time;
 
+	if (opt.cpu_freq_min != NO_VAL)
+		j->cpu_freq_min = opt.cpu_freq_min;
+	if (opt.cpu_freq_max != NO_VAL)
+		j->cpu_freq_max = opt.cpu_freq_max;
+	if (opt.cpu_freq_gov != NO_VAL)
+		j->cpu_freq_gov = opt.cpu_freq_gov;
+
 	if (opt.req_switch >= 0)
 		j->req_switch = opt.req_switch;
 	if (opt.wait4switch >= 0)
@@ -841,6 +860,11 @@ job_desc_msg_create_from_opts (void)
 		j->spank_job_env_size = opt.spank_job_env_size;
 	}
 
+	if (opt.power_flags)
+		j->power_flags = opt.power_flags;
+	if (opt.sicp_mode)
+		j->sicp_mode = opt.sicp_mode;
+
 	return j;
 }
 
diff --git a/src/srun/libsrun/launch.c b/src/srun/libsrun/launch.c
index 85f6456e9..b1541ba6e 100644
--- a/src/srun/libsrun/launch.c
+++ b/src/srun/libsrun/launch.c
@@ -162,6 +162,7 @@ extern int launch_common_create_job_step(srun_job_t *job, bool use_all_cpus,
 	int i, rc;
 	unsigned long step_wait = 0, my_sleep = 0;
 	time_t begin_time;
+	uint16_t base_dist;
 
 	if (!job) {
 		error("launch_common_create_job_step: no job given");
@@ -222,7 +223,9 @@ extern int launch_common_create_job_step(srun_job_t *job, bool use_all_cpus,
 		job->ctx_params.cpu_count = opt.ntasks;
 	}
 
-	job->ctx_params.cpu_freq = opt.cpu_freq;
+	job->ctx_params.cpu_freq_min = opt.cpu_freq_min;
+	job->ctx_params.cpu_freq_max = opt.cpu_freq_max;
+	job->ctx_params.cpu_freq_gov = opt.cpu_freq_gov;
 	job->ctx_params.relative = (uint16_t)opt.relative;
 	job->ctx_params.ckpt_interval = (uint16_t)opt.ckpt_interval;
 	job->ctx_params.ckpt_dir = opt.ckpt_dir;
@@ -245,7 +248,7 @@ extern int launch_common_create_job_step(srun_job_t *job, bool use_all_cpus,
 #endif
 	}
 
-	switch (opt.distribution) {
+	switch (opt.distribution & SLURM_DIST_NODESOCKMASK) {
 	case SLURM_DIST_BLOCK:
 	case SLURM_DIST_ARBITRARY:
 	case SLURM_DIST_CYCLIC:
@@ -264,12 +267,20 @@ extern int launch_common_create_job_step(srun_job_t *job, bool use_all_cpus,
 		job->ctx_params.plane_size = opt.plane_size;
 		break;
 	default:
-		job->ctx_params.task_dist = (job->ctx_params.task_count <=
-					     job->ctx_params.min_nodes)
-			? SLURM_DIST_CYCLIC : SLURM_DIST_BLOCK;
+		/* Leave distribution set to unknown if taskcount <= nodes and
+		 * memory is set to 0. step_mgr will handle the 0mem case.
+		 * ex. SallocDefaultCommand=srun -n1 -N1 --mem=0 ... */
+		if (!opt.mem_per_cpu || !opt.pn_min_memory)
+			base_dist = SLURM_DIST_UNKNOWN;
+		else
+			base_dist = (job->ctx_params.task_count <=
+				     job->ctx_params.min_nodes)
+				     ? SLURM_DIST_CYCLIC : SLURM_DIST_BLOCK;
+		opt.distribution &= SLURM_DIST_STATE_FLAGS;
+		opt.distribution |= base_dist;
+		job->ctx_params.task_dist = opt.distribution;
 		if (opt.ntasks_per_node != NO_VAL)
 			job->ctx_params.plane_size = opt.ntasks_per_node;
-		opt.distribution = job->ctx_params.task_dist;
 		break;
 
 	}
diff --git a/src/srun/libsrun/opt.c b/src/srun/libsrun/opt.c
index fd88805cb..5beb47390 100644
--- a/src/srun/libsrun/opt.c
+++ b/src/srun/libsrun/opt.c
@@ -3,6 +3,7 @@
  *****************************************************************************
  *  Copyright (C) 2002-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2010-2015 SchedMD LLC <http://www.schedmd.com>
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Mark Grondona <grondona1@llnl.gov>, et. al.
  *  CODE-OCEC-09-009. All rights reserved.
@@ -67,9 +68,10 @@
 #include <stdio.h>
 #include <stdlib.h>		/* getenv     */
 #include <sys/param.h>		/* MAXPATHLEN */
-#include <unistd.h>
+#include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/utsname.h>
+#include <unistd.h>
 
 #include "src/common/cpu_frequency.h"
 #include "src/common/list.h"
@@ -123,6 +125,9 @@
 #define OPT_TIME_VAL    0x18
 #define OPT_CPU_FREQ    0x19
 #define OPT_CORE_SPEC   0x1a
+#define OPT_SICP        0x1b
+#define OPT_POWER       0x1c
+#define OPT_THREAD_SPEC 0x1d
 #define OPT_PROFILE     0x20
 #define OPT_EXPORT	0x21
 #define OPT_HINT	0x22
@@ -131,7 +136,6 @@
 #define LONG_OPT_HELP        0x100
 #define LONG_OPT_USAGE       0x101
 #define LONG_OPT_XTO         0x102
-#define LONG_OPT_LAUNCH      0x103
 #define LONG_OPT_TIMEO       0x104
 #define LONG_OPT_JOBID       0x105
 #define LONG_OPT_TMP         0x106
@@ -142,8 +146,11 @@
 #define LONG_OPT_GID         0x10b
 #define LONG_OPT_MPI         0x10c
 #define LONG_OPT_RESV_PORTS  0x10d
+#define LONG_OPT_SICP        0x10e
+#define LONG_OPT_POWER       0x10f
 #define LONG_OPT_DEBUG_TS    0x110
 #define LONG_OPT_CONNTYPE    0x111
+#define LONG_OPT_THREAD_SPEC 0x112
 #define LONG_OPT_TEST_ONLY   0x113
 #define LONG_OPT_NETWORK     0x114
 #define LONG_OPT_EXCLUSIVE   0x115
@@ -161,6 +168,8 @@
 #define LONG_OPT_MULTI       0x122
 #define LONG_OPT_COMMENT     0x124
 #define LONG_OPT_QOS             0x127
+#define LONG_OPT_BURST_BUFFER_SPEC  0x128
+#define LONG_OPT_BURST_BUFFER_FILE  0x129
 #define LONG_OPT_SOCKETSPERNODE  0x130
 #define LONG_OPT_CORESPERSOCKET	 0x131
 #define LONG_OPT_THREADSPERCORE  0x132
@@ -198,7 +207,7 @@
 #define LONG_OPT_PROFILE         0x157
 #define LONG_OPT_EXPORT          0x158
 #define LONG_OPT_PRIORITY        0x160
-
+#define LONG_OPT_ACCEL_BIND      0x161
 
 extern char **environ;
 
@@ -236,7 +245,7 @@ static void  _opt_list(void);
 static bool _opt_verify(void);
 
 static void _process_env_var(env_vars_t *e, const char *val);
-
+static char *_read_file(char *fname);
 static bool  _under_parallel_debugger(void);
 static void  _usage(void);
 static bool  _valid_node_list(char **node_list_pptr);
@@ -257,7 +266,7 @@ int initialize_and_process_args(int argc, char *argv[])
 	if (!_opt_verify())
 		exit(error_exit);
 
-	if (_verbose > 3)
+	if (_verbose)
 		_opt_list();
 
 	if (opt.launch_cmd) {
@@ -368,7 +377,7 @@ static void argerror(const char *msg, ...)
 /*
  * _opt_default(): used by initialize_and_process_args to set defaults
  */
-static void _opt_default()
+static void _opt_default(void)
 {
 	char buf[MAXPATHLEN + 1];
 	int i;
@@ -409,6 +418,7 @@ static void _opt_default()
 	opt.cpu_bind = NULL;
 	opt.mem_bind_type = 0;
 	opt.mem_bind = NULL;
+	opt.accel_bind_type = 0;
 	opt.core_spec = (uint16_t) NO_VAL;
 	opt.core_spec_set = false;
 	opt.time_limit = NO_VAL;
@@ -472,7 +482,7 @@ static void _opt_default()
 
 	opt.pn_min_cpus    = NO_VAL;
 	opt.pn_min_memory  = NO_VAL;
-	opt.mem_per_cpu     = NO_VAL;
+	opt.mem_per_cpu    = NO_VAL;
 	opt.pn_min_tmp_disk= NO_VAL;
 
 	opt.hold	    = false;
@@ -482,12 +492,11 @@ static void _opt_default()
 	opt.hostfile	    = NULL;
 	opt.nodelist	    = NULL;
 	opt.exc_nodes	    = NULL;
-	opt.max_launch_time = 120;/* 120 seconds to launch job             */
 	opt.max_exit_timeout= 60; /* Warn user 60 seconds after task exit */
 	/* Default launch msg timeout           */
 	opt.msg_timeout     = slurm_get_msg_timeout();
 
-	for (i=0; i<HIGHEST_DIMENSIONS; i++) {
+	for (i = 0; i < HIGHEST_DIMENSIONS; i++) {
 		opt.conn_type[i]    = (uint16_t) NO_VAL;
 		opt.geometry[i]	    = 0;
 	}
@@ -515,7 +524,6 @@ static void _opt_default()
 	 * Reset some default values if running under a parallel debugger
 	 */
 	if ((opt.parallel_debug = _under_parallel_debugger())) {
-		opt.max_launch_time = 120;
 		opt.max_threads     = 1;
 		pmi_server_max_threads(opt.max_threads);
 		opt.msg_timeout     = 15;
@@ -524,7 +532,9 @@ static void _opt_default()
 	opt.pty = false;
 	opt.open_mode = 0;
 	opt.acctg_freq = NULL;
-	opt.cpu_freq = NO_VAL;
+	opt.cpu_freq_min = NO_VAL;
+	opt.cpu_freq_max = NO_VAL;
+	opt.cpu_freq_gov = NO_VAL;
 	opt.reservation = NULL;
 	opt.wckey = NULL;
 	opt.req_switch = -1;
@@ -534,6 +544,8 @@ static void _opt_default()
 
 	opt.nice = 0;
 	opt.priority = 0;
+	opt.sicp_mode = 0;
+	opt.power_flags = 0;
 }
 
 /*---[ env var processing ]-----------------------------------------------*/
@@ -559,6 +571,7 @@ env_vars_t env_vars[] = {
 {"SLURM_ACCOUNT",       OPT_STRING,     &opt.account,       NULL             },
 {"SLURM_ACCTG_FREQ",    OPT_STRING,     &opt.acctg_freq,    NULL             },
 {"SLURM_BLRTS_IMAGE",   OPT_STRING,     &opt.blrtsimage,    NULL             },
+{"SLURM_BURST_BUFFER",  OPT_STRING,     &opt.burst_buffer,  NULL             },
 {"SLURM_CHECKPOINT",    OPT_STRING,     &opt.ckpt_interval_str, NULL         },
 {"SLURM_CHECKPOINT_DIR",OPT_STRING,     &opt.ckpt_dir,      NULL             },
 {"SLURM_CNLOAD_IMAGE",  OPT_STRING,     &opt.linuximage,    NULL             },
@@ -603,14 +616,17 @@ env_vars_t env_vars[] = {
 {"SLURM_OPEN_MODE",     OPT_OPEN_MODE,  NULL,               NULL             },
 {"SLURM_OVERCOMMIT",    OPT_OVERCOMMIT, NULL,               NULL             },
 {"SLURM_PARTITION",     OPT_STRING,     &opt.partition,     NULL             },
+{"SLURM_POWER",         OPT_POWER,      NULL,               NULL             },
 {"SLURM_PROFILE",       OPT_PROFILE,    NULL,               NULL             },
 {"SLURM_PROLOG",        OPT_STRING,     &opt.prolog,        NULL             },
 {"SLURM_QOS",           OPT_STRING,     &opt.qos,           NULL             },
 {"SLURM_RAMDISK_IMAGE", OPT_STRING,     &opt.ramdiskimage,  NULL             },
 {"SLURM_REMOTE_CWD",    OPT_STRING,     &opt.cwd,           NULL             },
+{"SLURM_REQ_SWITCH",    OPT_INT,        &opt.req_switch,    NULL             },
 {"SLURM_RESERVATION",   OPT_STRING,     &opt.reservation,   NULL             },
 {"SLURM_RESTART_DIR",   OPT_STRING,     &opt.restart_dir ,  NULL             },
 {"SLURM_RESV_PORTS",    OPT_RESV_PORTS, NULL,               NULL             },
+{"SLURM_SICP",          OPT_SICP,       NULL,               NULL             },
 {"SLURM_SIGNAL",        OPT_SIGNAL,     NULL,               NULL             },
 {"SLURM_SRUN_MULTI",    OPT_MULTI,      NULL,               NULL             },
 {"SLURM_STDERRMODE",    OPT_STRING,     &opt.efname,        NULL             },
@@ -618,14 +634,14 @@ env_vars_t env_vars[] = {
 {"SLURM_STDOUTMODE",    OPT_STRING,     &opt.ofname,        NULL             },
 {"SLURM_TASK_EPILOG",   OPT_STRING,     &opt.task_epilog,   NULL             },
 {"SLURM_TASK_PROLOG",   OPT_STRING,     &opt.task_prolog,   NULL             },
+{"SLURM_THREAD_SPEC",   OPT_THREAD_SPEC,NULL,               NULL             },
 {"SLURM_THREADS",       OPT_INT,        &opt.max_threads,   NULL             },
 {"SLURM_TIMELIMIT",     OPT_STRING,     &opt.time_limit_str,NULL             },
 {"SLURM_UNBUFFEREDIO",  OPT_INT,        &opt.unbuffered,    NULL             },
 {"SLURM_WAIT",          OPT_INT,        &opt.max_wait,      NULL             },
+{"SLURM_WAIT4SWITCH",   OPT_TIME_VAL,   NULL,               NULL             },
 {"SLURM_WCKEY",         OPT_STRING,     &opt.wckey,         NULL             },
 {"SLURM_WORKING_DIR",   OPT_STRING,     &opt.cwd,           &opt.cwd_set     },
-{"SLURM_REQ_SWITCH",    OPT_INT,        &opt.req_switch,    NULL             },
-{"SLURM_WAIT4SWITCH",   OPT_TIME_VAL,   NULL,               NULL             },
 {NULL, 0, NULL, NULL}
 };
 
@@ -635,7 +651,7 @@ env_vars_t env_vars[] = {
  *            environment variables. See comments above for how to
  *            extend srun to process different vars
  */
-static void _opt_env()
+static void _opt_env(void)
 {
 	char       *val = NULL;
 	env_vars_t *e   = env_vars;
@@ -665,7 +681,7 @@ _process_env_var(env_vars_t *e, const char *val)
 		*((char **) e->arg) = xstrdup(val);
 		break;
 	case OPT_INT:
-		if (val != NULL) {
+		if (val[0] != '\0') {
 			*((int *) e->arg) = (int) strtol(val, &end, 10);
 			if (!(end && *end == '\0')) {
 				error("%s=%s invalid. ignoring...",
@@ -692,7 +708,8 @@ _process_env_var(env_vars_t *e, const char *val)
 		break;
 
 	case OPT_CPU_FREQ:
-		if (cpu_freq_verify_param(val, &opt.cpu_freq))
+		if (cpu_freq_verify_cmdline(val, &opt.cpu_freq_min,
+				&opt.cpu_freq_max, &opt.cpu_freq_gov))
 			error("Invalid --cpu-freq argument: %s. Ignored", val);
 		break;
 	case OPT_HINT:
@@ -729,8 +746,15 @@ _process_env_var(env_vars_t *e, const char *val)
 		break;
 
 	case OPT_EXCLUSIVE:
-		opt.exclusive = true;
-		opt.shared = 0;
+		if (val[0] == '\0') {
+			opt.exclusive = true;
+			opt.shared = 0;
+		} else if (!strcasecmp(val, "user")) {
+			opt.shared = 2;
+		} else {
+			error("\"%s=%s\" -- invalid value, ignoring...",
+			      e->var, val);
+		}
 		break;
 
 	case OPT_EXPORT:
@@ -802,6 +826,16 @@ _process_env_var(env_vars_t *e, const char *val)
 	case OPT_PROFILE:
 		opt.profile = acct_gather_profile_from_string((char *)val);
 		break;
+	case OPT_POWER:
+		opt.power_flags = power_flags_id((char *)val);
+		break;
+	case OPT_SICP:
+		opt.sicp_mode = 1;
+		break;
+	case OPT_THREAD_SPEC:
+		opt.core_spec = _get_int(val, "thread_spec", true) |
+					 CORE_SPEC_THREAD;
+		break;
 	default:
 		/* do nothing */
 		break;
@@ -817,20 +851,7 @@ _process_env_var(env_vars_t *e, const char *val)
 static int
 _get_int(const char *arg, const char *what, bool positive)
 {
-	char *p;
-	long int result = strtol(arg, &p, 10);
-
-	if ((*p != '\0') || (result < 0L)
-	||  (positive && (result <= 0L))) {
-		error ("Invalid numeric value \"%s\" for %s.", arg, what);
-		exit(error_exit);
-	} else if (result > INT_MAX) {
-		error ("Numeric argument (%ld) to big for %s.", result, what);
-	} else if (result < INT_MIN) {
-		error ("Numeric argument %ld to small for %s.", result, what);
-	}
-
-	return (int) result;
+	return parse_int(what, arg, positive);
 }
 
 static void _set_options(const int argc, char **argv)
@@ -879,7 +900,10 @@ static void _set_options(const int argc, char **argv)
 		{"exclude",       required_argument, 0, 'x'},
 		{"disable-status", no_argument,      0, 'X'},
 		{"no-allocate",   no_argument,       0, 'Z'},
+		{"accel-bind",       required_argument, 0, LONG_OPT_ACCEL_BIND},
 		{"acctg-freq",       required_argument, 0, LONG_OPT_ACCTG_FREQ},
+		{"bb",               required_argument, 0, LONG_OPT_BURST_BUFFER_SPEC},
+		{"bbf",              required_argument, 0, LONG_OPT_BURST_BUFFER_FILE},
 		{"begin",            required_argument, 0, LONG_OPT_BEGIN},
 		{"blrts-image",      required_argument, 0, LONG_OPT_BLRTS_IMAGE},
 		{"checkpoint",       required_argument, 0, LONG_OPT_CHECKPOINT},
@@ -893,7 +917,7 @@ static void _set_options(const int argc, char **argv)
 		{"cpu-freq",         required_argument, 0, LONG_OPT_CPU_FREQ},
 		{"debugger-test",    no_argument,       0, LONG_OPT_DEBUG_TS},
 		{"epilog",           required_argument, 0, LONG_OPT_EPILOG},
-		{"exclusive",        no_argument,       0, LONG_OPT_EXCLUSIVE},
+		{"exclusive",        optional_argument, 0, LONG_OPT_EXCLUSIVE},
 		{"export",           required_argument, 0, LONG_OPT_EXPORT},
 		{"get-user-env",     optional_argument, 0, LONG_OPT_GET_USER_ENV},
 		{"gid",              required_argument, 0, LONG_OPT_GID},
@@ -904,11 +928,10 @@ static void _set_options(const int argc, char **argv)
 		{"jobid",            required_argument, 0, LONG_OPT_JOBID},
 		{"linux-image",      required_argument, 0, LONG_OPT_LINUX_IMAGE},
 		{"launch-cmd",       no_argument,       0, LONG_OPT_LAUNCH_CMD},
-		{"launcher-opts",      required_argument, 0, LONG_OPT_LAUNCHER_OPTS},
+		{"launcher-opts",    required_argument, 0, LONG_OPT_LAUNCHER_OPTS},
 		{"mail-type",        required_argument, 0, LONG_OPT_MAIL_TYPE},
 		{"mail-user",        required_argument, 0, LONG_OPT_MAIL_USER},
 		{"max-exit-timeout", required_argument, 0, LONG_OPT_XTO},
-		{"max-launch-time",  required_argument, 0, LONG_OPT_LAUNCH},
 		{"mem",              required_argument, 0, LONG_OPT_MEM},
 		{"mem-per-cpu",      required_argument, 0, LONG_OPT_MEM_PER_CPU},
 		{"mem_bind",         required_argument, 0, LONG_OPT_MEM_BIND},
@@ -926,6 +949,7 @@ static void _set_options(const int argc, char **argv)
 		{"ntasks-per-node",  required_argument, 0, LONG_OPT_NTASKSPERNODE},
 		{"ntasks-per-socket",required_argument, 0, LONG_OPT_NTASKSPERSOCKET},
 		{"open-mode",        required_argument, 0, LONG_OPT_OPEN_MODE},
+		{"power",            required_argument, 0, LONG_OPT_POWER},
 		{"priority",         required_argument, 0, LONG_OPT_PRIORITY},
 		{"profile",          required_argument, 0, LONG_OPT_PROFILE},
 		{"prolog",           required_argument, 0, LONG_OPT_PROLOG},
@@ -938,6 +962,7 @@ static void _set_options(const int argc, char **argv)
 		{"restart-dir",      required_argument, 0, LONG_OPT_RESTART_DIR},
 		{"resv-ports",       optional_argument, 0, LONG_OPT_RESV_PORTS},
 		{"runjob-opts",      required_argument, 0, LONG_OPT_LAUNCHER_OPTS},
+		{"sicp",             optional_argument, 0, LONG_OPT_SICP},
 		{"signal",	     required_argument, 0, LONG_OPT_SIGNAL},
 		{"slurmd-debug",     required_argument, 0, LONG_OPT_DEBUG_SLURMD},
 		{"sockets-per-node", required_argument, 0, LONG_OPT_SOCKETSPERNODE},
@@ -946,6 +971,7 @@ static void _set_options(const int argc, char **argv)
 		{"task-prolog",      required_argument, 0, LONG_OPT_TASK_PROLOG},
 		{"tasks-per-node",   required_argument, 0, LONG_OPT_NTASKSPERNODE},
 		{"test-only",        no_argument,       0, LONG_OPT_TEST_ONLY},
+		{"thread-spec",      required_argument, 0, LONG_OPT_THREAD_SPEC},
 		{"time-min",         required_argument, 0, LONG_OPT_TIME_MIN},
 		{"threads-per-core", required_argument, 0, LONG_OPT_THREADSPERCORE},
 		{"tmp",              required_argument, 0, LONG_OPT_TMP},
@@ -958,6 +984,7 @@ static void _set_options(const int argc, char **argv)
 		"o:Op:P:qQr:RsS:t:T:uU:vVw:W:x:XZ";
 	char *pos_delimit;
 	bool ntasks_set_opt = false;
+	bool nodes_set_opt = false;
 
 #ifdef HAVE_PTY_H
 	char *tmp_str;
@@ -974,8 +1001,8 @@ static void _set_options(const int argc, char **argv)
 	else
 		error("opt.progname is already set.");
 	optind = 0;
-	while((opt_char = getopt_long(argc, argv, opt_string,
-				      optz, &option_index)) != -1) {
+	while ((opt_char = getopt_long(argc, argv, opt_string,
+				       optz, &option_index)) != -1) {
 		switch (opt_char) {
 
 		case (int)'?':
@@ -1108,6 +1135,7 @@ static void _set_options(const int argc, char **argv)
 				_get_int(optarg, "number of tasks", true);
 			break;
 		case (int)'N':
+			nodes_set_opt = true;
 			opt.nodes_set_opt =
 				get_resource_arg_range( optarg,
 							"requested node count",
@@ -1162,8 +1190,7 @@ static void _set_options(const int argc, char **argv)
 			opt.shared = 1;
 			break;
 		case (int)'S':
-			opt.core_spec = _get_int(optarg, "core_spec",
-				false);
+			opt.core_spec = _get_int(optarg, "core_spec", false);
 			opt.core_spec_set = true;
 			break;
 		case (int)'t':
@@ -1211,8 +1238,15 @@ static void _set_options(const int argc, char **argv)
 			opt.contiguous = true;
 			break;
                 case LONG_OPT_EXCLUSIVE:
-			opt.exclusive = true;
-                        opt.shared = 0;
+			if (optarg == NULL) {
+				opt.exclusive = true;
+				opt.shared = 0;
+			} else if (!strcasecmp(optarg, "user")) {
+				opt.shared = 2;
+			} else {
+				error("invalid exclusive option %s", optarg);
+				exit(error_exit);
+			}
                         break;
 		case LONG_OPT_EXPORT:
 			xfree(opt.export_env);
@@ -1269,6 +1303,7 @@ static void _set_options(const int argc, char **argv)
 			break;
 		case LONG_OPT_MEM:
 			opt.pn_min_memory = (int) str_to_mbytes(optarg);
+			opt.mem_per_cpu = NO_VAL;
 			if (opt.pn_min_memory < 0) {
 				error("invalid memory constraint %s",
 				      optarg);
@@ -1277,6 +1312,7 @@ static void _set_options(const int argc, char **argv)
 			break;
 		case LONG_OPT_MEM_PER_CPU:
 			opt.mem_per_cpu = (int) str_to_mbytes(optarg);
+			opt.pn_min_memory = NO_VAL;
 			if (opt.mem_per_cpu < 0) {
 				error("invalid memory constraint %s",
 				      optarg);
@@ -1316,10 +1352,6 @@ static void _set_options(const int argc, char **argv)
 			opt.msg_timeout =
 				_get_int(optarg, "msg-timeout", true);
 			break;
-		case LONG_OPT_LAUNCH:
-			opt.max_launch_time =
-				_get_int(optarg, "max-launch-time", true);
-			break;
 		case LONG_OPT_XTO:
 			opt.max_exit_timeout =
 				_get_int(optarg, "max-exit-timeout", true);
@@ -1356,7 +1388,6 @@ static void _set_options(const int argc, char **argv)
 			/* make other parameters look like debugger
 			 * is really attached */
 			opt.parallel_debug   = true;
-			opt.max_launch_time = 120;
 			opt.max_threads     = 1;
 			pmi_server_max_threads(opt.max_threads);
 			opt.msg_timeout     = 15;
@@ -1395,6 +1426,14 @@ static void _set_options(const int argc, char **argv)
 			xfree(opt.epilog);
 			opt.epilog = xstrdup(optarg);
 			break;
+		case LONG_OPT_BURST_BUFFER_SPEC:
+			xfree(opt.burst_buffer);
+			opt.burst_buffer = xstrdup(optarg);
+			break;
+		case LONG_OPT_BURST_BUFFER_FILE:
+			xfree(opt.burst_buffer);
+			opt.burst_buffer = _read_file(optarg);
+			break;
 		case LONG_OPT_BEGIN:
 			opt.begin = parse_time(optarg, 0);
 			if (errno == ESLURM_INVALID_TIME_VALUE) {
@@ -1494,16 +1533,17 @@ static void _set_options(const int argc, char **argv)
 				opt.threads_per_core = NO_VAL;
 			break;
 		case LONG_OPT_NTASKSPERNODE:
-			opt.ntasks_per_node = _get_int(optarg, "ntasks-per-node",
-				true);
+			opt.ntasks_per_node = _get_int(optarg,
+						       "ntasks-per-node", true);
 			break;
 		case LONG_OPT_NTASKSPERSOCKET:
 			opt.ntasks_per_socket = _get_int(optarg,
-				"ntasks-per-socket", true);
+							 "ntasks-per-socket",
+							 true);
 			break;
 		case LONG_OPT_NTASKSPERCORE:
-			opt.ntasks_per_core = _get_int(optarg, "ntasks-per-core",
-				true);
+			opt.ntasks_per_core = _get_int(optarg,
+						       "ntasks-per-core", true);
 			break;
 		case LONG_OPT_HINT:
 			/* Keep after other options filled in */
@@ -1579,11 +1619,6 @@ static void _set_options(const int argc, char **argv)
 			xfree(opt.acctg_freq);
 			opt.acctg_freq = xstrdup(optarg);
 			break;
-		case LONG_OPT_CPU_FREQ:
-		        if (cpu_freq_verify_param(optarg, &opt.cpu_freq))
-				error("Invalid --cpu-freq argument: %s. Ignored",
-				      optarg);
-			break;
 		case LONG_OPT_WCKEY:
 			xfree(opt.wckey);
 			opt.wckey = xstrdup(optarg);
@@ -1628,6 +1663,12 @@ static void _set_options(const int argc, char **argv)
 			xfree(opt.gres);
 			opt.gres = xstrdup(optarg);
 			break;
+		case LONG_OPT_CPU_FREQ:
+		        if (cpu_freq_verify_cmdline(optarg, &opt.cpu_freq_min,
+					&opt.cpu_freq_max, &opt.cpu_freq_gov))
+				error("Invalid --cpu-freq argument: %s. "
+						"Ignored", optarg);
+			break;
 		case LONG_OPT_REQ_SWITCH:
 			pos_delimit = strstr(optarg,"@");
 			if (pos_delimit != NULL) {
@@ -1635,8 +1676,27 @@ static void _set_options(const int argc, char **argv)
 				pos_delimit++;
 				opt.wait4switch = time_str2secs(pos_delimit);
 			}
-			opt.req_switch = _get_int(optarg, "switches",
-				true);
+			opt.req_switch = _get_int(optarg, "switches", true);
+			break;
+		case LONG_OPT_POWER:
+			opt.power_flags = power_flags_id(optarg);
+			break;
+		case LONG_OPT_SICP:
+			opt.sicp_mode = 1;
+			break;
+		case LONG_OPT_THREAD_SPEC:
+			opt.core_spec = _get_int(optarg, "thread_spec", true) |
+				CORE_SPEC_THREAD;
+			break;
+		case LONG_OPT_ACCEL_BIND:
+			if (strchr(optarg, 'v'))
+				opt.accel_bind_type |= ACCEL_BIND_VERBOSE;
+			if (strchr(optarg, 'g'))
+				opt.accel_bind_type |= ACCEL_BIND_CLOSEST_GPU;
+			if (strchr(optarg, 'm'))
+				opt.accel_bind_type |= ACCEL_BIND_CLOSEST_MIC;
+			if (strchr(optarg, 'n'))
+				opt.accel_bind_type |= ACCEL_BIND_CLOSEST_NIC;
 			break;
 		default:
 			if (spank_process_option (opt_char, optarg) < 0) {
@@ -1648,9 +1708,13 @@ static void _set_options(const int argc, char **argv)
 	/* This means --ntasks was read from the environment.  We will override
 	 * it with what the user specified in the hostlist. POE launched
 	 * jobs excluded (they have the SLURM_STARTED_STEP env var set). */
-	if (!ntasks_set_opt && (opt.distribution == SLURM_DIST_ARBITRARY) &&
-	    !getenv("SLURM_STARTED_STEP"))
-		opt.ntasks_set = false;
+	if (((opt.distribution & SLURM_DIST_STATE_BASE) == SLURM_DIST_ARBITRARY)
+	    && !getenv("SLURM_STARTED_STEP")) {
+		if (!ntasks_set_opt)
+			opt.ntasks_set = false;
+		if (!nodes_set_opt)
+			opt.nodes_set = false;
+	}
 
 	spank_option_table_destroy (optz);
 }
@@ -1662,6 +1726,8 @@ static void _opt_args(int argc, char **argv)
 {
 	int i, command_pos = 0, command_args = 0;
 	char **rest = NULL;
+	char *fullpath, *launch_params;
+	bool test_exec = false;
 
 	_set_options(argc, argv);
 
@@ -1673,31 +1739,6 @@ static void _opt_args(int argc, char **argv)
 		}
 	}
 
-        /* Check to see if user has specified enough resources to
-	 * satisfy the plane distribution with the specified
-	 * plane_size.
-	 * if (n/plane_size < N) and ((N-1) * plane_size >= n) -->
-	 * problem Simple check will not catch all the problem/invalid
-	 * cases.
-	 * The limitations of the plane distribution in the cons_res
-	 * environment are more extensive and are documented in the
-	 * SLURM reference guide.  */
-	if (opt.distribution == SLURM_DIST_PLANE && opt.plane_size) {
-		if ((opt.ntasks/opt.plane_size) < opt.min_nodes) {
-			if (((opt.min_nodes-1)*opt.plane_size) >= opt.ntasks) {
-#if (0)
-				info("Too few processes ((n/plane_size) %d < N %d) "
-				     "and ((N-1)*(plane_size) %d >= n %d)) ",
-				     opt.ntasks/opt.plane_size, opt.min_nodes,
-				     (opt.min_nodes-1)*opt.plane_size, opt.ntasks);
-#endif
-				error("Too few processes for the requested "
-				      "{plane,node} distribution");
-				exit(error_exit);
-			}
-		}
-	}
-
 	if (opt.pty) {
 		char *launch_type = slurm_get_launch_type();
 		if (strcmp(launch_type, "launch/slurm")) {
@@ -1788,21 +1829,36 @@ static void _opt_args(int argc, char **argv)
 	}
 	opt.argv[i] = NULL;	/* End of argv's (for possible execv) */
 
+	if (getenv("SLURM_TEST_EXEC")) {
+		test_exec = true;
+	} else {
+		launch_params = slurm_get_launch_params();
+		if (launch_params && strstr(launch_params, "test_exec"))
+			test_exec = true;
+		xfree(launch_params);
+	}
 #if defined HAVE_BG && !defined HAVE_BG_L_P
 	/* BGQ's runjob command required a fully qualified path */
 	if (!launch_g_handle_multi_prog_verify(command_pos) &&
 	    (opt.argc > command_pos)) {
-		char *fullpath;
-
 		if ((fullpath = search_path(opt.cwd,
 					    opt.argv[command_pos],
-					    false, X_OK))) {
+					    false, X_OK, test_exec))) {
 			xfree(opt.argv[command_pos]);
 			opt.argv[command_pos] = fullpath;
 		}
 	}
 #else
 	(void) launch_g_handle_multi_prog_verify(command_pos);
+	if (test_exec) {
+		if ((fullpath = search_path(opt.cwd, opt.argv[command_pos],
+					    false, X_OK, test_exec))) {
+			xfree(opt.argv[command_pos]);
+			opt.argv[command_pos] = fullpath;
+		} else {
+			fatal("Can not execute %s", opt.argv[command_pos]);
+		}
+	}
 #endif
 
 #if 0
@@ -1877,7 +1933,8 @@ static bool _opt_verify(void)
 				xfree(opt.nodelist);
 				opt.nodelist = add_slash;
 			}
-			opt.distribution = SLURM_DIST_ARBITRARY;
+			opt.distribution &= SLURM_DIST_STATE_FLAGS;
+			opt.distribution |= SLURM_DIST_ARBITRARY;
 			opt.hostfile = xstrdup(opt.nodelist);
 			if (!_valid_node_list(&opt.nodelist)) {
 				error("Failure getting NodeNames from "
@@ -1897,7 +1954,7 @@ static bool _opt_verify(void)
 
 	/* set up the proc and node counts based on the arbitrary list
 	   of nodes */
-	if ((opt.distribution == SLURM_DIST_ARBITRARY)
+	if (((opt.distribution & SLURM_DIST_STATE_BASE) == SLURM_DIST_ARBITRARY)
 	   && (!opt.nodes_set || !opt.ntasks_set)) {
 		hostlist_t hl = hostlist_create(opt.nodelist);
 		if (!opt.ntasks_set) {
@@ -1917,8 +1974,8 @@ static bool _opt_verify(void)
 	 * nodelist but only if it isn't arbitrary since the user has
 	 * laid it out how it should be so don't mess with it print an
 	 * error later if it doesn't work the way they wanted */
-	if (opt.max_nodes && opt.nodelist
-	   && opt.distribution != SLURM_DIST_ARBITRARY) {
+	if (opt.max_nodes && opt.nodelist &&
+	    ((opt.distribution & SLURM_DIST_STATE_BASE)!=SLURM_DIST_ARBITRARY)) {
 		hostlist_t hl = hostlist_create(opt.nodelist);
 		int count = hostlist_count(hl);
 		if (count > opt.max_nodes) {
@@ -2072,8 +2129,8 @@ static bool _opt_verify(void)
 				error("memory allocation failure");
 				exit(error_exit);
 			}
-			if (opt.distribution == SLURM_DIST_ARBITRARY
-			   && !opt.ntasks_set) {
+			if (((opt.distribution & SLURM_DIST_STATE_BASE) ==
+			     SLURM_DIST_ARBITRARY) && !opt.ntasks_set) {
 				opt.ntasks = hostlist_count(hl);
 				opt.ntasks_set = true;
 			}
@@ -2183,8 +2240,6 @@ static bool _opt_verify(void)
 		mpi_type = slurm_get_mpi_default();
 		(void) mpi_hook_client_init(NULL);
 	}
-	if ((opt.resv_port_cnt == NO_VAL) && !strcmp(mpi_type, "openmpi"))
-		opt.resv_port_cnt = 0;
 	xfree(mpi_type);
 
 	return verified;
@@ -2200,7 +2255,7 @@ extern void init_spank_env(void)
 	if (environ == NULL)
 		return;
 
-	for (i=0; environ[i]; i++) {
+	for (i = 0; environ[i]; i++) {
 		if (strncmp(environ[i], "SLURM_SPANK_", 12))
 			continue;
 		name = xstrdup(environ[i] + 12);
@@ -2234,7 +2289,7 @@ extern char *spank_get_job_env(const char *name)
 	xstrcat(tmp_str, "=");
 	len = strlen(tmp_str);
 
-	for (i=0; i<opt.spank_job_env_size; i++) {
+	for (i = 0; i < opt.spank_job_env_size; i++) {
 		if (strncmp(opt.spank_job_env[i], tmp_str, len))
 			continue;
 		xfree(tmp_str);
@@ -2261,7 +2316,7 @@ extern int   spank_set_job_env(const char *name, const char *value,
 	len = strlen(tmp_str);
 	xstrcat(tmp_str, value);
 
-	for (i=0; i<opt.spank_job_env_size; i++) {
+	for (i = 0; i < opt.spank_job_env_size; i++) {
 		if (strncmp(opt.spank_job_env[i], tmp_str, len))
 			continue;
 		if (overwrite) {
@@ -2294,11 +2349,11 @@ extern int   spank_unset_job_env(const char *name)
 	xstrcat(tmp_str, "=");
 	len = strlen(tmp_str);
 
-	for (i=0; i<opt.spank_job_env_size; i++) {
+	for (i = 0; i < opt.spank_job_env_size; i++) {
 		if (strncmp(opt.spank_job_env[i], tmp_str, len))
 			continue;
 		xfree(opt.spank_job_env[i]);
-		for (j=(i+1); j<opt.spank_job_env_size; i++, j++)
+		for (j = (i+1); j < opt.spank_job_env_size; i++, j++)
 			opt.spank_job_env[i] = opt.spank_job_env[j];
 		opt.spank_job_env_size--;
 		if (opt.spank_job_env_size == 0)
@@ -2313,7 +2368,7 @@ extern int   spank_unset_job_env(const char *name)
  *
  * warning: returns pointer to memory allocated on the stack.
  */
-static char *print_constraints()
+static char *print_constraints(void)
 {
 	char *buf = xstrdup("");
 
@@ -2375,17 +2430,20 @@ static void _opt_list(void)
 	     acct_gather_profile_to_string(opt.profile));
 	info("job name       : `%s'", opt.job_name);
 	info("reservation    : `%s'", opt.reservation);
+	info("burst_buffer   : `%s'", opt.burst_buffer);
 	info("wckey          : `%s'", opt.wckey);
+	info("cpu_freq_min   : %u", opt.cpu_freq_min);
+	info("cpu_freq_max   : %u", opt.cpu_freq_max);
+	info("cpu_freq_gov   : %u", opt.cpu_freq_gov);
 	info("switches       : %d", opt.req_switch);
 	info("wait-for-switches : %d", opt.wait4switch);
 	info("distribution   : %s", format_task_dist_states(opt.distribution));
-	if (opt.distribution == SLURM_DIST_PLANE)
+	if ((opt.distribution & SLURM_DIST_STATE_BASE) == SLURM_DIST_PLANE)
 		info("plane size   : %u", opt.plane_size);
 	info("cpu_bind       : %s",
 	     opt.cpu_bind == NULL ? "default" : opt.cpu_bind);
 	info("mem_bind       : %s",
 	     opt.mem_bind == NULL ? "default" : opt.mem_bind);
-	info("cpu_freq       : %u", opt.cpu_freq);
 	info("verbose        : %d", _verbose);
 	info("slurmd_debug   : %d", opt.slurmd_debug);
 	if (opt.immediate <= 1)
@@ -2476,15 +2534,57 @@ static void _opt_list(void)
 	info("ntasks-per-socket : %d", opt.ntasks_per_socket);
 	info("ntasks-per-core   : %d", opt.ntasks_per_core);
 	info("plane_size        : %u", opt.plane_size);
-	info("core-spec         : %d", opt.core_spec);
+	if (opt.core_spec == (uint16_t) NO_VAL)
+		info("core-spec         : NA");
+	else if (opt.core_spec & CORE_SPEC_THREAD) {
+		info("thread-spec       : %d",
+		     opt.core_spec & (~CORE_SPEC_THREAD));
+	} else
+		info("core-spec         : %d", opt.core_spec);
 	if (opt.resv_port_cnt != NO_VAL)
 		info("resv_port_cnt     : %d", opt.resv_port_cnt);
+	info("power             : %s", power_flags_str(opt.power_flags));
+	info("sicp              : %u", opt.sicp_mode);
 	str = print_commandline(opt.argc, opt.argv);
 	info("remote command    : `%s'", str);
 	xfree(str);
 
 }
 
+/* Read specified file's contents into a buffer.
+ * Caller must xfree the buffer's contents */
+static char *_read_file(char *fname)
+{
+	int fd, i, offset = 0;
+	struct stat stat_buf;
+	char *file_buf;
+
+	fd = open(fname, O_RDONLY);
+	if (fd < 0) {
+		fatal("Could not open burst buffer specification file %s: %m",
+		      fname);
+	}
+	if (fstat(fd, &stat_buf) < 0) {
+		fatal("Could not stat burst buffer specification file %s: %m",
+		      fname);
+	}
+	file_buf = xmalloc(stat_buf.st_size);
+	while (stat_buf.st_size > offset) {
+		i = read(fd, file_buf + offset, stat_buf.st_size - offset);
+		if (i < 0) {
+			if (errno == EAGAIN)
+				continue;
+			fatal("Could not read burst buffer specification "
+			      "file %s: %m", fname);
+		}
+		if (i == 0)
+			break;	/* EOF */
+		offset += i;
+	}
+	close(fd);
+	return file_buf;
+}
+
 /* Determine if srun is under the control of a parallel debugger or not */
 static bool _under_parallel_debugger (void)
 {
@@ -2536,10 +2636,13 @@ static void _usage(void)
 "            [--prolog=fname] [--epilog=fname]\n"
 "            [--task-prolog=fname] [--task-epilog=fname]\n"
 "            [--ctrl-comm-ifhn=addr] [--multi-prog]\n"
-"            [--switches=max-switches{@max-time-to-wait}]\n"
-"            [--core-spec=cores] [--reboot]\n"
-"            [-w hosts...] [-x hosts...] executable [args...]\n"
-"            [--acctg-freq=<datatype>=<interval>\n");
+"            [--cpu-freq=min[-max[:gov]] [--sicp] [--power=flags]\n"
+"            [--switches=max-switches{@max-time-to-wait}] [--reboot]\n"
+"            [--core-spec=cores] [--thread-spec=threads]\n"
+"            [--bb=burst_buffer_spec] [--bbf=burst_buffer_file]\n"
+"            [--acctg-freq=<datatype>=<interval>\n"
+"            [-w hosts...] [-x hosts...] executable [args...]\n");
+
 }
 
 static void _help(void)
@@ -2551,16 +2654,19 @@ static void _help(void)
 "\n"
 "Parallel run options:\n"
 "  -A, --account=name          charge job to specified account\n"
-"      --begin=time            defer job until HH:MM MM/DD/YY\n"
 "      --acctg-freq=<datatype>=<interval> accounting and profiling sampling\n"
 "                              intervals. Supported datatypes:\n"
 "                              task=<interval> energy=<interval>\n"
 "                              network=<interval> filesystem=<interval>\n"
+"      --bb=<spec>             burst buffer specifications\n"
+"      --bbf=<file_name>       burst buffer specification file\n"
+"      --begin=time            defer job until HH:MM MM/DD/YY\n"
 "  -c, --cpus-per-task=ncpus   number of cpus required per task\n"
 "      --checkpoint=time       job step checkpoint interval\n"
 "      --checkpoint-dir=dir    directory to store job step checkpoint image \n"
 "                              files\n"
 "      --comment=name          arbitrary comment\n"
+"      --cpu-freq=min[-max[:gov]] requested cpu frequency (and governor)\n"
 "  -d, --dependency=type:jobid defer job until condition on jobid is satisfied\n"
 "  -D, --chdir=path            change remote current working directory\n"
 "      --export=env_vars|NONE  environment variables passed to launcher with\n"
@@ -2599,6 +2705,7 @@ static void _help(void)
 "  -o, --output=out            location of stdout redirection\n"
 "  -O, --overcommit            overcommit resources\n"
 "  -p, --partition=partition   partition requested\n"
+"      --power=flags           power management options\n"
 "      --priority=value        set the priority of the job to value\n"
 "      --prolog=program        run \"program\" before launching job step\n"
 "      --profile=value         enable acct_gather_profile for detailed data\n"
@@ -2616,6 +2723,8 @@ static void _help(void)
 "      --restart-dir=dir       directory of checkpoint image files to restart\n"
 "                              from\n"
 "  -s, --share                 share nodes with other jobs\n"
+"      --sicp                  If specified, signifies job is to receive\n"
+"                              job id from the incluster reserve range.\n"
 "  -S, --core-spec=cores       count of reserved cores\n"
 "      --signal=[B:]num[@time] send signal when time limit within time seconds\n"
 "      --slurmd-debug=level    slurmd debug level\n"
@@ -2623,6 +2732,7 @@ static void _help(void)
 "                              Optimum switches and max time to wait for optimum\n"
 "      --task-epilog=program   run \"program\" after launching task\n"
 "      --task-prolog=program   run \"program\" before launching task\n"
+"      --thread-spec=threads   count of reserved threads\n"
 "  -T, --threads=threads       set srun launch fanout\n"
 "  -t, --time=minutes          time limit\n"
 "      --time-min=minutes      minimum time limit (if distinct)\n"
@@ -2646,7 +2756,7 @@ static void _help(void)
 "  -Z, --no-allocate           don't allocate nodes (must supply -w)\n"
 "\n"
 "Consumable resources related options:\n"
-"      --exclusive             allocate nodes in exclusive mode when\n"
+"      --exclusive[=user]      allocate nodes in exclusive mode when\n"
 "                              cpu consumable resource is enabled\n"
 "                              or don't share CPUs for job steps\n"
 "      --mem-per-cpu=MB        maximum amount of real memory per allocated\n"
diff --git a/src/srun/libsrun/opt.h b/src/srun/libsrun/opt.h
index 42e5abfe3..a88854432 100644
--- a/src/srun/libsrun/opt.h
+++ b/src/srun/libsrun/opt.h
@@ -99,6 +99,7 @@ typedef struct srun_options {
 	char *cpu_bind;		/* binding map for map/mask_cpu */
 	mem_bind_type_t mem_bind_type; /* --mem_bind=		*/
 	char *mem_bind;		/* binding map for map/mask_mem	*/
+	uint16_t accel_bind_type; /* --accel-bind= */
 	bool nodes_set;		/* true if nodes explicitly set */
 	bool nodes_set_env;	/* true if nodes set via SLURM_NNODES */
 	bool nodes_set_opt;	/* true if nodes explicitly set using
@@ -216,7 +217,6 @@ typedef struct srun_options {
 	uint8_t open_mode;	/* --open-mode=append|truncate	*/
 	char *acctg_freq;	/* --acctg-freq=<type1>=<freq1>,*/
 				/* 	<type2>=<freq2>,...	*/
-	uint32_t cpu_freq;     	/* --cpu_freq=kilohertz		*/
 	bool pty;		/* --pty			*/
 	char *restart_dir;	/* --restart                    */
 	int argc;		/* length of argv array		*/
@@ -231,6 +231,12 @@ typedef struct srun_options {
 	bool user_managed_io;   /* 0 for "normal" IO, 1 for "user manged" IO */
 	int core_spec;		/* --core-spec=n,      -S n	*/
 	bool core_spec_set;	/* true if core_spec explicitly set */
+	char *burst_buffer;	/* -bb				*/
+	uint32_t cpu_freq_min;  /* Minimum cpu frequency  */
+	uint32_t cpu_freq_max;  /* Maximum cpu frequency  */
+	uint32_t cpu_freq_gov;  /* cpu frequency governor */
+	uint8_t power_flags;	/* Power management options	*/
+	uint8_t sicp_mode;	/* Inter-cluster job ID		*/
 } opt_t;
 
 extern opt_t opt;
diff --git a/src/srun/libsrun/srun_job.c b/src/srun/libsrun/srun_job.c
index 2238c4624..c8acd8ea1 100644
--- a/src/srun/libsrun/srun_job.c
+++ b/src/srun/libsrun/srun_job.c
@@ -327,11 +327,13 @@ job_step_create_allocation(resource_allocation_response_msg_t *resp)
 	/* get the correct number of hosts to run tasks on */
 	if (opt.nodelist)
 		step_nodelist = opt.nodelist;
-	else if ((opt.distribution == SLURM_DIST_ARBITRARY) && (count == 0))
+	else if (((opt.distribution & SLURM_DIST_STATE_BASE) ==
+		  SLURM_DIST_ARBITRARY) && (count == 0))
 		step_nodelist = getenv("SLURM_ARBITRARY_NODELIST");
 	if (step_nodelist) {
 		hl = hostlist_create(step_nodelist);
-		if (opt.distribution != SLURM_DIST_ARBITRARY)
+		if ((opt.distribution & SLURM_DIST_STATE_BASE) !=
+		    SLURM_DIST_ARBITRARY)
 			hostlist_uniq(hl);
 		if (!hostlist_count(hl)) {
 			error("Hostlist is now nothing!  Can not run job.");
@@ -352,8 +354,8 @@ job_step_create_allocation(resource_allocation_response_msg_t *resp)
 		opt.nodelist = buf;
 	}
 
-	if ((opt.distribution == SLURM_DIST_ARBITRARY) &&
-	    (count != opt.ntasks)) {
+	if (((opt.distribution & SLURM_DIST_STATE_BASE) == SLURM_DIST_ARBITRARY)
+	    && (count != opt.ntasks)) {
 		error("You asked for %d tasks but hostlist specified %d nodes",
 		      opt.ntasks, count);
 		goto error;
@@ -402,6 +404,9 @@ job_create_allocation(resource_allocation_response_msg_t *resp)
 	i->select_jobinfo = select_g_select_jobinfo_copy(resp->select_jobinfo);
 
 	job = _job_create_structure(i);
+	job->account = xstrdup(resp->account);
+	job->qos = xstrdup(resp->qos);
+	job->resv_name = xstrdup(resp->resv_name);
 
 	xfree(i->nodelist);
 	xfree(i);
@@ -669,7 +674,7 @@ extern void fini_srun(srun_job_t *job, bool got_alloc, uint32_t *global_rc,
 	if (got_alloc) {
 		cleanup_allocation();
 
-		/* send the controller we were cancelled */
+		/* Tell slurmctld that we were cancelled */
 		if (job->state >= SRUN_JOB_CANCELLED)
 			slurm_complete_job(job->jobid, NO_VAL);
 		else
@@ -1131,7 +1136,8 @@ static int _run_srun_script (srun_job_t *job, char *script)
 
 static void _set_env_vars(resource_allocation_response_msg_t *resp)
 {
-	char *tmp;
+	char *key, *value, *tmp;
+	int i;
 
 	if (!getenv("SLURM_JOB_CPUS_PER_NODE")) {
 		tmp = uint32_compressed_to_str(resp->num_cpu_groups,
@@ -1153,6 +1159,20 @@ static void _set_env_vars(resource_allocation_response_msg_t *resp)
 		unsetenv("SLURM_NODE_ALIASES");
 	}
 
+	if (resp->env_size) {	/* Used to set Burst Buffer environment */
+		for (i = 0; i < resp->env_size; i++) {
+			tmp = xstrdup(resp->environment[i]);
+			key = tmp;
+			value = strchr(tmp, '=');
+			if (value) {
+				value[0] = '\0';
+				value++;
+				setenv(key, value, 0);
+			}
+			xfree(tmp);
+		}
+	}
+
 	return;
 }
 
@@ -1245,7 +1265,7 @@ static int _set_rlimit_env(void)
 	return rc;
 }
 
-/* Set SLURM_CLUSTER_NAME< SLURM_SUBMIT_DIR and SLURM_SUBMIT_HOST environment 
+/* Set SLURM_CLUSTER_NAME< SLURM_SUBMIT_DIR and SLURM_SUBMIT_HOST environment
  * variables within current state */
 static void _set_submit_dir_env(void)
 {
diff --git a/src/srun/libsrun/srun_job.h b/src/srun/libsrun/srun_job.h
index c3c6aea5a..eba8fda21 100644
--- a/src/srun/libsrun/srun_job.h
+++ b/src/srun/libsrun/srun_job.h
@@ -114,6 +114,9 @@ typedef struct srun_job {
 	uint16_t ws_row;	/* window size, row count */
 	slurm_step_ctx_t *step_ctx;
 	slurm_step_ctx_params_t ctx_params;
+	char *account;    /* account of this job */
+	char *qos;        /* job's qos */
+	char *resv_name;  /* reservation the job is using */
 } srun_job_t;
 
 void    update_job_state(srun_job_t *job, srun_job_state_t newstate);
diff --git a/src/srun/srun.c b/src/srun/srun.c
index 33cb10f75..93bd99723 100644
--- a/src/srun/srun.c
+++ b/src/srun/srun.c
@@ -205,7 +205,10 @@ int srun(int ac, char **av)
 		env->plane_size = opt.plane_size;
 	env->cpu_bind_type = opt.cpu_bind_type;
 	env->cpu_bind = opt.cpu_bind;
-	env->cpu_freq = opt.cpu_freq;
+
+	env->cpu_freq_min = opt.cpu_freq_min;
+	env->cpu_freq_max = opt.cpu_freq_max;
+	env->cpu_freq_gov = opt.cpu_freq_gov;
 	env->mem_bind_type = opt.mem_bind_type;
 	env->mem_bind = opt.mem_bind;
 	env->overcommit = opt.overcommit;
@@ -232,6 +235,9 @@ int srun(int ac, char **av)
 		env->task_count = _uint16_array_to_str(job->nhosts, tasks);
 		env->jobid = job->jobid;
 		env->stepid = job->stepid;
+		env->account = job->account;
+		env->qos = job->qos;
+		env->resv_name = job->resv_name;
 	}
 	if (opt.pty && (set_winsize(job) < 0)) {
 		error("Not using a pseudo-terminal, disregarding --pty option");
@@ -246,6 +252,9 @@ int srun(int ac, char **av)
 		tcgetattr(fd, &term);
 		/* Set raw mode on local tty */
 		cfmakeraw(&term);
+		/* Re-enable output processing such that debug() and
+		 * and error() work properly. */
+		term.c_oflag |= OPOST;
 		tcsetattr(fd, TCSANOW, &term);
 		atexit(&_pty_restore);
 
diff --git a/src/srun_cr/Makefile.in b/src/srun_cr/Makefile.in
index 0ba6e74b0..cd34990b7 100644
--- a/src/srun_cr/Makefile.in
+++ b/src/srun_cr/Makefile.in
@@ -101,6 +101,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -109,10 +110,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -125,7 +128,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -254,6 +257,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -303,8 +308,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -323,6 +332,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -366,6 +378,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -389,6 +402,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/sshare/Makefile.in b/src/sshare/Makefile.in
index 682fcaca1..235ab2d1a 100644
--- a/src/sshare/Makefile.in
+++ b/src/sshare/Makefile.in
@@ -101,6 +101,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -109,10 +110,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -125,7 +128,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -248,6 +251,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -297,8 +302,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -317,6 +326,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -360,6 +372,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -383,6 +396,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/sshare/process.c b/src/sshare/process.c
index 873bcdefd..48d016fbb 100644
--- a/src/sshare/process.c
+++ b/src/sshare/process.c
@@ -40,161 +40,175 @@
 #include "src/sshare/sshare.h"
 #include <math.h>
 
-extern int long_flag;
+static void _print_tres(print_field_t *field, uint64_t *tres_cnts,
+			int last);
+
+int long_flag;		/* exceeds 80 character limit with more info */
+char **tres_names = NULL;
+uint32_t tres_cnt = 0;
+char *opt_field_list = NULL;
+
+print_field_t fields[] = {
+	{-20, "Account", print_fields_str, PRINT_ACCOUNT},
+	{10, "Cluster", print_fields_str, PRINT_CLUSTER},
+	{13, "EffectvUsage", print_fields_double, PRINT_EUSED},
+	{10, "FairShare", print_fields_double, PRINT_FSFACTOR},
+	{10, "LevelFS", print_fields_double, PRINT_LEVELFS},
+	{6, "ID", print_fields_uint, PRINT_ID},
+	{11, "NormShares", print_fields_double, PRINT_NORMS},
+	{11, "NormUsage", print_fields_double, PRINT_NORMU},
+	{12, "Partition", print_fields_str, PRINT_PART},
+	{10, "RawShares", print_fields_uint32, PRINT_RAWS},
+	{11, "RawUsage", print_fields_uint64, PRINT_RAWU},
+	{10, "User", print_fields_str, PRINT_USER},
+	{30, "GrpTRESMins", _print_tres, PRINT_TRESMINS},
+	{30, "TRESRunMins", _print_tres, PRINT_RUNMINS},
+	{0,  NULL, NULL, 0}
+};
+
+static void _print_tres(print_field_t *field, uint64_t *tres_cnts,
+			int last)
+{
+	int abs_len = abs(field->len);
+	char *print_this;
+
+	print_this = slurmdb_make_tres_string_from_arrays(
+		tres_names, tres_cnts, tres_cnt, TRES_STR_FLAG_REMOVE);
+
+	if (!print_this)
+		print_this = xstrdup("");
+
+	if (print_fields_parsable_print == PRINT_FIELDS_PARSABLE_NO_ENDING
+	    && last)
+		printf("%s", print_this);
+	else if (print_fields_parsable_print)
+		printf("%s|", print_this);
+	else {
+		if (strlen(print_this) > abs_len)
+			print_this[abs_len-1] = '+';
+
+		if (field->len == abs_len)
+			printf("%*.*s ", abs_len, abs_len, print_this);
+		else
+			printf("%-*.*s ", abs_len, abs_len, print_this);
+	}
+	xfree(print_this);
+}
 
 extern int process(shares_response_msg_t *resp, uint16_t options)
 {
 	uint32_t flags = slurmctld_conf.priority_flags;
-	int rc = SLURM_SUCCESS;
-	association_shares_object_t *share = NULL;
+	int rc = SLURM_SUCCESS, i;
+	assoc_shares_object_t *share = NULL;
 	ListIterator itr = NULL;
 	ListIterator itr2 = NULL;
 	char *object = NULL;
 	char *print_acct = NULL;
 	List tree_list = NULL;
+	char *tmp_char = NULL;
 
 	int field_count = 0;
 
-	print_field_t *field = NULL;
-
 	List format_list;
 	List print_fields_list; /* types are of print_field_t */
 
-	enum {
-		PRINT_ACCOUNT,
-		PRINT_CLUSTER,
-		PRINT_CPUMINS,
-		PRINT_EUSED,
-		PRINT_FSFACTOR,
-		PRINT_ID,
-		PRINT_NORMS,
-		PRINT_NORMU,
-		PRINT_RAWS,
-		PRINT_RAWU,
-		PRINT_RUNMINS,
-		PRINT_USER,
-		PRINT_LEVELFS
-	};
-
 	if (!resp)
 		return SLURM_ERROR;
 
+	tres_names = resp->tres_names;
+	tres_cnt = resp->tres_cnt;
+
 	format_list = list_create(slurm_destroy_char);
-	if (flags & PRIORITY_FLAGS_FAIR_TREE) {
+	if (opt_field_list) {
+		slurm_addto_char_list(format_list, opt_field_list);
+	} else if (flags & PRIORITY_FLAGS_FAIR_TREE) {
 		if (long_flag) {
-			slurm_addto_char_list(format_list,
-					      "A,User,RawShares,NormShares,"
-					      "RawUsage,NormUsage,EffUsage,"
-					      "FSFctr,LevelFS,GrpCPUMins,"
-					      "CPURunMins");
+			if (options & PRINT_PARTITIONS)
+				slurm_addto_char_list(
+					format_list,
+					"A,User,P,RawShares,NormShares,"
+					"RawUsage,NormUsage,Eff,"
+					"Fairshare,LevelFS,GrpTRESMins,"
+					"TRESRunMins");
+			else
+				slurm_addto_char_list(
+					format_list,
+					"A,User,RawShares,NormShares,"
+					"RawUsage,NormUsage,Eff,"
+					"Fairshare,LevelFS,GrpTRESMins,"
+					"TRESRunMins");
+
 		} else {
-			slurm_addto_char_list(format_list,
-					      "A,User,RawShares,NormShares,"
-					      "RawUsage,EffUsage,FSFctr");
+			if (options & PRINT_PARTITIONS)
+				slurm_addto_char_list(
+					format_list,
+					"A,User,P,RawShares,NormShares,"
+					"RawUsage,Eff,Fairshare");
+			else
+				slurm_addto_char_list(
+					format_list,
+					"A,User,RawShares,NormShares,"
+					"RawUsage,Eff,Fairshare");
 		}
 	} else {
 		if (long_flag) {
-			slurm_addto_char_list(format_list,
-					      "A,User,RawShares,NormShares,"
-					      "RawUsage,NormUsage,EffUsage,"
-					      "FSFctr,GrpCPUMins,CPURunMins");
+			if (options & PRINT_PARTITIONS)
+				slurm_addto_char_list(
+					format_list,
+					"A,User,P,RawShares,NormShares,"
+					"RawUsage,NormUsage,Eff,"
+					"Fairshare,GrpTRESMins,TRESRunMins");
+			else
+				slurm_addto_char_list(
+					format_list,
+					"A,User,RawShares,NormShares,"
+					"RawUsage,NormUsage,Eff,"
+					"Fairshare,GrpTRESMins,TRESRunMins");
 		} else {
-			slurm_addto_char_list(format_list,
-					      "A,User,RawShares,NormShares,"
-					      "RawUsage,EffUsage,FSFctr");
+			if (options & PRINT_PARTITIONS)
+				slurm_addto_char_list(
+					format_list,
+					"A,User,P,RawShares,NormShares,"
+					"RawUsage,Eff,Fairshare");
+			else
+				slurm_addto_char_list(
+					format_list,
+					"A,User,RawShares,NormShares,"
+					"RawUsage,Eff,Fairshare");
 		}
 	}
 
 
-	print_fields_list = list_create(destroy_print_field);
+	print_fields_list = list_create(NULL);
 	itr = list_iterator_create(format_list);
 	while ((object = list_next(itr))) {
-		char *tmp_char = NULL;
-		field = xmalloc(sizeof(print_field_t));
-		if (!strncasecmp("Account", object, 1)) {
-			field->type = PRINT_ACCOUNT;
-			field->name = xstrdup("Account");
-			field->len = -20;
-			field->print_routine = print_fields_str;
-		} else if (!strncasecmp("Cluster", object, 2)) {
-			field->type = PRINT_CLUSTER;
-			field->name = xstrdup("Cluster");
-			field->len = 10;
-			field->print_routine = print_fields_str;
-		} else if (!strncasecmp("EffUsage", object, 1)) {
-			field->type = PRINT_EUSED;
-			field->name = xstrdup("Effectv Usage");
-			field->len = 13;
-			field->print_routine = print_fields_double;
-		} else if (!strncasecmp("FSFctr", object, 4)) {
-			field->type = PRINT_FSFACTOR;
-			field->name = xstrdup("FairShare");
-			field->len = 10;
-			field->print_routine = print_fields_double;
-		} else if (!strncasecmp("LevelFS", object, 1)) {
-			field->type = PRINT_LEVELFS;
-			field->name = xstrdup("Level FS");
-			field->len = 10;
-			field->print_routine = print_fields_double;
-		} else if (!strncasecmp("ID", object, 1)) {
-			field->type = PRINT_ID;
-			field->name = xstrdup("ID");
-			field->len = 6;
-			field->print_routine = print_fields_uint;
-		} else if (!strncasecmp("NormShares", object, 5)) {
-			field->type = PRINT_NORMS;
-			field->name = xstrdup("Norm Shares");
-			field->len = 11;
-			field->print_routine = print_fields_double;
-		} else if (!strncasecmp("NormUsage", object, 5)) {
-			field->type = PRINT_NORMU;
-			field->name = xstrdup("Norm Usage");
-			field->len = 11;
-			field->print_routine = print_fields_double;
-		} else if (!strncasecmp("RawShares", object, 4)) {
-			field->type = PRINT_RAWS;
-			field->name = xstrdup("Raw Shares");
-			field->len = 10;
-			field->print_routine = print_fields_uint32;
-		} else if (!strncasecmp("RawUsage", object, 4)) {
-			field->type = PRINT_RAWU;
-			field->name = xstrdup("Raw Usage");
-			field->len = 11;
-			field->print_routine = print_fields_uint64;
-		} else if (!strncasecmp("User", object, 1)) {
-			field->type = PRINT_USER;
-			field->name = xstrdup("User");
-			field->len = 10;
-			field->print_routine = print_fields_str;
-		} else if (!strncasecmp("GrpCPUMins", object, 1)) {
-			field->type = PRINT_CPUMINS;
-			field->name = xstrdup("GrpCPUMins");
-			field->len = 11;
-			field->print_routine = print_fields_uint64;
-		} else if (!strncasecmp("CPURunMins", object, 2)) {
-			field->type = PRINT_RUNMINS;
-			field->name = xstrdup("CPURunMins");
-			field->len = 15;
-			field->print_routine = print_fields_uint64;
-		} else {
-			exit_code=1;
-			fprintf(stderr, "Unknown field '%s'\n", object);
-			exit(1);
-			xfree(field);
-			continue;
+		for (i = 0; fields[i].name; i++) {
+			if ((tmp_char = strstr(object, "\%")))
+				tmp_char[0] = '\0';
+
+			if (!strncasecmp(fields[i].name,
+					 object, strlen(object))) {
+				if (tmp_char) {
+					int newlen = atoi(tmp_char+1);
+					if (newlen)
+						fields[i].len = newlen;
+				}
+
+				list_append(print_fields_list, &fields[i]);
+				break;
+			}
 		}
-		if ((tmp_char = strstr(object, "\%"))) {
-			int newlen = atoi(tmp_char+1);
-			if (newlen)
-				field->len = newlen;
+
+		if (!fields[i].name) {
+			error("Invalid field requested: \"%s\"", object);
+			exit(1);
 		}
-		list_append(print_fields_list, field);
 	}
 	list_iterator_destroy(itr);
-	list_destroy(format_list);
+	FREE_NULL_LIST(format_list);
 
 	if (exit_code) {
-		list_destroy(print_fields_list);
+		FREE_NULL_LIST(print_fields_list);
 		return SLURM_ERROR;
 	}
 
@@ -212,6 +226,7 @@ extern int process(shares_response_msg_t *resp, uint16_t options)
 		int curr_inx = 1;
 		char *tmp_char = NULL;
 		char *local_acct = NULL;
+		print_field_t *field = NULL;
 
 		if ((options & PRINT_USERS_ONLY) && share->user == 0)
 			continue;
@@ -318,14 +333,22 @@ extern int process(shares_response_msg_t *resp, uint16_t options)
 						     tmp_char,
 						     (curr_inx == field_count));
 				break;
-			case PRINT_CPUMINS:
+			case PRINT_PART:
 				field->print_routine(field,
-						     share->grp_cpu_mins,
+						     share->partition,
+						     (curr_inx == field_count));
+				break;
+			case PRINT_TRESMINS:
+				field->print_routine(field,
+						     share->tres_grp_mins,
 						     (curr_inx == field_count));
 				break;
 			case PRINT_RUNMINS:
+				/* convert to minutes */
+				for (i=0; i<tres_cnt; i++)
+					share->tres_run_secs[i] /= 60;
 				field->print_routine(field,
-						     share->cpu_run_mins,
+						     share->tres_run_secs,
 						     (curr_inx == field_count));
 				break;
 			default:
@@ -340,11 +363,9 @@ extern int process(shares_response_msg_t *resp, uint16_t options)
 		printf("\n");
 	}
 
-	if (tree_list)
-		list_destroy(tree_list);
-
+	FREE_NULL_LIST(tree_list);
 	list_iterator_destroy(itr2);
 	list_iterator_destroy(itr);
-	list_destroy(print_fields_list);
+	FREE_NULL_LIST(print_fields_list);
 	return rc;
 }
diff --git a/src/sshare/sshare.c b/src/sshare/sshare.c
index 0310954ca..845d8d8bc 100644
--- a/src/sshare/sshare.c
+++ b/src/sshare/sshare.c
@@ -46,22 +46,21 @@
 #define OPT_LONG_HELP  0x100
 #define OPT_LONG_USAGE 0x101
 
-int exit_code;		/* sshare's exit code, =1 on any error at any time */
-int quiet_flag;		/* quiet=1, verbose=-1, normal=0 */
-int long_flag;		/* exceeds 80 character limit with more info */
-int verbosity;		/* count of -v options */
-uint32_t my_uid = 0;
-List clusters = NULL;
-
 static int      _get_info(shares_request_msg_t *shares_req,
 			  shares_response_msg_t **shares_resp);
 static int      _addto_name_char_list(List char_list, char *names, bool gid);
 static char *   _convert_to_name(int id, bool gid);
 static void     _print_version( void );
 static void	_usage(void);
+static void     _help_format_msg(void);
 
-int
-main (int argc, char *argv[])
+int exit_code;		/* sshare's exit code, =1 on any error at any time */
+int quiet_flag;		/* quiet=1, verbose=-1, normal=0 */
+int verbosity;		/* count of -v options */
+uint32_t my_uid = 0;
+List clusters = NULL;
+
+int main (int argc, char *argv[])
 {
 	int error_code = SLURM_SUCCESS, opt_char;
 	log_options_t opts = LOG_OPTS_STDERR_ONLY;
@@ -75,10 +74,13 @@ main (int argc, char *argv[])
 	static struct option long_options[] = {
 		{"accounts", 1, 0, 'A'},
 		{"all",      0, 0, 'a'},
+                {"helpformat",0,0, 'e'},
 		{"long",     0, 0, 'l'},
+		{"partition",0, 0, 'm'},
 		{"cluster",  1, 0, 'M'},
 		{"clusters", 1, 0, 'M'},
-		{"noheader", 0, 0, 'h'},
+		{"noheader", 0, 0, 'n'},
+		{"format",   1, 0, 'o'},
 		{"parsable", 0, 0, 'p'},
 		{"parsable2",0, 0, 'P'},
 		{"users",    1, 0, 'u'},
@@ -98,7 +100,7 @@ main (int argc, char *argv[])
 	slurm_conf_init(NULL);
 	log_init("sshare", opts, SYSLOG_FACILITY_DAEMON, NULL);
 
-	while((opt_char = getopt_long(argc, argv, "aA:hlM:npPqUu:t:vV",
+	while((opt_char = getopt_long(argc, argv, "aA:ehlM:no:pPqUu:t:vVm",
 			long_options, &option_index)) != -1) {
 		switch (opt_char) {
 		case (int)'?':
@@ -115,6 +117,10 @@ main (int argc, char *argv[])
 					list_create(slurm_destroy_char);
 			slurm_addto_char_list(req_msg.acct_list, optarg);
 			break;
+		case 'e':
+			_help_format_msg();
+			exit(0);
+			break;
 		case 'h':
 			print_fields_have_header = 0;
 			break;
@@ -124,8 +130,7 @@ main (int argc, char *argv[])
 			long_flag = 1;
 			break;
 		case 'M':
-			if (clusters)
-				list_destroy(clusters);
+			FREE_NULL_LIST(clusters);
 			if (!(clusters =
 			     slurmdb_get_info_cluster(optarg))) {
 				print_db_notok(optarg, 0);
@@ -133,9 +138,15 @@ main (int argc, char *argv[])
 			}
 			working_cluster_rec = list_peek(clusters);
 			break;
+		case 'm':
+			options |= PRINT_PARTITIONS;
+			break;
 		case 'n':
 			print_fields_have_header = 0;
 			break;
+		case 'o':
+			xstrfmtcat(opt_field_list, "%s,", optarg);
+			break;
 		case 'p':
 			print_fields_parsable_print =
 			PRINT_FIELDS_PARSABLE_ENDING;
@@ -187,8 +198,7 @@ main (int argc, char *argv[])
 	if (all_users) {
 		if (req_msg.user_list
 		   && list_count(req_msg.user_list)) {
-			list_destroy(req_msg.user_list);
-			req_msg.user_list = NULL;
+			FREE_NULL_LIST(req_msg.user_list);
 		}
 		if (verbosity)
 			fprintf(stderr, "Users requested:\n\t: all\n");
@@ -222,8 +232,7 @@ main (int argc, char *argv[])
 	} else {
 		if (req_msg.acct_list
 		   && list_count(req_msg.acct_list)) {
-			list_destroy(req_msg.acct_list);
-			req_msg.acct_list = NULL;
+			FREE_NULL_LIST(req_msg.acct_list);
 		}
 		if (verbosity)
 			fprintf(stderr, "Accounts requested:\n\t: all\n");
@@ -232,10 +241,8 @@ main (int argc, char *argv[])
 
 	error_code = _get_info(&req_msg, &resp_msg);
 
-	if (req_msg.acct_list)
-		list_destroy(req_msg.acct_list);
-	if (req_msg.user_list)
-		list_destroy(req_msg.user_list);
+	FREE_NULL_LIST(req_msg.acct_list);
+	FREE_NULL_LIST(req_msg.user_list);
 
 	if (error_code) {
 		slurm_perror("Couldn't get shares from controller");
@@ -416,11 +423,16 @@ Usage:  sshare [OPTION]                                                    \n\
   Valid OPTIONs are:                                                       \n\
     -a or --all            list all users                                  \n\
     -A or --accounts=      display specific accounts (comma separated list)\n\
-    -h or --noheader       omit header from output                         \n\
+    -e or --helpformat     Print a list of fields that can be specified    \n\
+                           with the '--format' option                      \n\
+    -l or --long           include normalized usage in output              \n\
+    -m or --partition      print the partition part of the association     \n\
     -M or --cluster=name   cluster to issue commands to.  Default is       \n\
                            current cluster.  cluster with no name will     \n\
                            reset to default.                               \n\
-    -l or --long           include normalized usage in output              \n\
+    -n or --noheader       omit header from output                         \n\
+    -o or --format=        Comma separated list of fields. (use            \n\
+                           (\"--helpformat\" for a list of available fields).\n\
     -p or --parsable       '|' delimited output with a trailing '|'        \n\
     -P or --parsable2      '|' delimited output without a trailing '|'     \n\
     -u or --users=         display specific users (comma separated list)   \n\
@@ -432,3 +444,17 @@ Usage:  sshare [OPTION]                                                    \n\
                                                                            \n\n");
 }
 
+static void _help_format_msg(void)
+{
+	int i;
+
+	for (i = 0; fields[i].name; i++) {
+		if (i & 3)
+			printf(" ");
+		else if (i)
+			printf("\n");
+		printf("%-17s", fields[i].name);
+	}
+	printf("\n");
+	return;
+}
diff --git a/src/sshare/sshare.h b/src/sshare/sshare.h
index 3cf9964f7..3dc2facaa 100644
--- a/src/sshare/sshare.h
+++ b/src/sshare/sshare.h
@@ -90,6 +90,10 @@
 /* Print only the users and not the hierarchy.
  */
 #define PRINT_USERS_ONLY 0x01
+/* If you have partition base associations
+ * print them
+ */
+#define PRINT_PARTITIONS 0x02
 
 typedef enum {
 	SSHARE_TIME_SECS,
@@ -97,12 +101,34 @@ typedef enum {
 	SSHARE_TIME_HOURS,
 } sshare_time_format_t;
 
+enum {
+	PRINT_ACCOUNT,
+	PRINT_CLUSTER,
+	PRINT_TRESMINS,
+	PRINT_EUSED,
+	PRINT_FSFACTOR,
+	PRINT_ID,
+	PRINT_NORMS,
+	PRINT_NORMU,
+	PRINT_PART,
+	PRINT_RAWS,
+	PRINT_RAWU,
+	PRINT_RUNMINS,
+	PRINT_USER,
+	PRINT_LEVELFS
+};
+
 extern int exit_code;	/* sshare's exit code, =1 on any error at any time */
 extern int quiet_flag;	/* quiet=1, verbose=-1, normal=0 */
 extern uint32_t my_uid;
 extern sshare_time_format_t time_format;
 extern char *time_format_string;
 extern List clusters;
+extern print_field_t fields[];
+extern char **tres_names;
+extern uint32_t tres_cnt;
+extern int long_flag;
+extern char *opt_field_list;
 
 extern int process(shares_response_msg_t *msg, uint16_t options);
 
diff --git a/src/sstat/Makefile.in b/src/sstat/Makefile.in
index b2dc49fed..f892fa82f 100644
--- a/src/sstat/Makefile.in
+++ b/src/sstat/Makefile.in
@@ -101,6 +101,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -109,10 +110,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -125,7 +128,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -249,6 +252,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -298,8 +303,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -318,6 +327,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -361,6 +373,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -384,6 +397,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/sstat/options.c b/src/sstat/options.c
index 5b716e763..5985bc482 100644
--- a/src/sstat/options.c
+++ b/src/sstat/options.c
@@ -43,6 +43,9 @@
 #include "sstat.h"
 #include <time.h>
 
+/* getopt_long options, integers but not characters */
+#define OPT_LONG_NOCONVERT 0x100
+
 void _help_fields_msg(void);
 void _help_msg(void);
 void _usage(void);
@@ -89,6 +92,8 @@ sstat [<OPTION>] -j <job(.stepid)>                                          \n\
      -n, --noheader:                                                        \n\
 	           No header will be added to the beginning of output.      \n\
                    The default is to print a header.                        \n\
+     --noconvert:  Don't convert units from their original type             \n\
+		   (e.g. 2048M won't be converted to 2G).                   \n\
      -o, --format:                                                          \n\
 	           Comma separated list of fields. (use \"--helpformat\"    \n\
                    for a list of available fields).                         \n\
@@ -132,6 +137,7 @@ void _do_help(void)
 void _init_params()
 {
 	memset(&params, 0, sizeof(sstat_parameters_t));
+	params.convert_flags = CONVERT_NUM_UNIT_EXACT;
 }
 
 /* returns number of objects added to list */
@@ -339,6 +345,7 @@ void parse_command_line(int argc, char **argv)
 		{"noheader", 0, 0, 'n'},
 		{"fields", 1, 0, 'o'},
 		{"format", 1, 0, 'o'},
+                {"noconvert",  no_argument, 0, OPT_LONG_NOCONVERT},
 		{"pidformat", 0, 0, 'i'},
 		{"parsable", 0, 0, 'p'},
 		{"parsable2", 0, 0, 'P'},
@@ -382,6 +389,9 @@ void parse_command_line(int argc, char **argv)
 		case 'n':
 			print_fields_have_header = 0;
 			break;
+		case OPT_LONG_NOCONVERT:
+			params.convert_flags |= CONVERT_NUM_UNIT_NO;
+			break;
 		case 'o':
 			xstrfmtcat(params.opt_field_list, "%s,", optarg);
 			break;
diff --git a/src/sstat/print.c b/src/sstat/print.c
index 4b9f0871e..d1da23ce6 100644
--- a/src/sstat/print.c
+++ b/src/sstat/print.c
@@ -88,7 +88,8 @@ static void _print_small_double(
 		return;
 
 	if (dub > 1)
-		convert_num_unit((double)dub, outbuf, buf_size, units);
+		convert_num_unit((double)dub, outbuf, buf_size, units,
+				 params.convert_flags);
 	else if (dub > 0)
 		snprintf(outbuf, buf_size, "%.2fM", dub);
 	else
@@ -120,7 +121,8 @@ void print_fields(slurmdb_step_rec_t *step)
 
 			convert_num_unit2((double)step->stats.act_cpufreq,
 					  outbuf, sizeof(outbuf),
-					  UNIT_KILO, 1000, false);
+					  UNIT_KILO, 1000, params.convert_flags
+					  & (~CONVERT_NUM_UNIT_EXACT));
 
 			field->print_routine(field,
 					     outbuf,
@@ -131,7 +133,9 @@ void print_fields(slurmdb_step_rec_t *step)
 				convert_num_unit2((double)
 						  step->stats.consumed_energy,
 						  outbuf, sizeof(outbuf),
-						  UNIT_NONE, 1000, false);
+						  UNIT_NONE, 1000,
+						  params.convert_flags &
+						  (~CONVERT_NUM_UNIT_EXACT));
 			}
 			field->print_routine(field,
 					     outbuf,
@@ -163,7 +167,7 @@ void print_fields(slurmdb_step_rec_t *step)
 		case PRINT_AVEPAGES:
 			convert_num_unit((double)step->stats.pages_ave,
 					 outbuf, sizeof(outbuf),
-					 UNIT_KILO);
+					 UNIT_KILO, params.convert_flags);
 
 			field->print_routine(field,
 					     outbuf,
@@ -172,7 +176,7 @@ void print_fields(slurmdb_step_rec_t *step)
 		case PRINT_AVERSS:
 			convert_num_unit((double)step->stats.rss_ave,
 					 outbuf, sizeof(outbuf),
-					 UNIT_KILO);
+					 UNIT_KILO, params.convert_flags);
 
 			field->print_routine(field,
 					     outbuf,
@@ -181,7 +185,7 @@ void print_fields(slurmdb_step_rec_t *step)
 		case PRINT_AVEVSIZE:
 			convert_num_unit((double)step->stats.vsize_ave,
 					 outbuf, sizeof(outbuf),
-					 UNIT_KILO);
+					 UNIT_KILO, params.convert_flags);
 
 			field->print_routine(field,
 					     outbuf,
@@ -249,7 +253,7 @@ void print_fields(slurmdb_step_rec_t *step)
 		case PRINT_MAXPAGES:
 			convert_num_unit((double)step->stats.pages_max,
 					 outbuf, sizeof(outbuf),
-					 UNIT_KILO);
+					 UNIT_KILO, params.convert_flags);
 
 			field->print_routine(field,
 					     outbuf,
@@ -272,7 +276,7 @@ void print_fields(slurmdb_step_rec_t *step)
 		case PRINT_MAXRSS:
 			convert_num_unit((double)step->stats.rss_max,
 					 outbuf, sizeof(outbuf),
-					 UNIT_KILO);
+					 UNIT_KILO, params.convert_flags);
 
 			field->print_routine(field,
 					     outbuf,
@@ -295,7 +299,7 @@ void print_fields(slurmdb_step_rec_t *step)
 		case PRINT_MAXVSIZE:
 			convert_num_unit((double)step->stats.vsize_max,
 					 outbuf, sizeof(outbuf),
-					 UNIT_KILO);
+					 UNIT_KILO, params.convert_flags);
 
 			field->print_routine(field,
 					     outbuf,
@@ -352,9 +356,23 @@ void print_fields(slurmdb_step_rec_t *step)
                                              step->pid_str,
                                              (curr_inx == field_count));
                         break;
-		case PRINT_REQ_CPUFREQ:
+		case PRINT_REQ_CPUFREQ_MIN:
 			cpu_freq_to_string(outbuf, sizeof(outbuf),
-					   step->req_cpufreq);
+					   step->req_cpufreq_min);
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_REQ_CPUFREQ_MAX:
+			cpu_freq_to_string(outbuf, sizeof(outbuf),
+					   step->req_cpufreq_max);
+			field->print_routine(field,
+					     outbuf,
+					     (curr_inx == field_count));
+			break;
+		case PRINT_REQ_CPUFREQ_GOV:
+			cpu_freq_to_string(outbuf, sizeof(outbuf),
+					   step->req_cpufreq_gov);
 			field->print_routine(field,
 					     outbuf,
 					     (curr_inx == field_count));
diff --git a/src/sstat/sstat.c b/src/sstat/sstat.c
index e2e38463d..e9db6e345 100644
--- a/src/sstat/sstat.c
+++ b/src/sstat/sstat.c
@@ -45,7 +45,8 @@ int _sstat_query(slurm_step_layout_t *step_layout, uint32_t job_id,
 		 uint32_t step_id);
 int _process_results();
 int _do_stat(uint32_t jobid, uint32_t stepid, char *nodelist,
-	     uint32_t req_cpufreq);
+	     uint32_t req_cpufreq_min, uint32_t req_cpufreq_max,
+	     uint32_t req_cpufreq_gov);
 
 /*
  * Globals
@@ -84,7 +85,10 @@ print_field_t fields[] = {
 	{20, "Nodelist", print_fields_str, PRINT_NODELIST},
 	{8, "NTasks", print_fields_uint, PRINT_NTASKS},
 	{20, "Pids", print_fields_str, PRINT_PIDS},
-	{10, "ReqCPUFreq", print_fields_str, PRINT_REQ_CPUFREQ},
+	{10, "ReqCPUFreq", print_fields_str, PRINT_REQ_CPUFREQ_MIN}, /*vestigial*/
+	{13, "ReqCPUFreqMin", print_fields_str, PRINT_REQ_CPUFREQ_MIN},
+	{13, "ReqCPUFreqMax", print_fields_str, PRINT_REQ_CPUFREQ_MAX},
+	{13, "ReqCPUFreqGov", print_fields_str, PRINT_REQ_CPUFREQ_GOV},
 	{0, NULL, NULL, 0}};
 
 List jobs = NULL;
@@ -95,7 +99,8 @@ ListIterator print_fields_itr = NULL;
 int field_count = 0;
 
 int _do_stat(uint32_t jobid, uint32_t stepid, char *nodelist,
-	     uint32_t req_cpufreq)
+	     uint32_t req_cpufreq_min, uint32_t req_cpufreq_max,
+	     uint32_t req_cpufreq_gov)
 {
 	job_step_stat_response_msg_t *step_stat_response = NULL;
 	int rc = SLURM_SUCCESS;
@@ -132,7 +137,9 @@ int _do_stat(uint32_t jobid, uint32_t stepid, char *nodelist,
 	step.job_ptr = &job;
 	step.stepid = stepid;
 	step.nodes = xmalloc(BUF_SIZE);
-	step.req_cpufreq = req_cpufreq;
+	step.req_cpufreq_min = req_cpufreq_min;
+	step.req_cpufreq_max = req_cpufreq_max;
+	step.req_cpufreq_gov = req_cpufreq_gov;
 	step.stepname = NULL;
 	step.state = JOB_RUNNING;
 
@@ -195,7 +202,9 @@ int _do_stat(uint32_t jobid, uint32_t stepid, char *nodelist,
 int main(int argc, char **argv)
 {
 	ListIterator itr = NULL;
-	uint32_t req_cpufreq = NO_VAL;
+	uint32_t req_cpufreq_min = NO_VAL;
+	uint32_t req_cpufreq_max = NO_VAL;
+	uint32_t req_cpufreq_gov = NO_VAL;
 	uint32_t stepid = NO_VAL;
 	slurmdb_selected_step_t *selected_step = NULL;
 
@@ -258,7 +267,9 @@ int main(int argc, char **argv)
 				_do_stat(selected_step->jobid,
 					 step_ptr->job_steps[i].step_id,
 					 step_ptr->job_steps[i].nodes,
-					 step_ptr->job_steps[i].cpu_freq);
+					 step_ptr->job_steps[i].cpu_freq_min,
+					 step_ptr->job_steps[i].cpu_freq_max,
+					 step_ptr->job_steps[i].cpu_freq_gov);
 			}
 			slurm_free_job_step_info_response_msg(step_ptr);
 			continue;
@@ -279,22 +290,22 @@ int main(int argc, char **argv)
 			}
 			stepid = step_ptr->job_steps[0].step_id;
 			nodelist = step_ptr->job_steps[0].nodes;
-			req_cpufreq = step_ptr->job_steps[0].cpu_freq;
+			req_cpufreq_min = step_ptr->job_steps[0].cpu_freq_min;
+			req_cpufreq_max = step_ptr->job_steps[0].cpu_freq_max;
+			req_cpufreq_gov = step_ptr->job_steps[0].cpu_freq_gov;
 		}
-		_do_stat(selected_step->jobid, stepid, nodelist, req_cpufreq);
+		_do_stat(selected_step->jobid, stepid, nodelist,
+			 req_cpufreq_min, req_cpufreq_max, req_cpufreq_gov);
 		if (free_nodelist && nodelist)
 			free(nodelist);
 	}
 	list_iterator_destroy(itr);
 
 	xfree(params.opt_field_list);
-	if (params.opt_job_list)
-		list_destroy(params.opt_job_list);
-
+	FREE_NULL_LIST(params.opt_job_list);
 	if (print_fields_itr)
 		list_iterator_destroy(print_fields_itr);
-	if (print_fields_list)
-		list_destroy(print_fields_list);
+	FREE_NULL_LIST(print_fields_list);
 
 	return 0;
 }
diff --git a/src/sstat/sstat.h b/src/sstat/sstat.h
index 335e05645..dce2ebeb2 100644
--- a/src/sstat/sstat.h
+++ b/src/sstat/sstat.h
@@ -66,7 +66,7 @@
 
 #define ERROR 2
 
-#define STAT_FIELDS "jobid,maxvmsize,maxvmsizenode,maxvmsizetask,avevmsize,maxrss,maxrssnode,maxrsstask,averss,maxpages,maxpagesnode,maxpagestask,avepages,mincpu,mincpunode,mincputask,avecpu,ntasks,avecpufreq,reqcpufreq,consumedenergy,maxdiskread,maxdiskreadnode,maxdiskreadtask,avediskread,maxdiskwrite,maxdiskwritenode,maxdiskwritetask,avediskwrite"
+#define STAT_FIELDS "jobid,maxvmsize,maxvmsizenode,maxvmsizetask,avevmsize,maxrss,maxrssnode,maxrsstask,averss,maxpages,maxpagesnode,maxpagestask,avepages,mincpu,mincpunode,mincputask,avecpu,ntasks,avecpufreq,reqcpufreqmin,reqcpufreqmax,reqcpufreqgov,consumedenergy,maxdiskread,maxdiskreadnode,maxdiskreadtask,avediskread,maxdiskwrite,maxdiskwritenode,maxdiskwritetask,avediskwrite"
 
 #define STAT_FIELDS_PID "jobid,nodelist,pids"
 
@@ -112,7 +112,9 @@ typedef enum {
 		PRINT_NODELIST,
 		PRINT_NTASKS,
 		PRINT_PIDS,
-		PRINT_REQ_CPUFREQ,
+		PRINT_REQ_CPUFREQ_MIN,
+		PRINT_REQ_CPUFREQ_MAX,
+		PRINT_REQ_CPUFREQ_GOV,
 } sstat_print_types_t;
 
 
@@ -124,6 +126,7 @@ typedef struct {
 	int opt_noheader;	/* can only be cleared */
 	int opt_verbose;	/* --verbose */
 	bool pid_format;
+	uint32_t convert_flags;
 } sstat_parameters_t;
 
 extern List print_fields_list;
diff --git a/src/strigger/Makefile.in b/src/strigger/Makefile.in
index b98fd4c0f..97be0046d 100644
--- a/src/strigger/Makefile.in
+++ b/src/strigger/Makefile.in
@@ -103,6 +103,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -111,10 +112,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -127,7 +130,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -251,6 +254,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -300,8 +305,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -320,6 +329,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -363,6 +375,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -386,6 +399,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/src/strigger/opts.c b/src/strigger/opts.c
index 05fb5530a..57ad86d49 100644
--- a/src/strigger/opts.c
+++ b/src/strigger/opts.c
@@ -210,8 +210,7 @@ extern void parse_command_line(int argc, char *argv[])
 			params.job_id = tmp_l;
 			break;
 		case (int) 'M':
-			if (params.clusters)
-				list_destroy(params.clusters);
+			FREE_NULL_LIST(params.clusters);
 			if (!(params.clusters =
 			      slurmdb_get_info_cluster(optarg))) {
 				print_db_notok(optarg, 0);
diff --git a/src/sview/Makefile.am b/src/sview/Makefile.am
index 683d59ccf..9f20dc745 100644
--- a/src/sview/Makefile.am
+++ b/src/sview/Makefile.am
@@ -11,11 +11,11 @@ bin_PROGRAMS = sview
 
 sview_LDADD = $(top_builddir)/src/db_api/libslurmdb.o $(DL_LIBS)
 
-noinst_HEADERS = sview.h
+noinst_HEADERS = sview.h gthread_helper.h
 sview_SOURCES = sview.c popups.c grid.c part_info.c job_info.c \
 	block_info.c front_end_info.c node_info.c resv_info.c \
 	submit_info.c admin_info.c common.c \
-	config_info.c defaults.c gthread_helper.c
+	config_info.c defaults.c gthread_helper.c bb_info.c
 
 force:
 $(sview_LDADD) : force
diff --git a/src/sview/Makefile.in b/src/sview/Makefile.in
index fab8bdb6b..2b446f884 100644
--- a/src/sview/Makefile.in
+++ b/src/sview/Makefile.in
@@ -104,6 +104,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -112,10 +113,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -128,7 +131,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -140,7 +143,7 @@ PROGRAMS = $(bin_PROGRAMS)
 am__sview_SOURCES_DIST = sview.c popups.c grid.c part_info.c \
 	job_info.c block_info.c front_end_info.c node_info.c \
 	resv_info.c submit_info.c admin_info.c common.c config_info.c \
-	defaults.c gthread_helper.c
+	defaults.c gthread_helper.c bb_info.c
 @BUILD_SVIEW_TRUE@am_sview_OBJECTS = sview-sview.$(OBJEXT) \
 @BUILD_SVIEW_TRUE@	sview-popups.$(OBJEXT) sview-grid.$(OBJEXT) \
 @BUILD_SVIEW_TRUE@	sview-part_info.$(OBJEXT) \
@@ -154,7 +157,8 @@ am__sview_SOURCES_DIST = sview.c popups.c grid.c part_info.c \
 @BUILD_SVIEW_TRUE@	sview-common.$(OBJEXT) \
 @BUILD_SVIEW_TRUE@	sview-config_info.$(OBJEXT) \
 @BUILD_SVIEW_TRUE@	sview-defaults.$(OBJEXT) \
-@BUILD_SVIEW_TRUE@	sview-gthread_helper.$(OBJEXT)
+@BUILD_SVIEW_TRUE@	sview-gthread_helper.$(OBJEXT) \
+@BUILD_SVIEW_TRUE@	sview-bb_info.$(OBJEXT)
 am__EXTRA_sview_SOURCES_DIST = sview.h sview.c popups.c grid.c \
 	part_info.c job_info.c block_info.c front_end_info.c \
 	node_info.c resv_info.c gthread_helper.c submit_info.c \
@@ -213,7 +217,7 @@ am__can_run_installinfo = \
     n|no|NO) false;; \
     *) (install-info --version) >/dev/null 2>&1;; \
   esac
-am__noinst_HEADERS_DIST = sview.h
+am__noinst_HEADERS_DIST = sview.h gthread_helper.h
 HEADERS = $(noinst_HEADERS)
 am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP)
 # Read a list of newline-separated strings from the standard input,
@@ -276,6 +280,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -325,8 +331,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -345,6 +355,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -388,6 +401,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -411,6 +425,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -475,11 +490,11 @@ top_srcdir = @top_srcdir@
 AUTOMAKE_OPTIONS = foreign
 AM_CPPFLAGS = -I$(top_srcdir) $(BG_INCLUDES)
 @BUILD_SVIEW_TRUE@sview_LDADD = $(top_builddir)/src/db_api/libslurmdb.o $(DL_LIBS)
-@BUILD_SVIEW_TRUE@noinst_HEADERS = sview.h
+@BUILD_SVIEW_TRUE@noinst_HEADERS = sview.h gthread_helper.h
 @BUILD_SVIEW_TRUE@sview_SOURCES = sview.c popups.c grid.c part_info.c job_info.c \
 @BUILD_SVIEW_TRUE@	block_info.c front_end_info.c node_info.c resv_info.c \
 @BUILD_SVIEW_TRUE@	submit_info.c admin_info.c common.c \
-@BUILD_SVIEW_TRUE@	config_info.c defaults.c gthread_helper.c
+@BUILD_SVIEW_TRUE@	config_info.c defaults.c gthread_helper.c bb_info.c
 
 @BUILD_SVIEW_TRUE@sview_LDFLAGS = -export-dynamic $(CMD_LDFLAGS) $(BG_LDFLAGS) $(GTK_LIBS)
 @BUILD_SVIEW_TRUE@sview_CFLAGS = $(GTK_CFLAGS)
@@ -582,6 +597,7 @@ distclean-compile:
 	-rm -f *.tab.c
 
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-admin_info.Po@am__quote@
+@AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-bb_info.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-block_info.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-common.Po@am__quote@
 @AMDEP_TRUE@@am__include@ @am__quote@./$(DEPDIR)/sview-config_info.Po@am__quote@
@@ -828,6 +844,20 @@ sview-gthread_helper.obj: gthread_helper.c
 @AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
 @am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -c -o sview-gthread_helper.obj `if test -f 'gthread_helper.c'; then $(CYGPATH_W) 'gthread_helper.c'; else $(CYGPATH_W) '$(srcdir)/gthread_helper.c'; fi`
 
+sview-bb_info.o: bb_info.c
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -MT sview-bb_info.o -MD -MP -MF $(DEPDIR)/sview-bb_info.Tpo -c -o sview-bb_info.o `test -f 'bb_info.c' || echo '$(srcdir)/'`bb_info.c
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/sview-bb_info.Tpo $(DEPDIR)/sview-bb_info.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='bb_info.c' object='sview-bb_info.o' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -c -o sview-bb_info.o `test -f 'bb_info.c' || echo '$(srcdir)/'`bb_info.c
+
+sview-bb_info.obj: bb_info.c
+@am__fastdepCC_TRUE@	$(AM_V_CC)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -MT sview-bb_info.obj -MD -MP -MF $(DEPDIR)/sview-bb_info.Tpo -c -o sview-bb_info.obj `if test -f 'bb_info.c'; then $(CYGPATH_W) 'bb_info.c'; else $(CYGPATH_W) '$(srcdir)/bb_info.c'; fi`
+@am__fastdepCC_TRUE@	$(AM_V_at)$(am__mv) $(DEPDIR)/sview-bb_info.Tpo $(DEPDIR)/sview-bb_info.Po
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	$(AM_V_CC)source='bb_info.c' object='sview-bb_info.obj' libtool=no @AMDEPBACKSLASH@
+@AMDEP_TRUE@@am__fastdepCC_FALSE@	DEPDIR=$(DEPDIR) $(CCDEPMODE) $(depcomp) @AMDEPBACKSLASH@
+@am__fastdepCC_FALSE@	$(AM_V_CC@am__nodep@)$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(sview_CFLAGS) $(CFLAGS) -c -o sview-bb_info.obj `if test -f 'bb_info.c'; then $(CYGPATH_W) 'bb_info.c'; else $(CYGPATH_W) '$(srcdir)/bb_info.c'; fi`
+
 mostlyclean-libtool:
 	-rm -f *.lo
 
diff --git a/src/sview/bb_info.c b/src/sview/bb_info.c
new file mode 100644
index 000000000..31ab29596
--- /dev/null
+++ b/src/sview/bb_info.c
@@ -0,0 +1,1037 @@
+/*****************************************************************************\
+ *  bb_info.c - Functions related to Burst Buffer display mode of sview.
+ *****************************************************************************
+ *  Copyright (C) 2014-2015 SchedMD LLC.
+ *  Written by Nathan Yee <nyee32@schedmd.com>
+ *
+ *  This file is part of SLURM, a resource management program.
+ *  For details, see <http://slurm.schedmd.com/>.
+ *  Please also read the included file: DISCLAIMER.
+ *
+ *  SLURM is free software; you can redistribute it and/or modify it under
+ *  the terms of the GNU General Public License as published by the Free
+ *  Software Foundation; either version 2 of the License, or (at your option)
+ *  any later version.
+ *
+ *  SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+ *  WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+ *  FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+ *  details.
+ *
+ *  You should have received a copy of the GNU General Public License along
+ *  with SLURM; if not, write to the Free Software Foundation, Inc.,
+ *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+\*****************************************************************************/
+
+#include "src/common/uid.h"
+#include "src/sview/sview.h"
+#include "src/common/parse_time.h"
+#include "src/common/proc_args.h"
+
+#define _DEBUG 0
+
+/* Collection of data for printing reports. Like data is combined here */
+typedef struct {
+	char *		bb_name;
+	burst_buffer_resv_t *bb_ptr;
+	int		color_inx;
+	GtkTreeIter	iter_ptr;
+	bool		iter_set;
+	char *		plugin;
+	int		pos;
+} sview_bb_info_t;
+
+enum {
+	EDIT_REMOVE = 1,
+	EDIT_EDIT
+};
+
+/* These need to be in alpha order (except POS and CNT) */
+enum {
+	SORTID_POS = POS_LOC,
+	SORTID_ACCOUNT,
+	SORTID_COLOR,
+	SORTID_COLOR_INX,
+	SORTID_CREATE_TIME,
+	SORTID_GRES,
+	SORTID_NAME,
+	SORTID_PARTITION,
+	SORTID_PLUGIN,
+	SORTID_QOS,
+	SORTID_SIZE,
+	SORTID_STATE,
+	SORTID_UPDATED,
+	SORTID_USERID,
+	SORTID_CNT
+};
+
+/* extra field here is for choosing the type of edit that will
+ * take place.  If you choose EDIT_MODEL (means only display a set of
+ * known options) create it in function create_model_*.
+ */
+
+/*these are the settings to apply for the user
+ * on the first startup after a fresh slurm install.
+ * should be a const probably */
+static char *_initial_page_opts = "Plugin,Name/JobID,Gres,Size,State,StateTime,UserID";
+
+static display_data_t display_data_bb[] = {
+	{G_TYPE_INT, SORTID_POS, NULL, FALSE, EDIT_NONE,
+	 refresh_bb, create_model_bb, admin_edit_bb},
+	{G_TYPE_STRING, SORTID_PLUGIN, "Plugin", FALSE, EDIT_NONE,
+	 refresh_bb, create_model_bb, admin_edit_bb},
+	{G_TYPE_STRING, SORTID_NAME, "Name/JobID", FALSE, EDIT_NONE,
+	 refresh_bb, create_model_bb, admin_edit_bb},
+	{G_TYPE_STRING, SORTID_COLOR, NULL, TRUE, EDIT_COLOR,
+	 refresh_bb, create_model_bb, admin_edit_bb},
+	{G_TYPE_INT, SORTID_COLOR_INX, NULL, FALSE, EDIT_NONE,
+	 refresh_bb, create_model_bb, admin_edit_bb},
+	{G_TYPE_STRING, SORTID_ACCOUNT, "Account", FALSE, EDIT_NONE,
+	 refresh_bb, create_model_bb, admin_edit_bb},
+	{G_TYPE_STRING, SORTID_CREATE_TIME, "CreateTime", FALSE, EDIT_NONE,
+	 refresh_bb, create_model_bb, admin_edit_bb},
+	{G_TYPE_STRING, SORTID_GRES, "Gres", FALSE, EDIT_NONE,
+	 refresh_bb, create_model_bb, admin_edit_bb},
+	{G_TYPE_STRING, SORTID_PARTITION, "Partition", FALSE, EDIT_NONE,
+	 refresh_bb, create_model_bb, admin_edit_bb},
+	{G_TYPE_STRING, SORTID_QOS, "QOS", FALSE, EDIT_NONE,
+	 refresh_bb, create_model_bb, admin_edit_bb},
+	{G_TYPE_STRING, SORTID_SIZE, "Size", FALSE, EDIT_NONE,
+	 refresh_bb, create_model_bb, admin_edit_bb},
+	{G_TYPE_STRING, SORTID_STATE, "State", FALSE, EDIT_NONE,
+	 refresh_bb, create_model_bb, admin_edit_bb},
+	{G_TYPE_INT, SORTID_UPDATED, NULL, FALSE, EDIT_NONE, refresh_bb,
+	 create_model_bb, admin_edit_bb},
+	{G_TYPE_STRING, SORTID_USERID, "UserID", FALSE, EDIT_NONE,
+	 refresh_bb, create_model_bb, admin_edit_bb},
+	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
+};
+
+/*Burst buffer options list*/
+static display_data_t options_data_bb[] = {
+	{G_TYPE_INT, SORTID_POS, NULL, FALSE, EDIT_NONE},
+	{G_TYPE_STRING, INFO_PAGE, "Full Info", TRUE, BB_PAGE},
+	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
+};
+
+static display_data_t *local_display_data = NULL;
+//Variable for Admin edit if needed
+/* static char *got_edit_signal = NULL; */
+static GtkTreeModel *last_model = NULL;
+
+static void _get_size_str(char *buf, size_t buf_size, uint64_t num);
+
+/*Functions for admin edit*/
+/* static void _admin_bb(GtkTreeModel *model, GtkTreeIter *iter, char *type); */
+/* static void _process_each_bb(GtkTreeModel *model, GtkTreePath *path, */
+/*			       GtkTreeIter*iter, gpointer userdata); */
+
+/* static void _set_active_combo_bb(GtkComboBox *combo, */
+/*				 GtkTreeModel *model, GtkTreeIter *iter, */
+/*				 int type) */
+/* { */
+/* NOP */
+/* Function for admin edit */
+/* } */
+
+// Function for admin edit
+//don't free this char
+/* static const char *_set_bb_msg(burst_buffer_info_msg_t *bb_msg, */
+/*				 const char *new_text, */
+/*				 int column) */
+/* { */
+/* NOP */
+/* Function for admin edit */
+/* } */
+
+/* Free the burst buffer information */
+static void _bb_info_free(sview_bb_info_t *sview_bb_info)
+{
+	if (sview_bb_info) {
+		xfree(sview_bb_info->bb_name);
+		xfree(sview_bb_info->plugin);
+	}
+}
+
+/* Free the Burst Buffer information list */
+static void _bb_info_list_del(void *object)
+{
+	sview_bb_info_t *sview_bb_info = (sview_bb_info_t *)object;
+
+	if (sview_bb_info) {
+		_bb_info_free(sview_bb_info);
+		xfree(sview_bb_info);
+	}
+}
+
+/* static void _admin_edit_combo_box_bb(GtkComboBox *combo, */
+/*				     resv_desc_msg_t *resv_msg) */
+/* { */
+/* NOP */
+/* Function for admin edit */
+/* } */
+
+
+
+/* static gboolean _admin_focus_out_bb(GtkEntry *entry, */
+/*				      GdkEventFocus *event, */
+/*				      resv_desc_msg_t *resv_msg) */
+/* { */
+/* NOP */
+/* Function for admin edit */
+/* } */
+
+/* static GtkWidget *_admin_full_edit_bb(resv_desc_msg_t *resv_msg, */
+/*					GtkTreeModel *model, GtkTreeIter *iter) */
+/* { */
+/* NOP */
+/* Function for admin edit */
+/* } */
+
+/* Function creates the record menu when you double click on a record */
+static void _layout_bb_record(GtkTreeView *treeview,
+			      sview_bb_info_t *sview_bb_info, int update)
+{
+	GtkTreeIter iter;
+	char time_buf[20], *tmp_gres = NULL, tmp_user_id[60], tmp_size[20];
+	char bb_name_id[32];
+	char *tmp_state, *tmp_user_name, *sep;
+	burst_buffer_resv_t *bb_ptr = sview_bb_info->bb_ptr;
+	GtkTreeStore *treestore;
+	int i;
+
+	treestore = GTK_TREE_STORE(gtk_tree_view_get_model(treeview));
+
+	if (bb_ptr->name) {
+		strncpy(bb_name_id, bb_ptr->name, sizeof(bb_name_id));
+	} else if (bb_ptr->array_task_id == NO_VAL) {
+		convert_num_unit(bb_ptr->job_id, bb_name_id,
+				 sizeof(bb_name_id),
+				 UNIT_NONE, working_sview_config.convert_flags);
+	} else {
+		snprintf(bb_name_id, sizeof(bb_name_id),
+			 "%u_%u(%u)",
+			 bb_ptr->array_job_id,
+			 bb_ptr->array_task_id,
+			 bb_ptr->job_id);
+	}
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_bb,
+						 SORTID_NAME),
+				   bb_name_id);
+
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_bb,
+						 SORTID_PLUGIN),
+				   sview_bb_info->plugin);
+
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_bb,
+						 SORTID_ACCOUNT),
+				   bb_ptr->account);
+
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_bb,
+						 SORTID_PARTITION),
+				   bb_ptr->partition);
+
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_bb,
+						 SORTID_QOS),
+				   bb_ptr->qos);
+
+	tmp_state = bb_state_string(bb_ptr->state);
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_bb,
+						 SORTID_STATE),
+				   tmp_state);
+
+	_get_size_str(tmp_size, sizeof(tmp_size), bb_ptr->size);
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_bb,
+						 SORTID_SIZE),
+				   tmp_size);
+
+	sep = "";
+	for (i = 0; i < bb_ptr->gres_cnt; i++) {
+		xstrfmtcat(tmp_gres, "%s%s:%"PRIu64"", sep,
+			   bb_ptr->gres_ptr->name, bb_ptr->gres_ptr->used_cnt);
+		sep = ",";
+	}
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_bb,
+						 SORTID_GRES),
+				   tmp_gres);
+	xfree(tmp_gres);
+
+	if (bb_ptr->create_time) {
+		slurm_make_time_str((time_t *)&bb_ptr->create_time, time_buf,
+				    sizeof(time_buf));
+	} else {
+		time_t now = time(NULL);
+		slurm_make_time_str(&now, time_buf, sizeof(time_buf));
+	}
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_bb,
+						 SORTID_CREATE_TIME),
+				   time_buf);
+
+	tmp_user_name = uid_to_string(bb_ptr->user_id);
+	snprintf(tmp_user_id, sizeof(tmp_user_id), "%s(%u)", tmp_user_name,
+		 bb_ptr->user_id);
+	xfree(tmp_user_name);
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_bb,
+						 SORTID_USERID),
+				   tmp_user_id);
+}
+
+/* Reformat a numeric value with an appropriate suffix.
+ * The input units are GB */
+static void _get_size_str(char *buf, size_t buf_size, uint64_t num)
+{
+	uint64_t tmp64;
+
+	if ((num == NO_VAL64) || (num == INFINITE64)) {
+		snprintf(buf, buf_size, "INFINITE");
+	} else if (num == 0) {
+		snprintf(buf, buf_size, "0GB");
+	} else if ((num % ((uint64_t) 1024 * 1024 * 1024 * 1024 * 1024)) == 0) {
+		tmp64 = num / ((uint64_t) 1024 * 1024 * 1024 * 1024 * 1024);
+		snprintf(buf, buf_size, "%"PRIu64"PB", tmp64);
+	} else if ((num % ((uint64_t) 1024 * 1024 * 1024 * 1024)) == 0) {
+		tmp64 = num / ((uint64_t) 1024 * 1024 * 1024 * 1024);
+		snprintf(buf, buf_size, "%"PRIu64"TB", tmp64);
+	} else if ((num % ((uint64_t) 1024 * 1024 * 1024)) == 0) {
+		tmp64 = num / ((uint64_t) 1024 * 1024 * 1024);
+		snprintf(buf, buf_size, "%"PRIu64"GB", tmp64);
+	} else if ((num % ((uint64_t) 1024 * 1024)) == 0) {
+		tmp64 = num / ((uint64_t) 1024 * 1024);
+		snprintf(buf, buf_size, "%"PRIu64"MB", tmp64);
+	} else if ((num % 1024) == 0) {
+		tmp64 = num / 1024;
+		snprintf(buf, buf_size, "%"PRIu64"KB", tmp64);
+	} else {
+		tmp64 = num;
+		snprintf(buf, buf_size, "%"PRIu64"B", tmp64);
+	}
+}
+
+
+/* updates the burst buffer record on sview */
+static void _update_bb_record(sview_bb_info_t *sview_bb_info_ptr,
+			      GtkTreeStore *treestore)
+{
+	char tmp_create_time[40];
+	char tmp_size[20], tmp_user_id[60], bb_name_id[32];
+	char *tmp_gres = NULL, *tmp_state, *tmp_user_name, *sep;
+	burst_buffer_resv_t *bb_ptr = sview_bb_info_ptr->bb_ptr;
+	int i;
+
+	sep = "";
+	for (i = 0; i < bb_ptr->gres_cnt; i++) {
+		xstrfmtcat(tmp_gres, "%s%s:%"PRIu64"", sep,
+			   bb_ptr->gres_ptr->name, bb_ptr->gres_ptr->used_cnt);
+		sep = ",";
+	}
+
+	if (bb_ptr->name) {
+		strncpy(bb_name_id, bb_ptr->name, sizeof(bb_name_id));
+	} else if (bb_ptr->array_task_id == NO_VAL) {
+		convert_num_unit(bb_ptr->job_id, bb_name_id,
+				 sizeof(bb_name_id),
+				 UNIT_NONE, working_sview_config.convert_flags);
+	} else {
+		snprintf(bb_name_id, sizeof(bb_name_id),
+			 "%u_%u(%u)",
+			 bb_ptr->array_job_id,
+			 bb_ptr->array_task_id,
+			 bb_ptr->job_id);
+	}
+
+	if (bb_ptr->create_time) {
+		slurm_make_time_str((time_t *)&bb_ptr->create_time,
+				    tmp_create_time, sizeof(tmp_create_time));
+	} else {
+		time_t now = time(NULL);
+		slurm_make_time_str(&now, tmp_create_time,
+				    sizeof(tmp_create_time));
+	}
+
+	_get_size_str(tmp_size, sizeof(tmp_size), bb_ptr->size);
+
+	tmp_state = bb_state_string(bb_ptr->state);
+
+	tmp_user_name = uid_to_string(bb_ptr->user_id);
+	snprintf(tmp_user_id, sizeof(tmp_user_id), "%s(%u)", tmp_user_name,
+		 bb_ptr->user_id);
+	xfree(tmp_user_name);
+
+	/* Combining these records provides a slight performance improvement */
+	gtk_tree_store_set(treestore, &sview_bb_info_ptr->iter_ptr,
+			   SORTID_COLOR,
+			   sview_colors[sview_bb_info_ptr->color_inx],
+			   SORTID_COLOR_INX,     sview_bb_info_ptr->color_inx,
+			   SORTID_PLUGIN,        sview_bb_info_ptr->plugin,
+			   SORTID_ACCOUNT,       bb_ptr->account,
+			   SORTID_CREATE_TIME,   tmp_create_time,
+			   SORTID_GRES,          tmp_gres,
+			   SORTID_NAME,          bb_name_id,
+			   SORTID_PARTITION,     bb_ptr->partition,
+			   SORTID_QOS,           bb_ptr->qos,
+			   SORTID_SIZE,          tmp_size,
+			   SORTID_STATE,         tmp_state,
+			   SORTID_UPDATED,       1,
+			   SORTID_USERID,        tmp_user_id,
+			   -1);
+	xfree(tmp_gres);
+
+	return;
+}
+
+/* Append the given Burst Buffer record to the list */
+static void _append_bb_record(sview_bb_info_t *sview_bb_info_ptr,
+				GtkTreeStore *treestore)
+{
+	gtk_tree_store_append(treestore, &sview_bb_info_ptr->iter_ptr, NULL);
+	gtk_tree_store_set(treestore, &sview_bb_info_ptr->iter_ptr,
+			   SORTID_POS, sview_bb_info_ptr->pos, -1);
+	_update_bb_record(sview_bb_info_ptr, treestore);
+}
+
+/* Update the Burst Buffer information record */
+static void _update_info_bb(List info_list, GtkTreeView *tree_view)
+{
+	GtkTreeModel *model = gtk_tree_view_get_model(tree_view);
+	char *name = NULL;
+	ListIterator itr = NULL;
+	sview_bb_info_t *sview_bb_info = NULL;
+
+	set_for_update(model, SORTID_UPDATED);
+
+	itr = list_iterator_create(info_list);
+	while ((sview_bb_info = (sview_bb_info_t*) list_next(itr))) {
+		/* This means the tree_store changed (added new column
+		 * or something). */
+		if (last_model != model)
+			sview_bb_info->iter_set = false;
+
+		if (sview_bb_info->iter_set) {
+			gtk_tree_model_get(model, &sview_bb_info->iter_ptr,
+					   SORTID_NAME, &name, -1);
+			if (strcmp(name, sview_bb_info->bb_name)) {
+				/* Bad pointer */
+				sview_bb_info->iter_set = false;
+				//g_print("bad resv iter pointer\n");
+			}
+			g_free(name);
+		}
+		if (sview_bb_info->iter_set) {
+			_update_bb_record(sview_bb_info,
+					    GTK_TREE_STORE(model));
+		} else {
+			_append_bb_record(sview_bb_info,
+					    GTK_TREE_STORE(model));
+			sview_bb_info->iter_set = true;
+		}
+	}
+	list_iterator_destroy(itr);
+
+	/* remove all old bb */
+	remove_old(model, SORTID_UPDATED);
+	last_model = model;
+}
+
+static List _create_bb_info_list(burst_buffer_info_msg_t *bb_info_ptr)
+{
+	static List info_list = NULL;
+	List last_list = NULL;
+	ListIterator last_list_itr = NULL;
+	int i, j, pos = 0;
+	static burst_buffer_info_msg_t *last_bb_info_ptr = NULL;
+	sview_bb_info_t *sview_bb_info_ptr = NULL;
+	burst_buffer_info_t *bb_ptr;
+	burst_buffer_resv_t *bb_resv_ptr = NULL;
+	char bb_name_id[32] = "";
+
+	if (info_list && (bb_info_ptr == last_bb_info_ptr))
+		return info_list;
+
+	last_bb_info_ptr = bb_info_ptr;
+	if (info_list)
+		last_list = info_list;
+	info_list = list_create(_bb_info_list_del);
+
+	for (i = 0, bb_ptr = bb_info_ptr->burst_buffer_array;
+	     i < bb_info_ptr->record_count; i++, bb_ptr++) {
+
+		for (j = 0, bb_resv_ptr = bb_ptr->burst_buffer_resv_ptr;
+		     j < bb_ptr->buffer_count; j++, bb_resv_ptr++) {
+
+			/* Find any existing record for this burst buffer */
+			if (last_list) {
+				last_list_itr = list_iterator_create(last_list);
+				while ((sview_bb_info_ptr =
+					list_next(last_list_itr))) {
+					if (bb_resv_ptr->job_id &&
+					    (bb_resv_ptr->job_id != 
+					     sview_bb_info_ptr->bb_ptr->job_id))
+						continue;
+					if (bb_resv_ptr->name &&
+					    xstrcmp(sview_bb_info_ptr->bb_name,
+						    bb_resv_ptr->name))
+						continue;
+					if (xstrcmp(sview_bb_info_ptr->plugin,
+						    bb_ptr->name))
+						continue;
+					list_remove(last_list_itr);
+					_bb_info_free(sview_bb_info_ptr);
+					break;
+				}
+				list_iterator_destroy(last_list_itr);
+			} else {
+				sview_bb_info_ptr = NULL;
+			}
+
+			if (bb_resv_ptr->name) {
+				strncpy(bb_name_id, bb_resv_ptr->name,
+					sizeof(bb_name_id));
+			} else if (bb_resv_ptr->array_task_id == NO_VAL) {
+				convert_num_unit(bb_resv_ptr->job_id,
+						 bb_name_id,
+						 sizeof(bb_name_id),
+						 UNIT_NONE,
+						 working_sview_config.
+						 convert_flags);
+			} else {
+				snprintf(bb_name_id, sizeof(bb_name_id),
+					 "%u_%u(%u)",
+					 bb_resv_ptr->array_job_id,
+					 bb_resv_ptr->array_task_id,
+					 bb_resv_ptr->job_id);
+			}
+
+			if (!sview_bb_info_ptr) {	/* Need new record */
+				sview_bb_info_ptr =
+					xmalloc(sizeof(sview_bb_info_t));
+			}
+			sview_bb_info_ptr->bb_ptr = bb_resv_ptr;
+			sview_bb_info_ptr->bb_name = xstrdup(bb_name_id);
+			strcpy(bb_name_id, "");	/* Clear bb_name_id */
+			sview_bb_info_ptr->color_inx = pos % sview_colors_cnt;
+			sview_bb_info_ptr->plugin = xstrdup(bb_ptr->name);
+			sview_bb_info_ptr->pos = pos++;
+			list_append(info_list, sview_bb_info_ptr);
+		}
+	}
+
+	FREE_NULL_LIST(last_list);
+	return info_list;
+}
+
+static void _display_info_bb(List info_list, popup_info_t *popup_win)
+{
+	specific_info_t *spec_info = popup_win->spec_info;
+	char *name = (char *)spec_info->search_info->gchar_data;
+	//int found = 0;
+	burst_buffer_resv_t *bb_ptr = NULL;
+	GtkTreeView *treeview = NULL;
+	ListIterator itr = NULL;
+	sview_bb_info_t *sview_bb_info = NULL;
+	int update = 0;
+	char bb_name_id[32];
+
+	if (!spec_info->search_info->gchar_data) {
+		//info = xstrdup("No pointer given!");
+		goto finished;
+	}
+
+	if (!spec_info->display_widget) {
+		treeview = create_treeview_2cols_attach_to_table(
+			popup_win->table);
+		spec_info->display_widget =
+			gtk_widget_ref(GTK_WIDGET(treeview));
+	} else {
+		treeview = GTK_TREE_VIEW(spec_info->display_widget);
+		update = 1;
+	}
+
+	itr = list_iterator_create(info_list);
+	while ((sview_bb_info = (sview_bb_info_t*) list_next(itr))) {
+		bb_ptr = sview_bb_info->bb_ptr;
+
+		if (bb_ptr->name) {
+			strcpy(bb_name_id, bb_ptr->name);
+		} else if (bb_ptr->array_task_id == NO_VAL) {
+			convert_num_unit(bb_ptr->job_id,
+					 bb_name_id,
+					 sizeof(bb_name_id),
+					 UNIT_NONE,
+					 working_sview_config.convert_flags);
+		} else {
+			snprintf(bb_name_id, sizeof(bb_name_id),
+				 "%u_%u(%u)",
+				 bb_ptr->array_job_id,
+				 bb_ptr->array_task_id,
+				 bb_ptr->job_id);
+		}
+
+		if (!strcmp(bb_name_id, name)) {
+			_layout_bb_record(treeview, sview_bb_info, update);
+			break;
+		}
+	}
+	list_iterator_destroy(itr);
+	gtk_widget_show(spec_info->display_widget);
+
+finished:
+
+	return;
+}
+
+/* extern GtkWidget *create_bb_entry(resv_desc_msg_t *resv_msg, */
+/*				    GtkTreeModel *model, GtkTreeIter *iter) */
+/* { */
+/* NOP */
+/* Function to add new burst buffer */
+/* Admin edit function */
+/* } */
+
+/* Refresh the Burst Buffer information */
+extern void refresh_bb(GtkAction *action, gpointer user_data)
+{
+	popup_info_t *popup_win = (popup_info_t *)user_data;
+	xassert(popup_win);
+	xassert(popup_win->spec_info);
+	xassert(popup_win->spec_info->title);
+	popup_win->force_refresh = 1;
+	specific_info_bb(popup_win);
+}
+
+/* Get the Burst buffer information */
+extern int get_new_info_bb(burst_buffer_info_msg_t **info_ptr, int force)
+{
+	static burst_buffer_info_msg_t *new_bb_ptr = NULL;
+	int error_code = SLURM_NO_CHANGE_IN_DATA;
+	time_t now = time(NULL);
+	static time_t last;
+	static bool changed = 0;
+
+	if (g_bb_info_ptr && !force
+	    && ((now - last) < working_sview_config.refresh_delay)) {
+		if (*info_ptr != g_bb_info_ptr)
+			error_code = SLURM_SUCCESS;
+		*info_ptr = g_bb_info_ptr;
+		if (changed)
+			error_code = SLURM_SUCCESS;
+		goto end_it;
+	}
+	last = now;
+	if (g_bb_info_ptr) {
+		error_code = slurm_load_burst_buffer_info(&new_bb_ptr);
+		if (error_code == SLURM_SUCCESS) {
+			slurm_free_burst_buffer_info_msg(g_bb_info_ptr);
+			changed = 1;
+		} else if (slurm_get_errno() == SLURM_NO_CHANGE_IN_DATA) {
+			error_code = SLURM_NO_CHANGE_IN_DATA;
+			new_bb_ptr = g_bb_info_ptr;
+			changed = 0;
+		}
+	} else {
+		new_bb_ptr = NULL;
+		error_code = slurm_load_burst_buffer_info(&new_bb_ptr);
+		changed = 1;
+	}
+
+	g_bb_info_ptr = new_bb_ptr;
+
+	if (g_bb_info_ptr && (*info_ptr != g_bb_info_ptr))
+		error_code = SLURM_SUCCESS;
+
+	*info_ptr = g_bb_info_ptr;
+end_it:
+	return error_code;
+}
+
+/* Create the model with types with known values */
+extern GtkListStore *create_model_bb(int type)
+{
+/* Since none of the values can be edited this is left blank */
+/* NOP */
+	return NULL;
+}
+
+/* If Burst buffer wants to be edited it goes here */
+extern void admin_edit_bb(GtkCellRendererText *cell,
+			    const char *path_string,
+			    const char *new_text,
+			    gpointer data)
+{
+	/* NOP */
+}
+
+extern void get_info_bb(GtkTable *table, display_data_t *display_data)
+{
+	int error_code = SLURM_SUCCESS;
+	List info_list = NULL;
+	static int view = -1;
+	static burst_buffer_info_msg_t *bb_info_ptr = NULL;
+	char error_char[100];
+	GtkWidget *label = NULL;
+	GtkTreeView *tree_view = NULL;
+	static GtkWidget *display_widget = NULL;
+	GtkTreePath *path = NULL;
+	static bool set_opts = FALSE;
+
+	if (!set_opts) {
+		set_page_opts(BB_PAGE, display_data_bb,
+			      SORTID_CNT, _initial_page_opts);
+	}
+	set_opts = TRUE;
+
+	/* reset */
+	if (!table && !display_data) {
+		if (display_widget)
+			gtk_widget_destroy(display_widget);
+		display_widget = NULL;
+		bb_info_ptr = NULL;
+		goto reset_curs;
+	}
+
+	if (display_data)
+		local_display_data = display_data;
+	if (!table) {
+		display_data_bb->set_menu = local_display_data->set_menu;
+		goto reset_curs;
+	}
+	if (display_widget && toggled) {
+		gtk_widget_destroy(display_widget);
+		display_widget = NULL;
+		goto display_it;
+	}
+
+	error_code = get_new_info_bb(&bb_info_ptr, force_refresh);
+
+	if (error_code == SLURM_NO_CHANGE_IN_DATA) {
+	} else if (error_code != SLURM_SUCCESS) {
+		if (view == ERROR_VIEW)
+			goto end_it;
+		if (display_widget)
+			gtk_widget_destroy(display_widget);
+		view = ERROR_VIEW;
+		sprintf(error_char, "slurm_load_reservations: %s",
+			slurm_strerror(slurm_get_errno()));
+		label = gtk_label_new(error_char);
+		gtk_table_attach_defaults(table, label, 0, 1, 0, 1);
+		gtk_widget_show(label);
+		display_widget = gtk_widget_ref(GTK_WIDGET(label));
+		goto end_it;
+	}
+
+display_it:
+	info_list = _create_bb_info_list(bb_info_ptr);
+
+	if (!info_list) {
+		goto reset_curs;
+	}
+
+	/* set up the grid */
+	if (display_widget && GTK_IS_TREE_VIEW(display_widget)
+	    && gtk_tree_selection_count_selected_rows(
+		    gtk_tree_view_get_selection(
+			    GTK_TREE_VIEW(display_widget)))) {
+		GtkTreeViewColumn *focus_column = NULL;
+		/* highlight the correct nodes from the last selection */
+		gtk_tree_view_get_cursor(GTK_TREE_VIEW(display_widget),
+					 &path, &focus_column);
+	}
+
+	change_grid_color(grid_button_list, -1, -1,
+			  MAKE_WHITE, true, 0);
+
+	if (view == ERROR_VIEW && display_widget) {
+		gtk_widget_destroy(display_widget);
+		display_widget = NULL;
+	}
+	if (!display_widget) {
+		tree_view = create_treeview(local_display_data,
+					    &grid_button_list);
+		gtk_tree_selection_set_mode(
+			gtk_tree_view_get_selection(tree_view),
+			GTK_SELECTION_MULTIPLE);
+		display_widget = gtk_widget_ref(GTK_WIDGET(tree_view));
+		gtk_table_attach_defaults(table,
+					  GTK_WIDGET(tree_view),
+					  0, 1, 0, 1);
+		/* since this function sets the model of the tree_view
+		   to the treestore we don't really care about
+		   the return value */
+		create_treestore(tree_view, display_data_bb,
+				 SORTID_CNT, SORTID_NAME, SORTID_COLOR);
+
+	}
+
+	view = INFO_VIEW;
+	_update_info_bb(info_list, GTK_TREE_VIEW(display_widget));
+end_it:
+	toggled = FALSE;
+	force_refresh = FALSE;
+reset_curs:
+	if (main_window && main_window->window)
+		gdk_window_set_cursor(main_window->window, NULL);
+
+	return;
+}
+
+/* Function for full information about a Burst Buffer */
+extern void specific_info_bb(popup_info_t *popup_win)
+{
+	int bb_error_code = SLURM_SUCCESS;
+	static burst_buffer_info_msg_t *bb_info_ptr = NULL;
+	specific_info_t *spec_info = popup_win->spec_info;
+	char error_char[100];
+	GtkWidget *label = NULL;
+	GtkTreeView *tree_view = NULL;
+	List bb_list = NULL;
+	List send_bb_list = NULL;
+	sview_bb_info_t *sview_bb_info_ptr = NULL;
+	int i=-1;
+	ListIterator itr = NULL;
+
+	if (!spec_info->display_widget) {
+		setup_popup_info(popup_win, display_data_bb, SORTID_CNT);
+	}
+
+	if (spec_info->display_widget && popup_win->toggled) {
+		gtk_widget_destroy(spec_info->display_widget);
+		spec_info->display_widget = NULL;
+		goto display_it;
+	}
+
+	if ((bb_error_code =
+	     get_new_info_bb(&bb_info_ptr, popup_win->force_refresh))
+	    == SLURM_NO_CHANGE_IN_DATA) {
+		if (!spec_info->display_widget || spec_info->view == ERROR_VIEW)
+			goto display_it;
+	} else if (bb_error_code != SLURM_SUCCESS) {
+		if (spec_info->view == ERROR_VIEW)
+			goto end_it;
+		spec_info->view = ERROR_VIEW;
+		if (spec_info->display_widget)
+			gtk_widget_destroy(spec_info->display_widget);
+		sprintf(error_char, "get_new_info_bb: %s",
+			slurm_strerror(slurm_get_errno()));
+		label = gtk_label_new(error_char);
+		gtk_table_attach_defaults(popup_win->table,
+					  label,
+					  0, 1, 0, 1);
+		gtk_widget_show(label);
+		spec_info->display_widget = gtk_widget_ref(label);
+		goto end_it;
+	}
+
+display_it:
+
+	bb_list = _create_bb_info_list(bb_info_ptr);
+
+	if (!bb_list)
+		return;
+
+	if (spec_info->view == ERROR_VIEW && spec_info->display_widget) {
+		gtk_widget_destroy(spec_info->display_widget);
+		spec_info->display_widget = NULL;
+	}
+	if (spec_info->type != INFO_PAGE && !spec_info->display_widget) {
+		tree_view = create_treeview(local_display_data,
+					    &popup_win->grid_button_list);
+		gtk_tree_selection_set_mode(
+			gtk_tree_view_get_selection(tree_view),
+			GTK_SELECTION_MULTIPLE);
+		spec_info->display_widget =
+			gtk_widget_ref(GTK_WIDGET(tree_view));
+		gtk_table_attach_defaults(popup_win->table,
+					  GTK_WIDGET(tree_view),
+					  0, 1, 0, 1);
+		/* since this function sets the model of the tree_view
+		 * to the treestore we don't really care about
+		 * the return value */
+		create_treestore(tree_view, popup_win->display_data,
+				 SORTID_CNT, SORTID_NAME, SORTID_COLOR);
+	}
+
+	setup_popup_grid_list(popup_win);
+
+	spec_info->view = INFO_VIEW;
+	if (spec_info->type == INFO_PAGE) {
+		_display_info_bb(bb_list, popup_win);
+		goto end_it;
+	}
+
+	/* just linking to another list, don't free the inside, just the list */
+	send_bb_list = list_create(NULL);
+	itr = list_iterator_create(bb_list);
+	i = -1;
+	/* Set up additional menu options(ie the right click menu stuff) */
+	while ((sview_bb_info_ptr = list_next(itr))) {
+		i++;
+		/* Since we will not use any of these pages we will */
+		/* leave them blank */
+		switch(spec_info->type) {
+		case PART_PAGE:
+		case BLOCK_PAGE:
+		case NODE_PAGE:
+		case JOB_PAGE:
+		case RESV_PAGE:
+		default:
+			g_print("Unknown type %d\n", spec_info->type);
+			continue;
+		}
+		list_push(send_bb_list, sview_bb_info_ptr);
+	}
+	list_iterator_destroy(itr);
+	post_setup_popup_grid_list(popup_win);
+
+	_update_info_bb(send_bb_list,
+			  GTK_TREE_VIEW(spec_info->display_widget));
+	FREE_NULL_LIST(send_bb_list);
+end_it:
+	popup_win->toggled = 0;
+	popup_win->force_refresh = 0;
+
+	return;
+}
+
+/* creates a popup window depending on what is clicked */
+extern void set_menus_bb(void *arg, void *arg2, GtkTreePath *path, int type)
+{
+	GtkTreeView *tree_view = (GtkTreeView *)arg;
+	popup_info_t *popup_win = (popup_info_t *)arg;
+	GtkMenu *menu = (GtkMenu *)arg2;
+
+	switch(type) {
+	case TAB_CLICKED:
+		make_fields_menu(NULL, menu, display_data_bb, SORTID_CNT);
+		break;
+	case ROW_CLICKED:
+		make_options_menu(tree_view, path, menu, options_data_bb);
+		break;
+	case ROW_LEFT_CLICKED:
+		/* Highlights the node in the node grid */
+		/* since we are not using this we will keep it empty */
+		/* NOP */
+		break;
+	case FULL_CLICKED:
+	{
+		GtkTreeModel *model = gtk_tree_view_get_model(tree_view);
+		GtkTreeIter iter;
+		if (!gtk_tree_model_get_iter(model, &iter, path)) {
+			g_error("error getting iter from model\n");
+			break;
+		}
+
+		popup_all_bb(model, &iter, INFO_PAGE);
+
+		break;
+	}
+	case POPUP_CLICKED:
+		make_fields_menu(popup_win, menu,
+				 popup_win->display_data, SORTID_CNT);
+		break;
+	default:
+		g_error("UNKNOWN type %d given to set_fields\n", type);
+	}
+}
+
+/* Function to setup popup windows for Burst Buffer */
+extern void popup_all_bb(GtkTreeModel *model, GtkTreeIter *iter, int id)
+{
+	char *name = NULL;
+	char title[100];
+	ListIterator itr = NULL;
+	popup_info_t *popup_win = NULL;
+	GError *error = NULL;
+
+	gtk_tree_model_get(model, iter, SORTID_NAME, &name, -1);
+
+	switch(id) {
+	case INFO_PAGE:
+		snprintf(title, 100, "Full info for Burst Buffer %s", name);
+		break;
+	default:
+		g_print("Burst Buffer got %d\n", id);
+	}
+
+	itr = list_iterator_create(popup_list);
+	while ((popup_win = list_next(itr))) {
+		if (popup_win->spec_info)
+			if (!strcmp(popup_win->spec_info->title, title)) {
+				break;
+			}
+	}
+	list_iterator_destroy(itr);
+
+	if (!popup_win) {
+		if (id == INFO_PAGE) {
+			popup_win = create_popup_info(id, BB_PAGE, title);
+		} else {
+			popup_win = create_popup_info(BB_PAGE, id, title);
+		}
+	} else {
+		g_free(name);
+		gtk_window_present(GTK_WINDOW(popup_win->popup));
+		return;
+	}
+
+	/* Pass the model and the structs from the iter so we can always get
+	   the current node_inx.
+	*/
+	popup_win->model = model;
+	popup_win->iter = *iter;
+
+	/* Sets up right click information */
+	switch(id) {
+	case JOB_PAGE:
+	case INFO_PAGE:
+		popup_win->spec_info->search_info->gchar_data = name;
+		specific_info_bb(popup_win);
+		break;
+	case BLOCK_PAGE:
+	case NODE_PAGE:
+	case PART_PAGE:
+	case SUBMIT_PAGE:
+		break;
+	default:
+		g_print("Burst Buffer got unknown type %d\n", id);
+	}
+	if (!sview_thread_new((gpointer)popup_thr, popup_win, FALSE, &error)) {
+		g_printerr ("Failed to create burst buffer popup thread: %s\n",
+			    error->message);
+		return;
+	}
+}
+
+/* static void _process_each_bb(GtkTreeModel *model, GtkTreePath *path, */
+/*			       GtkTreeIter*iter, gpointer userdata) */
+/* { */
+/* Function for admin edit */
+/* NOP */
+/* } */
+
+extern void select_admin_bb(GtkTreeModel *model, GtkTreeIter *iter,
+			      display_data_t *display_data,
+			      GtkTreeView *treeview)
+{
+/* NOP */
+/* Function for admin edit */
+}
+
+/* static void _admin_bb(GtkTreeModel *model, GtkTreeIter *iter, char *type) */
+/* { */
+/* NOP */
+/* Function for admin edit */
+/* } */
+
+extern void cluster_change_bb(void)
+{
+	get_info_bb(NULL, NULL);
+}
diff --git a/src/sview/block_info.c b/src/sview/block_info.c
index ca3bdb8fb..329bd1faa 100644
--- a/src/sview/block_info.c
+++ b/src/sview/block_info.c
@@ -236,10 +236,7 @@ static void _block_info_free(sview_block_info_t *block_ptr)
 		xfree(block_ptr->imagemloader);
 		xfree(block_ptr->imageramdisk);
 
-		if (block_ptr->job_list) {
-			list_destroy(block_ptr->job_list);
-			block_ptr->job_list = NULL;
-		}
+		FREE_NULL_LIST(block_ptr->job_list);
 
 		/* don't xfree(block_ptr->mp_inx);
 		   it isn't copied like the chars and is freed in the api
@@ -354,10 +351,11 @@ static void _layout_block_record(GtkTreeView *treeview,
 						   block_ptr->bg_node_use));
 	}
 	convert_num_unit((float)block_ptr->cnode_cnt, tmp_cnt, sizeof(tmp_cnt),
-			 UNIT_NONE);
+			 UNIT_NONE, working_sview_config.convert_flags);
 	if (cluster_flags & CLUSTER_FLAG_BGQ) {
 		convert_num_unit((float)block_ptr->cnode_err_cnt, tmp_cnt2,
-				 sizeof(tmp_cnt2), UNIT_NONE);
+				 sizeof(tmp_cnt2), UNIT_NONE,
+				 working_sview_config.convert_flags);
 		tmp_char = xstrdup_printf("%s/%s", tmp_cnt, tmp_cnt2);
 	} else
 		tmp_char = tmp_cnt;
@@ -388,10 +386,12 @@ static void _update_block_record(sview_block_info_t *block_ptr,
 	char *tmp_char = NULL, *tmp_char2 = NULL, *tmp_char3 = NULL;
 
 	convert_num_unit((float)block_ptr->cnode_cnt, cnode_cnt,
-			 sizeof(cnode_cnt), UNIT_NONE);
+			 sizeof(cnode_cnt), UNIT_NONE,
+			 working_sview_config.convert_flags);
 	if (cluster_flags & CLUSTER_FLAG_BGQ) {
 		convert_num_unit((float)block_ptr->cnode_err_cnt, cnode_cnt2,
-				 sizeof(cnode_cnt), UNIT_NONE);
+				 sizeof(cnode_cnt), UNIT_NONE,
+				 working_sview_config.convert_flags);
 		tmp_char3 = xstrdup_printf("%s/%s", cnode_cnt, cnode_cnt2);
 	} else
 		tmp_char3 = cnode_cnt;
@@ -705,7 +705,7 @@ static List _create_block_list(partition_info_msg_t *part_info_ptr,
 
 	if (last_list) {
 		list_iterator_destroy(last_list_itr);
-		list_destroy(last_list);
+		FREE_NULL_LIST(last_list);
 	}
 
 	return block_list;
@@ -1400,7 +1400,7 @@ display_it:
 
 	_update_info_block(send_block_list,
 			   GTK_TREE_VIEW(spec_info->display_widget));
-	list_destroy(send_block_list);
+	FREE_NULL_LIST(send_block_list);
 end_it:
 	popup_win->toggled = 0;
 	popup_win->force_refresh = 0;
diff --git a/src/sview/common.c b/src/sview/common.c
index a1b8608bc..fa7ed3cec 100644
--- a/src/sview/common.c
+++ b/src/sview/common.c
@@ -603,6 +603,9 @@ static void _selected_page(GtkMenuItem *menuitem, display_data_t *display_data)
 	case RESV_PAGE:
 		each.pfunc = &popup_all_resv;
 		break;
+	case BB_PAGE:
+		each.pfunc = &popup_all_bb;
+		break;
 	case FRONT_END_PAGE:
 		each.pfunc = &popup_all_front_end;
 		break;
@@ -637,6 +640,10 @@ static void _selected_page(GtkMenuItem *menuitem, display_data_t *display_data)
 					   display_data, NO_VAL,
 					   treedata->treeview);
 			break;
+		case BB_PAGE:
+			select_admin_bb(treedata->model, &treedata->iter,
+					   display_data, treedata->treeview);
+			break;
 		default:
 			g_print("common admin got %d %d\n",
 				display_data->extra,
@@ -1196,7 +1203,7 @@ extern GtkTreeView *create_treeview_2cols_attach_to_table(GtkTable *table)
 					   "text", DISPLAY_FONT);
 	gtk_tree_view_append_column(tree_view, col);
 
-       	g_object_unref(treestore);
+	g_object_unref(treestore);
 	return tree_view;
 }
 
@@ -1751,10 +1758,7 @@ extern void destroy_popup_info(void *arg)
 		g_mutex_lock(sview_mutex);
 		/* these are all children of each other so must
 		   be freed in this order */
-		if (popup_win->grid_button_list) {
-			list_destroy(popup_win->grid_button_list);
-			popup_win->grid_button_list = NULL;
-		}
+		FREE_NULL_LIST(popup_win->grid_button_list);
 		if (popup_win->table) {
 			gtk_widget_destroy(GTK_WIDGET(popup_win->table));
 			popup_win->table = NULL;
@@ -1850,6 +1854,9 @@ extern void *popup_thr(popup_info_t *popup_win)
 	case FRONT_END_PAGE:
 		specifc_info = specific_info_front_end;
 		break;
+	case BB_PAGE:
+		specifc_info = specific_info_bb;
+		break;
 	case SUBMIT_PAGE:
 	default:
 		g_print("thread got unknown type %d\n", popup_win->type);
@@ -2054,8 +2061,8 @@ extern void display_admin_edit(GtkTable *table, void *type_msg, int *row,
 		entry = gtk_combo_box_new_with_model(model2);
 		g_object_unref(model2);
 
-/* 		(callback)_set_active_combo_part(GTK_COMBO_BOX(entry), model, */
-/* 				       iter, display_data->id); */
+/*		(callback)_set_active_combo_part(GTK_COMBO_BOX(entry), model, */
+/*				       iter, display_data->id); */
 		(set_active)(GTK_COMBO_BOX(entry), model,
 			     iter, display_data->id);
 
@@ -2134,8 +2141,8 @@ extern void add_display_treestore_line(int update,
 				       const char *name, char *value)
 {
 	if (!name) {
-/* 		g_print("error, name = %s and value = %s\n", */
-/* 			name, value); */
+/*		g_print("error, name = %s and value = %s\n", */
+/*			name, value); */
 		return;
 	}
 	if (update) {
@@ -2182,8 +2189,8 @@ extern void add_display_treestore_line_with_font(
 	char *font)
 {
 	if (!name) {
-/* 		g_print("error, name = %s and value = %s\n", */
-/* 			name, value); */
+/*		g_print("error, name = %s and value = %s\n", */
+/*			name, value); */
 		return;
 	}
 	if (update) {
@@ -2276,12 +2283,14 @@ extern char *page_to_str(int page)
 		return "Job";
 	case PART_PAGE:
 		return "Partition";
-	case NODE_PAGE:
-		return "Node";
-	case BLOCK_PAGE:
-		return "Block";
 	case RESV_PAGE:
 		return "Reservation";
+	case BB_PAGE:
+		return "BurstBuffer";
+	case BLOCK_PAGE:
+		return "Block";
+	case NODE_PAGE:
+		return "Node";
 	case FRONT_END_PAGE:
 		return "Frontend";
 	default:
diff --git a/src/sview/defaults.c b/src/sview/defaults.c
index 0a98b0958..c9ca8a392 100644
--- a/src/sview/defaults.c
+++ b/src/sview/defaults.c
@@ -199,6 +199,8 @@ static const char *_set_sview_config(sview_config_t *sview_config,
 			sview_config->default_page = NODE_PAGE;
 		else if (!strcasecmp(new_text, "frontend"))
 			sview_config->default_page = FRONT_END_PAGE;
+		else if (!strcasecmp(new_text, "burstbuffer"))
+			sview_config->default_page = BB_PAGE;
 		else
 			sview_config->default_page = JOB_PAGE;
 		break;
@@ -527,6 +529,7 @@ static void _init_sview_conf(void)
 	default_sview_config.show_grid = TRUE;
 	default_sview_config.default_page = JOB_PAGE;
 	default_sview_config.tab_pos = GTK_POS_TOP;
+	default_sview_config.convert_flags = CONVERT_NUM_UNIT_EXACT;
 
 	for(i=0; i<PAGE_CNT; i++) {
 		memset(&default_sview_config.page_opts[i],
@@ -562,6 +565,7 @@ extern int load_defaults(void)
 		{"PageOptsPartition", S_P_STRING},
 		{"PageOptsReservation", S_P_STRING},
 		{"PageOptsFrontend", S_P_STRING},
+		{"PageOptsBurstBuffer", S_P_STRING},
 		{"RefreshDelay", S_P_UINT16},
 		{"RuledTables", S_P_BOOLEAN},
 		{"SavePageSettings", S_P_BOOLEAN},
@@ -619,6 +623,9 @@ extern int load_defaults(void)
 			default_sview_config.default_page = NODE_PAGE;
 		else if (slurm_strcasestr(tmp_str, "frontend"))
 			default_sview_config.default_page = FRONT_END_PAGE;
+		else if (slurm_strcasestr(tmp_str, "burstbuffer"))
+			default_sview_config.default_page = BB_PAGE;
+
 		xfree(tmp_str);
 	}
 	s_p_get_uint32(&default_sview_config.grid_hori,
@@ -677,6 +684,8 @@ extern int load_defaults(void)
 			default_sview_config.page_visible[NODE_PAGE] = 1;
 		if (slurm_strcasestr(tmp_str, "frontend"))
 			default_sview_config.page_visible[FRONT_END_PAGE] = 1;
+		if (slurm_strcasestr(tmp_str, "burstbuffer"))
+			default_sview_config.page_visible[BB_PAGE] = 1;
 		xfree(tmp_str);
 	}
 
@@ -1142,10 +1151,7 @@ extern int configure_defaults(void)
 				/*force fresh grid and
 				 * node state check
 				 * */
-				if (grid_button_list) {
-					list_destroy(grid_button_list);
-					grid_button_list = NULL;
-				}
+				FREE_NULL_LIST(grid_button_list);
 				slurm_free_node_info_msg(g_node_info_ptr);
 				g_node_info_ptr = NULL;
 			}
diff --git a/src/sview/front_end_info.c b/src/sview/front_end_info.c
index ff81432ed..e828ec4a7 100644
--- a/src/sview/front_end_info.c
+++ b/src/sview/front_end_info.c
@@ -417,7 +417,7 @@ static List _create_front_end_info_list(
 
 	if (last_list) {
 		list_iterator_destroy(last_list_itr);
-		list_destroy(last_list);
+		FREE_NULL_LIST(last_list);
 	}
 
 update_color:
@@ -875,7 +875,7 @@ display_it:
 
 	_update_info_front_end(send_resv_list,
 			  GTK_TREE_VIEW(spec_info->display_widget));
-	list_destroy(send_resv_list);
+	FREE_NULL_LIST(send_resv_list);
 end_it:
 	popup_win->toggled = 0;
 	popup_win->force_refresh = 0;
diff --git a/src/sview/grid.c b/src/sview/grid.c
index fa4c90e97..0901f0450 100644
--- a/src/sview/grid.c
+++ b/src/sview/grid.c
@@ -1601,8 +1601,7 @@ extern int get_system_stats(GtkTable *table)
 		rc = update_grid_table(main_grid_table, grid_button_list,
 				       node_list);
 		if (rc == RESET_GRID) {
-			list_destroy(grid_button_list);
-			grid_button_list = NULL;
+			FREE_NULL_LIST(grid_button_list);
 			grid_button_list = list_create(destroy_grid_button);
 			setup_grid_table(main_grid_table, grid_button_list,
 					 node_list);
diff --git a/src/sview/job_info.c b/src/sview/job_info.c
index 7ffe07c3c..39bd5670c 100644
--- a/src/sview/job_info.c
+++ b/src/sview/job_info.c
@@ -113,6 +113,7 @@ enum {
 	SORTID_BATCH,
 	SORTID_BATCH_HOST,
 	SORTID_BLOCK,
+	SORTID_BURST_BUFFER,
 	SORTID_COLOR,
 	SORTID_COLOR_INX,
 	SORTID_COMMAND,
@@ -191,6 +192,7 @@ enum {
 	SORTID_STD_OUT,
 	SORTID_SWITCHES,
 	SORTID_TASKS,
+	SORTID_THREAD_SPEC,
 /* 	SORTID_THREADS_MAX, */
 /* 	SORTID_THREADS_MIN, */
 	SORTID_TIME_ELIGIBLE,
@@ -201,6 +203,7 @@ enum {
 	SORTID_TIME_START,
 	SORTID_TIME_SUBMIT,
 	SORTID_TIME_SUSPEND,
+	SORTID_TRES,
 	SORTID_SMALL_BLOCK,
 	SORTID_UPDATED,
 	SORTID_USER_ID,
@@ -345,6 +348,8 @@ static display_data_t display_data_job[] = {
 	 refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_CORE_SPEC, "CoreSpec", FALSE, EDIT_TEXTBOX,
 	 refresh_job, create_model_job, admin_edit_job},
+	{G_TYPE_STRING, SORTID_THREAD_SPEC, "ThreadSpec", FALSE, EDIT_TEXTBOX,
+	 refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_REBOOT, "Reboot", FALSE, EDIT_MODEL,
 	 refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_REQUEUE, "Requeue", FALSE, EDIT_MODEL,
@@ -362,6 +367,8 @@ static display_data_t display_data_job[] = {
 	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_BATCH_HOST, "Batch Host", FALSE,
 	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
+	{G_TYPE_STRING, SORTID_BURST_BUFFER, "Burst Buffer", FALSE,
+	 EDIT_TEXTBOX, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_CPU_MIN, "CPUs Min",
 	 FALSE, EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_STRING, SORTID_CPU_MAX, "CPUs Max",
@@ -425,6 +432,8 @@ static display_data_t display_data_job[] = {
 	 refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_INT, SORTID_SMALL_BLOCK, NULL, FALSE, EDIT_NONE, refresh_job,
 	 create_model_job, admin_edit_job},
+	{G_TYPE_STRING, SORTID_TRES, "TRES", FALSE,
+	 EDIT_NONE, refresh_job, create_model_job, admin_edit_job},
 	{G_TYPE_INT, SORTID_UPDATED, NULL, FALSE, EDIT_NONE, refresh_job,
 	 create_model_job, admin_edit_job},
 	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
@@ -955,6 +964,14 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text,
 			goto return_error;
 		job_msg->core_spec = (uint16_t)temp_int;
 		break;
+	case SORTID_THREAD_SPEC:
+		temp_int = strtol(new_text, (char **)NULL, 10);
+
+		type = "specialized threads";
+		if (temp_int <= 0)
+			goto return_error;
+		job_msg->core_spec = (uint16_t)temp_int | CORE_SPEC_THREAD;
+		break;
 	case SORTID_REBOOT:
 		if (!strcasecmp(new_text, "yes"))
 			job_msg->reboot = 1;
@@ -993,6 +1010,10 @@ static const char *_set_job_msg(job_desc_msg_t *job_msg, const char *new_text,
 		job_msg->account = xstrdup(new_text);
 		type = "account";
 		break;
+	case SORTID_BURST_BUFFER:
+		job_msg->burst_buffer = xstrdup(new_text);
+		type = "burst buffer";
+		break;
 	case SORTID_QOS:
 		job_msg->qos = xstrdup(new_text);
 		type = "qos";
@@ -1279,40 +1300,6 @@ static int _nodes_in_list(char *node_list)
 	return count;
 }
 
-static int _get_node_cnt(job_info_t * job)
-{
-	int node_cnt = 0;
-
-	/*  For PENDING jobs, return the maximum of the requested nodelist,
-	 *   requested maximum number of nodes, or requested CPUs rounded
-	 *   to nearest node.
-	 *
-	 *  For COMPLETING jobs, the job->nodes nodelist has already been
-	 *   altered to list only the nodes still in the comp state, and
-	 *   thus we count only those nodes toward the total nodes still
-	 *   allocated to this job.
-	 */
-
-	if (IS_JOB_PENDING(job)) {
-		node_cnt = _nodes_in_list(job->req_nodes);
-		node_cnt = MAX(node_cnt, job->num_nodes);
-		if ((node_cnt == 1) && (job->num_cpus > 1)
-		    && job->ntasks_per_node
-		    && (job->ntasks_per_node != (uint16_t) NO_VAL)) {
-			int num_tasks = job->num_cpus;
-			if (job->cpus_per_task != (uint16_t) NO_VAL)
-				num_tasks /= job->cpus_per_task;
-			node_cnt = (num_tasks + 1) / job->ntasks_per_node;
-			if (node_cnt > num_tasks)
-				node_cnt = num_tasks;
-			else if (!node_cnt)
-				node_cnt = 1;
-		}
-	} else
-		node_cnt = _nodes_in_list(job->nodes);
-	return node_cnt;
-}
-
 /* this needs to be freed by xfree() */
 static void _convert_char_to_job_and_step(const char *data,
 					  int *jobid, int *stepid)
@@ -1447,6 +1434,11 @@ static void _layout_job_record(GtkTreeView *treeview,
 						 SORTID_BATCH_HOST),
 				   job_ptr->batch_host);
 
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_job,
+						 SORTID_BURST_BUFFER),
+				   job_ptr->burst_buffer);
+
 	if (cluster_flags & CLUSTER_FLAG_BG) {
 		add_display_treestore_line(update, treestore, &iter,
 					   find_col_name(display_data_job,
@@ -1495,7 +1487,12 @@ static void _layout_job_record(GtkTreeView *treeview,
 						 SORTID_CONTIGUOUS),
 				   tmp_char);
 
-	sprintf(tmp_char, "%u", job_ptr->core_spec);
+	if ((job_ptr->core_spec == (uint16_t) NO_VAL) ||
+	    (job_ptr->core_spec & CORE_SPEC_THREAD)) {
+		sprintf(tmp_char, "N/A");
+	} else {
+		sprintf(tmp_char, "%u", job_ptr->core_spec);
+	}
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
 						 SORTID_CORE_SPEC),
@@ -1504,7 +1501,7 @@ static void _layout_job_record(GtkTreeView *treeview,
 	if (cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)job_ptr->num_cpus,
 				 tmp_char, sizeof(tmp_char),
-				 UNIT_NONE);
+				 UNIT_NONE, working_sview_config.convert_flags);
 	else
 		snprintf(tmp_char, sizeof(tmp_char), "%u", job_ptr->num_cpus);
 
@@ -1516,7 +1513,7 @@ static void _layout_job_record(GtkTreeView *treeview,
 	if (cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)job_ptr->max_cpus,
 				 tmp_char, sizeof(tmp_char),
-				 UNIT_NONE);
+				 UNIT_NONE, working_sview_config.convert_flags);
 	else
 		snprintf(tmp_char, sizeof(tmp_char), "%u", job_ptr->max_cpus);
 
@@ -1528,7 +1525,7 @@ static void _layout_job_record(GtkTreeView *treeview,
 	if (cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)job_ptr->num_cpus,
 				 tmp_char, sizeof(tmp_char),
-				 UNIT_NONE);
+				 UNIT_NONE, working_sview_config.convert_flags);
 	else
 		snprintf(tmp_char, sizeof(tmp_char), "%u", job_ptr->num_cpus);
 
@@ -1663,7 +1660,8 @@ static void _layout_job_record(GtkTreeView *treeview,
 				   job_ptr->licenses);
 
 	convert_num_unit((float)job_ptr->pn_min_cpus,
-			 tmp_char, sizeof(tmp_char), UNIT_NONE);
+			 tmp_char, sizeof(tmp_char), UNIT_NONE,
+			 working_sview_config.convert_flags);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
 						 SORTID_CPU_REQ),
@@ -1676,7 +1674,8 @@ static void _layout_job_record(GtkTreeView *treeview,
 	if (min_mem > 0) {
 		int len;
 		convert_num_unit((float)min_mem,
-				 tmp_char, sizeof(tmp_char), UNIT_MEGA);
+				 tmp_char, sizeof(tmp_char), UNIT_MEGA,
+				 working_sview_config.convert_flags);
 		len = strlen(tmp_char);
 		if (job_ptr->pn_min_memory & MEM_PER_CPU)
 			sprintf(tmp_char+len, " Per CPU");
@@ -1691,7 +1690,8 @@ static void _layout_job_record(GtkTreeView *treeview,
 
 	if (job_ptr->pn_min_tmp_disk > 0)
 		convert_num_unit((float)job_ptr->pn_min_tmp_disk,
-				 tmp_char, sizeof(tmp_char), UNIT_MEGA);
+				 tmp_char, sizeof(tmp_char), UNIT_MEGA,
+				 working_sview_config.convert_flags);
 	else
 		sprintf(tmp_char, " ");
 	add_display_treestore_line(update, treestore, &iter,
@@ -1739,7 +1739,8 @@ static void _layout_job_record(GtkTreeView *treeview,
 
 	if (cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)sview_job_info_ptr->node_cnt,
-				 tmp_char, sizeof(tmp_char), UNIT_NONE);
+				 tmp_char, sizeof(tmp_char), UNIT_NONE,
+				 working_sview_config.convert_flags);
 	else
 		snprintf(tmp_char, sizeof(tmp_char), "%u",
 			 sview_job_info_ptr->node_cnt);
@@ -1751,7 +1752,8 @@ static void _layout_job_record(GtkTreeView *treeview,
 
 	if (cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)sview_job_info_ptr->node_cnt,
-				 tmp_char, sizeof(tmp_char), UNIT_NONE);
+				 tmp_char, sizeof(tmp_char), UNIT_NONE,
+				 working_sview_config.convert_flags);
 	else
 		snprintf(tmp_char, sizeof(tmp_char), "%u",
 			 job_ptr->max_nodes);
@@ -1763,10 +1765,11 @@ static void _layout_job_record(GtkTreeView *treeview,
 
 	if (cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)sview_job_info_ptr->node_cnt,
-				 tmp_char, sizeof(tmp_char), UNIT_NONE);
+				 tmp_char, sizeof(tmp_char), UNIT_NONE,
+				 working_sview_config.convert_flags);
 	else
 		snprintf(tmp_char, sizeof(tmp_char), "%u",
-			 job_ptr->num_nodes);
+			 sview_job_info_ptr->node_cnt);
 
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
@@ -1778,6 +1781,16 @@ static void _layout_job_record(GtkTreeView *treeview,
 						 SORTID_PARTITION),
 				   job_ptr->partition);
 
+	if (job_ptr->preempt_time) {
+		slurm_make_time_str((time_t *)&job_ptr->preempt_time, tmp_char,
+				    sizeof(tmp_char));
+	} else
+		sprintf(tmp_char, "N/A");
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_job,
+						 SORTID_PREEMPT_TIME),
+				   tmp_char);
+
 	sprintf(tmp_char, "%u", job_ptr->priority);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
@@ -1835,10 +1848,14 @@ static void _layout_job_record(GtkTreeView *treeview,
 						   sizeof(tmp_char),
 						   SELECT_PRINT_ROTATE));
 
-	if (job_ptr->shared)
-		sprintf(tmp_char, "yes");
-	else
+	if (job_ptr->shared == 0)
 		sprintf(tmp_char, "no");
+	else if (job_ptr->shared == 1)
+		sprintf(tmp_char, "no");
+	else if (job_ptr->shared == 2)
+		sprintf(tmp_char, "user");
+	else
+		sprintf(tmp_char, "ok");
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
 						 SORTID_SHARED),
@@ -1876,6 +1893,18 @@ static void _layout_job_record(GtkTreeView *treeview,
 						 SORTID_SWITCHES),
 				   tmp_char);
 
+	if ((job_ptr->core_spec == (uint16_t) NO_VAL) ||
+	    ((job_ptr->core_spec & CORE_SPEC_THREAD) == 0)) {
+		sprintf(tmp_char, "N/A");
+	} else {
+		sprintf(tmp_char, "%u",
+			job_ptr->core_spec & (~CORE_SPEC_THREAD));
+	}
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_job,
+						 SORTID_THREAD_SPEC),
+				   tmp_char);
+
 	slurm_make_time_str((time_t *)&job_ptr->eligible_time, tmp_char,
 			    sizeof(tmp_char));
 	add_display_treestore_line(update, treestore, &iter,
@@ -1905,16 +1934,6 @@ static void _layout_job_record(GtkTreeView *treeview,
 						 SORTID_TIMELIMIT),
 				   tmp_char);
 
-	if (job_ptr->preempt_time) {
-		slurm_make_time_str((time_t *)&job_ptr->preempt_time, tmp_char,
-				    sizeof(tmp_char));
-	} else
-		sprintf(tmp_char, "N/A");
-	add_display_treestore_line(update, treestore, &iter,
-				   find_col_name(display_data_job,
-						 SORTID_PREEMPT_TIME),
-				   tmp_char);
-
 	if (job_ptr->resize_time) {
 		slurm_make_time_str((time_t *)&job_ptr->resize_time, tmp_char,
 				    sizeof(tmp_char));
@@ -1948,6 +1967,11 @@ static void _layout_job_record(GtkTreeView *treeview,
 						 SORTID_TIME_SUSPEND),
 				   tmp_char);
 
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_job,
+						 SORTID_TRES),
+				   job_ptr->tres_alloc_str);
+
 	uname = uid_to_string_cached((uid_t)job_ptr->user_id);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
@@ -1981,6 +2005,7 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 	char tmp_prio[40],      tmp_nice[40],        tmp_preempt_time[40];
 	char tmp_rqswitch[40],  tmp_core_spec[40],   tmp_job_id[400];
 	char tmp_std_err[128],  tmp_std_in[128],     tmp_std_out[128];
+	char tmp_thread_spec[40];
 	char *tmp_batch,  *tmp_cont, *tmp_shared, *tmp_requeue, *tmp_uname;
 	char *tmp_reboot, *tmp_reason, *tmp_nodes;
 	char time_buf[32];
@@ -2071,7 +2096,20 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 	else
 		tmp_cont = "no";
 
-	sprintf(tmp_core_spec, "%u", job_ptr->core_spec);
+	if ((job_ptr->core_spec == (uint16_t) NO_VAL) ||
+	    (job_ptr->core_spec & CORE_SPEC_THREAD)) {
+		sprintf(tmp_core_spec, "N/A");
+	} else {
+		sprintf(tmp_core_spec, "%u", job_ptr->core_spec);
+	}
+	if ((job_ptr->core_spec == (uint16_t) NO_VAL) ||
+	    ((job_ptr->core_spec & CORE_SPEC_THREAD) == 0)) {
+		sprintf(tmp_thread_spec, "N/A");
+	} else {
+		sprintf(tmp_thread_spec, "%u",
+			job_ptr->core_spec & (~CORE_SPEC_THREAD));
+	}
+
 
 	if (job_ptr->cpus_per_task > 0)
 		sprintf(tmp_cpus_per_task, "%u", job_ptr->cpus_per_task);
@@ -2081,26 +2119,28 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 	if (cluster_flags & CLUSTER_FLAG_BG) {
 		convert_num_unit((float)job_ptr->num_cpus,
 				 tmp_cpu_cnt, sizeof(tmp_cpu_cnt),
-				 UNIT_NONE);
+				 UNIT_NONE, working_sview_config.convert_flags);
 	} else {
 		snprintf(tmp_cpu_cnt, sizeof(tmp_cpu_cnt), "%u",
 			 job_ptr->num_cpus);
 	}
 
 	convert_num_unit((float)job_ptr->pn_min_cpus,
-			 tmp_cpu_req, sizeof(tmp_cpu_req), UNIT_NONE);
+			 tmp_cpu_req, sizeof(tmp_cpu_req), UNIT_NONE,
+			 working_sview_config.convert_flags);
 
 	if (cluster_flags & CLUSTER_FLAG_BG) {
 		convert_num_unit((float)job_ptr->max_cpus,
 				 tmp_cpus_max, sizeof(tmp_cpus_max),
-				 UNIT_NONE);
+				 UNIT_NONE, working_sview_config.convert_flags);
 	} else {
 		snprintf(tmp_cpus_max, sizeof(tmp_cpus_max), "%u",
 			 job_ptr->max_cpus);
 	}
 
 	convert_num_unit((float)job_ptr->pn_min_tmp_disk,
-			 tmp_disk, sizeof(tmp_disk), UNIT_MEGA);
+			 tmp_disk, sizeof(tmp_disk), UNIT_MEGA,
+			 working_sview_config.convert_flags);
 
 	if (WIFSIGNALED(job_ptr->derived_ec))
 		term_sig = WTERMSIG(job_ptr->derived_ec);
@@ -2130,7 +2170,8 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 	if (min_mem > 0) {
 		int len;
 		convert_num_unit((float)min_mem,
-				 tmp_mem_min, sizeof(tmp_mem_min), UNIT_MEGA);
+				 tmp_mem_min, sizeof(tmp_mem_min), UNIT_MEGA,
+				 working_sview_config.convert_flags);
 		len = strlen(tmp_mem_min);
 		if (job_ptr->pn_min_memory & MEM_PER_CPU)
 			sprintf(tmp_mem_min+len, " Per CPU");
@@ -2141,7 +2182,8 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 
 	if (cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)sview_job_info_ptr->node_cnt,
-				 tmp_node_cnt, sizeof(tmp_node_cnt), UNIT_NONE);
+				 tmp_node_cnt, sizeof(tmp_node_cnt), UNIT_NONE,
+				 working_sview_config.convert_flags);
 	else
 		sprintf(tmp_node_cnt, "%u", sview_job_info_ptr->node_cnt);
 
@@ -2271,6 +2313,7 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 				   SORTID_ARRAY_TASK_ID,tmp_array_task_id,
 				   SORTID_BATCH,        tmp_batch,
 				   SORTID_BATCH_HOST,   job_ptr->batch_host,
+				   SORTID_BURST_BUFFER, job_ptr->burst_buffer,
 				   SORTID_COLOR,
 				   sview_colors[sview_job_info_ptr->color_inx],
 				   SORTID_COLOR_INX,
@@ -2299,6 +2342,7 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 				   SORTID_ARRAY_TASK_ID,tmp_array_task_id,
 				   SORTID_BATCH,        tmp_batch,
 				   SORTID_BATCH_HOST,   job_ptr->batch_host,
+				   SORTID_BURST_BUFFER, job_ptr->burst_buffer,
 				   SORTID_COLOR,
 				   sview_colors[sview_job_info_ptr->color_inx],
 				   SORTID_COLOR_INX,
@@ -2350,6 +2394,7 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 				   SORTID_STD_IN,       tmp_std_in,
 				   SORTID_STD_OUT,      tmp_std_out,
 				   SORTID_SWITCHES,     tmp_rqswitch,
+				   SORTID_THREAD_SPEC,  tmp_thread_spec,
 				   SORTID_TIME_ELIGIBLE,tmp_time_elig,
 				   SORTID_TIME_END,     tmp_time_end,
 				   SORTID_TIME_RESIZE,  tmp_time_resize,
@@ -2359,6 +2404,7 @@ static void _update_job_record(sview_job_info_t *sview_job_info_ptr,
 				   SORTID_TIME_SUSPEND, tmp_time_sus,
 				   SORTID_TIMELIMIT,    tmp_time_limit,
 				   SORTID_TMP_DISK,     tmp_disk,
+				   SORTID_TRES,         job_ptr->tres_alloc_str,
 				   SORTID_UPDATED,      1,
 				   SORTID_USER_ID,      tmp_uname,
 				   SORTID_WCKEY,        job_ptr->wckey,
@@ -2507,7 +2553,7 @@ static void _layout_step_record(GtkTreeView *treeview,
 	char *uname;
 	char tmp_char[50], tmp_nodes[50], tmp_time[50];
 	GtkTreeIter iter;
-	enum job_states state;
+	uint32_t state;
 	GtkTreeStore *treestore =
 		GTK_TREE_STORE(gtk_tree_view_get_model(treeview));
 
@@ -2515,7 +2561,7 @@ static void _layout_step_record(GtkTreeView *treeview,
 		return;
 
 	convert_num_unit((float)step_ptr->num_cpus, tmp_char, sizeof(tmp_char),
-			 UNIT_NONE);
+			 UNIT_NONE, working_sview_config.convert_flags);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
 						 SORTID_CPUS),
@@ -2571,14 +2617,17 @@ static void _layout_step_record(GtkTreeView *treeview,
 						    &nodes);
 			convert_num_unit(
 				(float)nodes,
-				tmp_char, sizeof(tmp_char), UNIT_NONE);
+				tmp_char, sizeof(tmp_char), UNIT_NONE,
+				working_sview_config.convert_flags);
 		} else if (cluster_flags & CLUSTER_FLAG_BG)
 			convert_num_unit(
 				(float)step_ptr->num_tasks / cpus_per_node,
-				tmp_char, sizeof(tmp_char), UNIT_NONE);
+				tmp_char, sizeof(tmp_char), UNIT_NONE,
+				working_sview_config.convert_flags);
 		else {
 			convert_num_unit((float)_nodes_in_list(tmp_nodes),
-					 tmp_char, sizeof(tmp_char), UNIT_NONE);
+					 tmp_char, sizeof(tmp_char), UNIT_NONE,
+					 working_sview_config.convert_flags);
 		}
 		add_display_treestore_line(update, treestore, &iter,
 					   find_col_name(display_data_job,
@@ -2619,11 +2668,16 @@ static void _layout_step_record(GtkTreeView *treeview,
 				   tmp_time);
 
 	convert_num_unit((float)step_ptr->num_tasks, tmp_char, sizeof(tmp_char),
-			 UNIT_NONE);
+			 UNIT_NONE, working_sview_config.convert_flags);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_job,
 						 SORTID_TASKS),
 				   tmp_char);
+
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_job,
+						 SORTID_TRES),
+				   step_ptr->tres_alloc_str);
 }
 
 static void _update_step_record(job_step_info_t *step_ptr,
@@ -2635,11 +2689,12 @@ static void _update_step_record(job_step_info_t *step_ptr,
 	char tmp_cpu_min[40],  tmp_time_run[40],   tmp_time_limit[40];
 	char tmp_node_cnt[40], tmp_time_start[40], tmp_task_cnt[40];
 	char tmp_step_id[40], tmp_job_id[400];
-	enum job_states state;
+	uint32_t state;
 	int color_inx = step_ptr->step_id % sview_colors_cnt;
 
 	convert_num_unit((float)step_ptr->num_cpus, tmp_cpu_min,
-			 sizeof(tmp_cpu_min), UNIT_NONE);
+			 sizeof(tmp_cpu_min), UNIT_NONE,
+			 working_sview_config.convert_flags);
 
 	if (suspended)
 		state = JOB_SUSPENDED;
@@ -2662,20 +2717,24 @@ static void _update_step_record(job_step_info_t *step_ptr,
 						    SELECT_JOBDATA_NODE_CNT,
 						    &nodes);
 			convert_num_unit((float)nodes, tmp_node_cnt,
-					 sizeof(tmp_node_cnt), UNIT_NONE);
+					 sizeof(tmp_node_cnt), UNIT_NONE,
+					 working_sview_config.convert_flags);
 		} else if (cluster_flags & CLUSTER_FLAG_BG) {
 			convert_num_unit(
 				(float)step_ptr->num_tasks / cpus_per_node,
-				tmp_node_cnt, sizeof(tmp_node_cnt), UNIT_NONE);
+				tmp_node_cnt, sizeof(tmp_node_cnt), UNIT_NONE,
+				working_sview_config.convert_flags);
 		} else {
 			convert_num_unit((float)_nodes_in_list(tmp_nodes),
 					 tmp_node_cnt, sizeof(tmp_node_cnt),
-					 UNIT_NONE);
+					 UNIT_NONE,
+					 working_sview_config.convert_flags);
 		}
 	}
 
 	convert_num_unit((float)step_ptr->num_tasks, tmp_task_cnt,
-			 sizeof(tmp_task_cnt), UNIT_NONE);
+			 sizeof(tmp_task_cnt), UNIT_NONE,
+			 working_sview_config.convert_flags);
 
 	if ((step_ptr->time_limit == NO_VAL) ||
 	    (step_ptr->time_limit == INFINITE)) {
@@ -2719,6 +2778,7 @@ static void _update_step_record(job_step_info_t *step_ptr,
 			   SORTID_TIME_RUNNING, tmp_time_run,
 			   SORTID_TIME_START,   tmp_time_start,
 			   SORTID_TIMELIMIT,    tmp_time_limit,
+			   SORTID_TRES,         step_ptr->tres_alloc_str,
 			   SORTID_UPDATED,      1,
 			   SORTID_USER_ID,      tmp_uname,
 			   -1);
@@ -3213,11 +3273,10 @@ static List _create_job_info_list(job_info_msg_t *job_info_ptr,
 					xstrdup(job_ptr->nodes);
 			}
 			xfree(ionodes);
-		} else
+		} else {
 			sview_job_info_ptr->nodes = xstrdup(job_ptr->nodes);
-
-		if (!sview_job_info_ptr->node_cnt)
-			sview_job_info_ptr->node_cnt = _get_node_cnt(job_ptr);
+			sview_job_info_ptr->node_cnt = job_ptr->num_nodes;
+		}
 
 		for (j = 0; j < step_info_ptr->job_step_count; j++) {
 			step_ptr = &(step_info_ptr->job_steps[j]);
@@ -3247,7 +3306,7 @@ static List _create_job_info_list(job_info_msg_t *job_info_ptr,
 
 	if (last_list) {
 		list_iterator_destroy(last_list_itr);
-		list_destroy(last_list);
+		FREE_NULL_LIST(last_list);
 	}
 
 update_color:
@@ -3892,7 +3951,7 @@ display_it:
 		bool *color_set_flag = xmalloc(sizeof(bool) * array_size);
 		itr = list_iterator_create(info_list);
 		while ((sview_job_info_ptr = list_next(itr))) {
-			uint16_t base_state;
+			uint32_t base_state;
 			job_ptr = sview_job_info_ptr->job_ptr;
 			base_state = job_ptr->job_state & JOB_STATE_BASE;
 			if (base_state != JOB_RUNNING)
@@ -4208,7 +4267,7 @@ display_it:
 	_update_info_job(send_info_list,
 			 GTK_TREE_VIEW(spec_info->display_widget));
 
-	list_destroy(send_info_list);
+	FREE_NULL_LIST(send_info_list);
 end_it:
 	popup_win->toggled = 0;
 	popup_win->force_refresh = 0;
@@ -4702,7 +4761,7 @@ static void _edit_jobs(GtkTreeModel *model, GtkTreeIter *iter,
 {
 	jobs_foreach_common_t job_foreach_common;
 	global_error_code = SLURM_SUCCESS;
-	/* setup params that applies to ALL selections */
+	/* setup working_sview_config that applies to ALL selections */
 	memset(&job_foreach_common, 0, sizeof(jobs_foreach_common_t));
 	job_foreach_common.type = type;
 	job_foreach_common.edit_type = EDIT_EDIT;
@@ -4718,7 +4777,7 @@ static void _edit_jobs(GtkTreeModel *model, GtkTreeIter *iter,
 		selected_foreach_build_list(model, NULL, iter, NULL);
 	/* determine what to do with them/it */
 	_edit_each_job(model, iter, &job_foreach_common); /*go do them*/
-	list_destroy(foreach_list);
+	FREE_NULL_LIST(foreach_list);
 
 	return;
 
@@ -4839,7 +4898,7 @@ extern void admin_job(GtkTreeModel *model, GtkTreeIter *iter,
 	if (response == GTK_RESPONSE_OK) {
 		jobs_foreach_common_t job_foreach_common;
 		global_error_code = SLURM_SUCCESS;
-		/* setup params that applies to ALL selections */
+		/* setup working_sview_config that applies to ALL selections */
 		memset(&job_foreach_common, 0, sizeof(jobs_foreach_common_t));
 		job_foreach_common.type = type;
 		job_foreach_common.edit_type = edit_type;
@@ -4859,7 +4918,7 @@ extern void admin_job(GtkTreeModel *model, GtkTreeIter *iter,
 			selected_foreach_build_list(model, NULL, iter, NULL);
 		/* determine what to do with them/it */
 		process_foreach_list(&job_foreach_common); /*go do them*/
-		list_destroy(foreach_list);
+		FREE_NULL_LIST(foreach_list);
 	}/*response OK ^^*/
 	/* switch back to standard cursor*/
 
diff --git a/src/sview/node_info.c b/src/sview/node_info.c
index 8f66379c8..b9443a7ae 100644
--- a/src/sview/node_info.c
+++ b/src/sview/node_info.c
@@ -1,9 +1,9 @@
 /*****************************************************************************\
- *  node_info.c - Functions related to node display
- *  mode of sview.
+ *  node_info.c - Functions related to node display mode of sview.
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2010-2015 SchedMD LLC.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
@@ -42,9 +42,9 @@ int g_node_scaling = 1;
 enum {
 	SORTID_POS = POS_LOC,
 	SORTID_ARCH,
-	SORTID_BASE_WATTS,
 	SORTID_BOARDS,
 	SORTID_BOOT_TIME,
+	SORTID_CAP_WATTS,
 	SORTID_COLOR,
 	SORTID_CPUS,
 	SORTID_CPU_LOAD,
@@ -53,12 +53,15 @@ enum {
 	SORTID_CURRENT_WATTS,
 	SORTID_ERR_CPUS,
 	SORTID_FEATURES,
+	SORTID_FREE_MEM,
 	SORTID_GRES,
 	SORTID_IDLE_CPUS,
+	SORTID_LOWEST_JOULES,
 	SORTID_NAME,
 	SORTID_NODE_ADDR,
 	SORTID_NODE_HOSTNAME,
-	SORTID_MEMORY,	/* RealMemory */
+	SORTID_OWNER,
+	SORTID_REAL_MEMORY,
 	SORTID_REASON,
 	SORTID_RACK_MP,
 	SORTID_SLURMD_START_TIME,
@@ -66,7 +69,7 @@ enum {
 	SORTID_STATE,
 	SORTID_STATE_NUM,
 	SORTID_THREADS,
-	SORTID_DISK,	/* TmpDisk */
+	SORTID_TMP_DISK,
 	SORTID_UPDATED,
 	SORTID_USED_CPUS,
 	SORTID_USED_MEMORY,
@@ -106,6 +109,8 @@ static display_data_t display_data_node[] = {
 	 refresh_node, create_model_node, admin_edit_node},
 	{G_TYPE_STRING, SORTID_NODE_HOSTNAME, "NodeHostName", FALSE, EDIT_NONE,
 	 refresh_node, create_model_node, admin_edit_node},
+	{G_TYPE_STRING, SORTID_OWNER, "Owner", FALSE, EDIT_NONE,
+	 refresh_node, create_model_node, admin_edit_node},
 	{G_TYPE_STRING, SORTID_STATE, "State", FALSE, EDIT_MODEL, refresh_node,
 	 create_model_node, admin_edit_node},
 	{G_TYPE_INT, SORTID_STATE_NUM, NULL, FALSE, EDIT_NONE, refresh_node,
@@ -126,12 +131,14 @@ static display_data_t display_data_node[] = {
 	 EDIT_NONE, refresh_node, create_model_node, admin_edit_node},
 	{G_TYPE_INT, SORTID_THREADS, "ThreadsPerCore", FALSE,
 	 EDIT_NONE, refresh_node, create_model_node, admin_edit_node},
-	{G_TYPE_STRING, SORTID_MEMORY, "Real Memory", FALSE,
+	{G_TYPE_STRING, SORTID_REAL_MEMORY, "Real Memory", FALSE,
 	 EDIT_NONE, refresh_node, create_model_node, admin_edit_node},
 	{G_TYPE_STRING, SORTID_USED_MEMORY, "Used Memory", FALSE,
 	 EDIT_NONE, refresh_node, create_model_node, admin_edit_node},
-	{G_TYPE_STRING, SORTID_DISK, "Tmp Disk", FALSE, EDIT_NONE, refresh_node,
-	 create_model_node, admin_edit_node},
+	{G_TYPE_STRING, SORTID_FREE_MEM, "Free Memory", FALSE, EDIT_NONE,
+	 refresh_node, create_model_node, admin_edit_node},
+	{G_TYPE_STRING, SORTID_TMP_DISK, "Tmp Disk", FALSE, EDIT_NONE,
+	 refresh_node, create_model_node, admin_edit_node},
 	{G_TYPE_INT, SORTID_WEIGHT,"Weight", FALSE, EDIT_NONE, refresh_node,
 	 create_model_node, admin_edit_node},
 	{G_TYPE_STRING, SORTID_CPU_LOAD, "CPU Load", FALSE, EDIT_NONE,
@@ -148,12 +155,14 @@ static display_data_t display_data_node[] = {
 	 EDIT_NONE, refresh_node, create_model_node, admin_edit_node},
 	{G_TYPE_STRING, SORTID_REASON, "Reason", FALSE,
 	 EDIT_NONE, refresh_node, create_model_node, admin_edit_node},
-	{G_TYPE_STRING, SORTID_BASE_WATTS, "Lowest Joules", FALSE,
+	{G_TYPE_STRING, SORTID_LOWEST_JOULES, "Lowest Joules", FALSE,
 	 EDIT_NONE, refresh_node, create_model_node, admin_edit_node},
 	{G_TYPE_STRING, SORTID_CONSUMED_ENERGY,"Consumed Joules", FALSE,
 	 EDIT_NONE, refresh_node, create_model_node, admin_edit_node},
 	{G_TYPE_STRING, SORTID_CURRENT_WATTS, "Current Watts", FALSE,
 	 EDIT_NONE, refresh_node, create_model_node, admin_edit_node},
+	{G_TYPE_STRING, SORTID_CAP_WATTS,"Cap Watts", FALSE,
+	 EDIT_NONE, refresh_node, create_model_node, admin_edit_node},
 	{G_TYPE_STRING, SORTID_VERSION, "Version", FALSE,
 	 EDIT_NONE, refresh_node, create_model_node, admin_edit_node},
 	{G_TYPE_INT, SORTID_UPDATED, NULL, FALSE, EDIT_NONE, refresh_node,
@@ -204,6 +213,7 @@ static void _layout_node_record(GtkTreeView *treeview,
 	char tmp_current_watts[50];
 	char tmp_base_watts[50];
 	char tmp_consumed_energy[50];
+	char tmp_cap_watts[50], tmp_owner[32];
 	char tmp_version[50];
 	char *upper = NULL, *lower = NULL;
 	GtkTreeIter iter;
@@ -237,8 +247,21 @@ static void _layout_node_record(GtkTreeView *treeview,
 						 SORTID_NODE_HOSTNAME),
 				   node_ptr->node_hostname);
 
+	if (node_ptr->owner == NO_VAL) {
+		snprintf(tmp_owner, sizeof(tmp_owner), "N/A");
+	} else {
+		char *user_name;
+		user_name = uid_to_string((uid_t) node_ptr->owner);
+		snprintf(tmp_owner, sizeof(tmp_owner), "%s(%u)",
+			 user_name, node_ptr->owner);
+		xfree(user_name);
+	}
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_node,
+						 SORTID_OWNER), tmp_owner);
+
 	convert_num_unit((float)node_ptr->cpus, tmp_cnt, sizeof(tmp_cnt),
-			 UNIT_NONE);
+			 UNIT_NONE, working_sview_config.convert_flags);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_node,
 						 SORTID_CPUS),
@@ -255,6 +278,17 @@ static void _layout_node_record(GtkTreeView *treeview,
 						 SORTID_CPU_LOAD),
 				   tmp_cnt);
 
+	if (node_ptr->free_mem == NO_VAL) {
+		snprintf(tmp_cnt, sizeof(tmp_cnt), "N/A");
+	} else {
+		snprintf(tmp_cnt, sizeof(tmp_cnt), "%uM",
+		         node_ptr->free_mem);
+	}
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_node,
+						 SORTID_FREE_MEM),
+				   tmp_cnt);
+
 	select_g_select_nodeinfo_get(node_ptr->select_nodeinfo,
 				     SELECT_NODEDATA_SUBCNT,
 				     NODE_STATE_ALLOCATED,
@@ -269,7 +303,8 @@ static void _layout_node_record(GtkTreeView *treeview,
 	}
 	idle_cpus -= alloc_cpus;
 	convert_num_unit((float)alloc_cpus, tmp_cnt,
-			 sizeof(tmp_cnt), UNIT_NONE);
+			 sizeof(tmp_cnt), UNIT_NONE,
+			 working_sview_config.convert_flags);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_node,
 						 SORTID_USED_CPUS),
@@ -284,13 +319,15 @@ static void _layout_node_record(GtkTreeView *treeview,
 		err_cpus *= cpus_per_node;
 
 	idle_cpus -= err_cpus;
-	convert_num_unit((float)err_cpus, tmp_cnt, sizeof(tmp_cnt), UNIT_NONE);
+	convert_num_unit((float)err_cpus, tmp_cnt, sizeof(tmp_cnt), UNIT_NONE,
+			 working_sview_config.convert_flags);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_node,
 						 SORTID_ERR_CPUS),
 				   tmp_cnt);
 
-	convert_num_unit((float)idle_cpus, tmp_cnt, sizeof(tmp_cnt), UNIT_NONE);
+	convert_num_unit((float)idle_cpus, tmp_cnt, sizeof(tmp_cnt), UNIT_NONE,
+			 working_sview_config.convert_flags);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_node,
 						 SORTID_IDLE_CPUS),
@@ -306,38 +343,38 @@ static void _layout_node_record(GtkTreeView *treeview,
 	xfree(lower);
 
 	convert_num_unit((float)node_ptr->boards, tmp_cnt, sizeof(tmp_cnt),
-			 UNIT_NONE);
+			 UNIT_NONE, working_sview_config.convert_flags);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_node,
 						 SORTID_BOARDS),
 				   tmp_cnt);
 
 	convert_num_unit((float)node_ptr->sockets, tmp_cnt, sizeof(tmp_cnt),
-			 UNIT_NONE);
+			 UNIT_NONE, working_sview_config.convert_flags);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_node,
 						 SORTID_SOCKETS),
 				   tmp_cnt);
 
 	convert_num_unit((float)node_ptr->cores, tmp_cnt, sizeof(tmp_cnt),
-			 UNIT_NONE);
+			 UNIT_NONE, working_sview_config.convert_flags);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_node,
 						 SORTID_CORES),
 				   tmp_cnt);
 
 	convert_num_unit((float)node_ptr->threads, tmp_cnt, sizeof(tmp_cnt),
-			 UNIT_NONE);
+			 UNIT_NONE, working_sview_config.convert_flags);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_node,
 						 SORTID_THREADS),
 				   tmp_cnt);
 
 	convert_num_unit((float)node_ptr->real_memory, tmp_cnt, sizeof(tmp_cnt),
-			 UNIT_MEGA);
+			 UNIT_MEGA, working_sview_config.convert_flags);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_node,
-						 SORTID_MEMORY),
+						 SORTID_REAL_MEMORY),
 				   tmp_cnt);
 
 	select_g_select_nodeinfo_get(node_ptr->select_nodeinfo,
@@ -351,10 +388,10 @@ static void _layout_node_record(GtkTreeView *treeview,
 				   tmp_cnt);
 
 	convert_num_unit((float)node_ptr->tmp_disk, tmp_cnt, sizeof(tmp_cnt),
-			 UNIT_MEGA);
+			 UNIT_MEGA, working_sview_config.convert_flags);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_node,
-						 SORTID_DISK),
+						 SORTID_TMP_DISK),
 				   tmp_cnt);
 	snprintf(tmp_cnt, sizeof(tmp_cnt), "%u", node_ptr->weight);
 	add_display_treestore_line(update, treestore, &iter,
@@ -399,11 +436,11 @@ static void _layout_node_record(GtkTreeView *treeview,
 		snprintf(tmp_base_watts, sizeof(tmp_base_watts),
 			 "%u", node_ptr->energy->base_watts);
 		snprintf(tmp_consumed_energy, sizeof(tmp_consumed_energy),
-			 "%u", node_ptr->energy->consumed_energy);
+			 "%"PRIu64"", node_ptr->energy->consumed_energy);
 	}
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_node,
-						 SORTID_BASE_WATTS),
+						 SORTID_LOWEST_JOULES),
 				   tmp_base_watts);
 
 	add_display_treestore_line(update, treestore, &iter,
@@ -416,6 +453,17 @@ static void _layout_node_record(GtkTreeView *treeview,
 						 SORTID_CURRENT_WATTS),
 				   tmp_current_watts);
 
+	if (!node_ptr->power || (node_ptr->power->cap_watts == NO_VAL)) {
+		snprintf(tmp_cap_watts, sizeof(tmp_cap_watts), "N/A");
+	} else {
+		snprintf(tmp_cap_watts, sizeof(tmp_cap_watts), "%u",
+			 node_ptr->power->cap_watts);
+	}
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_node,
+						 SORTID_CAP_WATTS),
+				   tmp_cap_watts);
+
 	if (node_ptr->version == NULL) {
 		snprintf(tmp_version, sizeof(tmp_version), "N/A");
 	} else {
@@ -437,9 +485,9 @@ static void _update_node_record(sview_node_info_t *sview_node_info_ptr,
 	node_info_t *node_ptr = sview_node_info_ptr->node_ptr;
 	char tmp_disk[20], tmp_cpus[20], tmp_err_cpus[20], tmp_idle_cpus[20];
 	char tmp_mem[20], tmp_used_memory[20];
-	char tmp_used_cpus[20], tmp_cpu_load[20];
+	char tmp_used_cpus[20], tmp_cpu_load[20], tmp_free_mem[20], tmp_owner[32];
 	char tmp_current_watts[50], tmp_base_watts[50], tmp_consumed_energy[50];
-	char tmp_version[50];
+	char tmp_cap_watts[50], tmp_version[50];
 	char *tmp_state_lower, *tmp_state_upper;
 
 
@@ -456,7 +504,14 @@ static void _update_node_record(sview_node_info_t *sview_node_info_ptr,
 		snprintf(tmp_base_watts, sizeof(tmp_base_watts),
 			 "%u", node_ptr->energy->base_watts);
 		snprintf(tmp_consumed_energy, sizeof(tmp_consumed_energy),
-			 "%u", node_ptr->energy->consumed_energy);
+			 "%"PRIu64"", node_ptr->energy->consumed_energy);
+	}
+
+	if (!node_ptr->power || (node_ptr->power->cap_watts == NO_VAL)) {
+		snprintf(tmp_cap_watts, sizeof(tmp_cap_watts), "N/A");
+	} else {
+		snprintf(tmp_cap_watts, sizeof(tmp_cap_watts), "%u",
+			 node_ptr->power->cap_watts);
 	}
 
 	if (node_ptr->cpu_load == NO_VAL) {
@@ -466,8 +521,16 @@ static void _update_node_record(sview_node_info_t *sview_node_info_ptr,
 			 "%.2f", (node_ptr->cpu_load / 100.0));
 	}
 
+	if (node_ptr->free_mem == NO_VAL) {
+		strcpy(tmp_free_mem, "N/A");
+	} else {
+		snprintf(tmp_free_mem, sizeof(tmp_free_mem),
+		         "%uM", node_ptr->free_mem);
+	}
+
 	convert_num_unit((float)node_ptr->cpus, tmp_cpus,
-			 sizeof(tmp_cpus), UNIT_NONE);
+			 sizeof(tmp_cpus), UNIT_NONE,
+			 working_sview_config.convert_flags);
 
 	select_g_select_nodeinfo_get(node_ptr->select_nodeinfo,
 				     SELECT_NODEDATA_SUBCNT,
@@ -483,7 +546,8 @@ static void _update_node_record(sview_node_info_t *sview_node_info_ptr,
 	}
 	idle_cpus = node_ptr->cpus - alloc_cpus;
 	convert_num_unit((float)alloc_cpus, tmp_used_cpus,
-			 sizeof(tmp_used_cpus), UNIT_NONE);
+			 sizeof(tmp_used_cpus), UNIT_NONE,
+			 working_sview_config.convert_flags);
 
 	select_g_select_nodeinfo_get(node_ptr->select_nodeinfo,
 				     SELECT_NODEDATA_MEM_ALLOC,
@@ -492,7 +556,8 @@ static void _update_node_record(sview_node_info_t *sview_node_info_ptr,
 	snprintf(tmp_used_memory, sizeof(tmp_used_memory), "%uM", alloc_memory);
 
 	convert_num_unit((float)alloc_cpus, tmp_used_cpus,
-			 sizeof(tmp_used_cpus), UNIT_NONE);
+			 sizeof(tmp_used_cpus), UNIT_NONE,
+			 working_sview_config.convert_flags);
 
 	select_g_select_nodeinfo_get(node_ptr->select_nodeinfo,
 				     SELECT_NODEDATA_SUBCNT,
@@ -502,10 +567,10 @@ static void _update_node_record(sview_node_info_t *sview_node_info_ptr,
 		err_cpus *= cpus_per_node;
 	idle_cpus -= err_cpus;
 	convert_num_unit((float)err_cpus, tmp_err_cpus, sizeof(tmp_err_cpus),
-			 UNIT_NONE);
+			 UNIT_NONE, working_sview_config.convert_flags);
 
 	convert_num_unit((float)idle_cpus, tmp_idle_cpus, sizeof(tmp_idle_cpus),
-			 UNIT_NONE);
+			 UNIT_NONE, working_sview_config.convert_flags);
 
 	if (IS_NODE_DRAIN(node_ptr)) {
 		/* don't worry about mixed since the
@@ -519,10 +584,10 @@ static void _update_node_record(sview_node_info_t *sview_node_info_ptr,
 	tmp_state_lower = str_tolower(tmp_state_upper);
 
 	convert_num_unit((float)node_ptr->real_memory, tmp_mem, sizeof(tmp_mem),
-			 UNIT_MEGA);
+			 UNIT_MEGA, working_sview_config.convert_flags);
 
 	convert_num_unit((float)node_ptr->tmp_disk, tmp_disk, sizeof(tmp_disk),
-			 UNIT_MEGA);
+			 UNIT_MEGA, working_sview_config.convert_flags);
 
 	if (node_ptr->version == NULL) {
 		snprintf(tmp_version, sizeof(tmp_version), "N/A");
@@ -531,12 +596,23 @@ static void _update_node_record(sview_node_info_t *sview_node_info_ptr,
 			 node_ptr->version);
 	}
 
+	if (node_ptr->owner == NO_VAL) {
+		snprintf(tmp_owner, sizeof(tmp_owner), "N/A");
+	} else {
+		char *user_name;
+		user_name = uid_to_string((uid_t) node_ptr->owner);
+		snprintf(tmp_owner, sizeof(tmp_owner), "%s(%u)",
+			 user_name, node_ptr->owner);
+		xfree(user_name);
+	}
+
 	/* Combining these records provides a slight performance improvement */
 	gtk_tree_store_set(treestore, &sview_node_info_ptr->iter_ptr,
 			   SORTID_ARCH,      node_ptr->arch,
-			   SORTID_BASE_WATTS,tmp_base_watts,
+			   SORTID_LOWEST_JOULES, tmp_base_watts,
 			   SORTID_BOARDS,    node_ptr->boards,
 			   SORTID_BOOT_TIME, sview_node_info_ptr->boot_time,
+			   SORTID_CAP_WATTS, tmp_cap_watts,
 			   SORTID_COLOR,
 				sview_colors[sview_node_info_ptr->pos
 				% sview_colors_cnt],
@@ -545,15 +621,17 @@ static void _update_node_record(sview_node_info_t *sview_node_info_ptr,
 			   SORTID_CPUS,      tmp_cpus,
 			   SORTID_CURRENT_WATTS, tmp_current_watts,
 			   SORTID_CPU_LOAD,  tmp_cpu_load,
-			   SORTID_DISK,      tmp_disk,
+			   SORTID_FREE_MEM,  tmp_free_mem,
+			   SORTID_TMP_DISK,  tmp_disk,
 			   SORTID_ERR_CPUS,  tmp_err_cpus,
 			   SORTID_IDLE_CPUS, tmp_idle_cpus,
 			   SORTID_FEATURES,  node_ptr->features,
 			   SORTID_GRES,      node_ptr->gres,
-			   SORTID_MEMORY,    tmp_mem,
+			   SORTID_REAL_MEMORY, tmp_mem,
 			   SORTID_NAME,      node_ptr->name,
 			   SORTID_NODE_ADDR, node_ptr->node_addr,
 			   SORTID_NODE_HOSTNAME, node_ptr->node_hostname,
+			   SORTID_OWNER,     tmp_owner,
 			   SORTID_RACK_MP,   sview_node_info_ptr->rack_mp,
 			   SORTID_REASON,    sview_node_info_ptr->reason,
 			   SORTID_SLURMD_START_TIME,
@@ -882,7 +960,7 @@ extern List create_node_info_list(node_info_msg_t *node_info_ptr,
 
 	if (last_list) {
 		list_iterator_destroy(last_list_itr);
-		list_destroy(last_list);
+		FREE_NULL_LIST(last_list);
 	}
 
 update_color:
@@ -1738,7 +1816,7 @@ display_it:
 
 	_update_info_node(send_info_list,
 			  GTK_TREE_VIEW(spec_info->display_widget));
-	list_destroy(send_info_list);
+	FREE_NULL_LIST(send_info_list);
 end_it:
 	popup_win->toggled = 0;
 	popup_win->force_refresh = 0;
diff --git a/src/sview/part_info.c b/src/sview/part_info.c
index 61eeb2588..9e47ef5eb 100644
--- a/src/sview/part_info.c
+++ b/src/sview/part_info.c
@@ -4,7 +4,7 @@
  *****************************************************************************
  *  Copyright (C) 2004-2007 The Regents of the University of California.
  *  Copyright (C) 2008-2010 Lawrence Livermore National Security.
- *  Portions Copyright (C) 2010 SchedMD <http://www.schedmd.com>.
+ *  Portions Copyright (C) 2010-2015 SchedMD <http://www.schedmd.com>.
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Danny Auble <da@llnl.gov>
  *
@@ -89,6 +89,7 @@ enum {
 	SORTID_DEFAULT,
 	SORTID_DENY_ACCOUNTS,
 	SORTID_DENY_QOS,
+	SORTID_EXCLUSIVE_USER,
 	SORTID_FEATURES,
 	SORTID_GRACE_TIME,
 	SORTID_HIDDEN,
@@ -114,6 +115,7 @@ enum {
 	SORTID_PART_STATE,
 	SORTID_PREEMPT_MODE,
 	SORTID_PRIORITY,
+	SORTID_QOS_CHAR,
 	SORTID_REASON,
 	SORTID_ROOT,
 	SORTID_SHARE,
@@ -138,6 +140,8 @@ static display_data_t display_data_part[] = {
 	 EDIT_TEXTBOX, refresh_part, create_model_part, admin_edit_part},
 	{G_TYPE_STRING, SORTID_DEFAULT, "Default", FALSE,
 	 EDIT_MODEL, refresh_part, create_model_part, admin_edit_part},
+	{G_TYPE_STRING, SORTID_EXCLUSIVE_USER, "ExclusiveUser", FALSE,
+	 EDIT_MODEL, refresh_part, create_model_part, admin_edit_part},
 	{G_TYPE_STRING, SORTID_GRACE_TIME, "GraceTime", FALSE,
 	 EDIT_TEXTBOX, refresh_part, create_model_part, admin_edit_part},
 	{G_TYPE_STRING, SORTID_HIDDEN, "Hidden", FALSE,
@@ -169,6 +173,8 @@ static display_data_t display_data_part[] = {
 	 create_model_part, admin_edit_part},
 	{G_TYPE_STRING, SORTID_SHARE, "Share", FALSE, EDIT_MODEL, refresh_part,
 	 create_model_part, admin_edit_part},
+	{G_TYPE_STRING, SORTID_QOS_CHAR, "Qos", FALSE,
+	 EDIT_TEXTBOX, refresh_part, create_model_part, admin_edit_part},
 	{G_TYPE_STRING, SORTID_ALLOW_ACCOUNTS, "Allowed Accounts", FALSE,
 	 EDIT_TEXTBOX, refresh_part, create_model_part, admin_edit_part},
 	{G_TYPE_STRING, SORTID_ALLOW_GROUPS, "Allowed Groups", FALSE,
@@ -217,6 +223,8 @@ static display_data_t create_data_part[] = {
 	 EDIT_TEXTBOX, refresh_part, _create_model_part2, admin_edit_part},
 	{G_TYPE_STRING, SORTID_DEFAULT, "Default", FALSE,
 	 EDIT_MODEL, refresh_part, _create_model_part2, admin_edit_part},
+	{G_TYPE_STRING, SORTID_EXCLUSIVE_USER, "ExclusiveUser", FALSE,
+	 EDIT_MODEL, refresh_part, _create_model_part2, admin_edit_part},
 	{G_TYPE_STRING, SORTID_GRACE_TIME, "GraceTime", FALSE,
 	 EDIT_TEXTBOX, refresh_part, _create_model_part2, admin_edit_part},
 	{G_TYPE_STRING, SORTID_HIDDEN, "Hidden", FALSE,
@@ -325,8 +333,10 @@ static int _build_min_max_32_string(char *buffer, int buf_size,
 {
 	char tmp_min[8];
 	char tmp_max[8];
-	convert_num_unit((float)min, tmp_min, sizeof(tmp_min), UNIT_NONE);
-	convert_num_unit((float)max, tmp_max, sizeof(tmp_max), UNIT_NONE);
+	convert_num_unit((float)min, tmp_min, sizeof(tmp_min), UNIT_NONE,
+			 working_sview_config.convert_flags);
+	convert_num_unit((float)max, tmp_max, sizeof(tmp_max), UNIT_NONE,
+			 working_sview_config.convert_flags);
 
 	if (max == min)
 		return snprintf(buffer, buf_size, "%s", tmp_max);
@@ -356,6 +366,7 @@ static void _set_active_combo_part(GtkComboBox *combo,
 		goto end_it;
 	switch(type) {
 	case SORTID_DEFAULT:
+	case SORTID_EXCLUSIVE_USER:
 	case SORTID_HIDDEN:
 	case SORTID_ROOT:
 		if (!strcmp(temp_char, "yes"))
@@ -506,6 +517,16 @@ static const char *_set_part_msg(update_part_msg_t *part_msg,
 		}
 		type = "default";
 		break;
+	case SORTID_EXCLUSIVE_USER:
+		if (!strcasecmp(new_text, "yes")) {
+			part_msg->flags |= PART_FLAG_EXCLUSIVE_USER;
+			part_msg->flags &= (~PART_FLAG_EXC_USER_CLR);
+		} else if (!strcasecmp(new_text, "no")) {
+			part_msg->flags &= (~PART_FLAG_EXCLUSIVE_USER);
+			part_msg->flags |= PART_FLAG_EXC_USER_CLR;
+		}
+		type = "hidden";
+		break;
 	case SORTID_GRACE_TIME:
 		temp_int = time_str2mins((char *)new_text);
 		type = "grace_time";
@@ -664,6 +685,10 @@ static const char *_set_part_msg(update_part_msg_t *part_msg,
 		type = "Update Features";
 		got_features_edit_signal = xstrdup(new_text);
 		break;
+	case SORTID_QOS_CHAR:
+		type = "QOS Char";
+		part_msg->qos_char = xstrdup(new_text);
+		break;
 	default:
 		type = "unknown";
 		break;
@@ -905,11 +930,14 @@ static void _layout_part_record(GtkTreeView *treeview,
 		GTK_TREE_STORE(gtk_tree_view_get_model(treeview));
 
 	convert_num_unit((float)sview_part_info->sub_part_total.node_alloc_cnt,
-			 tmp_cnt, sizeof(tmp_cnt), UNIT_NONE);
+			 tmp_cnt, sizeof(tmp_cnt), UNIT_NONE,
+			 working_sview_config.convert_flags);
 	convert_num_unit((float)sview_part_info->sub_part_total.node_idle_cnt,
-			 tmp_cnt1, sizeof(tmp_cnt1), UNIT_NONE);
+			 tmp_cnt1, sizeof(tmp_cnt1), UNIT_NONE,
+			 working_sview_config.convert_flags);
 	convert_num_unit((float)sview_part_info->sub_part_total.node_error_cnt,
-			 tmp_cnt2, sizeof(tmp_cnt2), UNIT_NONE);
+			 tmp_cnt2, sizeof(tmp_cnt2), UNIT_NONE,
+			 working_sview_config.convert_flags);
 	snprintf(ind_cnt, sizeof(ind_cnt), "%s/%s/%s",
 		 tmp_cnt, tmp_cnt1, tmp_cnt2);
 
@@ -943,7 +971,8 @@ static void _layout_part_record(GtkTreeView *treeview,
 		case SORTID_CPUS:
 			convert_num_unit((float)part_ptr->total_cpus,
 					 tmp_cnt, sizeof(tmp_cnt),
-					 UNIT_NONE);
+					 UNIT_NONE,
+					 working_sview_config.convert_flags);
 			temp_char = tmp_cnt;
 			break;
 		case SORTID_DEFAULT:
@@ -991,6 +1020,12 @@ static void _layout_part_record(GtkTreeView *treeview,
 			else
 				temp_char = "none";
 			break;
+		case SORTID_EXCLUSIVE_USER:
+			if (part_ptr->flags & PART_FLAG_EXCLUSIVE_USER)
+				yes_no = 1;
+			else
+				yes_no = 0;
+			break;
 		case SORTID_HIDDEN:
 			if (part_ptr->flags & PART_FLAG_HIDDEN)
 				yes_no = 1;
@@ -1007,7 +1042,8 @@ static void _layout_part_record(GtkTreeView *treeview,
 			convert_num_unit((float)sview_part_info->
 					 sub_part_total.mem_total,
 					 tmp_cnt, sizeof(tmp_cnt),
-					 UNIT_MEGA);
+					 UNIT_MEGA,
+					 working_sview_config.convert_flags);
 			temp_char = tmp_cnt;
 			break;
 		case SORTID_NODELIST:
@@ -1020,7 +1056,9 @@ static void _layout_part_record(GtkTreeView *treeview,
 			if (cluster_flags & CLUSTER_FLAG_BG)
 				convert_num_unit((float)part_ptr->total_nodes,
 						 tmp_cnt,
-						 sizeof(tmp_cnt), UNIT_NONE);
+						 sizeof(tmp_cnt), UNIT_NONE,
+						 working_sview_config.
+						 convert_flags);
 			else
 				sprintf(tmp_cnt, "%u", part_ptr->total_nodes);
 			temp_char = tmp_cnt;
@@ -1046,7 +1084,8 @@ static void _layout_part_record(GtkTreeView *treeview,
 			break;
 		case SORTID_PRIORITY:
 			convert_num_unit((float)part_ptr->priority,
-					 time_buf, sizeof(time_buf), UNIT_NONE);
+					 time_buf, sizeof(time_buf), UNIT_NONE,
+					 working_sview_config.convert_flags);
 			temp_char = time_buf;
 			break;
 		case SORTID_REASON:
@@ -1081,12 +1120,19 @@ static void _layout_part_record(GtkTreeView *treeview,
 			convert_num_unit(
 				(float)sview_part_info->sub_part_total.
 				disk_total,
-				time_buf, sizeof(time_buf), UNIT_NONE);
+				time_buf, sizeof(time_buf), UNIT_NONE,
+				working_sview_config.convert_flags);
 			temp_char = time_buf;
 			break;
 		case SORTID_TIMELIMIT:
 			limit_set = part_ptr->max_time;
 			break;
+		case SORTID_QOS_CHAR:
+			if (part_ptr->qos_char)
+				temp_char = part_ptr->qos_char;
+			else
+				temp_char = "N/A";
+			break;
 		default:
 			break;
 		}
@@ -1109,7 +1155,8 @@ static void _layout_part_record(GtkTreeView *treeview,
 			else {
 				convert_num_unit(
 					(float)limit_set,
-					time_buf, sizeof(time_buf), UNIT_NONE);
+					time_buf, sizeof(time_buf), UNIT_NONE,
+					working_sview_config.convert_flags);
 				temp_char = time_buf;
 			}
 			limit_set = NO_VAL;
@@ -1139,7 +1186,7 @@ static void _update_part_record(sview_part_info_t *sview_part_info,
 	char tmp_max_nodes[40], tmp_min_nodes[40], tmp_grace[40];
 	char tmp_cpu_cnt[40], tmp_node_cnt[40], tmp_max_cpus_per_node[40];
 	char *tmp_alt, *tmp_default, *tmp_accounts, *tmp_groups, *tmp_hidden;
-	char *tmp_deny_accounts;
+	char *tmp_deny_accounts, *tmp_qos_char, *tmp_exc_user;
 	char *tmp_qos, *tmp_deny_qos;
 	char *tmp_root, *tmp_share, *tmp_state;
 	uint16_t tmp_preempt;
@@ -1153,7 +1200,8 @@ static void _update_part_record(sview_part_info_t *sview_part_info,
 
 	if (cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)part_ptr->total_cpus, tmp_cpu_cnt,
-				 sizeof(tmp_cpu_cnt), UNIT_NONE);
+				 sizeof(tmp_cpu_cnt), UNIT_NONE,
+				 working_sview_config.convert_flags);
 	else
 		sprintf(tmp_cpu_cnt, "%u", part_ptr->total_cpus);
 
@@ -1187,6 +1235,11 @@ static void _update_part_record(sview_part_info_t *sview_part_info,
 	else
 		tmp_deny_qos = "none";
 
+	if (part_ptr->flags & PART_FLAG_EXCLUSIVE_USER)
+		tmp_exc_user = "yes";
+	else
+		tmp_exc_user = "no";
+
 	if (part_ptr->flags & PART_FLAG_HIDDEN)
 		tmp_hidden = "yes";
 	else
@@ -1204,14 +1257,15 @@ static void _update_part_record(sview_part_info_t *sview_part_info,
 	else {
 		convert_num_unit((float)part_ptr->max_nodes,
 				 tmp_max_nodes, sizeof(tmp_max_nodes),
-				 UNIT_NONE);
+				 UNIT_NONE, working_sview_config.convert_flags);
 	}
 
 	if (part_ptr->min_nodes == (uint32_t) INFINITE)
 		snprintf(tmp_min_nodes, sizeof(tmp_min_nodes), "infinite");
 	else {
 		convert_num_unit((float)part_ptr->min_nodes,
-				 tmp_min_nodes, sizeof(tmp_min_nodes), UNIT_NONE);
+				 tmp_min_nodes, sizeof(tmp_min_nodes),
+				 UNIT_NONE, working_sview_config.convert_flags);
 	}
 
 	if (part_ptr->max_cpus_per_node == INFINITE) {
@@ -1223,7 +1277,8 @@ static void _update_part_record(sview_part_info_t *sview_part_info,
 
 	if (cluster_flags & CLUSTER_FLAG_BG)
 		convert_num_unit((float)part_ptr->total_nodes, tmp_node_cnt,
-				 sizeof(tmp_node_cnt), UNIT_NONE);
+				 sizeof(tmp_node_cnt), UNIT_NONE,
+				 working_sview_config.convert_flags);
 	else
 		sprintf(tmp_node_cnt, "%u", part_ptr->total_nodes);
 
@@ -1252,7 +1307,8 @@ static void _update_part_record(sview_part_info_t *sview_part_info,
 		tmp_preempt = slurm_get_preempt_mode();	/* use cluster param */
 
 	convert_num_unit((float)part_ptr->priority,
-			 tmp_prio, sizeof(tmp_prio), UNIT_NONE);
+			 tmp_prio, sizeof(tmp_prio), UNIT_NONE,
+			 working_sview_config.convert_flags);
 
 	if (part_ptr->max_share & SHARED_FORCE) {
 		snprintf(tmp_share_buf, sizeof(tmp_share_buf), "force:%u",
@@ -1274,6 +1330,11 @@ static void _update_part_record(sview_part_info_t *sview_part_info,
 			      tmp_time, sizeof(tmp_time));
 	}
 
+	if (part_ptr->qos_char)
+		tmp_qos_char = part_ptr->qos_char;
+	else
+		tmp_qos_char = "N/A";
+
 	/* Combining these records provides a slight performance improvement
 	 * NOTE: Some of these fields are cleared here and filled in based upon
 	 * the configuration of nodes within this partition. */
@@ -1286,11 +1347,13 @@ static void _update_part_record(sview_part_info_t *sview_part_info,
 			   SORTID_DEFAULT,    tmp_default,
 			   SORTID_FEATURES,   "",
 			   SORTID_GRACE_TIME, tmp_grace,
+			   SORTID_QOS_CHAR,   tmp_qos_char,
 			   SORTID_ALLOW_ACCOUNTS, tmp_accounts,
 			   SORTID_ALLOW_GROUPS, tmp_groups,
 			   SORTID_ALLOW_QOS,  tmp_qos,
 			   SORTID_DENY_ACCOUNTS, tmp_deny_accounts,
 			   SORTID_DENY_QOS,   tmp_deny_qos,
+			   SORTID_EXCLUSIVE_USER, tmp_exc_user,
 			   SORTID_HIDDEN,     tmp_hidden,
 			   SORTID_JOB_SIZE,   tmp_size,
 			   SORTID_MAX_CPUS_PER_NODE, tmp_max_cpus_per_node,
@@ -1346,21 +1409,24 @@ static void _update_part_sub_record(sview_part_sub_t *sview_part_sub,
 		if (sview_part_sub->cpu_alloc_cnt) {
 			convert_num_unit((float)sview_part_sub->cpu_alloc_cnt,
 					 tmp_cnt,
-					 sizeof(tmp_cnt), UNIT_NONE);
+					 sizeof(tmp_cnt), UNIT_NONE,
+					 working_sview_config.convert_flags);
 			xstrfmtcat(tmp_cpus, "Alloc:%s", tmp_cnt);
 			if (cluster_flags & CLUSTER_FLAG_BG) {
 				convert_num_unit(
 					(float)(sview_part_sub->cpu_alloc_cnt
 						/ cpus_per_node),
 					tmp_cnt,
-					sizeof(tmp_cnt), UNIT_NONE);
+					sizeof(tmp_cnt), UNIT_NONE,
+					working_sview_config.convert_flags);
 				xstrfmtcat(tmp_nodes, "Alloc:%s", tmp_cnt);
 			}
 		}
 		if (sview_part_sub->cpu_error_cnt) {
 			convert_num_unit((float)sview_part_sub->cpu_error_cnt,
 					 tmp_cnt,
-					 sizeof(tmp_cnt), UNIT_NONE);
+					 sizeof(tmp_cnt), UNIT_NONE,
+					 working_sview_config.convert_flags);
 			if (tmp_cpus)
 				xstrcat(tmp_cpus, " ");
 			xstrfmtcat(tmp_cpus, "Err:%s", tmp_cnt);
@@ -1369,7 +1435,8 @@ static void _update_part_sub_record(sview_part_sub_t *sview_part_sub,
 					(float)(sview_part_sub->cpu_error_cnt
 						/ cpus_per_node),
 					tmp_cnt,
-					sizeof(tmp_cnt), UNIT_NONE);
+					sizeof(tmp_cnt), UNIT_NONE,
+					working_sview_config.convert_flags);
 				if (tmp_nodes)
 					xstrcat(tmp_nodes, " ");
 				xstrfmtcat(tmp_nodes, "Err:%s", tmp_cnt);
@@ -1378,7 +1445,8 @@ static void _update_part_sub_record(sview_part_sub_t *sview_part_sub,
 		if (sview_part_sub->cpu_idle_cnt) {
 			convert_num_unit((float)sview_part_sub->cpu_idle_cnt,
 					 tmp_cnt,
-					 sizeof(tmp_cnt), UNIT_NONE);
+					 sizeof(tmp_cnt), UNIT_NONE,
+					 working_sview_config.convert_flags);
 			if (tmp_cpus)
 				xstrcat(tmp_cpus, " ");
 			xstrfmtcat(tmp_cpus, "Idle:%s", tmp_cnt);
@@ -1387,7 +1455,8 @@ static void _update_part_sub_record(sview_part_sub_t *sview_part_sub,
 					(float)(sview_part_sub->cpu_idle_cnt
 						/ cpus_per_node),
 					tmp_cnt,
-					sizeof(tmp_cnt), UNIT_NONE);
+					sizeof(tmp_cnt), UNIT_NONE,
+					working_sview_config.convert_flags);
 				if (tmp_nodes)
 					xstrcat(tmp_nodes, " ");
 				xstrfmtcat(tmp_nodes, "Idle:%s", tmp_cnt);
@@ -1396,20 +1465,24 @@ static void _update_part_sub_record(sview_part_sub_t *sview_part_sub,
 	} else {
 		tmp_cpus = xmalloc(20);
 		convert_num_unit((float)sview_part_sub->cpu_cnt,
-				 tmp_cpus, 20, UNIT_NONE);
+				 tmp_cpus, 20, UNIT_NONE,
+				 working_sview_config.convert_flags);
 	}
 
 	if (!tmp_nodes) {
 		convert_num_unit((float)sview_part_sub->node_cnt, tmp_cnt,
-				 sizeof(tmp_cnt), UNIT_NONE);
+				 sizeof(tmp_cnt), UNIT_NONE,
+				 working_sview_config.convert_flags);
 		tmp_nodes = xstrdup(tmp_cnt);
 	}
 
 	convert_num_unit((float)sview_part_sub->disk_total, tmp_disk,
-			 sizeof(tmp_disk), UNIT_NONE);
+			 sizeof(tmp_disk), UNIT_NONE,
+			 working_sview_config.convert_flags);
 
 	convert_num_unit((float)sview_part_sub->mem_total, tmp_mem,
-			 sizeof(tmp_mem), UNIT_MEGA);
+			 sizeof(tmp_mem), UNIT_MEGA,
+			 working_sview_config.convert_flags);
 
 	tmp_nodelist = hostlist_ranged_string_xmalloc(sview_part_sub->hl);
 
@@ -1502,8 +1575,7 @@ static void _part_info_free(sview_part_info_t *sview_part_info)
 		xfree(sview_part_info->part_name);
 		memset(&sview_part_info->sub_part_total, 0,
 		       sizeof(sview_part_sub_t));
-		if (sview_part_info->sub_list)
-			list_destroy(sview_part_info->sub_list);
+		FREE_NULL_LIST(sview_part_info->sub_list);
 	}
 }
 
@@ -1526,8 +1598,7 @@ static void _destroy_part_sub(void *object)
 		xfree(sview_part_sub->reason);
 		if (sview_part_sub->hl)
 			hostlist_destroy(sview_part_sub->hl);
-		if (sview_part_sub->node_ptr_list)
-			list_destroy(sview_part_sub->node_ptr_list);
+		FREE_NULL_LIST(sview_part_sub->node_ptr_list);
 		xfree(sview_part_sub);
 	}
 }
@@ -1852,7 +1923,7 @@ static List _create_part_info_list(partition_info_msg_t *part_info_ptr,
 
 	if (last_list) {
 		list_iterator_destroy(last_list_itr);
-		list_destroy(last_list);
+		FREE_NULL_LIST(last_list);
 	}
 
 	return info_list;
@@ -2139,6 +2210,7 @@ static GtkListStore *_create_model_part2(int type)
 	last_model = NULL;	/* Reformat display */
 	switch (type) {
 	case SORTID_DEFAULT:
+	case SORTID_EXCLUSIVE_USER:
 	case SORTID_HIDDEN:
 		model = gtk_list_store_new(2, G_TYPE_STRING, G_TYPE_INT);
 		gtk_list_store_append(model, &iter);
@@ -2224,6 +2296,7 @@ extern GtkListStore *create_model_part(int type)
 	last_model = NULL;	/* Reformat display */
 	switch (type) {
 	case SORTID_DEFAULT:
+	case SORTID_EXCLUSIVE_USER:
 	case SORTID_HIDDEN:
 	case SORTID_ROOT:
 		model = gtk_list_store_new(2, G_TYPE_STRING, G_TYPE_INT);
@@ -2755,7 +2828,7 @@ display_it:
 
 	_update_info_part(send_info_list,
 			  GTK_TREE_VIEW(spec_info->display_widget));
-	list_destroy(send_info_list);
+	FREE_NULL_LIST(send_info_list);
 end_it:
 	popup_win->toggled = 0;
 	popup_win->force_refresh = 0;
diff --git a/src/sview/popups.c b/src/sview/popups.c
index 0a1bac900..ef48764b4 100644
--- a/src/sview/popups.c
+++ b/src/sview/popups.c
@@ -262,8 +262,7 @@ static void _layout_conf_ctl(GtkTreeStore *treestore,
 
 	ret_list = slurm_ctl_conf_2_key_pairs(slurm_ctl_conf_ptr);
 	_gtk_print_key_pairs(ret_list, tmp_str, 1, treestore, &iter);
-	if (ret_list)
-		list_destroy(ret_list);
+	FREE_NULL_LIST(ret_list);
 
 	_gtk_print_key_pairs(slurm_ctl_conf_ptr->acct_gather_conf,
 			     "Account Gather", 0, treestore, &iter);
@@ -420,8 +419,7 @@ extern void create_dbconfig_popup(GtkAction *action, gpointer user_data)
 
 	gtk_widget_show_all(popup);
 
-	if (dbd_config_list)
-		list_destroy(dbd_config_list);
+	FREE_NULL_LIST(dbd_config_list);
 
 	return;
 }
@@ -1037,7 +1035,7 @@ extern void change_grid_popup(GtkAction *action, gpointer user_data)
 			 * here and it will be remade in get_system_stats(). */
 			if ((width > working_sview_config.grid_x_width)
 			    && grid_button_list) {
-				list_destroy(grid_button_list);
+				FREE_NULL_LIST(grid_button_list);
 				grid_button_list = NULL;
 				refresh = 1;
 			}
diff --git a/src/sview/resv_info.c b/src/sview/resv_info.c
index 1b7df15b0..d91af75fc 100644
--- a/src/sview/resv_info.c
+++ b/src/sview/resv_info.c
@@ -3,6 +3,7 @@
  *  mode of sview.
  *****************************************************************************
  *  Copyright (C) 2009-2011 Lawrence Livermore National Security.
+ *  Portions Copyright (C) 2012-2015 SchedMD LLC <http://www.schedmd.com>
  *  Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
  *  Written by Morris Jette <jette@llnl.gov>
  *  CODE-OCEC-09-009. All rights reserved.
@@ -53,6 +54,7 @@ enum {
 	SORTID_POS = POS_LOC,
 	SORTID_ACCOUNTS,
 	SORTID_ACTION,
+	SORTID_BURST_BUFFER,
 	SORTID_COLOR,
 	SORTID_COLOR_INX,
 	SORTID_CORE_CNT,
@@ -67,8 +69,10 @@ enum {
 	SORTID_PARTITION,
 	SORTID_TIME_END,
 	SORTID_TIME_START,
+	SORTID_TRES,
 	SORTID_UPDATED,
 	SORTID_USERS,
+	SORTID_WATTS,
 	SORTID_CNT
 };
 
@@ -77,9 +81,9 @@ enum {
  * known options) create it in function create_model_*.
  */
 
-/*these are the settings to apply for the user
+/* these are the settings to apply for the user
  * on the first startup after a fresh slurm install.
- * s/b a const probably*/
+ * s/b a const probably */
 static char *_initial_page_opts = "Name,Node_Count,Core_Count,NodeList,"
 	"Time_Start,Time_End";
 
@@ -121,6 +125,8 @@ static display_data_t display_data_resv[] = {
 	 refresh_resv, create_model_resv, admin_edit_resv},
 	{G_TYPE_STRING, SORTID_ACCOUNTS,   "Accounts", FALSE, EDIT_TEXTBOX,
 	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_BURST_BUFFER,  "BurstBuffer", FALSE,
+	 EDIT_TEXTBOX, refresh_resv, create_model_resv, admin_edit_resv},
 	{G_TYPE_STRING, SORTID_LICENSES,   "Licenses", TRUE, EDIT_TEXTBOX,
 	 refresh_resv, create_model_resv, admin_edit_resv},
 	{G_TYPE_STRING, SORTID_USERS,      "Users", FALSE, EDIT_TEXTBOX,
@@ -135,8 +141,12 @@ static display_data_t display_data_resv[] = {
 	 refresh_resv, create_model_resv, admin_edit_resv},
 	{G_TYPE_INT, SORTID_COLOR_INX,  NULL, FALSE, EDIT_NONE,
 	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_TRES,   "TRES", FALSE, EDIT_NONE,
+	 refresh_resv, create_model_resv, admin_edit_resv},
 	{G_TYPE_INT,    SORTID_UPDATED,    NULL, FALSE, EDIT_NONE,
 	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_WATTS,    "Watts", FALSE, EDIT_TEXTBOX,
+	 refresh_resv, create_model_resv, admin_edit_resv},
 	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
 };
 
@@ -176,6 +186,8 @@ static display_data_t create_data_resv[] = {
 	 refresh_resv, create_model_resv, admin_edit_resv},
 	{G_TYPE_STRING, SORTID_ACCOUNTS,   "Accounts", FALSE, EDIT_TEXTBOX,
 	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_BURST_BUFFER,  "BurstBuffer", FALSE,
+	 EDIT_TEXTBOX, refresh_resv, create_model_resv, admin_edit_resv},
 	{G_TYPE_STRING, SORTID_USERS,      "Users", FALSE, EDIT_TEXTBOX,
 	 refresh_resv, create_model_resv, admin_edit_resv},
 	{G_TYPE_STRING, SORTID_PARTITION,  "Partition", FALSE, EDIT_TEXTBOX,
@@ -184,6 +196,8 @@ static display_data_t create_data_resv[] = {
 	 refresh_resv, create_model_resv, admin_edit_resv},
 	{G_TYPE_STRING, SORTID_FLAGS, "Flags", FALSE, EDIT_TEXTBOX,
 	 refresh_resv, create_model_resv, admin_edit_resv},
+	{G_TYPE_STRING, SORTID_WATTS, "Watts", FALSE, EDIT_TEXTBOX,
+	 refresh_resv, create_model_resv, admin_edit_resv},
 	{G_TYPE_NONE, -1, NULL, FALSE, EDIT_NONE}
 };
 
@@ -223,7 +237,7 @@ static void _set_active_combo_resv(GtkComboBox *combo,
 	gtk_tree_model_get(model, iter, type, &temp_char, -1);
 	if (!temp_char)
 		goto end_it;
-	switch(type) {
+	switch (type) {
 	case SORTID_ACTION:
 		if (!strcmp(temp_char, "none"))
 			action = 0;
@@ -242,6 +256,27 @@ end_it:
 
 }
 
+static uint32_t _parse_watts(char * watts_str)
+{
+	uint32_t watts_num = 0;
+	char *end_ptr = NULL;
+
+	if (!strcasecmp(watts_str, "n/a") || !strcasecmp(watts_str, "none"))
+		return watts_num;
+	if (!strcasecmp(watts_str, "INFINITE"))
+		return INFINITE;
+	watts_num = strtol(watts_str, &end_ptr, 10);
+	if ((end_ptr[0] == 'k') || (end_ptr[0] == 'K')) {
+		watts_num *= 1000;
+	} else if ((end_ptr[0] == 'm') || (end_ptr[0] == 'M')) {
+		watts_num *= 1000000;
+	} else if (end_ptr[0] != '\0') {
+		g_printerr("invalid watts value\n");
+		watts_num = NO_VAL;
+	}
+	return watts_num;
+}
+
 /* don't free this char */
 static const char *_set_resv_msg(resv_desc_msg_t *resv_msg,
 				 const char *new_text,
@@ -258,7 +293,7 @@ static const char *_set_resv_msg(resv_desc_msg_t *resv_msg,
 	if (!resv_msg)
 		return NULL;
 
-	switch(column) {
+	switch (column) {
 	case SORTID_ACCOUNTS:
 		resv_msg->accounts = xstrdup(new_text);
 		type = "accounts";
@@ -270,6 +305,10 @@ static const char *_set_resv_msg(resv_desc_msg_t *resv_msg,
 		else
 			got_edit_signal = xstrdup(new_text);
 		break;
+	case SORTID_BURST_BUFFER:
+		resv_msg->burst_buffer = xstrdup(new_text);
+		type = "burst_buffer";
+		break;
 	case SORTID_DURATION:
 		temp_int = time_str2mins((char *)new_text);
 		if (temp_int <= 0)
@@ -339,6 +378,10 @@ static const char *_set_resv_msg(resv_desc_msg_t *resv_msg,
 		resv_msg->users = xstrdup(new_text);
 		type = "users";
 		break;
+	case SORTID_WATTS:
+		resv_msg->resv_watts = _parse_watts((char *) new_text);
+		type = "watts";
+		break;
 	default:
 		type = "unknown";
 		break;
@@ -447,7 +490,7 @@ static GtkWidget *_admin_full_edit_resv(resv_desc_msg_t *resv_msg,
 
 	gtk_table_set_homogeneous(table, FALSE);
 
-	for(i = 0; i < SORTID_CNT; i++) {
+	for (i = 0; i < SORTID_CNT; i++) {
 		while (display_data++) {
 			if (display_data->id == -1)
 				break;
@@ -475,7 +518,7 @@ static void _layout_resv_record(GtkTreeView *treeview,
 				int update)
 {
 	GtkTreeIter iter;
-	char time_buf[20];
+	char time_buf[20], power_buf[20];
 	reserve_info_t *resv_ptr = sview_resv_info->resv_ptr;
 	char *temp_char = NULL;
 
@@ -487,8 +530,14 @@ static void _layout_resv_record(GtkTreeView *treeview,
 						 SORTID_ACCOUNTS),
 				   resv_ptr->accounts);
 
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_resv,
+						 SORTID_BURST_BUFFER),
+				   resv_ptr->burst_buffer);
+
 	convert_num_unit((float)resv_ptr->core_cnt,
-			 time_buf, sizeof(time_buf), UNIT_NONE);
+			 time_buf, sizeof(time_buf), UNIT_NONE,
+			 working_sview_config.convert_flags);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_resv,
 						 SORTID_CORE_CNT),
@@ -520,7 +569,8 @@ static void _layout_resv_record(GtkTreeView *treeview,
 
 	/* NOTE: node_cnt in reservation info from slurmctld ONE number */
 	convert_num_unit((float)resv_ptr->node_cnt,
-			 time_buf, sizeof(time_buf), UNIT_NONE);
+			 time_buf, sizeof(time_buf), UNIT_NONE,
+			 working_sview_config.convert_flags);
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_resv,
 						 SORTID_NODE_CNT),
@@ -549,17 +599,39 @@ static void _layout_resv_record(GtkTreeView *treeview,
 						 SORTID_TIME_START),
 				   time_buf);
 
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_resv,
+						 SORTID_TRES),
+				   resv_ptr->tres_str);
+
 	add_display_treestore_line(update, treestore, &iter,
 				   find_col_name(display_data_resv,
 						 SORTID_USERS),
 				   resv_ptr->users);
+
+	if ((resv_ptr->resv_watts == NO_VAL) || (resv_ptr->resv_watts == 0)) {
+		snprintf(power_buf, sizeof(power_buf), "0");
+	} else if ((resv_ptr->resv_watts % 1000000) == 0) {
+		snprintf(power_buf, sizeof(power_buf), "%uM",
+			 resv_ptr->resv_watts / 1000000);
+	} else if ((resv_ptr->resv_watts % 1000) == 0) {
+		snprintf(power_buf, sizeof(power_buf), "%uK",
+			 resv_ptr->resv_watts / 1000);
+	} else {
+		snprintf(power_buf, sizeof(power_buf), "%u",
+			 resv_ptr->resv_watts);
+	}
+	add_display_treestore_line(update, treestore, &iter,
+				   find_col_name(display_data_resv,
+						 SORTID_WATTS),
+				   power_buf);
 }
 
 static void _update_resv_record(sview_resv_info_t *sview_resv_info_ptr,
 				GtkTreeStore *treestore)
 {
-	char tmp_duration[40], tmp_end[40], tmp_nodes[40], tmp_start[40],
-		tmp_cores[40];
+	char tmp_duration[40], tmp_end[40], tmp_nodes[40], tmp_start[40];
+	char tmp_cores[40], power_buf[40];
 	char *tmp_flags;
 	reserve_info_t *resv_ptr = sview_resv_info_ptr->resv_ptr;
 
@@ -573,17 +645,33 @@ static void _update_resv_record(sview_resv_info_t *sview_resv_info_ptr,
 	tmp_flags = reservation_flags_string(resv_ptr->flags);
 
 	convert_num_unit((float)resv_ptr->core_cnt,
-			 tmp_cores, sizeof(tmp_cores), UNIT_NONE);
+			 tmp_cores, sizeof(tmp_cores), UNIT_NONE,
+			 working_sview_config.convert_flags);
 
 	convert_num_unit((float)resv_ptr->node_cnt,
-			 tmp_nodes, sizeof(tmp_nodes), UNIT_NONE);
+			 tmp_nodes, sizeof(tmp_nodes), UNIT_NONE,
+			 working_sview_config.convert_flags);
 
 	slurm_make_time_str((time_t *)&resv_ptr->start_time, tmp_start,
 			    sizeof(tmp_start));
 
+	if ((resv_ptr->resv_watts == NO_VAL) || (resv_ptr->resv_watts == 0)) {
+		snprintf(power_buf, sizeof(power_buf), "0");
+	} else if ((resv_ptr->resv_watts % 1000000) == 0) {
+		snprintf(power_buf, sizeof(power_buf), "%uM",
+			 resv_ptr->resv_watts / 1000000);
+	} else if ((resv_ptr->resv_watts % 1000) == 0) {
+		snprintf(power_buf, sizeof(power_buf), "%uK",
+			 resv_ptr->resv_watts / 1000);
+	} else {
+		snprintf(power_buf, sizeof(power_buf), "%u",
+			 resv_ptr->resv_watts);
+	}
+
 	/* Combining these records provides a slight performance improvement */
 	gtk_tree_store_set(treestore, &sview_resv_info_ptr->iter_ptr,
 			   SORTID_ACCOUNTS,   resv_ptr->accounts,
+			   SORTID_BURST_BUFFER, resv_ptr->burst_buffer,
 			   SORTID_COLOR,
 				sview_colors[sview_resv_info_ptr->color_inx],
 			   SORTID_COLOR_INX,  sview_resv_info_ptr->color_inx,
@@ -599,8 +687,10 @@ static void _update_resv_record(sview_resv_info_t *sview_resv_info_ptr,
 			   SORTID_PARTITION,  resv_ptr->partition,
 			   SORTID_TIME_START, tmp_start,
 			   SORTID_TIME_END,   tmp_end,
+			   SORTID_TRES,       resv_ptr->tres_str,
 			   SORTID_UPDATED,    1,
 			   SORTID_USERS,      resv_ptr->users,
+			   SORTID_WATTS,      power_buf,
 			   -1);
 
 	xfree(tmp_flags);
@@ -744,7 +834,7 @@ static List _create_resv_info_list(reserve_info_msg_t *resv_info_ptr)
 
 	if (last_list) {
 		list_iterator_destroy(last_list_itr);
-		list_destroy(last_list);
+		FREE_NULL_LIST(last_list);
 	}
 
 update_color:
@@ -1317,7 +1407,7 @@ display_it:
 
 	_update_info_resv(send_resv_list,
 			  GTK_TREE_VIEW(spec_info->display_widget));
-	list_destroy(send_resv_list);
+	FREE_NULL_LIST(send_resv_list);
 end_it:
 	popup_win->toggled = 0;
 	popup_win->force_refresh = 0;
diff --git a/src/sview/sview.c b/src/sview/sview.c
index 75c42a117..d9805ea13 100644
--- a/src/sview/sview.c
+++ b/src/sview/sview.c
@@ -92,6 +92,7 @@ job_info_msg_t *g_job_info_ptr = NULL;
 node_info_msg_t *g_node_info_ptr = NULL;
 partition_info_msg_t *g_part_info_ptr = NULL;
 reserve_info_msg_t *g_resv_info_ptr = NULL;
+burst_buffer_info_msg_t *g_bb_info_ptr = NULL;
 slurm_ctl_conf_info_msg_t *g_ctl_info_ptr = NULL;
 job_step_info_response_msg_t *g_step_info_ptr = NULL;
 topo_info_response_msg_t *g_topo_info_msg_ptr = NULL;
@@ -126,6 +127,10 @@ display_data_t main_display_data[] = {
 	 refresh_main, create_model_resv, admin_edit_resv,
 	 get_info_resv, specific_info_resv,
 	 set_menus_resv, NULL},
+	{G_TYPE_NONE, BB_PAGE, "Burst Buffers", TRUE, -1,
+	 refresh_main, create_model_bb, admin_edit_bb,
+	 get_info_bb, specific_info_bb,
+	 set_menus_bb, NULL},
 #ifdef HAVE_BG
 	{G_TYPE_NONE, BLOCK_PAGE, "BG Blocks", TRUE, -1,
 	 refresh_main, NULL, NULL,
@@ -320,9 +325,9 @@ static void _page_switched(GtkNotebook     *notebook,
 		/* If we return here we would not clear the grid which
 		   may need to be done. */
 		/* if (toggled || force_refresh) { */
-		/* 	(main_display_data[i].get_info)( */
-		/* 		table, &main_display_data[i]); */
-		/* 	return; */
+		/*	(main_display_data[i].get_info)( */
+		/*		table, &main_display_data[i]); */
+		/*	return; */
 		/* } */
 
 		page_thr = xmalloc(sizeof(page_thr_t));
@@ -382,10 +387,7 @@ static void _set_hidden(GtkToggleAction *action)
 		tmp = g_strdup_printf(
 			"Hidden partitions and their jobs are now visible");
 	if (apply_hidden_change) {
-		if (grid_button_list) {
-			list_destroy(grid_button_list);
-			grid_button_list = NULL;
-		}
+		FREE_NULL_LIST(grid_button_list);
 		get_system_stats(main_grid_table);
 	}
 	apply_hidden_change = TRUE;
@@ -460,6 +462,7 @@ static void _set_ruled(GtkToggleAction *action)
 	cluster_change_part();
 	cluster_change_job();
 	cluster_change_node();
+	cluster_change_bb();
 
 	refresh_main(NULL, NULL);
 	display_edit_note(tmp);
@@ -632,16 +635,11 @@ static gboolean _delete(GtkWidget *widget,
 	select_g_ba_fini();
 
 #ifdef MEMORY_LEAK_DEBUG
-	if (popup_list)
-		list_destroy(popup_list);
-	if (grid_button_list)
-		list_destroy(grid_button_list);
-	if (multi_button_list)
-		list_destroy(multi_button_list);
-	if (signal_params_list)
-		list_destroy(signal_params_list);
-	if (cluster_list)
-		list_destroy(cluster_list);
+	FREE_NULL_LIST(popup_list);
+	FREE_NULL_LIST(grid_button_list);
+	FREE_NULL_LIST(multi_button_list);
+	FREE_NULL_LIST(signal_params_list);
+	FREE_NULL_LIST(cluster_list);
 	xfree(orig_cluster_name);
 	uid_cache_clear();
 #endif
@@ -995,12 +993,12 @@ static GtkWidget *_get_menubar_menu(GtkWidget *window, GtkWidget *notebook)
 	}
 	xfree(ui_description);
 	/* GList *action_list = */
-	/* 	gtk_action_group_list_actions(menu_action_group); */
+	/*	gtk_action_group_list_actions(menu_action_group); */
 	/* GtkAction *action = NULL; */
 	/* int i=0; */
 	/* while ((action = g_list_nth_data(action_list, i++))) { */
-	/* 	g_print("got %s and %x\n", gtk_action_get_name(action), */
-	/* 		action); */
+	/*	g_print("got %s and %x\n", gtk_action_get_name(action), */
+	/*		action); */
 	/* } */
 
 	/* Get the pointers to the correct action so if we ever need
@@ -1116,8 +1114,8 @@ extern void _change_cluster_main(GtkComboBox *combo, gpointer extra)
 	   going back to the same cluster we were just at.
 	*/
 	/* if (working_cluster_rec) { */
-	/* 	if (!strcmp(cluster_rec->name, working_cluster_rec->name)) */
-	/* 		return; */
+	/*	if (!strcmp(cluster_rec->name, working_cluster_rec->name)) */
+	/*		return; */
 	/* } */
 
 	/* free old info under last cluster */
@@ -1125,6 +1123,8 @@ extern void _change_cluster_main(GtkComboBox *combo, gpointer extra)
 	g_block_info_ptr = NULL;
 	slurm_free_front_end_info_msg(g_front_end_info_ptr);
 	g_front_end_info_ptr = NULL;
+	slurm_free_burst_buffer_info_msg(g_bb_info_ptr);
+	g_bb_info_ptr = NULL;
 	slurm_free_job_info_msg(g_job_info_ptr);
 	g_job_info_ptr = NULL;
 	slurm_free_node_info_msg(g_node_info_ptr);
@@ -1208,11 +1208,11 @@ extern void _change_cluster_main(GtkComboBox *combo, gpointer extra)
 	cluster_change_part();
 	cluster_change_job();
 	cluster_change_node();
+	cluster_change_bb();
 
 	/* destroy old stuff */
 	if (grid_button_list) {
-		list_destroy(grid_button_list);
-		grid_button_list = NULL;
+		FREE_NULL_LIST(grid_button_list);
 		got_grid = 1;
 	}
 
@@ -1292,10 +1292,7 @@ extern void _change_cluster_main(GtkComboBox *combo, gpointer extra)
 			/* I know we just did this before, but it
 			   needs to be done again here.
 			*/
-			if (grid_button_list) {
-				list_destroy(grid_button_list);
-				grid_button_list = NULL;
-			}
+			FREE_NULL_LIST(grid_button_list);
 			get_system_stats(main_grid_table);
 		}
 
@@ -1323,8 +1320,7 @@ static GtkWidget *_create_cluster_combo(void)
 
 	cluster_list = slurmdb_get_info_cluster(NULL);
 	if (!cluster_list || !list_count(cluster_list)) {
-		if (cluster_list)
-			list_destroy(cluster_list);
+		FREE_NULL_LIST(cluster_list);
 		return NULL;
 	}
 
diff --git a/src/sview/sview.h b/src/sview/sview.h
index 07c2c4ee7..8dab73eae 100644
--- a/src/sview/sview.h
+++ b/src/sview/sview.h
@@ -98,6 +98,7 @@
 enum { JOB_PAGE,
        PART_PAGE,
        RESV_PAGE,
+       BB_PAGE,
        BLOCK_PAGE,
        NODE_PAGE,
        FRONT_END_PAGE,
@@ -208,6 +209,7 @@ typedef struct {
 	bool show_hidden;
 	bool save_page_opts;
 	uint16_t tab_pos;
+	uint32_t convert_flags;
 } sview_config_t;
 
 struct display_data {
@@ -352,6 +354,7 @@ extern job_info_msg_t *g_job_info_ptr;
 extern node_info_msg_t *g_node_info_ptr;
 extern partition_info_msg_t *g_part_info_ptr;
 extern reserve_info_msg_t *g_resv_info_ptr;
+extern burst_buffer_info_msg_t *g_bb_info_ptr;
 extern slurm_ctl_conf_info_msg_t *g_ctl_info_ptr;
 extern job_step_info_response_msg_t *g_step_info_ptr;
 extern topo_info_response_msg_t *g_topo_info_msg_ptr;
@@ -665,4 +668,20 @@ extern int save_defaults(bool final_save);
 extern GtkListStore *create_model_defaults(int type);
 extern int configure_defaults(void);
 
+//bb_info.c
+extern void refresh_bb(GtkAction *action, gpointer user_data);
+extern GtkListStore *create_model_bb(int type);
+extern void admin_edit_bb(GtkCellRendererText *cell,
+			  const char *path_string,
+			  const char *new_text,
+			  gpointer data);
+extern void get_info_bb(GtkTable *table, display_data_t *display_data);
+extern void specific_info_bb(popup_info_t *popup_win);
+extern void set_menus_bb(void *arg, void *arg2, GtkTreePath *path, int type);
+extern void cluster_change_bb(void);
+extern void popup_all_bb(GtkTreeModel *model, GtkTreeIter *iter, int id);
+extern void select_admin_bb(GtkTreeModel *model, GtkTreeIter *iter,
+			    display_data_t *display_data,
+			    GtkTreeView *treeview);
+
 #endif
diff --git a/testsuite/Makefile.in b/testsuite/Makefile.in
index 67a793958..ea3e35c4b 100644
--- a/testsuite/Makefile.in
+++ b/testsuite/Makefile.in
@@ -98,6 +98,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -106,10 +107,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -122,7 +125,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -256,6 +259,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -305,8 +310,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -325,6 +334,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -368,6 +380,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -391,6 +404,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/testsuite/expect/Makefile.am b/testsuite/expect/Makefile.am
index b94c60c05..3cad95cb5 100644
--- a/testsuite/expect/Makefile.am
+++ b/testsuite/expect/Makefile.am
@@ -96,12 +96,12 @@ EXTRA_DIST = \
 	test1.73			\
 	test1.74			\
 	test1.75			\
+	test1.77			\
 	test1.80			\
 	test1.81			\
 	test1.82			\
 	test1.83			\
 	test1.84			\
-	test1.84.bash			\
 	test1.85			\
 	test1.86			\
 	test1.87			\
@@ -124,6 +124,7 @@ EXTRA_DIST = \
 	test1.96.prog.c			\
 	test1.97			\
 	test1.99			\
+	test1.100			\
 	test2.1				\
 	test2.2				\
 	test2.3				\
@@ -148,6 +149,7 @@ EXTRA_DIST = \
 	test2.22			\
 	test2.23			\
 	test2.24			\
+	test2.25			\
 	test3.1				\
 	test3.2				\
 	test3.3				\
@@ -160,16 +162,18 @@ EXTRA_DIST = \
 	test3.9				\
 	test3.10			\
 	test3.11			\
-	inc3.11.1                      \
-	inc3.11.2                      \
-	inc3.11.3                      \
-	inc3.11.4                      \
-	inc3.11.5                      \
-	inc3.11.6                      \
-	inc3.11.7                      \
-	inc3.11.8                      \
+	inc3.11.1			\
+	inc3.11.2			\
+	inc3.11.3			\
+	inc3.11.4			\
+	inc3.11.5			\
+	inc3.11.6			\
+	inc3.11.7			\
+	inc3.11.8			\
 	test3.12			\
 	test3.13			\
+	test3.14			\
+	test3.15			\
 	test4.1				\
 	test4.2				\
 	test4.3				\
@@ -182,6 +186,7 @@ EXTRA_DIST = \
 	test4.10			\
 	test4.11			\
 	test4.12			\
+	test4.13			\
 	test5.1				\
 	test5.2				\
 	test5.3				\
@@ -243,6 +248,19 @@ EXTRA_DIST = \
 	test7.16			\
 	test7.17			\
 	test7.17.prog.c			\
+	test7.17_configs/test7.17.1/gres.conf	\
+	test7.17_configs/test7.17.1/slurm.conf	\
+	test7.17_configs/test7.17.2/gres.conf	\
+	test7.17_configs/test7.17.2/slurm.conf	\
+	test7.17_configs/test7.17.3/gres.conf	\
+	test7.17_configs/test7.17.3/slurm.conf	\
+	test7.17_configs/test7.17.4/gres.conf	\
+	test7.17_configs/test7.17.4/slurm.conf	\
+	test7.17_configs/test7.17.5/slurm.conf	\
+	test7.17_configs/test7.17.6/gres.conf	\
+	test7.17_configs/test7.17.6/slurm.conf	\
+	test7.17_configs/test7.17.7/gres.conf	\
+	test7.17_configs/test7.17.7/slurm.conf	\
 	test8.1				\
 	test8.2				\
 	test8.3				\
@@ -320,6 +338,7 @@ EXTRA_DIST = \
 	test14.7			\
 	test14.8			\
 	test14.9			\
+	test14.10			\
 	test15.1			\
 	test15.2			\
 	test15.3			\
@@ -389,6 +408,8 @@ EXTRA_DIST = \
 	test17.36			\
 	test17.37			\
 	test17.38			\
+	test17.39			\
+	test17.40			\
 	test19.1			\
 	test19.2			\
 	test19.3			\
@@ -431,10 +452,7 @@ EXTRA_DIST = \
 	test21.19			\
 	test21.20			\
 	test21.21			\
-	inc21.21.1                      \
-	inc21.21.2                      \
-	inc21.21.3                      \
-	inc21.21.4                      \
+	inc21.21_tests                  \
 	test21.22			\
 	test21.23			\
 	test21.24			\
@@ -446,6 +464,7 @@ EXTRA_DIST = \
 	test21.30                       \
 	test21.31			\
 	test21.32			\
+	test21.34			\
 	inc21.30.1                      \
 	inc21.30.2                      \
 	inc21.30.3                      \
@@ -462,6 +481,9 @@ EXTRA_DIST = \
 	inc21.30.14			\
 	inc21.30.15			\
 	inc21.30.16			\
+	inc21.34.1			\
+	inc21.34.2			\
+	inc21.34_test			\
 	test22.1			\
 	inc22.1.1                       \
 	inc22.1.2                       \
@@ -492,6 +514,8 @@ EXTRA_DIST = \
 	test28.5                        \
 	test28.6                        \
 	test28.7			\
+	test28.8			\
+	test28.9			\
 	test29.1                        \
 	test29.2                        \
 	test29.3                        \
@@ -516,20 +540,18 @@ EXTRA_DIST = \
 	test32.12			\
 	test33.1			\
 	test33.1.prog.c			\
-	etc.33.1.1/slurm.conf		\
 	etc.33.1.1/testcases		\
 	etc.33.1.1/topology.conf	\
-	etc.33.1.2/slurm.conf		\
 	etc.33.1.2/testcases		\
 	etc.33.1.2/topology.conf	\
-	etc.33.1.3/slurm.conf		\
 	etc.33.1.3/testcases		\
 	etc.33.1.3/topology.conf	\
-	etc.33.1.4/slurm.conf		\
 	etc.33.1.4/testcases		\
 	etc.33.1.4/topology.conf	\
 	test34.1			\
-	test34.2
+	test34.2			\
+	test35.1			\
+	test35.2
 
 distclean-local:
 	rm -rf *error *output
diff --git a/testsuite/expect/Makefile.in b/testsuite/expect/Makefile.in
index 0e0f2b43c..55279c314 100644
--- a/testsuite/expect/Makefile.in
+++ b/testsuite/expect/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -189,6 +192,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -238,8 +243,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -258,6 +267,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -301,6 +313,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -324,6 +337,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
@@ -481,12 +495,12 @@ EXTRA_DIST = \
 	test1.73			\
 	test1.74			\
 	test1.75			\
+	test1.77			\
 	test1.80			\
 	test1.81			\
 	test1.82			\
 	test1.83			\
 	test1.84			\
-	test1.84.bash			\
 	test1.85			\
 	test1.86			\
 	test1.87			\
@@ -509,6 +523,7 @@ EXTRA_DIST = \
 	test1.96.prog.c			\
 	test1.97			\
 	test1.99			\
+	test1.100			\
 	test2.1				\
 	test2.2				\
 	test2.3				\
@@ -533,6 +548,7 @@ EXTRA_DIST = \
 	test2.22			\
 	test2.23			\
 	test2.24			\
+	test2.25			\
 	test3.1				\
 	test3.2				\
 	test3.3				\
@@ -545,16 +561,18 @@ EXTRA_DIST = \
 	test3.9				\
 	test3.10			\
 	test3.11			\
-	inc3.11.1                      \
-	inc3.11.2                      \
-	inc3.11.3                      \
-	inc3.11.4                      \
-	inc3.11.5                      \
-	inc3.11.6                      \
-	inc3.11.7                      \
-	inc3.11.8                      \
+	inc3.11.1			\
+	inc3.11.2			\
+	inc3.11.3			\
+	inc3.11.4			\
+	inc3.11.5			\
+	inc3.11.6			\
+	inc3.11.7			\
+	inc3.11.8			\
 	test3.12			\
 	test3.13			\
+	test3.14			\
+	test3.15			\
 	test4.1				\
 	test4.2				\
 	test4.3				\
@@ -567,6 +585,7 @@ EXTRA_DIST = \
 	test4.10			\
 	test4.11			\
 	test4.12			\
+	test4.13			\
 	test5.1				\
 	test5.2				\
 	test5.3				\
@@ -628,6 +647,19 @@ EXTRA_DIST = \
 	test7.16			\
 	test7.17			\
 	test7.17.prog.c			\
+	test7.17_configs/test7.17.1/gres.conf	\
+	test7.17_configs/test7.17.1/slurm.conf	\
+	test7.17_configs/test7.17.2/gres.conf	\
+	test7.17_configs/test7.17.2/slurm.conf	\
+	test7.17_configs/test7.17.3/gres.conf	\
+	test7.17_configs/test7.17.3/slurm.conf	\
+	test7.17_configs/test7.17.4/gres.conf	\
+	test7.17_configs/test7.17.4/slurm.conf	\
+	test7.17_configs/test7.17.5/slurm.conf	\
+	test7.17_configs/test7.17.6/gres.conf	\
+	test7.17_configs/test7.17.6/slurm.conf	\
+	test7.17_configs/test7.17.7/gres.conf	\
+	test7.17_configs/test7.17.7/slurm.conf	\
 	test8.1				\
 	test8.2				\
 	test8.3				\
@@ -705,6 +737,7 @@ EXTRA_DIST = \
 	test14.7			\
 	test14.8			\
 	test14.9			\
+	test14.10			\
 	test15.1			\
 	test15.2			\
 	test15.3			\
@@ -774,6 +807,8 @@ EXTRA_DIST = \
 	test17.36			\
 	test17.37			\
 	test17.38			\
+	test17.39			\
+	test17.40			\
 	test19.1			\
 	test19.2			\
 	test19.3			\
@@ -816,10 +851,7 @@ EXTRA_DIST = \
 	test21.19			\
 	test21.20			\
 	test21.21			\
-	inc21.21.1                      \
-	inc21.21.2                      \
-	inc21.21.3                      \
-	inc21.21.4                      \
+	inc21.21_tests                  \
 	test21.22			\
 	test21.23			\
 	test21.24			\
@@ -831,6 +863,7 @@ EXTRA_DIST = \
 	test21.30                       \
 	test21.31			\
 	test21.32			\
+	test21.34			\
 	inc21.30.1                      \
 	inc21.30.2                      \
 	inc21.30.3                      \
@@ -847,6 +880,9 @@ EXTRA_DIST = \
 	inc21.30.14			\
 	inc21.30.15			\
 	inc21.30.16			\
+	inc21.34.1			\
+	inc21.34.2			\
+	inc21.34_test			\
 	test22.1			\
 	inc22.1.1                       \
 	inc22.1.2                       \
@@ -877,6 +913,8 @@ EXTRA_DIST = \
 	test28.5                        \
 	test28.6                        \
 	test28.7			\
+	test28.8			\
+	test28.9			\
 	test29.1                        \
 	test29.2                        \
 	test29.3                        \
@@ -901,20 +939,18 @@ EXTRA_DIST = \
 	test32.12			\
 	test33.1			\
 	test33.1.prog.c			\
-	etc.33.1.1/slurm.conf		\
 	etc.33.1.1/testcases		\
 	etc.33.1.1/topology.conf	\
-	etc.33.1.2/slurm.conf		\
 	etc.33.1.2/testcases		\
 	etc.33.1.2/topology.conf	\
-	etc.33.1.3/slurm.conf		\
 	etc.33.1.3/testcases		\
 	etc.33.1.3/topology.conf	\
-	etc.33.1.4/slurm.conf		\
 	etc.33.1.4/testcases		\
 	etc.33.1.4/topology.conf	\
 	test34.1			\
-	test34.2
+	test34.2			\
+	test35.1			\
+	test35.2
 
 all: all-am
 
diff --git a/testsuite/expect/README b/testsuite/expect/README
index 3b1da346c..eccda2088 100644
--- a/testsuite/expect/README
+++ b/testsuite/expect/README
@@ -1,7 +1,7 @@
 ############################################################################
 # Copyright (C) 2002-2007 The Regents of the University of California.
 # Copyright (C) 2008-2011 Lawrence Livermore National Security.
-# Copyright (C) 2010-2014 SchedMD LLC
+# Copyright (C) 2010-2015 SchedMD LLC
 # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
 # Written by Morris Jette <jette1@llnl.gov>
 # Additions by Joseph Donaghy <donaghy1@llnl.gov>
@@ -177,6 +177,9 @@ test1.72   Validate JobAcctGatherFrequency configuration parameter is enforced.
 test1.73   Validate OverTimeLimit configuration parameter is enforced.
 test1.74   Validate MaxNode and GrpNode limit enforcment with QoS & association.
 test1.75   Test that --cpu-freq is enforced when using non-numeric values
+test1.76   Test that --cpu-freq sets min-max:gov
+test1.77   Test that job name specified at the command line overwrites
+           the environment variable SLURM_JOB_NAME
 
 **NOTE**   The following tests attempt to utilize multiple CPUs or partitions,
 	   The test will print "WARNING" and terminate with an exit code of
@@ -206,7 +209,9 @@ test1.94   Test of MPICH2 task spawn logic
 test1.95   Basic UPC (Unified Parallel C) test via srun.
 test1.96   Basic SHMEM test via srun.
 test1.97   Test that --ntask-per-node and -c options are enforced
+test1.98   AVAILBLE...
 test1.99   Validate that SrunPortRange is enforced when using srun
+test1.100  Test of pack/nopack task distribution.
 **NOTE**   The above tests for multiple processor/partition systems only
 
 test2.#    Testing of scontrol options (to be run as unprivileged user).
@@ -236,6 +241,7 @@ test2.21   Validate scontrol requeue of failed or completed job.
 test2.22   Validate scontrol requeuehold requeues job to held pending state.
 test2.23   Validate scontrol requeuehold State=SpecialExit.
 test2.24   Validate the scontrol write config creates accurate config
+test2.25   Validate scontrol show assoc_mgr command.
 
 
 test3.#    Testing of scontrol options (best run as SlurmUser or root).
@@ -253,6 +259,8 @@ test3.10   Test of "scontrol notify <jobid> <message>"
 test3.11   Validate scontrol create, delete, and update of reservations.
 test3.12   Validate scontrol update command for front end nodes.
 test3.13   Validate scontrol update command for jobs by JobName and UserID.
+test3.14   Test of advanced reservation "replace" option.
+test3.15   Test of advanced reservation of licenses.
 UNTESTED   "scontrol abort"    would stop slurm
 UNTESTED   "scontrol shutdown" would stop slurm
 
@@ -275,6 +283,7 @@ test4.10   Confirm that sinfo reports a proper version number (--version
 	   option).
 test4.11   Test down node reason display (--list-reasons option).
 test4.12   Test cpu total and allocation numbers.
+test4.13   Test sinfo's -O (--Format) option.
 
 
 test5.#    Testing of squeue options.
@@ -310,8 +319,8 @@ test6.10   Validate scancel user and partition filters, delete all remaining
 test6.11   Validate scancel quiet option, no warning if job gone
 	   (--quiet option).
 test6.12   Test scancel signal to batch script (--batch option)
-test6.13   Test routing all signals through slurmctld rather than directly
-	   to slurmd (undocumented --ctld option).
+test6.13   Test routing all signals through slurmctld rather than slurmd
+	   (undocumented --ctld option).
 test6.14   Test scancel nodelist option (--nodelist or -w)
 
 
@@ -452,6 +461,7 @@ test14.7   Test sbcast security issues.
 test14.8   Test sbcast transmission buffer options (--size and
 	   --fanout options).
 test14.9   Verify that an sbcast credential is properly validated.
+test14.10  Validate sbcast for a job step allocation (subset of job allocation).
 
 test15.#   Testing of salloc options.
 =====================================
@@ -557,6 +567,8 @@ test17.35  Test performance/timing of job submissions.
 test17.36  Test that the shared option in partitions is enforced.
 test17.37  Validate that afternotok dependency is enforced.
 test17.38  Test for sbatch --signal.
+test17.39  Validate dependecy OR option.
+test17.34  Test of --thread-spec option.
 
 
 test19.#   Testing of strigger options.
@@ -623,6 +635,7 @@ test21.29  sacctmgr clear (modify) QoS values
 test21.30  sacctmgr test if the QoS values are enforced
 test21.31  sacctmgr modify Resource values
 test21.32  Validate that mod qos =,+=,-= change the preempt value
+test21.34  Validate that partition and job qos limits are enforced
 
 
 test22.#   Testing of sreport commands and options.
@@ -680,6 +693,8 @@ test28.5   Validates that scontrol can hold and release a whole job
 test28.6   Validates that when a job array is submitted to multiple
 	   partitions that the jobs run on them.
 test28.7   Confirms job array dependencies.
+test28.8   Test of job array suspend/resume.
+test28.9   Test of maximum running task count in a job array.
 
 
 test29.#   Testing of smd command and option.
@@ -727,3 +742,8 @@ test34.*   Test of Slurm preemption.
 ====================================
 test34.1   Validate that partition PreemptMode is enforced.
 test34.2   Validate that qos PreemptMode is enforced.
+
+test35.*   Burst buffer tests
+=============================
+test35.1   Cray persistent burst buffer creation
+test35.2   Cray job-specific burst buffer use
diff --git a/testsuite/expect/globals b/testsuite/expect/globals
index 539e71cfd..355cf7d76 100755
--- a/testsuite/expect/globals
+++ b/testsuite/expect/globals
@@ -217,9 +217,11 @@ set alpha                "\[a-zA-Z\]+"
 set alpha_cap            "\[A-Z\]+"
 set alpha_comma_slash    "\[a-zA-Z/,\]+"
 set alpha_numeric        "\[a-zA-Z0-9\]+"
+set alpha_numeric_colon  "\[a-zA-Z0-9_,\:\-\]+"
 set alpha_numeric_comma  "\[a-zA-Z0-9_,\-\]+"
 set alpha_numeric_under  "\[a-zA-Z0-9_\-\]+"
 set alpha_under          "\[A-Z_\]+"
+set alpha_under_slash    "\[a-zA-Z/_\]+"
 set digit                "\[0-9\]"
 set end_of_line          "\[\r\n\]"
 set float                "\[0-9\]+\\.?\[0-9\]*"
@@ -255,7 +257,7 @@ proc cancel_job { job_id } {
 	}
 
 	send_user "cancelling $job_id\n"
-	exec $scancel -Q $job_id
+	set status [catch [exec $scancel -Q $job_id] result]
 	exec $bin_sleep 1
 	return [wait_for_job $job_id "DONE"]
 }
@@ -555,6 +557,7 @@ proc wait_for_file { file_name } {
 #                            DONE any terminated state
 #                            PENDING job is pending
 #                            RUNNING job is running
+#                            SUSPENDED job is suspended
 #
 # NOTE: We sleep for two seconds before replying that a job is
 # done to give time for I/O completion (stdout/stderr files)
@@ -569,6 +572,7 @@ proc wait_for_job { job_id desired_state } {
 		"DONE" {}
 		"PENDING" {}
 		"RUNNING" {}
+		"SUSPENDED" {}
 		default {
 			send_user "FAILURE: wait_for_job with invalid state: $desired_state\n"
 			return 1
@@ -607,6 +611,10 @@ proc wait_for_job { job_id desired_state } {
 					send_user "Job $job_id is $state, "
 					send_user "but we wanted RUNNING\n"
 				}
+				if {[string compare $desired_state "SUSPENDED"] == 0} {
+					send_user "Job $job_id is $state, "
+					send_user "but we wanted SUSPENDED\n"
+				}
 				return 1
 			}
 			"PENDING" {
@@ -625,6 +633,14 @@ proc wait_for_job { job_id desired_state } {
 				send_user "Job $job_id is in state $state, "
 				send_user "desire $desired_state\n"
 			}
+			"SUSPENDED" {
+				if {[string compare $desired_state "SUSPENDED"] == 0} {
+					send_user "Job $job_id is SUSPENDED\n"
+					return 0
+				}
+				send_user "Job $job_id is in state $state, "
+				send_user "desire $desired_state\n"
+			}
 			default {
 				send_user "Job $job_id is in state $state, "
 				send_user "desire $desired_state\n"
@@ -781,24 +797,20 @@ proc wait_for_all_jobs { job_name incr_sleep } {
 
 ################################################################
 #
-# Proc: test_fast_schedule_2
+# Proc: test_fast_schedule
 #
-# Purpose: Determine if we are running FastSchedule=2 usually meaning we are pretending we have more resources than reality.
-# This is based upon
-# the value of FastSchedule in the slurm.conf.
-#
-# Returns level of 1 if it is 2, 0 otherwise
+# Returns value of FastSchedule from slurm.conf
 #
 ################################################################
-proc test_fast_schedule_2 { } {
+proc test_fast_schedule { } {
 	global scontrol number
 
 	log_user 0
-	set is_2 0
+	set fast_schedule -1
 	spawn $scontrol show config
 	expect {
-		-re "FastSchedule *= 2" {
-			set is_2 1
+		-re "FastSchedule *= ($number)" {
+			set fast_schedule $expect_out(1,string)
 			exp_continue
 		}
 		eof {
@@ -807,10 +819,9 @@ proc test_fast_schedule_2 { } {
 	}
 
 	log_user 1
-	return $is_2
+	return $fast_schedule
 }
 
-
 ################################################################
 #
 # Proc: test_assoc_enforced
@@ -983,14 +994,13 @@ proc test_topology { } {
 			wait
 		}
 	}
-
 	log_user 1
 	return $have_topology
 }
 
 ################################################################
 #
-# Proc: get_task_plugins
+# Proc: get_task_types
 #
 # Purpose: get the task plugins running with task/ stripped
 #
@@ -1001,7 +1011,7 @@ proc get_affinity_types { } {
 	global scontrol alpha_comma_slash
 
 	log_user 0
-	set affinity 0
+	set affinity ""
 	spawn $scontrol show config
 	expect {
 		-re "TaskPlugin *= ($alpha_comma_slash)" {
@@ -1018,11 +1028,130 @@ proc get_affinity_types { } {
 			wait
 		}
 	}
-
 	log_user 1
 	return $affinity
 }
 
+################################################################
+#
+# Proc: get_bb_emulate
+#
+# Purpose: Determine if Cray burst buffers API is emulated
+#
+# Returns: 1 if true, 0 if false
+#
+################################################################
+proc get_bb_emulate { } {
+	global scontrol
+
+	log_user 0
+	set bb_emulate 0
+	spawn $scontrol show burst
+	expect {
+		-re "EmulateCray" {
+			set bb_emulate 1
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+	return $bb_emulate
+}
+
+################################################################
+#
+# Proc: get_bb_persistent
+#
+# Purpose: Determine if persistent burst buffers can be created by users
+#
+# Returns: 1 if true, 0 if false
+#
+################################################################
+proc get_bb_persistent { } {
+	global scontrol
+
+	log_user 0
+	set bb_persistent 0
+	spawn $scontrol show burst
+	expect {
+		-re "EnablePersistent" {
+			set bb_persistent 1
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+	return $bb_persistent
+}
+
+################################################################
+#
+# Proc: get_bb_types
+#
+# Purpose: get the burst buffer plugins running with task/ stripped
+#
+# Returns Returns comma separated list of task plugins running without the task/
+#
+################################################################
+proc get_bb_types { } {
+	global scontrol alpha_under_slash
+
+	log_user 0
+	set bb_types ""
+	spawn $scontrol show config
+	expect {
+		-re "BurstBufferType *= ($alpha_under_slash)" {
+			set parts [split $expect_out(1,string) ",/"]
+			while 1 {
+				set task_found [lsearch $parts "burst_buffer"]
+				if { $task_found == -1 } break
+				set parts [lreplace $parts $task_found $task_found]
+			}
+			set bb_types [join $parts ","]
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+
+	log_user 1
+	return $bb_types
+}
+
+################################################################
+#
+# Proc: get_cpu_governors
+#
+# Purpose: get the CpuFreqGovernor configuration parameter
+#
+# Returns Returns comma separated list of available CPU governor's
+#
+################################################################
+proc get_cpu_governors { } {
+	global scontrol alpha_numeric_comma
+
+	log_user 0
+	set governors ""
+	spawn $scontrol show config
+	expect {
+		-re "CpuFreqGovernors *= ($alpha_numeric_comma)" {
+			set governors $expect_out(1,string)
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+
+	log_user 1
+	return $governors
+}
+
 ################################################################
 #
 # Proc: test_cpu_affinity
@@ -1116,8 +1245,8 @@ proc test_track_wckey { } {
 	set track_wckey 0
 	spawn $scontrol show config
 	expect {
-		-re "TrackWCKey *= ($number)" {
-			set track_wckey $expect_out(1,string)
+		-re "TrackWCKey *= Yes" {
+			set track_wckey 1
 			exp_continue
 		}
 		eof {
@@ -2252,7 +2381,7 @@ proc is_super_user { } {
 #
 ################################################################
 proc check_acct_associations { } {
-	global sacctmgr number alpha_numeric_under
+	global sacctmgr number alpha_numeric_under exit_code
 
 	set rc 1
 	log_user 0
@@ -2402,7 +2531,7 @@ proc get_job_acct_type { } {
 #
 ################################################################
 proc check_accounting_admin_level { } {
-	global sacctmgr alpha alpha_numeric_under bin_id
+	global sacctmgr alpha alpha_numeric_under bin_id exit_code
 
 	set admin_level ""
 	set user_name ""
@@ -2458,7 +2587,7 @@ proc check_accounting_admin_level { } {
 #
 ################################################################
 proc get_cluster_name { } {
-	global scontrol alpha_numeric_under
+	global scontrol alpha_numeric_under exit_code
 	#
 	# Use scontrol to find the cluster name
 	#
@@ -2495,7 +2624,7 @@ proc get_cluster_name { } {
 ################################################################
 
 proc get_bluegene_layout { } {
-	global scontrol alpha_numeric_under
+	global scontrol alpha_numeric_under exit_code
 
 	log_user 0
 	set layout 0
@@ -2530,7 +2659,7 @@ proc get_bluegene_layout { } {
 ################################################################
 
 proc get_bluegene_psets { } {
-	global scontrol number
+	global scontrol number exit_code
 
 	log_user 0
 	set psets 0
@@ -2568,7 +2697,7 @@ proc get_bluegene_psets { } {
 ################################################################
 
 proc get_bluegene_type { } {
-	global scontrol alpha
+	global scontrol alpha exit_code
 
 	log_user 0
 	set type 0
@@ -2603,7 +2732,7 @@ proc get_bluegene_type { } {
 ################################################################
 
 proc get_bluegene_procs_per_cnode { } {
-	global scontrol number
+	global scontrol number exit_code
 
 	log_user 0
 	set cpu_cnt 0
@@ -2638,7 +2767,7 @@ proc get_bluegene_procs_per_cnode { } {
 ################################################################
 
 proc get_bluegene_cnodes_per_mp { } {
-	global scontrol number
+	global scontrol number exit_code
 
 	log_user 0
 	set node_cnt 1
@@ -2673,7 +2802,7 @@ proc get_bluegene_cnodes_per_mp { } {
 ################################################################
 
 proc get_bluegene_allow_sub_blocks { } {
-	global scontrol alpha
+	global scontrol alpha exit_code
 
 	log_user 0
 	set type 0
@@ -2708,7 +2837,7 @@ proc get_bluegene_allow_sub_blocks { } {
 ################################################################
 
 proc get_node_cnt { } {
-	global scontrol
+	global scontrol exit_code
 
 	log_user 0
 	set node_cnt 0
@@ -2862,12 +2991,12 @@ proc print_success { test_id } {
 proc change_subbp_state { node ionodes state } {
 	global scontrol smap
 
-	set exit_code 0
+	set return_code 0
 
 	set my_pid [spawn $scontrol update subbpname=$node\[$ionodes\] state=$state]
 	expect {
 		-re "slurm_update error:" {
-			set exit_code 1
+			set return_code 1
 			exp_continue
 		}
 		-re "Unable to contact" {
@@ -2877,15 +3006,15 @@ proc change_subbp_state { node ionodes state } {
 		timeout {
 			send_user "\nFAILURE: scontrol not responding\n"
 			slow_kill $my_pid
-			set exit_code 1
+			set return_code 1
 		}
 		eof {
 			wait
 		}
 	}
 
-	if { $exit_code } {
-		return $exit_code
+	if { $return_code } {
+		return $return_code
 	}
 
 	set match 0
@@ -2906,7 +3035,7 @@ proc change_subbp_state { node ionodes state } {
 		timeout {
 			send_user "\nFAILURE: smap not responding\n"
 			slow_kill $my_pid
-			set exit_code 1
+			set return_code 1
 		}
 		eof {
 			wait
@@ -2915,10 +3044,10 @@ proc change_subbp_state { node ionodes state } {
 
 	if {$match != 2} {
 		send_user "\nFAILURE: Subbp did not go into $state state. $match\n"
-		set exit_code 1
+		set return_code 1
 	}
 
-	return $exit_code
+	return $return_code
 }
 
 ################################################################
@@ -3075,7 +3204,7 @@ proc check_node_mem { } {
 #
 ################################################################
 proc get_fs_damping_factor { } {
-	global scontrol number
+	global scontrol number exit_code
 
 	log_user 0
 	set damp 1
@@ -3111,7 +3240,7 @@ proc get_fs_damping_factor { } {
 ################################################################
 
 proc slurmctld_plug_stack_nonstop { } {
-	global scontrol alpha_numeric_comma
+	global scontrol alpha_numeric_comma exit_code
 
 	log_user 0
 	set nonstop_enforce 0
@@ -3148,7 +3277,7 @@ proc slurmctld_plug_stack_nonstop { } {
 ################################################################
 
 proc job_submit_all_partitions { } {
-	global scontrol alpha_numeric_comma
+	global scontrol alpha_numeric_comma exit_code
 
 	log_user 0
 	set all_partitions 0
diff --git a/testsuite/expect/globals_accounting b/testsuite/expect/globals_accounting
index cc1b3c90a..fd570f959 100644
--- a/testsuite/expect/globals_accounting
+++ b/testsuite/expect/globals_accounting
@@ -1208,9 +1208,9 @@ proc archive_load { file } {
 }
 
 #
-# Use sacctmgr to create a res
+# Use sacctmgr to create a resource
 #
-proc add_res {name res_limits} {
+proc add_resource {name res_limits} {
 	global sacctmgr timeout
 
 	set exit_code 0
@@ -1230,7 +1230,7 @@ proc add_res {name res_limits} {
 		set command "$command $option=$res_req($option)"
 	}
 
-	set my_pid [eval spawn $sacctmgr -i add res $command]
+	set my_pid [eval spawn $sacctmgr -i add resource $command]
 	expect {
 		-re "(There was a problem|Unknown condition|Unknown field|Unknown option)" {
 			send_user "FAILURE: there was a problem with the sacctmgr command\n"
@@ -1708,7 +1708,7 @@ proc check_qos_limits { name qos_req } {
 
 ###############################################################
 #
-# Proc: check_res_limits
+# Proc: check_resource_limits
 #
 # Purpose: Verify that the reservation limits are correct
 #
@@ -1716,7 +1716,7 @@ proc check_qos_limits { name qos_req } {
 #
 #
 ###############################################################
-proc check_res_limits { name res_limits } {
+proc check_resource_limits { name res_limits } {
 	global sacctmgr
 
 	set command "format=name"
@@ -1739,7 +1739,7 @@ proc check_res_limits { name res_limits } {
 
 	log_user 0
 	set match 0
-	spawn $sacctmgr -p -n list res $name $command
+	spawn $sacctmgr -p -n list resource $name $command
 	expect {
 		-re "$values" {
 			set match 1
@@ -1766,15 +1766,15 @@ proc check_res_limits { name res_limits } {
 
 ###############################################################
 #
-# Proc: mod_res
+# Proc: mod_resource
 #
-# Purpose: Modify existing reservation limits
+# Purpose: Modify existing resources limits
 #
 # Returns: 0 if successful, 1 otherwise
 #
 #
 ###############################################################
-proc mod_res {name mod_limits} {
+proc mod_resource {name mod_limits} {
 
 	global sacctmgr
 	set commands ""
@@ -1795,7 +1795,7 @@ proc mod_res {name mod_limits} {
 
 	log_user 0
 	set match 0
-	spawn $sacctmgr -i mod res where name=$name set $commands
+	spawn $sacctmgr -i mod resource where name=$name set $commands
 	expect {
 		-re "Modified server resources ..." {
 			set match 1
diff --git a/testsuite/expect/inc12.3.1 b/testsuite/expect/inc12.3.1
index bb514f983..bc20c12f1 100644
--- a/testsuite/expect/inc12.3.1
+++ b/testsuite/expect/inc12.3.1
@@ -54,7 +54,7 @@ proc inc12_3_1 {job_id_1 job_id_2 job_name_1 job_name_2 test_acct} {
 
 	}
 	if {$idmatch != 1} {
-		send_user "\nFAILURE: sacct could not match job id to job name (Within: inc12.3.1)\n"
+		send_user "\nFAILURE: sacct could not match job id to job name ($idmatch != 1, Within: inc12.3.1)\n"
 		set exit_code 1
 	}
 
@@ -79,7 +79,7 @@ proc inc12_3_1 {job_id_1 job_id_2 job_name_1 job_name_2 test_acct} {
 		}
 	}
 	if {$idmatch != 1} {
-		send_user "\nFAILURE: sacct could not match job id to job name (Within: 12.3.1)\n"
+		send_user "\nFAILURE: sacct could not match job id to job name ($idmatch != 1, Within: 12.3.1)\n"
 		set exit_code 1
 	}
 }
diff --git a/testsuite/expect/inc12.3.2 b/testsuite/expect/inc12.3.2
index e427b2e75..29084bbd2 100755
--- a/testsuite/expect/inc12.3.2
+++ b/testsuite/expect/inc12.3.2
@@ -112,7 +112,7 @@ proc inc12_3_2 {job_id_1 job_id_2 job_id_3 job_name_1 job_name_2 test_acct job_1
 		}
 	}
 	if {$job_2_match != 1} {
-		send_user "\nFAILURE:sacct was unable to find the job $job_id_2 on node $job_2_node_0 ($job_2_match != 1) (Within: inc12.3.2)\n"
+		send_user "\nFAILURE:sacct was unable to find the job $job_id_2 on node $job_2_node_0 ($job_2_match != 1, Within: inc12.3.2)\n"
 		set exit_code 1
 	}
 
@@ -139,7 +139,7 @@ proc inc12_3_2 {job_id_1 job_id_2 job_id_3 job_name_1 job_name_2 test_acct job_1
 		}
 	}
 	if {$job_2_match != 1} {
-		send_user "\nFAILURE: sacct was unable to find the job $job_id_2 on node $job_2_node_1 ($job_2_match != 1) (Within: inc12.3.2)\n"
+		send_user "\nFAILURE: sacct was unable to find the job $job_id_2 on node $job_2_node_1 ($job_2_match != 1, Within: inc12.3.2)\n"
 		set exit_code 1
 	}
 }
diff --git a/testsuite/expect/inc21.21.1 b/testsuite/expect/inc21.21.1
deleted file mode 100644
index 58d863a77..000000000
--- a/testsuite/expect/inc21.21.1
+++ /dev/null
@@ -1,66 +0,0 @@
-############################################################################
-# Purpose: Test for accounting records of specific job names with their ID
-############################################################################
-# Copyright (C) 2012 SchedMD LLC.
-# Written by Nathan Yee <nyee32@schedmd.com>
-#
-# This file is part of SLURM, a resource management program.
-# For details, see <http://slurm.schedmd.com/>.
-# Please also read the included file: DISCLAIMER.
-#
-# SLURM is free software; you can redistribute it and/or modify it under
-# the terms of the GNU General Public License as published by the Free
-# Software Foundation; either version 2 of the License, or (at your option)
-# any later version.
-#
-# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along
-# with SLURM; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-############################################################################
-
-#
-# Test maxnode
-#
-proc inc21_21_1 { maxnode } {
-
-	global number bin_id ta srun exit_code
-	set job_id 0
-
-	send_user "\nTest MaxNode (Within: inc21.21.1)\n"
-
-#
-#	Spawn a job testing maxnode
-#
-	set matches 0
-	spawn $srun -v -N$maxnode --account=$ta $bin_id
-	expect {
-		-re "launching ($number)" {
-			set job_id $expect_out(1,string)
-			incr matches
-			exp_continue
-		}
-		timeout {
-			send_user "\nFAILURE: srun not responding (Within: inc21.21.1)\n"
-			set exit_code 1
-		}
-		eof {
-			wait
-		}
-	}
-
-	if {$job_id != 0 && [wait_for_job $job_id "DONE"] != 0} {
-		send_user "\nFAILURE: job $job_id did not complete (Within: inc21.21.1)\n"
-		set exit_code 1	
-	}
-
-	if { $matches != 1 } {
-		send_user "\nFAILURE: job dion't launch with correct limit (Within: inc21.21.1)\n"
-		set exit_code 1
-		return $exit_code
-	}
-}
diff --git a/testsuite/expect/inc21.21.2 b/testsuite/expect/inc21.21.2
deleted file mode 100644
index 6dd2a9747..000000000
--- a/testsuite/expect/inc21.21.2
+++ /dev/null
@@ -1,68 +0,0 @@
-############################################################################
-# Purpose: Test for accounting records of specific job names with their ID
-############################################################################
-# Copyright (C) 2012 SchedMD LLC.
-# Written by Nathan Yee <nyee32@schedmd.com>
-#
-# This file is part of SLURM, a resource management program.
-# For details, see <http://slurm.schedmd.com/>.
-# Please also read the included file: DISCLAIMER.
-#
-# SLURM is free software; you can redistribute it and/or modify it under
-# the terms of the GNU General Public License as published by the Free
-# Software Foundation; either version 2 of the License, or (at your option)
-# any later version.
-#
-# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along
-# with SLURM; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-############################################################################
-
-#
-# Test for maxnode+1
-#
-proc inc21_21_2 {maxnode} {
-
-	global number bin_id ta srun exit_code
-	set job_id 0
-
-	send_user "\nTest MaxNodes overage (Within: inc21.21.2)\n"
-
-#
-#	Spawn a job testing maxnode+1
-#
-	spawn $srun -N[expr $maxnode + 1] --account=$ta -I $bin_id
-	expect {
-		-re "Job violates accounting policy" {
-			send_user "\nThis error is expected, not a problem (Within: inc21.21.2)\n"
-			exp_continue
-		}
-		-re "launching ($number)" {
-			set job_id $expect_out(1,string)
-			send_user "\nFAILURE: job should not have run. (Within: inc21.21.2)\n"
-			set exit_code 1
-			exp_continue
-		}
-		timeout {
-			send_user "\nFAILURE: srun not responding (Within: inc21.21.2)\n"
-			set exit_code 1
-		}
-		eof {
-			wait
-		}
-	}
-
-	if {$job_id != 0 && [wait_for_job $job_id "DONE"] != 0} {
-		send_user "\nFAILURE: job $job_id did not complete (Within: inc21.21.2)\n"
-		set exit_code 1	
-	}
-
-	if { $exit_code } {
-		return $exit_code
-	}
-}
diff --git a/testsuite/expect/inc21.21.3 b/testsuite/expect/inc21.21.3
deleted file mode 100644
index e28c732b0..000000000
--- a/testsuite/expect/inc21.21.3
+++ /dev/null
@@ -1,67 +0,0 @@
-############################################################################
-# Purpose: Test for accounting records of specific job names with their ID
-############################################################################
-# Copyright (C) 2012 SchedMD LLC.
-# Written by Nathan Yee <nyee32@schedmd.com>
-#
-# This file is part of SLURM, a resource management program.
-# For details, see <http://slurm.schedmd.com/>.
-# Please also read the included file: DISCLAIMER.
-#
-# SLURM is free software; you can redistribute it and/or modify it under
-# the terms of the GNU General Public License as published by the Free
-# Software Foundation; either version 2 of the License, or (at your option)
-# any later version.
-#
-# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along
-# with SLURM; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-############################################################################
-
-#
-# Test for maxwall
-#
-
-proc inc21_21_3 { maxwall } {
-
-	global number bin_id ta srun exit_code
-	set job_id 0
-
-	send_user "\nTest MaxWall (Within: inc21.21.3)\n"
-
-	#
-	# Spawn a job testing maxwall
-	#
-	set matches 0
-	spawn $srun -v -t$maxwall --account=$ta $bin_id
-	expect {
-		-re "launching ($number)" {
-			set job_id $expect_out(1,string)
-			incr matches
-			exp_continue
-		}
-		timeout {
-			send_user "\nFAILURE: srun not responding (Within: inc21.21.3)\n"
-			set exit_code 1
-		}
-		eof {
-			wait
-		}
-	}
-
-	if {$job_id != 0 && [wait_for_job $job_id "DONE"] != 0} {
-		send_user "\nFAILURE: job $job_id did not complete (Within: inc21.21.3)\n"
-		set exit_code 1	
-	}
-
-	if { $matches != 1 } {
-		send_user "\nFAILURE: job dion't launch with correct limit (Within: inc21.21.3)\n"
-		set exit_code 1
-		return $exit_code
-	}
-}
diff --git a/testsuite/expect/inc21.21.4 b/testsuite/expect/inc21.21.4
deleted file mode 100644
index a4924ff9f..000000000
--- a/testsuite/expect/inc21.21.4
+++ /dev/null
@@ -1,69 +0,0 @@
-############################################################################
-# Purpose: Test for accounting records of specific job names with their ID
-############################################################################
-# Copyright (C) 2012 SchedMD LLC.
-# Written by Nathan Yee <nyee32@schedmd.com>
-#
-# This file is part of SLURM, a resource management program.
-# For details, see <http://slurm.schedmd.com/>.
-# Please also read the included file: DISCLAIMER.
-#
-# SLURM is free software; you can redistribute it and/or modify it under
-# the terms of the GNU General Public License as published by the Free
-# Software Foundation; either version 2 of the License, or (at your option)
-# any later version.
-#
-# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
-# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
-# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
-# details.
-#
-# You should have received a copy of the GNU General Public License along
-# with SLURM; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
-############################################################################
-
-#
-# Tesing maxwall+1
-#
-
-proc inc21_21_4 { maxwall } {
-
-	global number bin_id ta srun exit_code
-	set job_id 0
-
-	send_user "\nTest MaxWall overage (Within: inc21.21.4)\n"
-
-	#
-	# Spawn a job testing maxwall+1
-	#
-	spawn $srun -t[expr $maxwall + 1] --account=$ta -I $bin_id
-	expect {
-		-re "Job violates accounting/QOS policy" {
-			send_user "\nThis error is expected, not a problem (Within: inc21.21.4)\n"
-			exp_continue
-		}
-		-re "launching ($number)" {
-			set job_id $expect_out(1,string)
-			send_user "\nFAILURE: job should not have run. (Within: inc21.21.4)\n"
-			set exit_code 1
-			exp_continue
-		}
-		timeout {
-			send_user "\nFAILURE: srun not responding Within: inc21.21.4)\n"
-			set exit_code 1
-		}
-		eof {
-			wait
-		}
-	}
-
-	if {$job_id != 0 && [wait_for_job $job_id "DONE"] != 0} {
-		send_user "\nFAILURE: job $job_id did not complete (Within: inc21.21.4)\n"
-		set exit_code 1	
-	}
-
-	if { $exit_code } {
-		return $exit_code
-	}
-}
diff --git a/testsuite/expect/inc21.21_tests b/testsuite/expect/inc21.21_tests
new file mode 100644
index 000000000..331a6943c
--- /dev/null
+++ b/testsuite/expect/inc21.21_tests
@@ -0,0 +1,519 @@
+############################################################################
+# Purpose: Test for accounting records of specific job names with their ID
+############################################################################
+# Copyright (C) 2015 SchedMD LLC.
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+
+proc print_err { test_type func_type } {
+
+	send_user "(Within: inc21.21_test function: $func_type "
+	send_user "Testing: $test_type)\n"
+}
+
+#
+# Supplemental function to test21.21 that tests a job with
+# resources within the allowed limit in the association
+#
+proc inc21_21_good { test_type limit } {
+
+	global number bin_id ta srun test_node selectparam nthreads
+	set exit_code 0
+	set job_id 0
+	set val 0
+	set add ""
+
+	# Wait for old jobs to clean up
+	sleep 2
+
+	send_user "\n====== Test $test_type"
+	send_user "(Within: inc21.21_tests function: inc21_21_good) ======\n"
+
+	if { [string compare $test_type "maxnode"] == 0 } {
+		set add "--exclusive"
+	} else {
+		set add "-w$test_node"
+	}
+
+	set matches 0
+	spawn $srun -v $add -t1 [lindex $limit 0][lindex $limit 1] \
+	    --account=$ta -I $bin_id
+	expect {
+		-re "launching ($number)" {
+			set job_id $expect_out(1,string)
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: srun not responding "
+			print_err $test_type "inc21_21_good"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+
+	if {$job_id != 0 && [wait_for_job $job_id "DONE"] != 0} {
+		send_user "\nFAILURE: job $job_id did not complete "
+		print_err $test_type "inc21_21_good"
+		set exit_code 1
+	}
+
+	if { $matches != 1 } {
+		send_user "\nFAILURE: job didn't launch with correct limit "
+		print_err $test_type "inc21_21_good"
+		set exit_code 1
+	}
+	return $exit_code
+}
+
+#
+# Supplemental function to test21.21 that tests a job with
+# resources larger then allowed limit in the association
+#
+proc inc21_21_bad { test_type limit } {
+
+	global number bin_id ta srun test_node nthreads selectparam
+	set exit_code 0
+	set job_id 0
+	set over_lim [expr [lindex $limit 1] + 1]
+	set add ""
+
+	send_user "\n====== Test $test_type"
+	send_user "(Within: inc21.21_tests function: inc21_21_bad) ======\n"
+
+	if { [string compare $test_type "maxnode"] == 0 } {
+		set add "--exclusive"
+	} else {
+		set add "-w$test_node"
+	}
+
+	set matches 0
+	spawn $srun -v $add -t1 [lindex $limit 0]$over_lim --account=$ta \
+	    -I $bin_id
+	expect {
+		-re "Job violates accounting/QOS policy" {
+			send_user "\nThis error is expected, not a problem "
+			print_err $test_type "inc21_21_bad"
+			exp_continue
+		}
+		-re "launching ($number)" {
+			set job_id $expect_out(1,string)
+			send_user "\nFAILURE: job should not have run. "
+			print_err $test_type "inc21_21_bad"
+			set exit_code 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: srun not responding "
+			print_err $test_type "inc21_21_bad"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$job_id != 0 && [wait_for_job $job_id "DONE"] != 0} {
+		send_user "\nFAILURE: job $job_id did not complete "
+		print_err $test_type "inc21_21_bad"
+		set exit_code 1
+	}
+	return $exit_code
+}
+
+proc inc21_21_grp_test { test_type limit } {
+
+	global number bin_id ta srun sbatch test_node selectparam nthreads
+	global file_in squeue scancel bin_bash bin_chmod
+	set exit_code 0
+	set job_id 0
+	set val 0
+	set exclusive ""
+
+	send_user "\n===== Test $test_type "
+	send_user "(Within: inc21.21_tests function: inc21_21_grp_test) =====\n"
+
+	# Check and see if it is a cpu test
+	if { [string compare $test_type "grpcpus"] == 0 ||
+	     [string compare $test_type "grpcpumins"] == 0 ||
+	     [string compare $test_type "grpcpurunmins"] == 0 } {
+		if {$selectparam} {
+			set val [expr [lindex $limit 1] / $nthreads]
+		} else {
+			set val [lindex $limit 1]
+		}
+	} else {
+		set exclusive "#SBATCH --exclusive"
+		set val [lindex $limit 1]
+	}
+
+	make_bash_script $file_in "
+$exclusive
+sleep 10"
+
+	#
+	# Submit n+1 number of jobs but job n+1 should be pending
+	# since it will be past the association limit
+	#
+	set matches 0
+	for {set inx 0} {$inx <= $val} {incr inx} {
+		spawn $sbatch [lindex $limit 0]1 --account=$ta \
+		    --output=/dev/null --error=/dev/null \
+		    -t1 $file_in
+		expect {
+			-re "Submitted batch job ($number)" {
+				set job_id $expect_out(1,string)
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: srun not responding "
+				print_err $test_type "inc21_21_grp_test"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+	}
+
+	if {$matches != [expr $val + 1]} {
+		send_user "\nFAILURE $matches != [expr $val + 1]\n"
+		set exit_code 1
+	}
+
+
+	#
+	# Wait for squeue to update
+	#
+	sleep 2
+
+	set pending 0
+	set running 0
+	set mypid [spawn $squeue -h -o "\%t \%r"]
+	expect {
+		-re "PD.Assoc*" {
+			incr pending
+			exp_continue
+		}
+		-re "R.None" {
+			incr running
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: squeue not responding "
+			print_err $test_type "inc21_21_grp_test"
+			slow_kill $mypid
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if { $pending != 1 || $running != $val } {
+		send_user "\nFAILURE found $pending jobs pending "
+		send_user "and $running jobs running"
+		print_err $test_type "inc21_21_grp_test"
+		set exit_code 1
+	}
+
+	#
+	# Cancel test jobs
+	#
+	spawn $scancel --quiet --account=$ta
+	expect {
+		eof {
+			wait
+		}
+	}
+
+	return $exit_code
+
+}
+
+#
+# Supplemental function to test21.21 that tests max/grp
+# submit and jobs
+#
+proc inc21_21_submit_test { limit } {
+
+	global file_in srun sbatch squeue scancel bin_id number bin_sleep
+	global bin_rm ta maxjob_lim maxsub_lim
+	global acct_mod_desc acct_mod_acct_vals acct_mod_assoc_vals
+	global acct_mod_assoc_test_vals
+
+	set exit_code 0
+	set limit_job ""
+	set limit_sub ""
+
+	if { ![string compare $limit "maxjobsub"] } {
+		set limit_job "maxjob"
+		set limit_sub "maxsubmit"
+
+	} else {
+		set limit_job "grpjob"
+		set limit_sub "grpsubmit"
+	}
+
+
+	set acct_mod_assoc_test_vals($limit_job) \
+	    [lindex $acct_mod_assoc_vals($limit) 0]
+	set acct_mod_assoc_test_vals($limit_sub) \
+	    [lindex $acct_mod_assoc_vals($limit) 1]
+	set exit_code  [mod_acct $ta [array get acct_mod_desc] \
+			    [array get acct_mod_assoc_test_vals] \
+			    [array get acct_mod_acct_vals]]
+	if { $exit_code } {
+		return $exit_code
+	}
+
+
+	make_bash_script $file_in "
+	$bin_sleep 10
+	"
+	send_user "\n==== Test $limit (Within: inc21.21_tests function: "
+	send_user "inc21_21_submit_test) ====\n"
+
+	for {set inx 0} {$inx < $acct_mod_assoc_test_vals($limit_sub)} \
+	    {incr inx} {
+		    set job_id($inx) 0
+		    set mypid [spawn $sbatch -N1 -n1 --account=$ta \
+				   --output=/dev/null \
+				   --error=/dev/null -t5 $file_in]
+		    expect {
+			    -re "Submitted batch job ($number)" {
+				    set job_id($inx) $expect_out(1,string)
+				    exp_continue
+			    }
+			    -re "Unable to contact" {
+				    send_user "\nFAILURE: slurm appears "
+				    send_user "to be down "
+				    print_err $limit "inc21_21_submit_test"
+				    set exit_code 1
+				    exp_continue
+			    }
+			    timeout {
+				    send_user "\nFAILURE: sbatch not "
+				    send_user "responding "
+				    print_err $limit "inc21_21_submit_test"
+				    slow_kill $mypid
+				    set exit_code 1
+			    }
+			    eof {
+				    wait
+			    }
+		    }
+
+		    if { !$job_id($inx) } {
+			    send_user "\nFAILURE: sbatch didn't return jobid "
+			    print_err $limit "inc21_21_submit_test"
+			    set exit_code 1
+			    break
+		    }
+
+	    }
+
+	if { $exit_code } {
+		return $exit_code
+	}
+
+	# then submit one more over the limit and it should fail
+	set mypid [spawn $sbatch -N1 -n1 --account=$ta --output=/dev/null \
+		       --error=/dev/null -t5 $file_in]
+	expect {
+		-re "Job violates accounting/QOS policy" {
+			send_user "\nThis error is expected, not a problem"
+			print_err $limit "inc21_21_submit_test"
+			exp_continue
+		}
+		-re "Submitted batch job ($number)" {
+			send_user "\nFAILURE: this job should not have run."
+			print_err $limit "inc21_21_submit_test"
+			set exit_code 1
+			exp_continue
+		}
+		-re "Unable to contact" {
+			send_user "\nFAILURE: slurm appears to be down "
+			print_err $limit "inc21_21_submit_test"
+			set exit_code 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sbatch not responding "
+			print_err $limit "inc21_21_submit_test"
+			slow_kill $mypid
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if { $exit_code } {
+		#return $exit_code
+	}
+
+	# sleep the Schedule cycle default is 4
+	sleep 4
+
+	set matches 0
+	set mypid [spawn $squeue -h -o "\%i \%t \%r"]
+	expect {
+		-re "($job_id(2)|$job_id(3)).PD.AssocMaxJobsLimit" {
+			incr matches
+			exp_continue
+		}
+		-re "($job_id(2)|$job_id(3)).PD.AssocGrpJobsLimit" {
+			incr matches
+			exp_continue
+		}
+		-re "($job_id(0)|$job_id(1)).R.None" {
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: squeue not responding "
+			print_err $limit "inc21_21_submit_test"
+			slow_kill $mypid
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	spawn $scancel --quiet --account=$ta
+	expect {
+		eof {
+			wait
+		}
+	}
+
+
+	if { $matches != 4 } {
+		send_user "\nFAILURE: jobs are not in the expected state "
+		send_user "expected ($matches != 4)"
+		print_err $limit "inc21_21_submit_test"
+		set exit_code 1
+	}
+
+	# Clear the limits
+	set acct_mod_assoc_test_vals($limit_job) "-1"
+	set acct_mod_assoc_test_vals($limit_sub) "-1"
+
+	return $exit_code
+
+}
+
+#
+# Function that tests an association's grpwall limit
+#
+proc inc21_21_grpwall { test_type limit } {
+
+	global number bin_id ta srun bin_sleep bin_rm file_in
+	set exit_code 0
+	set job_id 0
+	set timeout 120
+
+	send_user "\n====== Test $test_type"
+	send_user "(Within: inc21.21_tests function: inc21_21_grpwall) ======\n"
+
+	make_bash_script $file_in "
+	$bin_sleep 61
+	"
+
+	set matches 0
+	send_user "Sleeping for a bit...hang tight\n"
+	spawn $srun -v [lindex $limit 0][lindex $limit 1] --account=$ta \
+	    -I $file_in
+	expect {
+		-re "launching ($number)" {
+			set job_id $expect_out(1,string)
+			incr matches
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: srun not responding "
+			print_err $test_type "inc21_21_grpwall"
+			send_user "(Within: inc21.21_tests function: "
+			send_user "inc21_21_grpwall)\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$job_id != 0 && [wait_for_job $job_id "DONE"] != 0} {
+		send_user "\nFAILURE: job $job_id did not complete "
+		print_err $test_type "inc21_21_grpwall"
+		send_user "(Within: inc21.21_tests function: inc21_21_grpwall)\n"
+		set exit_code 1
+		return $exit_code
+	}
+
+	if { $matches != 1 } {
+		send_user "\nFAILURE: job didn't launch with correct limit "
+		print_err $test_type "inc21_21_grpwall"
+		send_user "(Within: inc21.21_tests function: inc21_21_grpwall)\n"
+		set exit_code 1
+		return $exit_code
+	}
+
+	set matches 0
+	spawn $srun -v [lindex $limit 0][lindex $limit 1] --account=$ta \
+	    -I $bin_id
+	expect {
+		-re "Job violates accounting/QOS policy" {
+			send_user "\nThis error is expected, not a problem "
+			print_err $test_type "inc21_21_grpwall"
+			exp_continue
+		}
+		-re "launching ($number)" {
+			set job_id $expect_out(1,string)
+			send_user "\nFAILURE: job should not have run. "
+			print_err $test_type "inc21_21_grpwall"
+			set exit_code 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: srun not responding "
+			print_err $test_type "inc21_21_grpwall"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$job_id != 0 && [wait_for_job $job_id "DONE"] != 0} {
+		send_user "\nFAILURE: job $job_id did not complete "
+		print_err $test_type "inc21_21_grpwall"
+		set exit_code 1
+	}
+
+	return $exit_code
+
+}
diff --git a/testsuite/expect/inc21.30.15 b/testsuite/expect/inc21.30.15
index 2267623ae..f12b37e16 100644
--- a/testsuite/expect/inc21.30.15
+++ b/testsuite/expect/inc21.30.15
@@ -41,7 +41,7 @@ proc inc21_30_15 { } {
 		}
 		-re "job ($number) queued and waiting for resources" {
 			set job_id1 $expect_out(1,string)
-			send_user "\nFAILURE: Job $job_id1 show not be waiting for resources, but is not (Within: inc21.30.15)\n"
+			send_user "\nFAILURE: Job $job_id1 show not be waiting for resources, but is (Within: inc21.30.15)\n"
 			set exit_code 1
 		}
 		-re "error" {
@@ -69,7 +69,7 @@ proc inc21_30_15 { } {
 			send_user "\nJob $job_id2 is waiting for resource. This is expected do not worry (Within: inc21.30.15)\n"
 		}
 		-re "Granted job allocation ($number)" {
-			set job_id2$expect_out(1,string)
+			set job_id2 $expect_out(1,string)
 			send_user "\nFAILURE: Job $job_id2 should be pending for resources, but is not (Within: 21.30.15)\n"
 			set exit_code 1
 		}
diff --git a/testsuite/expect/inc21.30.9 b/testsuite/expect/inc21.30.9
index dc4ed5355..7fe1e2d76 100644
--- a/testsuite/expect/inc21.30.9
+++ b/testsuite/expect/inc21.30.9
@@ -39,7 +39,6 @@ proc inc21_30_9 { } {
 
 	set job_id1 0
 	set job_id2 0
-	set task_cnt 2
 	set timeout [expr $grpcpumin_num * 120]
 	set timelimit [expr $grpcpumin_num / $nthreads]
 	# Since GrpCpuMins is a decayed variable lets reset it to make sure
@@ -73,7 +72,7 @@ proc inc21_30_9 { } {
 
 	reset_qos_usage "" $qostest
 
-	spawn $salloc --account=$acct -w$test_node --qos=$qostest -n$task_cnt -t[expr $timelimit + 1] $srun $bin_sleep 1
+	spawn $salloc --account=$acct -w$test_node --qos=$qostest -n$nthreads -t[expr $timelimit + 1] $srun $bin_sleep 1
 	expect {
 		-re "Pending job allocation ($number)" {
 			set job_id2 $expect_out(1,string)
diff --git a/testsuite/expect/inc21.34.1 b/testsuite/expect/inc21.34.1
new file mode 100644
index 000000000..8a2cb6d13
--- /dev/null
+++ b/testsuite/expect/inc21.34.1
@@ -0,0 +1,96 @@
+############################################################################
+# Purpose: Test of SLURM functionality
+#          to be called from test21.34
+#          Tests if the GrpCPUmins limit is enforced
+############################################################################
+# Copyright (C) 2015 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+
+# Test GrpCPUmins
+proc inc21_34_1 { qostest } {
+	global salloc srun nthreads acct bin_sleep grpcpumin_num number
+	global totcpus test_node exit_code
+
+	send_user "\nStarting GrpCpuMins test (Within: inc21.34.1) \n\n"
+
+	if  { [test_enforce_safe_set] == 0 } {
+		send_user "\nWARNING: This test can't be run without AccountingStorageEnforce having \"safe\" in it\n"
+		return
+	}
+
+	set job_id1 0
+	set job_id2 0
+	set timeout [expr $grpcpumin_num * 120]
+	set timelimit [expr $grpcpumin_num / $nthreads]
+	# Since GrpCpuMins is a decayed variable lets reset it to make sure
+	# the test gets exactly what we would expect.
+	reset_qos_usage "" $qostest
+
+	spawn $salloc --account=$acct -w$test_node -n$nthreads -t$timelimit $srun $bin_sleep 1
+	expect {
+		-re "Granted job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nJob $job_id1 has been submitted. (Within: inc21.34.1)\n"
+		}
+		-re "Pending job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nFAILURE: This job should not be pending (Within: inc21.34.1)\n"
+			set exit_code 1
+		}
+		-re "error" {
+			send_user "\nFAILURE: Job allocation should not have failed. (Within: inc21.34.1)\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc not responding. (Within: inc21.34.1)\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	cancel_job $job_id1
+
+	reset_qos_usage "" $qostest
+
+	spawn $salloc --account=$acct -w$test_node -n$nthreads -t[expr $timelimit + 1] $srun $bin_sleep 1
+	expect {
+		-re "Pending job allocation ($number)" {
+			set job_id2 $expect_out(1,string)
+			send_user "\nJob $job_id2 is waiting for resources. This is expected. (Within: inc21.34.1)\n"
+		}
+		-re "Granted job allocation ($number)" {
+			set job_id2 $expect_out(1,string)
+			send_user "\nFAILURE: Job should be pending but is not. (Within: inc21.34.1)\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc not responding. (Within: inc21.34.1)\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	cancel_job $job_id2
+}
diff --git a/testsuite/expect/inc21.34.2 b/testsuite/expect/inc21.34.2
new file mode 100644
index 000000000..bf99567cf
--- /dev/null
+++ b/testsuite/expect/inc21.34.2
@@ -0,0 +1,91 @@
+############################################################################
+# Purpose: Test of SLURM functionality
+#          to be called from test21.34
+#          Tests if the Grpwall limit is enforced
+############################################################################
+# Copyright (C) 2015 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+
+proc inc21_34_2 { qostest } {
+	global salloc srun acct bin_sleep grpwall_num number exit_code
+
+	send_user "\nStarting Grpwall test (Within: inc21.34.2)\n\n"
+	set job_id1 0
+	set job_id2 0
+	set timeout 120
+
+	# Since wall is a decayed variable lets reset it to make sure the test
+	# gets exactly what we would expect.
+	reset_qos_usage "" $qostest
+
+	spawn $salloc --account=$acct -N2 -t$grpwall_num $srun $bin_sleep 10
+	expect {
+		-re "Granted job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nJob $job_id1 has been submitted. (Within: inc21.34.2)\n"
+		}
+		-re "Pending job allocation ($number)" {
+			set job_id1 $expect_out(1,string)
+			send_user "\nFAILURE: This job should not be pending, but is. (Within: inc21.34.2)\n"
+			set exit_code 1
+		}
+		-re "error" {
+			send_user "\nFAILURE: Job allocation should not have failed. (Within: inc21.34.2)\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc not responding. (Within: inc21.34.2)\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	# Cancel job
+	cancel_job $job_id1
+
+	reset_qos_usage "" $qostest
+
+	spawn $salloc --account=$acct -N2 -t2 $srun $bin_sleep 10
+	expect {
+		-re "Pending job allocation ($number)" {
+			set job_id2 $expect_out(1,string)
+			send_user "\nJob $job_id2 is waiting for resources. This is expected. (Within: inc21.34.2)\n"
+		}
+		-re "Granted job allocation ($number)" {
+			set job_id2 $expect_out(1,string)
+			send_user "\nFAILURE: Job $job_id2 should be waiting for resources, but is not. (Within: inc21.34.2)\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: salloc is not responding (Within: inc21.34.2)\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	# Cancel job
+	cancel_job $job_id2
+}
diff --git a/testsuite/expect/inc21.34_tests b/testsuite/expect/inc21.34_tests
new file mode 100644
index 000000000..3615af9b8
--- /dev/null
+++ b/testsuite/expect/inc21.34_tests
@@ -0,0 +1,610 @@
+############################################################################
+# Purpose: Test of SLURM functionality
+#          to be called from test21.34
+#          Tests partition and job qos limits
+############################################################################
+# Copyright (C) 2015 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./inc21.30.1
+source ./inc21.30.2
+source ./inc21.30.3
+source ./inc21.30.4
+source ./inc21.30.5
+source ./inc21.30.6
+source ./inc21.30.7
+source ./inc21.30.8
+source ./inc21.34.1
+source ./inc21.30.10
+source ./inc21.30.11
+source ./inc21.34.2
+source ./inc21.30.13
+source ./inc21.30.14
+source ./inc21.30.15
+source ./inc21.30.16
+
+##########################################################
+#
+# Test that partition qos limits are enforced
+#
+##########################################################
+proc part_test { } {
+
+	global grn_num grcpu_num grjobs_num grsub_num grsub_num maxcpu_num
+	global maxnode_num maxjobs_num maxjobsub_num grpcpumin_num
+	global grpcpurunmin_num grpmem_num grpwall_num maxcpumin_num
+	global maxwall_num maxcpuspu_num maxnodespu_num
+	global exit_code part_qos mod_job_qos time_spacing selectparam
+	global nthreads job_qos mod_part_qos
+
+	# We will set qostest with the qos that we will test since some of
+	# the 21.30 inc tests require the qostest variable
+	set qostest $part_qos
+
+	#
+	# Test GrpNode limit
+	#
+	set mod_job_qos(GrpNodes) 1
+	set mod_part_qos(GrpNodes) $grn_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_30_1
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	# Reset the value to 0
+	set mod_job_qos(GrpNodes) "-1"
+	set mod_part_qos(GrpNodes) "-1"
+
+	#
+	# Test GrpCpus
+	#
+	set mod_job_qos(GrpCpus) 1
+	set mod_part_qos(GrpCpus) $grcpu_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_30_2
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(GrpCpus) "-1"
+	set mod_part_qos(GrpCpus) "-1"
+
+	#
+	# test GrpJob limits
+	#
+	set mod_job_qos(GrpJobs) 1
+	set mod_part_qos(GrpJobs) $grjobs_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_30_3
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(GrpJobs) "-1"
+	set mod_part_qos(GrpJobs) "-1"
+
+	#
+	# test GrpSubmit
+	#
+	set mod_job_qos(GrpSubmit) 1
+	set mod_part_qos(GrpSubmit) $grsub_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_30_4
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(GrpSubmit) "-1"
+	set mod_part_qos(GrpSubmit) "-1"
+
+	#
+	# Test MaxCpus limits
+	#
+	set mod_job_qos(MaxCpus) 1
+	set mod_part_qos(MaxCpus) $maxcpu_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_30_5
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(MaxCpus) "-1"
+	set mod_part_qos(MaxCpus) "-1"
+
+	#
+	# Test MaxNode limit
+	#
+	set mod_job_qos(MaxNodes) 1
+	set mod_part_qos(MaxNodes) $maxnode_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_30_6
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(MaxNodes) "-1"
+	set mod_part_qos(MaxNodes) "-1"
+
+	#
+	# Test MaxJobs limit
+	#
+	set mod_job_qos(MaxJobs) 1
+	set mod_part_qos(MaxJobs) $maxjobs_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_30_7
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(MaxJobs) "-1"
+	set mod_part_qos(MaxJobs) "-1"
+
+	#
+	# Test MaxJobsSubmits limit
+	#
+	set mod_job_qos(MaxSubmitJobs) 1
+	set mod_part_qos(MaxSubmitJobs) $maxjobsub_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_30_8
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(MaxSubmitJobs) "-1"
+	set mod_part_qos(MaxSubmitJobs) "-1"
+
+	#
+	# Test GroupCPUMins
+	#
+	set mod_job_qos(GrpCpuMin) 1
+	set mod_part_qos(GrpCpuMin) $grpcpumin_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_34_1 $part_qos
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(GrpCpuMin) "-1"
+	set mod_part_qos(GrpCpuMin) "-1"
+
+	#
+	# Test GroupCPURunMins
+	#
+	set mod_job_qos(GrpCpuRunMin) 1
+	set mod_part_qos(GrpCpuRunMin) $grpcpurunmin_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_30_10
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(GrpCpuRunMin) "-1"
+	set mod_part_qos(GrpCpuRunMin) "-1"
+
+	#
+	# Test Group Memory
+	#
+	set mod_job_qos(GrpMem) 1
+	set mod_part_qos(GrpMem) $grpmem_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_30_11
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(GrpMem) "-1"
+	set mod_part_qos(GrpMem) "-1"
+
+	#
+	# Test Group wall
+	#
+	set mod_job_qos(GrpWall) 1
+	set mod_part_qos(GrpWall) $grpwall_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_34_2 $part_qos
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(GrpWall) "-1"
+	set mod_part_qos(GrpWall) "-1"
+
+	#
+	# Test Max Cpu Mins
+	#
+	set mod_job_qos(MaxCpuMin) 1
+	set mod_part_qos(MaxCpuMin) $maxcpumin_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_30_13
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(MaxCpuMin) "-1"
+	set mod_part_qos(MaxCpuMin) "-1"
+
+	#
+	# Test Max Wall
+	#
+	set mod_job_qos(MaxWall) 1
+	set mod_part_qos(MaxWall) $maxwall_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_30_14
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(MaxWall) "-1"
+	set mod_part_qos(MaxWall) "-1"
+
+	#
+	# Test Max CPUs Per User
+	#
+
+	# If CR_CORE set maxcpuspu a multiple number of threads
+	if {$selectparam} {
+		set maxcpuspu_num [expr $maxcpuspu_num * $nthreads]
+	}
+	set mod_job_qos(MaxCpusPerUser) 1
+	set mod_part_qos(MaxCpusPerUser) $maxcpuspu_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_30_15
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+
+	}
+	set mod_job_qos(MaxCpusPerUser) "-1"
+	set mod_part_qos(MaxCpusPerUser) "-1"
+
+	#
+	# Test MaxNodesPerUser
+	#
+	set mod_job_qos(MaxNodesPerUser) 1
+	set mod_part_qos(MaxNodesPerUser) $maxnodespu_num
+	mod_qos $part_qos [array get mod_part_qos]
+	mod_qos $job_qos [array get mod_job_qos]
+	sleep $time_spacing
+	inc21_30_16
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(MaxNodesPerUser) "-1"
+	set mod_part_qos(MaxNodesPerUser) "-1"
+}
+
+##########################################################
+#
+# Test that jobs qos limits are enforced after
+# PartitionQos flag is set on the job's qos
+#
+##########################################################
+
+proc qos_test { } {
+
+	global grn_num grcpu_num grjobs_num grsub_num grsub_num maxcpu_num
+	global maxnode_num maxjobs_num maxjobsub_num grpcpumin_num
+	global grpcpurunmin_num grpmem_num grpwall_num maxcpumin_num
+	global maxwall_num maxcpuspu_num maxnodespu_num
+	global exit_code job_qos mod_job_qos time_spacing selectparam
+	global nthreads part_qos mod_part_qos
+
+	# We will set qostest with the qos that we will test since some of
+	# the 21.30 inc tests require the qostest variable
+	set qostest $job_qos
+
+	#
+	# Test GrpNode limit
+	#
+	set mod_job_qos(GrpNodes) $grn_num
+	set mod_part_qos(GrpNodes) 1
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_30_1
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	# Reset the value to 0
+	set mod_job_qos(GrpNodes) "-1"
+	set mod_part_qos(GrpNodes) "-1"
+
+	#
+	# Test GrpCpus
+	#
+	set mod_job_qos(GrpCpus) $grcpu_num
+	set mod_part_qos(GrpCpus) $grcpu_num
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_30_2
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(GrpCpus) "-1"
+	set mod_part_qos(GrpCpus) "-1"
+
+	#
+	# test GrpJob limits
+	#
+	set mod_job_qos(GrpJobs) $grjobs_num
+	set mod_part_qos(GrpJobs) 1
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_30_3
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(GrpJobs) "-1"
+	set mod_part_qos(GrpJobs) "-1"
+
+	#
+	# test GrpSubmit
+	#
+	set mod_job_qos(GrpSubmit) $grsub_num
+	set mod_part_qos(GrpSubmit) 1
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_30_4
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(GrpSubmit) "-1"
+	set mod_part_qos(GrpSubmit) "-1"
+
+	#
+	# Test MaxCpus limits
+	#
+	set mod_job_qos(MaxCpus) $maxcpu_num
+	set mod_part_qos(MaxCpus) 1
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_30_5
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(MaxCpus) "-1"
+	set mod_part_qos(MaxCpus) "-1"
+
+	#
+	# Test MaxNode limit
+	#
+	set mod_job_qos(MaxNodes) $maxnode_num
+	set mod_part_qos(MaxNodes) 1
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_30_6
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(MaxNodes) "-1"
+	set mod_part_qos(MaxNodes) "-1"
+
+	#
+	# Test MaxJobs limit
+	#
+	set mod_job_qos(MaxJobs) $maxjobs_num
+	set mod_part_qos(MaxJobs) 1
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_30_7
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(MaxJobs) "-1"
+	set mod_part_qos(MaxJobs) "-1"
+
+	#
+	# Test MaxJobsSubmits limit
+	#
+	set mod_job_qos(MaxSubmitJobs) $maxjobsub_num
+	set mod_part_qos(MaxSubmitJobs) 1
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_30_8
+	if {$exit_code != 0} {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(MaxSubmitJobs) "-1"
+	set mod_part_qos(MaxSubmitJobs) "-1"
+
+	#
+	# Test GroupCPUMins
+	#
+	set mod_job_qos(GrpCpuMin) $grpcpumin_num
+	set mod_part_qos(GrpCpuMin) 1
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_34_1 $job_qos
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(GrpCpuMin) "-1"
+	set mod_part_qos(GrpCpuMin) "-1"
+
+	#
+	# Test GroupCPURunMins
+	#
+	set mod_job_qos(GrpCpuRunMin) $grpcpurunmin_num
+	set mod_part_qos(GrpCpuRunMin) 1
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_30_10
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(GrpCpuRunMin) "-1"
+	set mod_part_qos(GrpCpuRunMin) "-1"
+
+	#
+	# Test Group Memory
+	#
+	set mod_job_qos(GrpMem) $grpmem_num
+	set mod_part_qos(GrpMem) 1
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_30_11
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(GrpMem) "-1"
+	set mod_part_qos(GrpMem) "-1"
+
+	#
+	# Test Group wall
+	#
+	set mod_job_qos(GrpWall) $grpwall_num
+	set mod_part_qos(GrpWall) 1
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_34_2 $job_qos
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(GrpWall) "-1"
+	set mod_part_qos(GrpWall) "-1"
+
+	#
+	# Test Max Cpu Mins
+	#
+	set mod_job_qos(MaxCpuMin) $maxcpumin_num
+	set mod_part_qos(MaxCpuMin) 1
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_30_13
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(MaxCpuMin) "-1"
+	set mod_part_qos(MaxCpuMin) "-1"
+
+	#
+	# Test Max Wall
+	#
+	set mod_job_qos(MaxWall) $maxwall_num
+	set mod_part_qos(MaxWall) 1
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_30_14
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(MaxWall) "-1"
+	set mod_part_qos(MaxWall) "-1"
+
+	#
+	# Test Max CPUs Per User
+	#
+
+	# If CR_CORE set maxcpuspu a multiple number of threads
+	if {$selectparam} {
+		set maxcpuspu_num [expr $maxcpuspu_num * $nthreads]
+	}
+
+	set mod_job_qos(MaxCpusPerUser) $maxcpuspu_num
+	set mod_part_qos(MaxCpusPerUser) 1
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_30_15
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+
+	}
+	set mod_job_qos(MaxCpusPerUser) "-1"
+	set mod_part_qos(MaxCpusPerUser) "-1"
+
+	#
+	# Test MaxNodesPerUser
+	#
+	set mod_job_qos(MaxNodesPerUser) $maxnodespu_num
+	set mod_part_qos(MaxNodesPerUser) 1
+	mod_qos $job_qos [array get mod_job_qos]
+	mod_qos $part_qos [array get mod_part_qos]
+	sleep $time_spacing
+	inc21_30_16
+	if {$exit_code != 0 } {
+		cleanup
+		exit 1
+	}
+	set mod_job_qos(MaxNodesPerUser) "-1"
+	set mod_part_qos(MaxNodesPerUser) "-1"
+
+}
diff --git a/testsuite/expect/inc3.11.4 b/testsuite/expect/inc3.11.4
index a089c3a44..824007130 100644
--- a/testsuite/expect/inc3.11.4
+++ b/testsuite/expect/inc3.11.4
@@ -57,7 +57,7 @@ proc inc3_11_4 {} {
 	expect {
 		-re "Submitted batch job ($number)" {
 			set job_id $expect_out(1,string)
-			exec $scancel $job_id
+			cancel_job $job_id
 			send_user "\n\033\[31mFAILURE: job submit should have been denied (Within: inc3.11.4)\033\[m\n"
 			set exit_code 1
 			exp_continue
diff --git a/testsuite/expect/inc3.11.5 b/testsuite/expect/inc3.11.5
index f85750a6e..ff2482552 100644
--- a/testsuite/expect/inc3.11.5
+++ b/testsuite/expect/inc3.11.5
@@ -47,7 +47,7 @@ proc inc3_11_5 {} {
 	# Test for node reservation conflict
 	set ret_code [create_res "StartTime=now+60minutes Duration=60 NodeCnt=1 user=$user_name" 1]
 	if {$ret_code == 0} {
-		send_user "\n\033\[31mFAILURE: Reservation $test did not fail but should have (Within: inc3.11.5)\033\[m\n"
+		send_user "\n\033\[31mFAILURE: Reservation did not fail but should have (Within: inc3.11.5)\033\[m\n"
 		delete_res $res_name
 		exit 1
 	} else {
diff --git a/testsuite/expect/regression b/testsuite/expect/regression
index ea5e3952b..5529d398b 100755
--- a/testsuite/expect/regression
+++ b/testsuite/expect/regression
@@ -56,7 +56,7 @@ fi
 /bin/date
 BEGIN_TIME=`date +%s`
 for major in `seq 1 100`; do
-	for minor in `seq 1 100`; do
+	for minor in `seq 1 150`; do
 		TEST=test${major}.${minor}
 		if [ ! -f ./$TEST ]; then continue; fi
 
diff --git a/testsuite/expect/test1.100 b/testsuite/expect/test1.100
new file mode 100755
index 000000000..3cd2c14d5
--- /dev/null
+++ b/testsuite/expect/test1.100
@@ -0,0 +1,171 @@
+#!/usr/bin/env expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test of pack/nopack task distribution.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "WARNING: ..." with an explanation of why the test can't be made, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2002-2007 The Regents of the University of California.
+# Copyright (C) 2008-2010 Lawrence Livermore National Security.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Morris Jette <jette1@llnl.gov>
+# CODE-OCEC-09-009. All rights reserved.
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "1.100"
+set exit_code   0
+
+print_header $test_id
+
+if { [test_front_end] } {
+	send_user "\nWARNING: This test is incompatible with front end systems\n"
+	exit 0
+} elseif {[test_serial]} {
+	send_user "\nWARNING: This test is incompatible with serial system\n"
+	exit 0
+}
+if {[test_front_end] != 0} {
+	send_user "\nWARNING: This test is incompatible with front-end systems\n"
+	exit $exit_code
+}
+
+if {[test_launch_poe]} {
+	set use_envvar MP_I_UPMD_HOSTNAME
+} else {
+	set use_envvar SLURM_NODEID
+}
+
+#
+# Submit a two node job with "pack" distribution
+#
+set expected_layout [list 0 0 0 1]
+set tested_layout [list -1 -1 -1 -1]
+set timeout $max_job_delay
+set srun_pid [spawn $srun -O -N2 -n4 --exclusive -l --distribution=pack -t1 $bin_printenv $use_envvar]
+expect {
+	-re "More processors requested than permitted" {
+		send_user "\nWARNING: can't test srun task distribution\n"
+		exit 0
+	}
+	-re "Node count specification invalid" {
+		send_user "\nWARNING: can't test srun task distribution\n"
+		exit 0
+	}
+	-re "($number): ($number)" {
+		set index $expect_out(1,string)
+		set value $expect_out(2,string)
+		lset tested_layout $index $value
+		exp_continue
+	}
+	-re "($number).($number):($alpha_numeric_under)" {
+		set index $expect_out(1,string)
+		set index2 $expect_out(2,string)
+		if { $index == 0 } {
+			set value 0
+		} else {
+			set value 1
+		}
+		lset tested_layout $index $value
+		lset tested_layout $index2 $value
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: srun not responding\n"
+		slow_kill $srun_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+#
+# Verify "pack" distribution of tasks
+#
+send_user "Optimal task layout was: $expected_layout\n"
+send_user "Actual task layout was : $tested_layout\n"
+if {[string compare $tested_layout $expected_layout]} {
+	send_user "\nFAILURE: failed to distribute tasks in pack fashion\n"
+	set exit_code 1
+}
+
+#
+# Submit a two node job with "nopack" distribution
+#
+set expected_layout [list 0 0 1 1]
+set tested_layout [list -1 -1 -1 -1]
+set timeout $max_job_delay
+set srun_pid [spawn $srun -O -N2 -n4 --exclusive -l --distribution=nopack -t1 $bin_printenv $use_envvar]
+expect {
+	-re "More processors requested than permitted" {
+		send_user "\nWARNING: can't test srun task distribution\n"
+		exit 0
+	}
+	-re "Node count specification invalid" {
+		send_user "\nWARNING: can't test srun task distribution\n"
+		exit 0
+	}
+	-re "($number): ($number)" {
+		set index $expect_out(1,string)
+		set value $expect_out(2,string)
+		lset tested_layout $index $value
+		exp_continue
+	}
+	-re "($number).($number):($alpha_numeric_under)" {
+		set index $expect_out(1,string)
+		set index2 $expect_out(2,string)
+		if { $index == 0 } {
+			set value 0
+		} else {
+			set value 1
+		}
+		lset tested_layout $index $value
+		lset tested_layout $index2 $value
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: srun not responding\n"
+		slow_kill $srun_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+#
+# Verify "nopack" distribution of tasks
+#
+send_user "Optimal task layout was: $expected_layout\n"
+send_user "Actual task layout was : $tested_layout\n"
+if {[string compare $tested_layout $expected_layout]} {
+	send_user "\nFAILURE: failed to distribute tasks in nopack fashion\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test1.14 b/testsuite/expect/test1.14
index 1bbfde8e6..9d046cc41 100755
--- a/testsuite/expect/test1.14
+++ b/testsuite/expect/test1.14
@@ -58,7 +58,7 @@ if {[test_launch_poe]} {
 # Delete left-over input script
 # Build input script file
 # Run one more step than allocated CPUs and make sure it waits
-# The "sleep 2" is meant to insure the earlier job steps start first
+# The "sleep 4" is meant to insure the earlier job steps start first
 #
 exec $bin_rm -f $file_in $file_in2 $file_out
 make_bash_script $file_in "
@@ -69,12 +69,11 @@ make_bash_script $file_in "
     $srun --exclusive -n1 $bin_sleep $sleep_secs &
     inx=\$((inx+1))
   done
-  $bin_sleep 2
+  $bin_sleep 4
   $srun -v --exclusive -n1 ./$file_in2 &
   wait
 "
 make_bash_script $file_in2 "
-  $bin_sleep 2
   $scontrol show steps
 "
 
@@ -149,7 +148,7 @@ if {$exit_code == 0} {
 # Delete left-over input script
 # Build another input script file
 # Run one more step than allocated CPUs with immediate option and make aborts
-# The "sleep 2" is meant to insure the earlier job steps start first
+# The "sleep 4" is meant to insure the earlier job steps start first
 #
 exec $bin_rm -f $file_in $file_out
 make_bash_script $file_in "
@@ -159,7 +158,7 @@ make_bash_script $file_in "
     $srun --exclusive -n1 $bin_sleep $sleep_secs &
     inx=\$((inx+1))
   done
-  $bin_sleep 2
+  $bin_sleep 4
   $srun -v --exclusive -n1 --immediate $file_in2 &
   wait
 "
@@ -222,7 +221,7 @@ expect {
 }
 
 if { $matches != 1 } {
-	send_user "\nFAILURE: Problem --exclusive and --immediate option for step ($matches)\n"
+	send_user "\nFAILURE: Problem --exclusive and --immediate option for step ($matches != 1)\n"
 	set exit_code 1
 }
 
diff --git a/testsuite/expect/test1.27 b/testsuite/expect/test1.27
index b2d4b8c1e..83f7c3352 100755
--- a/testsuite/expect/test1.27
+++ b/testsuite/expect/test1.27
@@ -59,6 +59,20 @@ if {[test_alps]} {
 	    SLURM_NTASKS 1
 	    SLURM_STEPID 0
 	}
+} elseif { [test_bluegene] } {
+	array set good_vars {
+	    SLURM_CPUS_ON_NODE 1
+	    SLURM_CPUS_PER_TASK 1
+	    SLURM_JOB_ID 1
+	    SLURM_LAUNCH_NODE_IPADDR 0
+	    SLURM_NNODES 0
+	    SLURM_NODELIST 0
+	    SLURM_NTASKS 1
+	    SLURM_SRUN_COMM_HOST 0
+	    SLURM_SRUN_COMM_PORT 1
+	    SLURM_STEPID 0
+	    SLURM_TASKS_PER_NODE 1
+	}
 } elseif {[test_front_end]} {
 	array set good_vars {
 	    SLURM_CPUS_ON_NODE 1
diff --git a/testsuite/expect/test1.28 b/testsuite/expect/test1.28
index 7b888a71d..e11fd08a2 100755
--- a/testsuite/expect/test1.28
+++ b/testsuite/expect/test1.28
@@ -41,7 +41,7 @@ set matches          0
 
 print_header $test_id
 
-make_bash_script $file_in "env"
+make_bash_script $file_in "env | grep TEST; exit 0"
 
 #
 # Spawn a job via srun to print environment variables
diff --git a/testsuite/expect/test1.58 b/testsuite/expect/test1.58
index 9785a00d3..8c177b226 100755
--- a/testsuite/expect/test1.58
+++ b/testsuite/expect/test1.58
@@ -98,7 +98,10 @@ if {$got_pattern == 0} {
 #
 # Release the allocation by killing salloc (really it kills the "sleep")
 #
-cancel_job $jobid
+if {[cancel_job $jobid] != 0} {
+	set exit_code 1
+}
+
 set spawn_id $srun_alloc_sid
 exec kill -9 $srun_alloc_pid
 expect {
@@ -110,7 +113,7 @@ expect {
 	}
 }
 
-if {$got_pattern == 1} {
+if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
 }
 exit $exit_code
diff --git a/testsuite/expect/test1.59 b/testsuite/expect/test1.59
index 5cad9feec..9412e0698 100755
--- a/testsuite/expect/test1.59
+++ b/testsuite/expect/test1.59
@@ -205,7 +205,7 @@ for {set i 0} {$i<4} {incr i} {
 		}
 		timeout {
 			send_user "\nFAILURE: srun not responding\n"
-			exec $scancel --quiet $job_id
+			cancel_job $job_id
 			set exit_code 1
 		}
 		eof {
@@ -366,7 +366,7 @@ for {set i 0} {$i<5} {incr i} {
 		}
 		timeout {
 			send_user "\nFAILURE: srun not responding\n"
-			exec $scancel --quiet $job_id
+			cancel_job $job_id
 			set exit_code 1
 		}
 		eof {
diff --git a/testsuite/expect/test1.75 b/testsuite/expect/test1.75
index 83aa246ae..e0056ac4b 100755
--- a/testsuite/expect/test1.75
+++ b/testsuite/expect/test1.75
@@ -31,7 +31,9 @@
 source ./globals
 
 set test_id      1.75
-set file_in      "test$test_id\_sc"
+set file_id      "test$test_id\_id.bash"
+set file_in      "test$test_id\.bash"
+set file_out     "test$test_id\.out"
 set node         ""
 set threads      0
 set job_id       0
@@ -75,17 +77,26 @@ if {[test_cpu_affinity_or_cgroup] == 0} {
 proc sub_job { freq } {
 
 	global srun sacct node threads job_id number wait_for_job float timeout exit_code
-	global alpha_numeric_under
+	global alpha_numeric_under file_id avail_governors
 
 	set timeout 120
 	array set this_freq $freq
 
 	foreach option [array names this_freq] {
-
+		send_user "\n======= TESTING FREQUENCY/GOVERNOR $option =======\n"
 		set job_id 0
-		set srun_pid [spawn $srun -v -t1 --cpu-freq=$option -n$threads -w$node sleep 5]
+		set srun_pid [spawn $srun -t1 --cpu-freq=$option -n$threads -w$node $file_id]
 		expect {
-			-re "launching ($number).0" {
+			-re "not allowed" {
+				if {[string first $option $avail_governors] == -1} {
+					send_user "\nThis error is expected, no worries\n"
+				} else {
+					send_user "\nFAILURE: This CPU frequency should be valid\n"
+					set exit_code 1
+				}
+				exp_continue	
+			}
+			-re "SLURM_JOB_ID=($number)" {
 				set job_id $expect_out(1,string)
 				exp_continue
 			}
@@ -97,7 +108,6 @@ proc sub_job { freq } {
 				wait
 			}
 		}
-
 		if {$job_id == 0} {
 			send_user "\nFAILURE: srun did not submit job\b"
 			exit 1
@@ -132,10 +142,11 @@ proc sub_job { freq } {
 	return [array get this_freq]
 }
 
-make_bash_script $file_in "sleep 2"
+make_bash_script $file_id "echo SLURM_JOB_ID=\$SLURM_JOB_ID; $bin_sleep 10"
+make_bash_script $file_in "cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_available_governors"
 
-# Get a node that we can use
-spawn $sbatch -N1 -t1 -o/dev/null --exclusive $file_in
+# Identify a node that we can use and available governors
+spawn $sbatch -N1 -t1 -o/dev/null --exclusive -o $file_out $file_in
 expect {
 	-re "Submitted batch job ($number)" {
 		set job_id $expect_out(1,string)
@@ -149,13 +160,35 @@ expect {
 		wait
 	}
 }
-
 if {$job_id == 0} {
 	send_user "\nFAILURE: sbatch did not submit job\n"
 	exit 1
 }
 
-wait_for_job $job_id RUNNING
+if {[wait_for_job $job_id "DONE"] != 0} {
+	send_user "\nFAILURE: waiting for job to complete\n"
+	cancel_job $job_id
+	exit 1
+}
+
+set userspace_governor 0
+if {[wait_for_file $file_out] == 0} {
+	spawn $bin_cat $file_out
+	expect {
+		-re "ondemand" {
+			set userspace_governor 1
+			exp_continue
+		}
+                eof {
+			wait
+                }
+        }
+}
+if {$userspace_governor == 0} {
+	send_user "\nWARNING: Node configuration prevents direct control over CPU frequency\n"
+	exec $bin_rm -f $file_id $file_in $file_out
+	exit 0
+}
 
 set match 0
 spawn $scontrol show job $job_id
@@ -196,31 +229,39 @@ expect {
 
 cancel_job $job_id
 
+#
+# Test various CPU governor values
+#
+set avail_governors [get_cpu_governors]
+send_user "\nCpuFreqGovernors = $avail_governors\n"
+
+array set freq_lvl_2 [sub_job [array get freq_lvl_2]]
+
+if {($freq_lvl_2(conservative) == 0) || ($freq_lvl_2(ondemand) == 0) ||
+    ($freq_lvl_2(performance) == 0)  || ($freq_lvl_2(powersave) == 0)} {
+	send_user "\nFAILURE: CPU frequency values are invalid\n"
+	set exit_code 1
+}
+
+#
+# Test various CPU frequency values
+#
 array set freq_lvl_1 [sub_job [array get freq_lvl_1]]
 
+send_user "\n======= Reported frequencies =======\n"
+foreach name [array names freq_lvl_1] {
+	send_user "$name is $freq_lvl_1($name) GHz\n"
+}
+
 if { (($freq_lvl_1(low) > $freq_lvl_1(medium)) ||
       ($freq_lvl_1(medium) > $freq_lvl_1(high)) ||
       ($freq_lvl_1(highm1) > $freq_lvl_1(high)))} {
 	send_user "\nFAILURE: CPU frequency values are not valid\n"
-	foreach name [array names freq_lvl_1] {
-		send_user "$name is $freq_lvl_1($name)\n"
-	}
 	exit 1
 }
 
-array set freq_lvl_2 [sub_job [array get freq_lvl_2]]
-
-if {($freq_lvl_2(conservative) == 0) || ($freq_lvl_2(ondemand) == 0)
-     || ($freq_lvl_2(performance) == 0) || ($freq_lvl_2(powersave) == 0)} {
-	send_user "\nFAILURE: CPU frequency values are invalid\n"
-	foreach name [array names freq_lvl_2] {
-		send_user "$name is $freq_lvl_2($name)\n"
-	}
-	set exit_code 1
-}
-
 if {$exit_code == 0} {
-	exec $bin_rm -f $file_in
-	send_user "\nSUCCCESS\n"
+	exec $bin_rm -f $file_id $file_in $file_out
+	send_user "\nSUCCESS\n"
 }
 exit $exit_code
diff --git a/testsuite/expect/test1.76 b/testsuite/expect/test1.76
new file mode 100755
index 000000000..d8ec0bdf4
--- /dev/null
+++ b/testsuite/expect/test1.76
@@ -0,0 +1,368 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose:  Test of route/topology plugin
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+#
+############################################################################
+#  Copyright (C) 2014 Bull S. A. S.
+#		Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois.
+#  Written by Rod Schultz <Rod.Schultz@bull.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+# Test various --cpu-freq options
+
+# Assumptions
+# - the first node selected has frequency scaling, with ondemand and userspace
+# - all cpus on a node have the same scaling options.
+# - task affinity either cgroups or cpusets
+# - fastschedule=0
+
+set test_id	"1.76"
+set nerr	0
+set exit_code	0
+set test_prog   "test$test_id.prog"
+set avail_freq	[split "1000 2000" " "]
+set nfreq	0
+set have_on_demand 1
+set have_user_space 1
+set wd		[pwd]
+print_header	$test_id
+
+################################################################
+# Run a test case.
+#
+# Parameters
+# - opt		value of --cpu-freq
+# - expmin      expected value of scaling_min (if 0, don't check)
+# - expcur	expected value of scaling_cur (if 0, don't check)
+# - expmax      expected value of scaling_max (if 0, don't check)
+# - expgov      expected value of scaling_governor (if 0, don't check)
+#
+# Returns
+# 0 on success, 1 on failure.
+#   FAILURE message is sent to user on failure.
+################################################################
+proc test_case {opt expmin expcur expmax expgov} {
+	global bin_rm srun sbatch test_node test_cpu wd number
+	log_user 0
+	exec $bin_rm -f $wd/test1.76.out
+	set job_id 0
+	spawn $sbatch -w $test_node --exclusive -o test1.76.out test1.76.batch $test_cpu $opt $wd $srun
+	expect {
+		-re "Submitted batch job ($number)" {
+			set job_id $expect_out(1,string)
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	if {$job_id == 0} {
+		send_user "\nFAILURE: batch submit failure\n"
+		return 1
+	}
+	#
+	# Wait for job to complete
+	#
+	if {[wait_for_job $job_id "DONE"] != 0} {
+		send_user "\nFAILURE: waiting for job to complete\n"
+		return 1
+	}
+	if {[wait_for_file $wd/test1.76.out] != 0} {
+		send_user "\nFAILURE: waiting for file $wd/test1.76.out\n"
+		return 1
+	}
+	set line ""
+	set start_value ""
+	set test_value ""
+	set end_value ""
+	set fd 0
+	set fd [open $wd/test1.76.out "r"]
+
+	# Search for starting values
+	while {$fd > 0 && [gets $fd line] != -1} {
+		set pos [string first "scaling_values:" $line]
+		if {$pos == 0} {
+			set start_value [string trim $line "\r"]
+			set start_value [string range $start_value 16 end]
+			break
+		} 
+	}
+	if {[string compare $start_value ""] == 0} {
+		send_user "\nERROR: --cpu-freq=$opt -- can't find starting values\n"
+		close $fd
+		log_user 1
+		return 1
+	}
+
+	# Search for test values
+	while {$fd > 0 && [gets $fd line] != -1} {
+		set pos [string first "scaling_values:" $line]
+		if {$pos == 0} {
+			set test_value [string trim $line "\r"]
+			set test_value [string range $test_value 16 end]
+			break
+		} 
+	}
+	if {[string compare $test_value ""] == 0} {
+		send_user "\nERROR: --cpu-freq=$opt -- can't find test values\n"
+		close $fd
+		log_user 1
+		return 1
+	}
+
+	# Search for ending values
+	while {$fd > 0 && [gets $fd line] != -1} {
+		set pos [string first "scaling_values:" $line]
+		if {$pos == 0} {
+			set end_value [string trim $line "\r"]
+			set end_value [string range $end_value 16 end]
+			break
+		} 
+	}
+	if {[string compare $end_value ""] == 0} {
+		send_user "\nERROR: --cpu-freq=$opt -- can't find ending values\n"
+		close $fd
+		log_user 1
+		return 1
+	}
+	close $fd
+
+	send_user "\n--cpu-freq=$opt\n"
+	send_user "start_freq: $start_value\n"
+	send_user "test_freq:  $test_value\n"
+	send_user "end_freq:   $end_value\n\n"
+
+	set strt_vals [split $start_value " "]
+	set sgov [string range [lindex $strt_vals 0] 4 end]
+	set smin [string range [lindex $strt_vals 1] 4 end]
+	set scur [string range [lindex $strt_vals 2] 4 end]
+	set smax [string range [lindex $strt_vals 3] 4 end]
+
+	set tst_vals [split $test_value " "]
+	set tgov [string range [lindex $tst_vals 0] 4 end]
+	set tmin [string range [lindex $tst_vals 1] 4 end]
+	set tcur [string range [lindex $tst_vals 2] 4 end]
+	set tmax [string range [lindex $tst_vals 3] 4 end]
+
+	set end_vals [split $end_value " "]
+	set egov [string range [lindex $end_vals 0] 4 end]
+	set emin [string range [lindex $end_vals 1] 4 end]
+	set ecur [string range [lindex $end_vals 2] 4 end]
+	set emax [string range [lindex $end_vals 3] 4 end]
+
+	if {[string compare $expgov 0] !=0 
+	    && [string compare $tgov $expgov] != 0} {
+		send_user "\nERROR: --cpu-freq=$opt -- test governor $tgov not expected ($expgov)\n"
+		log_user 1
+		return 1
+	}
+	if {[string compare $expmin 0] !=0 
+	    && [string compare $tmin $expmin] != 0} {
+		send_user "\nERROR: --cpu-freq=$opt -- test min $tmin not expected ($expmin)\n"
+		log_user 1
+		return 1
+	}
+	if {[string compare $expcur 0] !=0 
+	    && [string compare $tcur $expcur] != 0} {
+		send_user "\nERROR: --cpu-freq=$opt -- test cur $tcur not expected ($expcur)\n"
+		log_user 1
+		return 1
+	}
+	if {[string compare $expmax 0] !=0 
+	    && [string compare $tmax $expmax] != 0} {
+		send_user "\nERROR: --cpu-freq=$opt -- test max $tmax not expected ($expmax)\n"
+		log_user 1
+		return 1
+	}
+	
+	if {[string compare $sgov $egov] != 0} {
+		send_user "\nERROR: --cpu-freq=$opt -- starting governor $sgov not reset ($egov)\n"
+		log_user 1
+		return 1
+	}
+	if {[string compare $smin $emin] != 0} {
+		send_user "\nERROR: --cpu-freq=$opt -- starting min $smin not reset ($emin)\n"
+		log_user 1
+		return 1
+	}
+	if {[string compare $smax $emax] != 0} {
+		send_user "\nERROR: --cpu-freq=$opt -- starting max $smax not reset ($emax)\n"
+		log_user 1
+		return 1
+	}
+
+	log_user 1
+	return 0	
+}
+
+# Check environment
+if {[test_cpu_affinity_or_cgroup] == 0} {
+	send_user "\nWARNING: This test requires some form of task affinity\n"
+	exit 0
+}
+if {[test_fast_schedule] != 0} {
+	send_user "\nWARNING: FastSchedule=0 is recommended.\n"
+	send_user "         Proceeding assuming that the number of cpu declared is accurate.\n"
+}
+log_user 0
+spawn $scontrol show config
+expect {
+	-re "CpuFreqGovernors *= ($alpha_numeric_comma)" {
+		if {[string first "OnDemand" $expect_out(1,string)] == -1} {
+			set have_on_demand 0
+		}
+		if {[string first "UserSpace" $expect_out(1,string)] == -1} {
+			set have_user_space 0
+		}
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+# Identify a node to use
+spawn $srun --exclusive -n1 env
+expect
+wait
+set pos [string first "SLURMD_NODENAME=" $expect_out(buffer)]
+set tmp [string range $expect_out(buffer) $pos+16 end]
+set pos [string first "\n" $tmp]
+set test_node [string range $tmp 0 $pos-1]
+set test_node [string trim $test_node "\r"]
+
+# See how many cpu's on that node.
+# We will assume the tests will run on the highest numbered cpu
+spawn $scontrol show node $test_node
+expect
+wait
+set lines [split $expect_out(buffer) "\n"]
+set ll [llength $lines]
+if {$ll < 2} {
+	send_user "\nFAILURE: scontrol node failed\n"
+	exit 1
+}
+set line [lindex $lines 1]
+set line [string trim $line]
+set opts [split $line " "]
+if {[llength $opts] < 3} {
+	send_user "\nFAILURE: scontrol node output is bad\n"
+	exit 1
+} 
+set opt3 [lindex $opts 2]
+if {[string first "CPUTot" $opt3] != 0} {
+	send_user "\nFAILURE: CPUTot not found in scontrol node output\n"
+	exit 1
+}
+set test_cpu [string range $opt3 7 end]
+incr test_cpu -1
+send_user "test node is $test_node, test_cpu is $test_cpu\n"
+
+# Verify that selected cpu on selected node supports cpu_frequency
+spawn $srun -w $test_node --exclusive ./test1.76.bash $test_cpu
+expect
+wait
+set pos [string first "not supported" $expect_out(buffer)]
+if {$pos > 0} {
+	send_user "\nWARNING: Test requires frequency scaling\n"
+	exit 0
+}
+set pos [string first "userspace" $expect_out(buffer)]
+if {$pos < 0} {
+	set have_user_space 0
+}
+set pos [string first "ondemand" $expect_out(buffer)]
+if {$pos < 0} {
+	set have_on_demand 0
+}
+
+set lines [split $expect_out(buffer) "\n"]
+set ll [llength $lines]
+for {set lx 0} {$lx < $ll} {incr lx} {
+	set line [lindex $lines $lx]
+	set line [string trim $line "\r"]
+	set pos [string first "frequencies" $line]
+	if {$pos > 0} {
+		set tmp [string range $line 22 end]
+		set freqs [split $tmp " "]
+		set avail_freq [lsort $freqs]
+		set nfreq [llength $avail_freq]
+		break
+	}
+}
+incr nfreq -1
+if {$nfreq < 2} {
+	send_user "\nWARNING: Test requires at least 2 available frequencies.\n"
+	exit 0
+}
+if {$have_user_space == 0} {
+	send_user "\nWARNING: Test recommends UserSpace governor\n"
+	exit 0
+}
+if {$have_on_demand == 0} {
+	send_user "\nWARNING: Test recommends OnDemand governor\n"
+}
+
+set avail_freq {}
+set iy $nfreq
+incr iy -1
+for {set ix 0} {$ix<$nfreq} {incr ix} {
+	lappend avail_freq [lindex $freqs $iy]
+	incr iy -1
+}
+
+set xmx [expr ($nfreq - 1) / 2]
+set xlow [lindex $avail_freq 0]
+set xhigh [lindex $avail_freq $nfreq-1]
+set xhighm1 [lindex $avail_freq $nfreq-2]
+set xmed [lindex $avail_freq $xmx]
+
+incr nerr [test_case userspace 0 0 0 userspace]
+if {$have_on_demand == 1} {
+	incr nerr [test_case ondemand 0 0 0 ondemand]
+}
+incr nerr [test_case Low $xlow $xlow 0 userspace]
+incr nerr [test_case High 0 $xhigh $xhigh userspace]
+incr nerr [test_case HighM1 0 $xhighm1 0 userspace]
+incr nerr [test_case Medium 0 $xmed 0 userspace]
+incr nerr [test_case $xmed 0 $xmed 0 userspace]
+if {$nfreq > 3} {
+	set mxx [expr $nfreq - 2]
+	set minfrq [lindex $avail_freq 1]
+	set maxfrq [lindex $avail_freq $mxx]
+	set opt "$minfrq-$maxfrq"
+	incr nerr [test_case $opt $minfrq 0 $maxfrq 0]
+	set opt "$opt:userspace"
+	incr nerr [test_case $opt $minfrq 0 0 userspace]
+}
+
+if {$nerr != 0} {
+	send_user "\n\nFAILURE: $nerr test cases failed.\n"
+	set exit_code 1
+} else {
+	send_user "\nSUCCESS\n"
+	exec $bin_rm -f $wd/test1.76.out
+}
+exit $exit_code
diff --git a/testsuite/expect/test1.76.bash b/testsuite/expect/test1.76.bash
new file mode 100755
index 000000000..4ae5dd97c
--- /dev/null
+++ b/testsuite/expect/test1.76.bash
@@ -0,0 +1,27 @@
+#!/bin/bash
+#
+# It is assumed this script runs on cpu $1
+# 
+# Assumptions for embedded values.
+# o the running slurm is idle
+# o TaskPlugin=task/cgroup
+
+if test -f "/sys/devices/system/cpu/cpu$1/cpufreq/scaling_governor"
+	then
+		echo "scaling frequency is supported"
+	else
+		echo "scaling frequency not supported"
+		exit 0
+fi
+		
+sleep 1
+smin=$(cat /sys/devices/system/cpu/cpu$1/cpufreq/scaling_min_freq)
+scur=$(cat /sys/devices/system/cpu/cpu$1/cpufreq/scaling_cur_freq)
+smax=$(cat /sys/devices/system/cpu/cpu$1/cpufreq/scaling_max_freq)
+sgov=$(cat /sys/devices/system/cpu/cpu$1/cpufreq/scaling_governor)
+govs=$(cat /sys/devices/system/cpu/cpu$1/cpufreq/scaling_available_governors)
+freqs=$(cat /sys/devices/system/cpu/cpu$1/cpufreq/scaling_available_frequencies)
+
+echo "available_governors $govs"
+echo "available_frequencies $freqs"
+echo "scaling_values: gov=$sgov min=$smin cur=$scur max=$smax"
diff --git a/testsuite/expect/test1.76.batch b/testsuite/expect/test1.76.batch
new file mode 100755
index 000000000..bc1a8188c
--- /dev/null
+++ b/testsuite/expect/test1.76.batch
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# It is assumed this script runs on cpu $1
+# $2 is the --cpu-freq parameter
+# $3 is full path to test directory
+# $4 is the full patch to srun
+
+# Get current value of scaling parameters,
+# Then set, and get new values.
+# Finally get reset values
+cd $3
+echo "Starting values"
+$4 $3/test1.76.bash $1
+echo "srun --cpu-freq=$2 test1.76.bash $1"
+$4 --cpu-freq=$2 $3/test1.76.bash $1
+echo "Ending values"
+$4 $3/test1.76.bash $1
diff --git a/testsuite/expect/test1.77 b/testsuite/expect/test1.77
new file mode 100755
index 000000000..b9011e777
--- /dev/null
+++ b/testsuite/expect/test1.77
@@ -0,0 +1,145 @@
+#!/usr/bin/expect
+################################################################################
+# Purpose: Test of SLURM functionality
+#          Sets the job name environment variable, and changes it using srun,
+#          sbatch and salloc.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2015 SchedMD LLC
+# Written by Martin Thomas <mt@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id	"1.77"
+set exit_code	0
+set job_id      0
+set number      "\[0-9\]+"
+set name        "banana"
+set my_cmd      ./mycmd
+set file_in     "test$test_id.in"
+
+# Set env path to SLURM_JOB_NAME
+set env(SLURM_JOB_NAME) "zebra"
+
+print_header $test_id
+
+make_bash_script $file_in "
+/usr/bin/env|grep SLURM_JOB_NAME
+"
+
+#
+# Tests -J by using srun
+#
+set found 0
+set srun_pid [spawn $srun -J $name $file_in]
+expect {
+        "SLURM_JOB_NAME=$name" {
+            set found 1
+	}
+	timeout {
+		send_user "\nFAILURE: srun not responding\n"
+		slow_kill $srun_pid
+		exit 1
+	}
+	eof {
+		wait
+	}
+}
+
+if  {$found != 1} {
+	send_user "\nFAILURE: COULD NOT FIND SLURM_JOB_NAME=banana\n"
+	exit 1
+}
+send_user "SUCCESS\n"
+
+#
+# Tests -J using sbatch
+#
+set found 0
+set sbatch_pid [spawn $sbatch -J $name --wrap=env]
+expect {
+	-re "Submitted batch job ($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+
+
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch not responding\n"
+		slow_kill $sbatch_pid
+		exit 1
+	}
+	eof {
+		wait
+	}
+}
+set wait_file [wait_for_file slurm-$job_id.out]
+spawn $bin_grep SLURM_JOB_NAME=banana slurm-$job_id.out
+expect {
+        -re "SLURM_JOB_NAME=($alpha_numeric)" {
+            send_user "Job name matched $expect_out(1,string)\n"
+            set found 1
+            exp_continue
+	}
+	eof {
+		wait
+	}
+}
+
+if  {$found != 1} {
+	send_user "\nFAILURE: COULD NOT FIND SLURM_JOB_NAME=banana\n"
+	exit 1
+}
+if {$exit_code == 0} {
+	exec $bin_rm -f slurm-$job_id.out
+	send_user "SUCCESS\n"
+}
+exec rm -f slurm-$job_id.out
+
+#
+# Tests -J using salloc
+#
+set found 0
+set salloc_pid [spawn $salloc -J $name ./$file_in]
+expect {
+        "SLURM_JOB_NAME=$name" {
+		set found 1
+	}
+	timeout {
+		send_user "\nFAILURE: salloc not responding\n"
+		slow_kill $salloc_pid
+		exit 1
+	}
+	eof {
+		wait
+	}
+}
+
+if  {$found != 1} {
+	send_user "\nFAILURE: COULD NOT FIND SLURM_JOB_NAME=banana\n"
+	exit 1
+}
+exec rm -f $file_in
+send_user "SUCCESS\n"
+exit $exit_code
diff --git a/testsuite/expect/test1.91 b/testsuite/expect/test1.91
index 8acac4189..f98b96b20 100755
--- a/testsuite/expect/test1.91
+++ b/testsuite/expect/test1.91
@@ -520,7 +520,7 @@ expect {
 if {$exit_code == 0} {
 	exec $bin_rm -f $file_prog
 	send_user "\nSUCCESS\n"
-} elseif { [test_fast_schedule_2] } {
+} elseif { [test_fast_schedule] == 2 } {
 	exec $bin_rm -f $file_prog
 	send_user "\nNOTE: This test can fail if the node configuration in slurm.conf\n"
 	send_user "  (sockets, cores, threads) differs from the actual configuration\n"
diff --git a/testsuite/expect/test1.92 b/testsuite/expect/test1.92
index b7499bffd..0a20da11d 100755
--- a/testsuite/expect/test1.92
+++ b/testsuite/expect/test1.92
@@ -66,6 +66,7 @@ expect {
 	}
 	-re "(configuration is not available|Unable to submit batch job|Node count specification invalid|More processors requested than permitted)" {
 		send_user "\nWARNING: can't test srun task distribution\n"
+		file delete $file_bash
 		exit 0
 	}
 	-re $prompt {
@@ -117,11 +118,13 @@ expect {
 
 if {$node_cnt != 2} {
 	send_user "\nWARNING: need 2 nodes to perform test\n"
+	file delete $file_bash
 	exit $exit_code
 }
 
 if {$task_cnt < (2 * $node_cnt)} {
         send_user "\nWARNING: need at least 2 CPUs per node, test is not applicable\n"
+	file delete $file_bash
         exit $exit_code
 }
 
@@ -302,4 +305,3 @@ if {$exit_code == 0} {
 }
 file delete $file_bash
 exit $exit_code
-
diff --git a/testsuite/expect/test1.97 b/testsuite/expect/test1.97
index b66251034..833f647ca 100755
--- a/testsuite/expect/test1.97
+++ b/testsuite/expect/test1.97
@@ -372,6 +372,6 @@ check_tasks_off $ntaskpn [array get tasks] 2
 
 if {$exit_code == 0} {
 	exec $bin_rm $file_in
-	send_user "\nSUCCCESS\n"
+	send_user "\nSUCCESS\n"
 }
 exit $exit_code
diff --git a/testsuite/expect/test1.99 b/testsuite/expect/test1.99
index 0b10389ce..6df7c9c9a 100755
--- a/testsuite/expect/test1.99
+++ b/testsuite/expect/test1.99
@@ -287,6 +287,6 @@ update_conf
 
 if {$exit_code == 0} {
 	exec $bin_rm $file_in $tmp_job $cwd/slurm.conf.orig
-	send_user "\nSUCCCESS\n"
+	send_user "\nSUCCESS\n"
 }
 exit $exit_code
diff --git a/testsuite/expect/test11.5 b/testsuite/expect/test11.5
index 41075e20b..e1f2c0bd4 100755
--- a/testsuite/expect/test11.5
+++ b/testsuite/expect/test11.5
@@ -259,6 +259,8 @@ if {$matches != 1} {
 #
 cancel_job $job_id
 if {$exit_code == 0} {
+#	The checkpoint file, if any, has the same name as the job step ID
+	exec $bin_rm -rf $job_id.$step_id
 	exec $bin_rm -f $file_in
 	send_user "\nSUCCESS\n"
 }
diff --git a/testsuite/expect/test12.1 b/testsuite/expect/test12.1
index d82d22960..8b1517275 100755
--- a/testsuite/expect/test12.1
+++ b/testsuite/expect/test12.1
@@ -59,7 +59,7 @@ expect {
 		incr matches
 		exp_continue
 	}
-	-re "--uid" {
+	-re "--duplicates" {
 		incr matches
 		exp_continue
 	}
@@ -77,7 +77,7 @@ if {$not_support != 0} {
 	exit 0
 }
 if {$matches != 3} {
-	send_user "\nFAILURE: sacct --help failed ($matches)\n"
+	send_user "\nFAILURE: sacct --help failed ($matches != 3)\n"
 	set exit_code 1
 }
 
diff --git a/testsuite/expect/test12.2 b/testsuite/expect/test12.2
index b530146cc..429afbeb3 100755
--- a/testsuite/expect/test12.2
+++ b/testsuite/expect/test12.2
@@ -45,7 +45,7 @@ set aix         0
 
 # job parameters
 set mem_size    1024000
-set sleep_time  21
+set sleep_time  25
 set ret_code    42
 set file_size   10485760
 print_header $test_id
@@ -304,7 +304,7 @@ if {[wait_for_job $job_id "RUNNING"] != 0} {
 }
 
 # Wait for data to get logged
-exec $bin_sleep 10
+exec $bin_sleep 15
 
 if {[_get_mem $sstat] != 0} {
 	set exit_code 1
diff --git a/testsuite/expect/test12.2.prog.c b/testsuite/expect/test12.2.prog.c
old mode 100644
new mode 100755
index 02c600aca..c5e8e24fe
--- a/testsuite/expect/test12.2.prog.c
+++ b/testsuite/expect/test12.2.prog.c
@@ -25,13 +25,14 @@
  *  with SLURM; if not, write to the Free Software Foundation, Inc.,
  *  51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
 \*****************************************************************************/
+#include <fcntl.h>
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <unistd.h>
 #include <sys/types.h>
 #include <sys/stat.h>
-#include <fcntl.h>
+#include <time.h>
+#include <unistd.h>
 
 int
 main (int argc, char **argv)
@@ -43,6 +44,7 @@ main (int argc, char **argv)
 	int fd;
 	char *mem;
 	char *file_name;
+	time_t time_start = time(NULL);
 
 	if (argc != 6) {
 		fprintf(stderr,
@@ -88,6 +90,7 @@ main (int argc, char **argv)
 	}
 	close(fd);
 
+	sleep_time -= difftime(time(NULL), time_start);
 	sleep(sleep_time);
 	free(mem);
 
diff --git a/testsuite/expect/test12.3 b/testsuite/expect/test12.3
index 5f4704d4f..4cd98f563 100755
--- a/testsuite/expect/test12.3
+++ b/testsuite/expect/test12.3
@@ -146,7 +146,7 @@ expect {
 	}
 }
 if {$aamatches != 1} {
-	send_user "\nFAILURE:  sacctmgr had a problem adding account.\n"
+	send_user "\nFAILURE:  sacctmgr had a problem adding account ($aamatches != 1)\n"
 	exit 1
 }
 
diff --git a/testsuite/expect/test12.4 b/testsuite/expect/test12.4
index 20617f5f6..2365cb7c8 100755
--- a/testsuite/expect/test12.4
+++ b/testsuite/expect/test12.4
@@ -190,7 +190,6 @@ if { $using_slurmdbd } {
 	sleep 12
 }
 
-
 ################################################################
 #
 # Proc: sacct_job
@@ -213,279 +212,247 @@ proc sacct_job { soption job_id} {
 	send_user "sacct -$soption -p -j $job_id\n"
 
 	if { $soption == "-brief" || $soption == "b" } {
-
-	spawn $sacct -$soption -p -j $job_id
-	expect {
-		-re "SLURM accounting storage is disabled" {
-			set not_support 1
-			exp_continue
-		}
-		-re "JobID.State.ExitCode" {
-			if {$debug} {send_user "\nmatch1\n"}
-			incr matches
-			exp_continue
-		}
-		-re "$job_id" {
-			if {$debug} {send_user "\nmatch2\n"}
-			incr matches
-			exp_continue
-		}
-		timeout {
-			send_user "\nFAILURE: sacct not responding\n"
-			set exit_code 1
-		}
-		eof {
-			wait
+		spawn $sacct -$soption -p -j $job_id
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "JobID.State.ExitCode" {
+				if {$debug} {send_user "\nmatch1\n"}
+				incr matches
+				exp_continue
+			}
+			-re "$job_id" {
+				if {$debug} {send_user "\nmatch2\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sacct not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support != 0} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
 		}
-	}
-
-	if {$not_support != 0} {
-		send_user "\nWARNING: can not test without accounting enabled\n"
-		exit 0
-	}
-	if {$matches != 2} {
-		send_user "\nFAILURE: sacct -$soption failed ($matches)\n"
-		set exit_code 1
-	}
 		return $matches
 	}
 
 	if { $soption == "-long" || $soption == "l" } {
-
-	spawn $sacct -$soption -p -j $job_id
-	expect {
-		-re "SLURM accounting storage is disabled" {
-			set not_support 1
-			exp_continue
-		}
-		-re "JobID.JobName.Partition.MaxVMSize" {
-			if {$debug} {send_user "\nmatch3\n"}
-			incr matches
-			exp_continue
-		}
-		-re "MaxVMSizeNode.MaxVMSizeTask.AveVMSize.MaxRSS" {
-			if {$debug} {send_user "\nmatch4\n"}
-			incr matches
-			exp_continue
-		}
-		-re "MaxRSSNode.MaxRSSTask.AveRSS.MaxPages" {
-			if {$debug} {send_user "\nmatch5\n"}
-			incr matches
-			exp_continue
-		}
-		-re "MaxPagesNode.MaxPagesTask.AvePages.MinCPU" {
-			if {$debug} {send_user "\nmatch6\n"}
-			incr matches
-			exp_continue
-		}
-		-re "MinCPUNode.MinCPUTask.AveCPU.NTasks" {
-			if {$debug} {send_user "\nmatch7\n"}
-			incr matches
-			exp_continue
-		}
-		-re "AllocCPUS.Elapsed.State.ExitCode" {
-			if {$debug} {send_user "\nmatch8\n"}
-			incr matches
-			exp_continue
-		}
-		-re "$job_id" {
-			if {$debug} {send_user "\nmatch9\n"}
-			incr matches
-			exp_continue
-		}
-		timeout {
-			send_user "\nFAILURE: sacct not responding\n"
-			set exit_code 1
-		}
-		eof {
-			wait
+		spawn $sacct -$soption -p -j $job_id
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "JobID.JobName.Partition.MaxVMSize" {
+				if {$debug} {send_user "\nmatch3\n"}
+				incr matches
+				exp_continue
+			}
+			-re "MaxVMSizeNode.MaxVMSizeTask.AveVMSize.MaxRSS" {
+				if {$debug} {send_user "\nmatch4\n"}
+				incr matches
+				exp_continue
+			}
+			-re "MaxRSSNode.MaxRSSTask.AveRSS.MaxPages" {
+				if {$debug} {send_user "\nmatch5\n"}
+				incr matches
+				exp_continue
+			}
+			-re "MaxPagesNode.MaxPagesTask.AvePages.MinCPU" {
+				if {$debug} {send_user "\nmatch6\n"}
+				incr matches
+				exp_continue
+			}
+			-re "MinCPUNode.MinCPUTask.AveCPU.NTasks" {
+				if {$debug} {send_user "\nmatch7\n"}
+				incr matches
+				exp_continue
+			}
+			-re "AllocCPUS.Elapsed.State.ExitCode" {
+				if {$debug} {send_user "\nmatch8\n"}
+				incr matches
+				exp_continue
+			}
+			-re "$job_id" {
+				if {$debug} {send_user "\nmatch9\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sacct not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+		if {$not_support != 0} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
 		}
-	}
-
-	if {$not_support != 0} {
-		send_user "\nWARNING: can not test without accounting enabled\n"
-		exit 0
-	}
-
-	if {$matches != 7} {
-		send_user "\nFAILURE: sacct -$soption failed ($matches)\n"
-		set exit_code 1
-	}
 		return $matches
 	}
 
 	if { $soption == "-noheader" || $soption == "n" } {
-
-	spawn $sacct -$soption -p -j $job_id
-	expect {
-		-re "SLURM accounting storage is disabled" {
-			set not_support 1
-			exp_continue
-		}
-		-re "AllocCPUS|Account|AssocID|AveCPU|AvePages|AveRSS|AveVSize|BlockID	\
-			|Cluster|CPUTime|CPUTimeRAW|Elapsed	\
-			|Eligible|End|ExitCode|GID	\
-			|Group|JobID|JobName|NodeList	\
-			|MaxPages|MaxPagesNode|MaxPagesTask|MaxRSS	|
-			|MaxRSSNode|MaxRSSTask|MaxVSize|MaxVSizeNode	|
-			|MaxVSizeTask|MinCPU|MinCPUNode|MinCPUTask	|
-			|NCPUS|NNodes|NTasks|Priority	|
-			|Partition|QOS|QOSRAW|ReqCPUS	|
-			|Reserved|ResvCPU|ResvCPURAW|Start	|
-			|State|Submit|Suspended|SystemCPU	|
-			|Timelimit|TotalCPU|UID|User	|
-			|UserCPU|WCKey|WCKeyID" {
-			if {$debug} {send_user "\nmatch10\n"}
-			incr matches
-			exp_continue
-		}
-		-re "$job_id" {
-			if {$debug} {send_user "\nmatch11\n"}
-			incr matches
-			exp_continue
-		}
-		timeout {
-			send_user "\nFAILURE: sacct not responding\n"
-			set exit_code 1
+		spawn $sacct -$soption -p -j $job_id
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "AllocCPUS|Account|AssocID|AveCPU|AvePages|AveRSS|AveVSize|BlockID	\
+				|Cluster|CPUTime|CPUTimeRAW|Elapsed	\
+				|Eligible|End|ExitCode|GID	\
+				|Group|JobID|JobName|NodeList	\
+				|MaxPages|MaxPagesNode|MaxPagesTask|MaxRSS	|
+				|MaxRSSNode|MaxRSSTask|MaxVSize|MaxVSizeNode	|
+				|MaxVSizeTask|MinCPU|MinCPUNode|MinCPUTask	|
+				|NCPUS|NNodes|NTasks|Priority	|
+				|Partition|QOS|QOSRAW|ReqCPUS	|
+				|Reserved|ResvCPU|ResvCPURAW|Start	|
+				|State|Submit|Suspended|SystemCPU	|
+				|Timelimit|TotalCPU|UID|User	|
+				|UserCPU|WCKey|WCKeyID" {
+				if {$debug} {send_user "\nmatch10\n"}
+				incr matches
+				exp_continue
+			}
+			-re "$job_id" {
+				if {$debug} {send_user "\nmatch11\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sacct not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support != 0} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
 		}
-		eof {
-			wait
-		}
-	}
-
-	if {$not_support != 0} {
-		send_user "\nWARNING: can not test without accounting enabled\n"
-		exit 0
-	}
-	if {$matches != 1} {
-		send_user "\nFAILURE: sacct -$soption failed ($matches)\n"
-		set exit_code 1
-	}
 		return $matches
 	}
 
 	if { $soption == "-parsable" || $soption == "p" } {
-
-	spawn $sacct -$soption -p -j $job_id
-	expect {
-		-re "SLURM accounting storage is disabled" {
-			set not_support 1
-			exp_continue
-		}
-		-re "JobID\\|JobName\\|Partition\\|Account\\|AllocCPUS\\|State\\|ExitCode\\|" {
-			if {$debug} {send_user "\nmatch12\n"}
-			incr matches
-			exp_continue
-		}
-		-re "$job_id\\|" {
-			if {$debug} {send_user "\nmatch13\n"}
-			incr matches
-			exp_continue
-		}
-		timeout {
-			send_user "\nFAILURE: sacct not responding\n"
-			set exit_code 1
-		}
-		eof {
-			wait
+		spawn $sacct -$soption -p -j $job_id
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "JobID\\|JobName\\|Partition\\|Account\\|AllocCPUS\\|State\\|ExitCode\\|" {
+				if {$debug} {send_user "\nmatch12\n"}
+				incr matches
+				exp_continue
+			}
+			-re "$job_id\\|" {
+				if {$debug} {send_user "\nmatch13\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sacct not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support != 0} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
 		}
-	}
-
-	if {$not_support != 0} {
-		send_user "\nWARNING: can not test without accounting enabled\n"
-		exit 0
-	}
-	if {$matches != 2} {
-		send_user "\nFAILURE: sacct -$soption failed ($matches)\n"
-		set exit_code 1
-	}
 		return $matches
 	}
 
 	if { $soption == "-parsable2" || $soption == "P" } {
 
-	spawn $sacct -$soption -p -j $job_id
-	expect {
-		-re "SLURM accounting storage is disabled" {
-			set not_support 1
-			exp_continue
-		}
-		-re
-"JobID\\|JobName\\|Partition\\|Account\\|AllocCPUS\\|State\\|ExitCode *" {
-			if {$debug} {send_user "\nmatch14\n"}
-			incr matches
-			exp_continue
+		spawn $sacct -$soption -p -j $job_id
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "JobID\\|JobName\\|Partition\\|Account\\|AllocCPUS\\|State\\|ExitCode *" {
+				if {$debug} {send_user "\nmatch14\n"}
+				incr matches
+				exp_continue
+			}
+			-re "$job_id\\|" {
+				if {$debug} {send_user "\nmatch15\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sacct not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support != 0} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
 		}
-		-re "$job_id\\|" {
-			if {$debug} {send_user "\nmatch15\n"}
-			incr matches
-			exp_continue
-		}
-		timeout {
-			send_user "\nFAILURE: sacct not responding\n"
-			set exit_code 1
-		}
-		eof {
-			wait
-		}
-	}
-
-	if {$not_support != 0} {
-		send_user "\nWARNING: can not test without accounting enabled\n"
-		exit 0
-	}
-	if {$matches != 2} {
-		send_user "\nFAILURE: sacct -$soption failed ($matches)\n"
-		set exit_code 1
-	}
 		return $matches
 	}
 
 	if { $soption == "-verbose" || $soption == "v" } {
-
-	spawn $sacct -$soption -p -j $job_id
-	expect {
-		-re "SLURM accounting storage is disabled" {
-			set not_support 1
-			exp_continue
-		}
-		-re "sacct: Accounting storage SLURMDBD plugin loaded " {
-			if {$debug} {send_user "\nmatch16\n"}
-			incr matches
-			exp_continue
-		}
-		-re "JobID.JobName.Partition" {
-			if {$debug} {send_user "\nmatch17\n"}
-			incr matches
-			exp_continue
-		}
-		-re "Account.AllocCPUS.State.ExitCode" {
-			if {$debug} {send_user "\nmatch18\n"}
-			incr matches
-			exp_continue
-		}
-		-re "$job_id" {
-			if {$debug} {send_user "\nmatch19\n"}
-			incr matches
-			exp_continue
-		}
-		timeout {
-			send_user "\nFAILURE: sacct not responding\n"
-			set exit_code 1
-		}
-		eof {
-			wait
+		spawn $sacct -$soption -p -j $job_id
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "sacct: Accounting storage SLURMDBD plugin loaded " {
+				if {$debug} {send_user "\nmatch16\n"}
+				incr matches
+				exp_continue
+			}
+			-re "JobID.JobName.Partition" {
+				if {$debug} {send_user "\nmatch17\n"}
+				incr matches
+				exp_continue
+			}
+			-re "Account.AllocCPUS.State.ExitCode" {
+				if {$debug} {send_user "\nmatch18\n"}
+				incr matches
+				exp_continue
+			}
+			-re "$job_id" {
+				if {$debug} {send_user "\nmatch19\n"}
+				incr matches
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sacct not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support != 0} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
 		}
-	}
-
-	if {$not_support != 0} {
-		send_user "\nWARNING: can not test without accounting enabled\n"
-		exit 0
-	}
-	if {$matches != 4} {
-		send_user "\nFAILURE: sacct -$soption failed ($matches)\n"
-		set exit_code 1
-	}
 		return $matches
 	}
 
@@ -513,156 +480,289 @@ proc sacct_vargs { soption vargs job_id} {
 	send_user "sacct -$soption $vargs -p -j $job_id\n"
 
 	if { $soption == "g" || $soption == "-gid" || $soption == "-group" || $soption == "u" || $soption == "-uid" || $soption == "-user"} {
+		spawn $sacct -$soption $vargs -p -j $job_id
+		expect {
+			-re "SLURM accounting storage is disabled" {
+				set not_support 1
+				exp_continue
+			}
+			-re "JobID.JobName.Partition" {
+				if {$debug} {send_user "\nmatch20\n"}
+				incr matches
+				exp_continue
+			}
+			-re "Account.AllocCPUS.State.ExitCode" {
+				if {$debug} {send_user "\nmatch21\n"}
+				incr matches
+				exp_continue
+			}
+			-re "$job_id" {
+				incr matches
+				if {$debug} {send_user "\nmatch22\n"}
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: sacct not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$not_support != 0} {
+			send_user "\nWARNING: can not test without accounting enabled\n"
+			exit 0
+		}
+		return $matches
+	}
+}
+################################################################
 
-	spawn $sacct -$soption $vargs -p -j $job_id
+set using_slurmdbd [test_using_slurmdbd]
+
+set cluster [get_cluster_name]
+#
+# Identify the user and his current default account
+#
+set acct_name ""
+set user_name ""
+set user_gid ""
+spawn $bin_id -u -n
+expect {
+	 -re "($alpha_numeric_under)" {
+		set user_name $expect_out(1,string)
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+
+spawn $bin_id -u
+expect {
+	 -re "($alpha_numeric_under)" {
+		set user_gid $expect_out(1,string)
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+
+set s_pid [spawn $sacctmgr show user $user_name]
+expect {
+	-re "$user_name *($alpha_numeric_under)" {
+		set acct_name $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "FAILURE: sacctmgr add not responding\n"
+		slow_kill $s_pid
+		exit 1
+	}
+	eof {
+		wait
+	}
+}
+
+#
+# Use sacctmgr to add an account
+#
+set aamatches 0
+set sadd_pid [spawn $sacctmgr -i add account $test_acct cluster=$cluster]
+expect {
+	-re "Adding Account" {
+		incr aamatches
+		exp_continue
+	}
+	-re "Nothing new added" {
+		send_user "\nWARNING: vestigial account $test_acct found\n"
+		incr aamatches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr add not responding\n"
+		slow_kill $sadd_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$aamatches != 1} {
+	send_user "\nFAILURE:  sacctmgr had a problem adding account.\n"
+	exit 1
+}
+
+#
+# Add self to this new account
+#
+set sadd_pid [spawn $sacctmgr -i create user name=$user_name account=$test_acct cluster=$cluster]
+expect {
+	 timeout {
+		send_user "\nFAILURE: sacctmgr add not responding\n"
+		slow_kill $sadd_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+#
+# Spawn a job via srun using this account
+#
+set job_id 0
+spawn $srun -N1 -v --account=$test_acct $bin_id
+expect {
+	-re "launching ($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: srun not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_id == 0} {
+	send_user "\nFAILURE: did not get srun job_id3\n"
+	set exit_code 1
+} else {
+	set matches 0
+	spawn $scontrol show job $job_id
 	expect {
-		-re "SLURM accounting storage is disabled" {
-			set not_support 1
-			exp_continue
-		}
-		-re "JobID.JobName.Partition" {
-			if {$debug} {send_user "\nmatch20\n"}
-			incr matches
-			exp_continue
-		}
-		-re "Account.AllocCPUS.State.ExitCode" {
-			if {$debug} {send_user "\nmatch21\n"}
-			incr matches
-			exp_continue
-		}
-		-re "$job_id" {
+		 -re "Account=$test_acct" {
 			incr matches
-			if {$debug} {send_user "\nmatch22\n"}
 			exp_continue
 		}
 		timeout {
-			send_user "\nFAILURE: sacct not responding\n"
+			send_user "\nFAILURE: scontrol not responding\n"
 			set exit_code 1
 		}
 		eof {
 			wait
 		}
 	}
-
-	if {$not_support != 0} {
-		send_user "\nWARNING: can not test without accounting enabled\n"
-		exit 0
-	}
-	if {$matches != 3} {
-		send_user "\nFAILURE: sacct -$soption failed ($matches)\n"
+	if {$matches != 1} {
+		send_user "\nFAILURE: srun failed to use specified account\n"
 		set exit_code 1
 	}
-		return $matches
+
+	if { $using_slurmdbd } {
+		sleep 12
 	}
-}
-################################################################
 
-set matches [sacct_job b $job_id]
-if {$matches != 2} {
-	send_user "\nFAILURE: sacct -b failed ($matches != 2)\n"
-	set exit_code 1
-}
+	set matches [sacct_job b $job_id]
+	if {$matches != 2} {
+		send_user "\nFAILURE: sacct -b failed ($matches != 2)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_job -brief $job_id]
-if {$matches != 2} {
-	send_user "\nFAILURE: sacct --brief failed ($matches != 2)\n"
-	set exit_code 1
-}
+	set matches [sacct_job -brief $job_id]
+	if {$matches != 2} {
+		send_user "\nFAILURE: sacct --brief failed ($matches != 2)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_vargs g $user_gid $job_id]
-if {$matches != 3} {
-	send_user "\nFAILURE: sacct -g failed ($matches != 3)\n"
-	set exit_code 1
-}
+	set matches [sacct_vargs g $user_gid $job_id]
+	if {$matches != 3} {
+		send_user "\nFAILURE: sacct -g failed ($matches != 3)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_vargs -gid $user_gid $job_id]
-if {$matches != 3} {
-	send_user "\nFAILURE: sacct --gid failed ($matches != 3)\n"
-	set exit_code 1
-}
+	set matches [sacct_vargs -gid $user_gid $job_id]
+	if {$matches != 3} {
+		send_user "\nFAILURE: sacct --gid failed ($matches != 3)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_vargs -group $user_gid $job_id]
-if {$matches != 3} {
-	send_user "\nFAILURE: sacct --group failed ($matches != 3)\n"
-	set exit_code 1
-}
+	set matches [sacct_vargs -group $user_gid $job_id]
+	if {$matches != 3} {
+		send_user "\nFAILURE: sacct --group failed ($matches != 3)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_job l $job_id]
-if {$matches != 7} {
-	send_user "\nFAILURE: sacct --allusers failed ($matches != 7)\n"
-	set exit_code 1
-}
+	set matches [sacct_job l $job_id]
+	if {$matches != 7} {
+		send_user "\nFAILURE: sacct --allusers failed ($matches != 7)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_job -long $job_id]
-if {$matches != 7} {
-	send_user "\nFAILURE: sacct -l failed ($matches != 7)\n"
-	set exit_code 1
-}
+	set matches [sacct_job -long $job_id]
+	if {$matches != 7} {
+		send_user "\nFAILURE: sacct -l failed ($matches != 7)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_job n $job_id]
-if {$matches != 1} {
-	send_user "\nFAILURE: sacct -n failed ($matches != 1)\n"
-	set exit_code 1
-}
+	set matches [sacct_job n $job_id]
+	if {$matches != 1} {
+		send_user "\nFAILURE: sacct -n failed ($matches != 1)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_job -noheader $job_id]
-if {$matches != 1} {
-	send_user "\nFAILURE: sacct -n failed ($matches != 1)\n"
-	set exit_code 1
-}
+	set matches [sacct_job -noheader $job_id]
+	if {$matches != 1} {
+		send_user "\nFAILURE: sacct -n failed ($matches != 1)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_job p $job_id]
-if {$matches != 2} {
-	send_user "\nFAILURE: sacct -p failed ($matches != 2)\n"
-	set exit_code 1
-}
+	set matches [sacct_job p $job_id]
+	if {$matches != 2} {
+		send_user "\nFAILURE: sacct -p failed ($matches != 2)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_job -parsable $job_id]
-if {$matches != 2} {
-	send_user "\nFAILURE: sacct --parsable failed ($matches != 2)\n"
-	set exit_code 1
-}
+	set matches [sacct_job -parsable $job_id]
+	if {$matches != 2} {
+		send_user "\nFAILURE: sacct --parsable failed ($matches != 2)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_job P $job_id]
-if {$matches != 2} {
-	send_user "\nFAILURE: sacct -P failed ($matches != 2)\n"
-	set exit_code 1
-}
+	set matches [sacct_job P $job_id]
+	if {$matches != 2} {
+		send_user "\nFAILURE: sacct -P failed ($matches != 2)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_job -parsable2 $job_id]
-if {$matches != 2} {
-	send_user "\nFAILURE: sacct --parsable2 failed ($matches != 2)\n"
-	set exit_code 1
-}
+	set matches [sacct_job -parsable2 $job_id]
+	if {$matches != 2} {
+		send_user "\nFAILURE: sacct --parsable2 failed ($matches != 2)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_vargs u $user_name $job_id]
-if {$matches != 3} {
-	send_user "\nFAILURE: sacct -g failed ($matches != 3)\n"
-	set exit_code 1
-}
+	set matches [sacct_vargs u $user_name $job_id]
+	if {$matches != 3} {
+		send_user "\nFAILURE: sacct -g failed ($matches != 3)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_vargs -uid $user_name $job_id]
-if {$matches != 3} {
-	send_user "\nFAILURE: sacct --gid failed ($matches != 3)\n"
-	set exit_code 1
-}
+	set matches [sacct_vargs -uid $user_name $job_id]
+	if {$matches != 3} {
+		send_user "\nFAILURE: sacct --gid failed ($matches != 3)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_vargs -user $user_name $job_id]
-if {$matches != 3} {
-	send_user "\nFAILURE: sacct --group failed ($matches != 3)\n"
-	set exit_code 1
-}
+	set matches [sacct_vargs -user $user_name $job_id]
+	if {$matches != 3} {
+		send_user "\nFAILURE: sacct --group failed ($matches != 3)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_job v $job_id]
-if {$matches != 4} {
-	send_user "\nFAILURE: sacct -v failed ($matches != 4)\n"
-	set exit_code 1
-}
+	set matches [sacct_job v $job_id]
+	if {$matches != 4} {
+		send_user "\nFAILURE: sacct -v failed ($matches != 4)\n"
+		set exit_code 1
+	}
 
-set matches [sacct_job -verbose $job_id]
-if {$matches != 4} {
-	send_user "\nFAILURE: sacct --verbose failed ($matches != 4)\n"
-	set exit_code 1
+	set matches [sacct_job -verbose $job_id]
+	if {$matches != 4} {
+		send_user "\nFAILURE: sacct --verbose failed ($matches != 4)\n"
+		set exit_code 1
+	}
 }
 
 #
diff --git a/testsuite/expect/test12.6 b/testsuite/expect/test12.6
index 3ea2d40be..6c5ce29f3 100755
--- a/testsuite/expect/test12.6
+++ b/testsuite/expect/test12.6
@@ -38,52 +38,6 @@ set file_out    "test$test_id.output"
 set file_prog   "test$test_id.prog"
 set job_id      0
 
-proc get_jobacct_freq {} {
-	global scontrol
-	set task_freq 30
-	log_user 0
-	match_max -d 100000
-	spawn $scontrol show config
-	expect {
-		timeout {
-			send_user "\nCan't find JobAcctGatherFrequency in slurm.conf. Using $task_freq"
-			return $task_freq
-		}
-	}
-	wait
-	log_user 1
-	set opt ""
-	set lines [split $expect_out(buffer) "\n"]
-	set nl [llength $lines]
-	for {set lx 0} {$lx < $nl} {incr lx} {
-		set line [lindex $lines $lx]
-		if {[string first "JobAcctGatherFrequency" $line] != -1	} {
-			set poseq [string first "=" $line]
-			set opt [string range $line $poseq+2 end]
-			set opt [string tolower $opt]
-			break
-		}
-	}
-	if {$opt == ""} {
-		send_user "\nJobAcctGatherFrequency in slurm.conf. Using $task_freq"
-	}
-
-	set tokens [split $opt ","]
-	set nt [llength $tokens]
-	if {$nt == 1} {
-		if {[string first "task=" $opt] == -1} {
-			return $opt
-		}
-	}
-	for {set tx 0} {$tx < $nt} {incr tx} {
-		set opt [lindex $tokens $tx]
-		if {[string first "task=" $opt] != -1} {
-			return [string range $opt 5 end]
-		}
-	}
-	return task_freq
-}
-
 print_header $test_id
 
 if {[test_front_end]} {
@@ -111,7 +65,10 @@ if {$profile == 0} {
 send_user "\nacct_gather_profile/hdf5 plugin installed\n"
 send_user "Note: this test takes 3 minutes to run\n"
 
-set task_freq [get_jobacct_freq]
+set task_freq [get_job_acct_freq]
+if {$task_freq < 30} {
+	send_user "\nWarning: jobacct_gather_freq < 30 ($task_freq), results are unreliable\n"
+}
 
 #
 # Build a test program to put a known load on the system
@@ -122,12 +79,6 @@ exec $bin_chmod 700 $file_prog
 
 set timeout [expr $max_job_delay + 200]
 
-# Override task polling supplied by slurm.conf
-# I am not sure why we are doing this (but we are)
-# The original task_freq had a + 5 here.  That will always fail with a bad
-# allocation if you are enforcing memory limits, so we changed it to - 5
-# instead.  I don't think it really matters though.
-set task_freq [expr $task_freq - 5]
 set srun_pid [spawn $srun --acctg-freq=$task_freq --profile=task -t5 ./$file_prog]
 expect {
 	-re "SLURM_JobId=($number)" {
@@ -180,51 +131,70 @@ set lno 0
 set fd 0
 set last_et 0
 set fd [open $file_out "r"]
+set et_col -1
+set cpu_util_col -1
+set read_disk_col -1
 while {$fd > 0 && [gets $fd line] != -1} {
 	incr lno
-	if {$lno > 2} {
-		set tokens [split $line ","]
-		if {[llength $tokens] < 14} {
-			send_user "\nFAILURE: too few items on line $lno"
+
+	set tokens [split $line ","]
+
+	if {$lno == 1} {
+		set et_col [lsearch $tokens "ElapsedTime"]
+		set cpu_util_col [lsearch $tokens "CPUUtilization"]
+		set read_disk_col [lsearch $tokens "ReadMB"]
+
+		if {$et_col == -1} {
+			send_user "\nFAILURE: no ElapsedTime column found\n"
 			set exit_code 1
-			break;
 		}
-		set et [lindex $tokens 5]
-		set cur_et [expr $et - $last_et]
-		set last_et $et
-		set low_rd [expr 0.975 * 10 * $cur_et]
-		set hi_rd  [expr 1.025 * 10 * $cur_et]
-		if {$lno == 2 && $et < 30} {
-			send_user "\nWarning: jobacct_gather_freq < 30, results are unreliable\n"
-		}
-		if {$lno == 2 && $et < $task_freq} {
-			send_user "\nFAILURE: sample $et is not --acctg-freq=$task_freq\n"
+		if {$cpu_util_col == -1} {
+			send_user "\nFAILURE: no CPUUtilization column found\n"
 			set exit_code 1
 		}
-		set cputil [lindex $tokens 8]
-		# The range on cpu utilization is pretty wide
-		# Linux accounting resolution is only to one second, so in a
-		# typical 30 interval an extra second is 3%. The burn loop
-		# consumes a bit more that asked for. There is additional type
-		# managing the I/O portion. Slurm and linux also consume some
-		# cpu.
-		if {$cputil < 38.0} {
-			send_user "\nWarning: CPU Busy $cputil not near 40% on line $lno\n"
-			incr nerr
-		}
-		if {$cputil > 47.0} {
-			send_user "\nWarning: CPU Busy $cputil not near 40% on line $lno\n"
-			incr nerr
-		}
-		set rdmb [lindex $tokens 12]
-		if {$rdmb < $low_rd} {
-			send_user "\nWarning: Read Megabytes $rdmb not near 100.0 on line $lno\n"
-			incr nerr
+		if {$read_disk_col == -1} {
+			send_user "\nFAILURE: no ReadMB column found\n"
+			set exit_code 1
 		}
-		if {$rdmb  > $hi_rd} {
-			send_user "\nWarning: Read Megabytes $rdmb not near 100.0 on line $lno\n"
-			incr nerr
+
+		if {$exit_code} {
+			break;
 		}
+		continue;
+	}
+
+	set et [lindex $tokens $et_col]
+	set cur_et [expr $et - $last_et]
+	set last_et $et
+
+	if { $lno == 2 } {
+		continue;
+	}
+
+	if {$cur_et < $task_freq} {
+		send_user "\nWarning: Poll interval was only $cur_et instead of expected $task_freq on line $lno\n"
+		incr nerr
+	}
+
+	set cputil [lindex $tokens $cpu_util_col]
+	# The range on cpu utilization is pretty wide
+	# Linux accounting resolution is only to one second, so in a
+	# typical 30 interval an extra second is 3%. The burn loop
+	# consumes a bit more than asked for. There is additional time
+	# managing the I/O portion. Slurm and linux also consume some
+	# cpu.
+	if {$cputil < 38.0 || $cputil > 47.0 } {
+		send_user "\nWarning: CPU Busy $cputil not near 40% on line $lno\n"
+		incr nerr
+	}
+
+	set rdmb [lindex $tokens $read_disk_col]
+	set low_rd [expr 0.975 * 10 * $cur_et]
+	set hi_rd  [expr 1.025 * 10 * $cur_et]
+
+	if {$rdmb < $low_rd || $rdmb  > $hi_rd } {
+		send_user "\nWarning: Read Megabytes $rdmb not near 100.0 on line $lno\n"
+		incr nerr
 	}
 }
 close $fd
diff --git a/testsuite/expect/test12.7 b/testsuite/expect/test12.7
index 58ddd88fb..10fd3e84e 100755
--- a/testsuite/expect/test12.7
+++ b/testsuite/expect/test12.7
@@ -44,15 +44,14 @@ if { [test_super_user] == 0 } {
 	exit $exit_code
 }
 
-
-proc mod_state { state } {
+proc mod_state { state reason } {
 
 	global scontrol node exit_code
 
-	spawn $scontrol update nodename=$node state=$state
+	spawn $scontrol update nodename=$node state=$state reason=$reason
 	expect {
 		timeout {
-			send_user "\nFAILURE:  scontrol is not responding\n"
+			send_user "\nFAILURE: scontrol is not responding\n"
 			set exit_code 1
 		}
 		eof {
@@ -89,19 +88,23 @@ proc check_step { num } {
 	}
 }
 
+# Count the number of jobs and steps with a specific job ID and state
+# NOTE: Skip "extern" job container optionally spawned by "PrologFlags=contain"
 proc check_sacct_states { states log_it } {
 	global job_id sacct
 
 	log_user $log_it
 	set state_num 0
 	if { $log_it == 1 } {
-		spawn $sacct --job=$job_id --duplicates --parsable2 --start=today
+		spawn $sacct --job=$job_id --duplicates --parsable2 --start=today -o JobID,State
 	} else {
-		spawn $sacct --job=$job_id --duplicates --parsable2 --start=today --noheader --format=state
+		spawn $sacct --job=$job_id --duplicates --parsable2 --start=today --noheader -o JobID,State
 	}
 	expect {
-		-re ($states) {
-			incr state_num
+		-re "(\[0-9_\.a-z\]+)\\|($states)" {
+			if {[string first "extern" $expect_out(1,string)] == -1} {
+				incr state_num
+			}
 			exp_continue
 		}
 		timeout {
@@ -198,13 +201,13 @@ if {$job_id == 0} {
 wait_for_job $job_id RUNNING
 
 # Set the node that the job is running on to down
-mod_state "down"
+mod_state "down" "test$test_id"
 
 # Wait a little bit for node state to change
 sleep 5
 
 # Set the node back to resume
-mod_state "resume"
+mod_state "resume" "test$test_id"
 
 # Check the number of steps
 check_step 0
@@ -346,6 +349,6 @@ cancel_job $job_id
 
 if {$exit_code == 0} {
 	exec $bin_rm $file_in
-	send_user "\nSUCCCESS\n"
+	send_user "\nSUCCESS\n"
 }
 exit $exit_code
diff --git a/testsuite/expect/test14.10 b/testsuite/expect/test14.10
new file mode 100755
index 000000000..d712a17bb
--- /dev/null
+++ b/testsuite/expect/test14.10
@@ -0,0 +1,153 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Validate sbcast for a job step allocation (subset of job allocation).
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2015 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id    14.10
+set file_in1   "test$test_id.in1"
+set file_in2   "test$test_id.in2"
+set exit_code  0
+set job_id     0
+set hostlist   ""
+set node1      ""
+set node2      ""
+
+print_header $test_id
+
+if {[test_front_end] != 0} {
+	send_user "\nWARNING: This test is incompatible with front-end systems\n"
+	exit 0
+} elseif {[slurmd_user_root] == 0} {
+	send_user "\nWARNING: This test requires that the SlurmdUser be root\n"
+	exit 0
+}
+
+spawn $bin_bash -c "exec $sinfo -tidle -h -o%n | head -n2 |tr \'\n\' ' ' "
+expect {
+	-re "($alpha_numeric_under) ($alpha_numeric_under)" {
+		set node1 $expect_out(1,string)
+		set node2 $expect_out(2,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sinfo is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {![string compare $node1 ""] || ![string compare $node2 ""]} {
+	send_user "\nFAILURE: did not get usable hostlist\n"
+	exit 1
+}
+
+set hostlist "$node1,$node2"
+
+make_bash_script $file_in1 "
+$srun $bin_rm -f /tmp/$node1/test$test_id\_file
+$srun $bin_rm -fr /tmp/$node1
+$srun $bin_rm -f /tmp/$node2/test$test_id\_file
+$srun $bin_rm -fr /tmp/$node2
+
+$srun -w$node1 mkdir /tmp/$node1
+$srun -w$node2 mkdir /tmp/$node2
+
+$srun -N1 -n1 -r1 ./$file_in2
+
+echo -n \"Checking node 1: \"
+$srun -N1 -n1 -w$node2 ls /tmp/$node2/test$test_id\_file
+
+echo -n \"Checking node 0: \"
+$srun -N1 -n1 -w$node1 ls /tmp/$node1/test$test_id\_file
+
+$srun $bin_rm -f /tmp/$node1/test$test_id\_file
+$srun $bin_rm -fr /tmp/$node1
+$srun $bin_rm -f /tmp/$node2/test$test_id\_file
+$srun $bin_rm -fr /tmp/$node2
+"
+
+make_bash_script $file_in2 "
+$sbcast -f -j\$SLURM_JOBID.\$SLURM_STEPID $srun /tmp/\$SLURMD_NODENAME/test$test_id\_file
+"
+
+# Make allocations
+set matches 0
+spawn $salloc -N2 -w$hostlist -t1 ./$file_in1
+expect {
+	-re "(configuration is not available|Unable to submit batch job|Node count specification invalid|More processors requested than permitted)" {
+		send_user "\nWARNING: can't run this test\n"
+		exec $bin_rm -rf $file_in1 $file_in2
+		exit 0
+	}
+	-re "cannot create directory" {
+		send_user "\nThis error is expected when nodes share the "
+		send_user "same tmp directory\n"
+		exp_continue
+	}
+	-re "Granted job allocation ($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
+	-re "Checking node 1: */tmp/$node2/test$test_id\_file" {
+		incr matches
+		exp_continue
+	}
+	-re "Checking node 0: */tmp/$node1/test$test_id\_file" {
+		set matches -999
+		exp_continue
+	}
+	-re "Checking node 0: .*No such" {
+		send_user "\nThis error is expected\n"
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: salloc is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_id == 0} {
+	send_user "\nFAILURE: Test job submission failed\n"
+	exit 1
+}
+if {$matches != 2} {
+	send_user "\nFAILURE: sbcast did not copy the file to the correct nodes ($matches != 2)\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	exec $bin_rm -rf $file_in1 $file_in2
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test14.4 b/testsuite/expect/test14.4
index 11cf517bf..d0402b307 100755
--- a/testsuite/expect/test14.4
+++ b/testsuite/expect/test14.4
@@ -113,7 +113,7 @@ if {$job_id == 0} {
 #
 if {[wait_for_job $job_id "DONE"] != 0} {
 	send_user "\nFAILURE: waiting for job to complete\n"
-	exec $scancel --quiet $job_id
+	cancel_job $job_id
 	set exit_code 1
 }
 
diff --git a/testsuite/expect/test14.6 b/testsuite/expect/test14.6
index af50cb676..cb55666d4 100755
--- a/testsuite/expect/test14.6
+++ b/testsuite/expect/test14.6
@@ -102,19 +102,19 @@ set matches 0
 if {[wait_for_file $file_err] == 0} {
 	spawn $bin_cat $file_err
 	expect {
-		-re "force" {
+		-re "force *=" {
 			incr matches
 			exp_continue
 		}
-		-re "preserve" {
+		-re "preserve *=" {
 			incr matches
 			exp_continue
 		}
-		-re "modes" {
+		-re "modes *=" {
 			incr matches
 			exp_continue
 		}
-		-re "jobid" {
+		-re "jobid *=" {
 			incr matches
 			exp_continue
 		}
@@ -128,7 +128,7 @@ if {[wait_for_file $file_err] == 0} {
 	}
 }
 if {$matches != 4} {
-	send_user "\nFAILURE: sbcast --verbose option did not produced expected logging\n"
+	send_user "\nFAILURE: sbcast --verbose option did not produce expected logging ($matches != 4)\n"
 	set exit_code 1
 }
 
diff --git a/testsuite/expect/test14.8 b/testsuite/expect/test14.8
index b6572e3a5..6de9e0c2f 100755
--- a/testsuite/expect/test14.8
+++ b/testsuite/expect/test14.8
@@ -109,7 +109,7 @@ if {$job_id == 0} {
 #
 if {[wait_for_job $job_id "DONE"] != 0} {
 	send_user "\nFAILURE: waiting for job to complete\n"
-	exec $scancel --quiet $job_id
+	cancel_job $job_id
 	set exit_code 1
 }
 
diff --git a/testsuite/expect/test15.27 b/testsuite/expect/test15.27
index 7b749ab68..dab6cbe30 100755
--- a/testsuite/expect/test15.27
+++ b/testsuite/expect/test15.27
@@ -170,6 +170,6 @@ expect {
 check_alloc
 
 if {$exit_code == 0} {
-	send_user "\nSUCCCESS\n"
+	send_user "\nSUCCESS\n"
 }
 exit $exit_code
diff --git a/testsuite/expect/test15.4 b/testsuite/expect/test15.4
index 4ce7118b3..e51010059 100755
--- a/testsuite/expect/test15.4
+++ b/testsuite/expect/test15.4
@@ -81,7 +81,7 @@ expect {
 		set job_id $expect_out(1,string)
 		exp_continue
 	}
-	-re "(uid=.*\\)\r\n)" {
+	-re "(uid=.*\n)" {
 		set job_grp_info $expect_out(1,string)
 		set got_job_grps 1
 		exp_continue
diff --git a/testsuite/expect/test15.7 b/testsuite/expect/test15.7
index 30dc51dbb..afbc7db97 100755
--- a/testsuite/expect/test15.7
+++ b/testsuite/expect/test15.7
@@ -126,12 +126,13 @@ expect {
 		wait
 	}
 }
-cancel_job $job_id
-
 if {$matches != 5} {
 	send_user "\nFAILURE: Did not get constraints ($matches of 5)\n"
 	set exit_code 1
 }
+if {[cancel_job $job_id] != 0} {
+	set exit_code 1
+}
 if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
 }
diff --git a/testsuite/expect/test16.4 b/testsuite/expect/test16.4
index 6f07af891..fc6a4f6ec 100755
--- a/testsuite/expect/test16.4
+++ b/testsuite/expect/test16.4
@@ -153,7 +153,7 @@ expect {
 		exp_continue
 	}
 	timeout {
-		send_user \nFAILURE: sattach not responding\n"
+		send_user "\nFAILURE: sattach not responding\n"
 		set exit_code 1
 	}
 	eof {
diff --git a/testsuite/expect/test17.10 b/testsuite/expect/test17.10
index 4c094e1cf..b8e647fc3 100755
--- a/testsuite/expect/test17.10
+++ b/testsuite/expect/test17.10
@@ -129,12 +129,13 @@ expect {
 		wait
 	}
 }
-cancel_job $job_id
-
 if {$matches != 5} {
 	send_user "\nFAILURE: Did not get proper constraints\n"
 	set exit_code 1
 }
+if {[cancel_job $job_id] != 0} {
+	set exit_code 1
+}
 if {$exit_code == 0} {
 	exec $bin_rm -f $file_in
 	send_user "\nSUCCESS\n"
diff --git a/testsuite/expect/test17.11 b/testsuite/expect/test17.11
index 4b28ad448..6d451d791 100755
--- a/testsuite/expect/test17.11
+++ b/testsuite/expect/test17.11
@@ -36,8 +36,8 @@ source ./globals
 set test_id     "17.11"
 set exit_code   0
 set file_in     "test$test_id.input"
+set force_part  0
 set job_id      0
-set matches     0
 
 print_header $test_id
 
@@ -46,6 +46,12 @@ if {[test_alps]} {
         exit $exit_code
 }
 
+set def_part [default_partition]
+set shared [partition_shared $def_part]
+if {[string compare $shared "FORCE"] == 0} {
+	set force_part 1
+}
+
 #
 # Delete left-over input script
 # Build input script file
@@ -77,6 +83,7 @@ expect {
 #
 # Confirm shared and contiguous flag values
 #
+set matches 0
 if {$job_id != 0} {
 	spawn $scontrol show job $job_id
 	expect {
@@ -96,19 +103,21 @@ if {$job_id != 0} {
 		}
 		timeout {
 			send_user "\nFAILURE: scontrol not responding\n"
-			set exit_code   1
+			set exit_code 1
 		}
 		eof {
 			wait
 		}
 	}
-	cancel_job $job_id
+	if {[cancel_job $job_id] != 0} {
+		set exit_code 1
+	}
 	set job_id 0
 } else {
 	set exit_code   1
 }
 if {$matches != 2} {
-	send_user "\nFAILURE: Did not properly set shared and contiguous flag\n"
+	send_user "\nFAILURE: Did not properly set shared and contiguous flag ($matches != 2)\n"
 	set exit_code   1
 }
 
@@ -116,7 +125,6 @@ if {$matches != 2} {
 # Spawn a sbatch job with contiguous option only
 #
 set job_id  0
-set matches 0
 spawn $sbatch --contiguous --hold -t1 $file_in
 expect {
 	-re "Submitted batch job ($number)" {
@@ -135,21 +143,23 @@ expect {
 #
 # Confirm shared and contiguous flag values
 #
+set match_share 0
+set match_cont  0
 if {$job_id != 0} {
 	spawn $scontrol show job $job_id
 	expect {
 		-re "Shared=0" {
-			incr matches
+			set match_share 1
 			exp_continue
 		}
 		-re "Shared=OK" {
-			incr matches
+			set match_share 1
 			exp_continue
 		}
 		-re "Contiguous=($number)" {
 			set cont_val $expect_out(1,string)
 			if {$cont_val == 1} {
-				incr matches
+				set match_cont 1
 			}
 			exp_continue
 		}
@@ -161,15 +171,21 @@ if {$job_id != 0} {
 			wait
 		}
 	}
-	cancel_job $job_id
+	if {[cancel_job $job_id] != 0} {
+		set exit_code 1
+	}
 } else {
 	set exit_code 1
 }
-
-if {$matches != 2} {
-	send_user "\nFAILURE: Did not properly set shared and contiguous flags\n"
+if {$match_cont != 1} {
+	send_user "\nFAILURE: Did not properly set contiguous flag\n"
 	set exit_code 1
 }
+if {$force_part == 0 && $match_share != 1} {
+	send_user "\nFAILURE: Did not properly set shared flag\n"
+	set exit_code 1
+}
+
 if {$exit_code == 0} {
 	exec $bin_rm -f $file_in
 	send_user "\nSUCCESS\n"
diff --git a/testsuite/expect/test17.19 b/testsuite/expect/test17.19
index 27143d5be..9e3968db0 100755
--- a/testsuite/expect/test17.19
+++ b/testsuite/expect/test17.19
@@ -103,11 +103,13 @@ expect {
 		wait
 	}
 }
-cancel_job $job_id
 if {$matches != 1} {
 	send_user "\nFAILURE: sbatch failed to set partition properly\n"
 	exit 1
 }
+if {[cancel_job $job_id] != 0} {
+	set exit_code 1
+}
 if {$exit_code != 0} {
 	exit $exit_code
 }
@@ -140,7 +142,9 @@ expect {
 }
 if {$job_id != 0} {
 	send_user "\nFAILURE: sbatch submitted job to invalid partition name\n"
-	cancel_job $job_id
+	if {[cancel_job $job_id] != 0} {
+		set exit_code 1
+	}
 	set exit_code 1
 }
 if {$matches != 1} {
@@ -203,11 +207,13 @@ expect {
 		wait
 	}
 }
-cancel_job $job_id
 if {$matches != 1} {
 	send_user "\nFAILURE: sbatch failed to set partition properly\n"
 	exit 1
 }
+if {[cancel_job $job_id] != 0} {
+	set exit_code 1
+}
 
 if {$exit_code == 0} {
 	exec $bin_rm -f $file_in
diff --git a/testsuite/expect/test17.27 b/testsuite/expect/test17.27
index 6397bae73..06459b008 100755
--- a/testsuite/expect/test17.27
+++ b/testsuite/expect/test17.27
@@ -88,8 +88,8 @@ if {[test_launch_poe]} {
 spawn $sbatch -N$node_cnt -vv -t1 --output=$file_out $file_in
 expect {
 	-re "nodes *: ($number)" {
-		if {$expect_out(1,string) != 3} {
-			send_user "\nFAILURE: failed to process --nodes option\n"
+		if {$expect_out(1,string) != $node_cnt} {
+			send_user "\nFAILURE: failed to process --nodes option ($expect_out(1,string) != $node_cnt)\n"
 			set exit_code 1
 		}
 		exp_continue
diff --git a/testsuite/expect/test17.34 b/testsuite/expect/test17.34
index 933be1f66..0fb14021b 100755
--- a/testsuite/expect/test17.34
+++ b/testsuite/expect/test17.34
@@ -46,7 +46,7 @@ set exit_code  0
 #
 # exp_node = 0: job must only use the specified node
 # exp_node = 1: job must use more then specified node
-# exp_node = -1: job must fail because the job exceeds the number or cores
+# exp_node = -1: job must fail because the job exceeds the number of cores
 #
 #############################################################################
 proc core_spec_job {task node core_spec exp_nodes} {
@@ -57,7 +57,11 @@ proc core_spec_job {task node core_spec exp_nodes} {
 
 	# Determine the number of tasks that can be run
 	set cpu_used_by_spec [expr $thread_cnt * $core_spec]
-	set task_limit [expr $cpu_tot - $cpu_used_by_spec]
+	if {$cpu_tot > $cpu_used_by_spec} {
+		set task_limit [expr $cpu_tot - $cpu_used_by_spec]
+	} else {
+		set task_limit 1
+	}
 	set error_chk 0
 	spawn $sbatch -t1 -w$node -S$core_spec -n[expr abs($task_limit + $task)] -o$file_out $spec_in
 	expect {
@@ -179,6 +183,11 @@ if {![string compare $select_type "linear"]} {
 	send_user "\nWARNING: This test is incompatible with select/$select_type\n"
 	exit 0
 }
+set select_type_params [test_select_type_params]
+if {[string match "*CR_SOCKET*" $select_type_params]} {
+	send_user "\nWARNING: This test is incompatible with CR_SOCKET allocations\n"
+	exit 0
+}
 
 # Remove any vestigial files
 exec $bin_rm -f $file_in $file_out $spec_in
diff --git a/testsuite/expect/test17.35 b/testsuite/expect/test17.35
index 426ae3b35..55f4a6570 100755
--- a/testsuite/expect/test17.35
+++ b/testsuite/expect/test17.35
@@ -92,7 +92,17 @@ if {$match != $job_cnt} {
 	send_user "FAILURE: job count mismatch ($match != $job_cnt)\n"
 	set exit_code 1
 }
-exec $scancel --name=$file_in
+
+spawn $scancel --name=$file_in
+expect {
+	timeout {
+		send_user "\nFAILURE: scancel is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
 
 # Prevent zero divide
 if {$time_used == 0} {
diff --git a/testsuite/expect/test17.36 b/testsuite/expect/test17.36
index b59dcd6ec..e70bbbe5b 100755
--- a/testsuite/expect/test17.36
+++ b/testsuite/expect/test17.36
@@ -38,6 +38,7 @@ set test_part_3   "test$test_id\_part_3"
 set num_jobs      0
 set cr_core       0
 set cr_cpu        0
+set cr_socket     0
 set cpu_cnt       0
 set socket_cnt    0
 set thread_cnt    0
@@ -56,33 +57,24 @@ if {[is_super_user] == 0} {
 	send_user "\nWARNING: This test can't be run except as SlurmUser\n"
 	exit 0
 }
+set select_type_params [test_select_type_params]
+if {[string first "CR_SOCKET" $select_type_params] != -1} {
+	send_user "\nWARNING: This test is incompatible with CR_SOCKET allocations\n"
+	exit 0
+}
+if {[string first "CR_CORE" $select_type_params] != -1} {
+	set cr_core 1
+}
+if {[string first "CR_CPU" $select_type_params] != -1} {
+	set cr_cpu 1
+}
 
 proc cr_core_cpu { node } {
 
 	global cr_cpu cr_core core_cnt socket_cnt scontrol number exit_code
 	global cpu_cnt thread_cnt
 
-	# Determine if CR_CPU or CR_CORE is used
 	log_user 0
-	spawn $scontrol show config
-	expect {
-		-re "SelectTypeParameters *= .*CR_CPU" {
-			set cr_cpu 1
-			exp_continue
-		}
-		-re "SelectTypeParameters *= .*CR_CORE" {
-			set cr_core 1
-			exp_continue
-		}
-		timeout {
-			send_user "\nFAILURE: scontrol is not responding\n"
-			set exit_code 1
-		}
-		eof {
-			wait
-		}
-	}
-
 	spawn $scontrol show node $node
 	expect {
 		-re "CoresPerSocket=($number)" {
@@ -224,7 +216,7 @@ proc check_job { exp_num_jobs } {
 	if { $job_cnt != $exp_num_jobs } {
 		send_user "\nFAILURE: the number of possible jobs that could "
 		send_user "run were not reached ($job_cnt != $exp_num_jobs).\n"
-		send_user "This could be due to memory or other limits.\n"
+		send_user "FAILURE: This could be due to memory or other limits.\n"
 		set exit_code 1
 	}
 }
@@ -346,22 +338,25 @@ delete_part $test_part_1
 # Test partition with shared=YES:2
 #
 ####################################
-send_user "\n\nTest partition with shared=YES:2\n"
-
-# Make a new partition with shared=yes:2
-create_part $test_part_2 "YES:$shared_j_cnt" $node_name
-
-# Submit a job with shared (expect 2 jobs per core/CPU)
-set new_job_limit [expr $num_jobs * 2]
-sub_job "0-$new_job_limit" 1 $test_part_2
-check_job $new_job_limit
-cancel_job $job_id
-
-# Submit a job without shared (expect 1 job per core/CPU)
-sub_job "0-$num_jobs" 0 $test_part_2
-check_job $num_jobs
-cancel_job $job_id
-delete_part $test_part_2
+if { [test_gang] == 1 } {
+	send_user "\n\nTest partition with shared=YES:2 incompatible with gang scheduling\n"
+} else {
+	send_user "\n\nTest partition with shared=YES:2\n"
+#	Make a new partition with shared=yes:2
+	create_part $test_part_2 "YES:$shared_j_cnt" $node_name
+
+#	Submit a job with shared (expect 2 jobs per core/CPU)
+	set new_job_limit [expr $num_jobs * 2]
+	sub_job "0-$new_job_limit" 1 $test_part_2
+	check_job $new_job_limit
+	cancel_job $job_id
+
+#	Submit a job without shared (expect 1 job per core/CPU)
+	sub_job "0-$num_jobs" 0 $test_part_2
+	check_job $num_jobs
+	cancel_job $job_id
+	delete_part $test_part_2
+}
 
 ########################################
 #
diff --git a/testsuite/expect/test17.37 b/testsuite/expect/test17.37
index 6de52bbdc..8c54837dd 100755
--- a/testsuite/expect/test17.37
+++ b/testsuite/expect/test17.37
@@ -40,15 +40,10 @@ set exit_code     0
 
 print_header $test_id
 
-if {[test_serial]} {
-	send_user "\nWARNING: This test is incompatible with serial systems\n"
-	exit $exit_code
-}
-
 make_bash_script $script "sleep 10"
 
 # Submit a job to depend on
-spawn $sbatch -t1 -N2 -o/dev/null $script
+spawn $sbatch -t1 -o/dev/null $script
 expect {
 	-re "Node count specification invalid" {
 		send_user "\nWARNING: can't this test with less than two nodes\n"
@@ -66,7 +61,6 @@ expect {
 		wait
 	}
 }
-
 if { $job_id1 == 0 } {
 	send_user "\nFAILURE: sbatch did not submit job\n"
 	exit 1
@@ -75,7 +69,7 @@ if { $job_id1 == 0 } {
 wait_for_job $job_id1 RUNNING
 
 # Submit a job that depends on job above
-spawn $sbatch -t1 -N2 -dafternotok:$job_id1 -o/dev/null $script
+spawn $sbatch -t1 -dafternotok:$job_id1 -o/dev/null $script
 expect {
 	-re "Submitted batch job ($number)" {
 		set job_id2 $expect_out(1,string)
@@ -89,7 +83,6 @@ expect {
 		wait
 	}
 }
-
 if { $job_id2 == 0 } {
 	send_user "\nFAILURE: sbatch did not submit job\n"
 	exit 1
@@ -113,7 +106,6 @@ expect {
 		wait
 	}
 }
-
 if { $match != 1 } {
 	send_user "\nFAILURE: job $job_id1 did not exit with exit code 0\n"
 	set exit_code 1
@@ -139,14 +131,14 @@ expect {
 		wait
 	}
 }
-
 if { $match != 2 } {
 	send_user "\nFAILURE: job $job_id2 should be in pending state and "
 	send_user "should have DependencyNeverSatisfied for a reason\n"
 	set exit_code 1
 }
-
-cancel_job $job_id2
+if {[cancel_job $job_id2] != 0} {
+	set exit_code 1
+}
 
 if {$exit_code == 0} {
 	exec $bin_rm -f $script
diff --git a/testsuite/expect/test17.38 b/testsuite/expect/test17.38
index d9e7a0b52..59ed2fbaa 100755
--- a/testsuite/expect/test17.38
+++ b/testsuite/expect/test17.38
@@ -119,12 +119,33 @@ if {$elps_time < 59 || $elps_time > 121} {
 	set exit_code 1
 }
 
+# Job step gets signaled and exits. The batch script runs until timeout.
+spawn $scontrol show job $job_id
+expect {
+	-re "JobState=TIMEOUT" {
+		exp_continue
+	}
+	-re "JobState=" {
+		send_user "\nFAILURE: bad job exit state\n"
+		set exit_code 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
 # Remove output file so we do not get it mixed up with the new one
 exec $bin_rm -f $file_out
 
 ################Run sbatch with --signal=B:################
 send_user "\nStart --signal test to signal bash script\n"
 
+set job_id 0
 spawn $sbatch -t3 --signal=B:2@60 -o$file_out $file_in
 expect {
 	-re "Submitted batch job ($number)" {
@@ -178,6 +199,27 @@ if {$elps_time < 59 || $elps_time >121} {
 	set exit_code 1
 }
 
+# Job gets signaled and exits.
+# Without job exit code of zero, it is treated as a job failure.
+spawn $scontrol show job $job_id
+expect {
+	-re "JobState=FAILED" {
+		exp_continue
+	}
+	-re "JobState=" {
+		send_user "\nFAILURE: bad job exit state\n"
+		set exit_code 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
 if {$exit_code == 0} {
 	exec rm -f $file_prog $file_in $file_out
 	send_user "\nSUCCESS\n"
diff --git a/testsuite/expect/test17.39 b/testsuite/expect/test17.39
new file mode 100755
index 000000000..8e971f68a
--- /dev/null
+++ b/testsuite/expect/test17.39
@@ -0,0 +1,192 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Validates that the OR dependency option is enforced
+#          when a job runs to completion
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2015 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id       17.39
+set slow_id       0
+set fast_id       0
+set dep_id        0
+set slow_job      "test$test_id\_slow_sc"
+set fast_job      "test$test_id\_fast_sc"
+set exit_code     0
+
+print_header $test_id
+
+set select_type [test_select_type]
+if {![string compare $select_type "linear"]} {
+	set def_part_name [default_partition]
+	set nb_nodes [get_node_cnt_in_part $def_part_name]
+	if {$nb_nodes < 2} {
+		send_user "\nWARNING: This test is incompatible with select/linear and only one node\n"
+		exit $exit_code
+	}
+}
+
+make_bash_script $slow_job "sleep 120"
+make_bash_script $fast_job "sleep 30"
+
+proc check_state {id state} {
+	global squeue exit_code
+
+	set match 0
+	spawn $squeue --job=$id -o"%T" --noheader
+	expect {
+		-re "$state" {
+			incr match 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: squeue is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$match == 0} {
+		send_user "\nFAILURE: job $id is in the wrong state should be "
+		send_user "$state\n"
+		set exit_code 1
+	}
+}
+
+# Submit job 1 of 2
+spawn $sbatch -t3 -o/dev/null $slow_job
+expect {
+	-re "Submitted batch job ($number)" {
+		set slow_id $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$slow_id == 0} {
+	send_user "\nFAILURE: sbatch did not submit job\n"
+	exit 1
+}
+
+# Submit job 2 of 2
+spawn $sbatch -t3 -o/dev/null $fast_job
+expect {
+	-re "Node count specification invalid" {
+		send_user "\nWARNING: can't test with less than two nodes\n"
+		exit 0
+	}
+	-re "Submitted batch job ($number)" {
+		set fast_id $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$fast_id == 0} {
+	send_user "\nFAILURE: sbatch did not submit job\n"
+	exit 1
+}
+
+# Submit dependency job
+spawn $sbatch --dependency=afterok:$slow_id?afterok:$fast_id -o/dev/null $slow_job
+expect {
+	-re "Submitted batch job ($number)" {
+		set dep_id $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$dep_id == 0} {
+	send_user "\nFAILURE: sbatch did not submit job\n"
+	exit 1
+}
+
+# Check that dependent job is pending
+set match 0
+spawn $squeue --job=$dep_id -o"%t|%r" --noheader
+expect {
+	-re "PD|Dependency" {
+		incr match 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: squeue is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$match == 0} {
+	send_user "\nFAILURE: job $dep_id is in the wrong state should be "
+	send_user "state PENDING and have REASON Dependency\n"
+	set exit_code 1
+}
+
+# Wait for the fast job to finish after submitting dependent job
+wait_for_job $fast_id DONE
+
+# Wait for dependency job to start once the fast job is complete
+if {[wait_for_job $dep_id RUNNING]} {
+	send_user "\nFAILURE: job $dep_id should be running\n"
+	set exit_code 1
+}
+
+check_state $dep_id RUNNING
+
+# Slow job should still be running
+check_state $slow_id RUNNING
+
+# Cancel leftover jobs
+cancel_job $fast_id
+cancel_job $slow_id
+cancel_job $dep_id
+
+if {$exit_code == 0} {
+	exec $bin_rm -f $slow_job $fast_job
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test17.40 b/testsuite/expect/test17.40
new file mode 100755
index 000000000..5b4191837
--- /dev/null
+++ b/testsuite/expect/test17.40
@@ -0,0 +1,304 @@
+#!/usr/bin/env expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test that the thread-spec option in sbatch allocates the correct
+#          number of cores and that tasks spread over multiple nodes
+#          when there are not enough resources on one node.
+#
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2014-2015 SchedMD LLC
+# Written by Morris Jette <jette@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id    "17.40"
+set file_in    "test$test_id\.in"
+set file_out   "test$test_id\.out"
+set spec_in    "spec_thread_script\.in"
+set exit_code  0
+
+#############################################################################
+#
+# Checks that the node uses the correct number of specialized threads
+# and that the number of nodes the job uses is correct.
+#
+# exp_node = 0: job must only use the specified node
+# exp_node = 1: job must use more than the specified node
+# exp_node = -1: job must fail because the job exceeds the number of threads
+#
+#############################################################################
+proc thread_spec_job {task node thread_spec exp_nodes} {
+	global sbatch scontrol spec_in file_out number thread_cnt exit_code
+	global cpu_tot
+	set job_id 0
+	set num_nodes 0
+
+	# Determine the number of tasks that can be run
+	set cpu_used_by_spec $thread_spec
+	if {$cpu_tot > $cpu_used_by_spec} {
+		set task_limit [expr $cpu_tot - $cpu_used_by_spec]
+	} else {
+		set task_limit 1
+	}
+	set error_chk 0
+	spawn $sbatch -t1 -w$node --thread-spec=$thread_spec -n[expr abs($task_limit + $task)] -o$file_out $spec_in
+	expect {
+		-re "Submitted batch job ($number)" {
+			set job_id $expect_out(1,string)
+			exp_continue
+		}
+		-re "error" {
+			if {$exp_nodes != -1} {
+				send_user "\nFAILURE: sbatch should not have produced an error\n"
+				set exit_code 1
+			}
+			set error_chk 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sbatch is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$job_id == 0 && $error_chk == 0} {
+		send_user "\nFAILURE: Job was not submitted\n"
+		exit 1
+
+	} elseif {$exp_nodes == -1 && $job_id != 0} {
+		send_user "\nFAILURE: This job should have failed but did not\n"
+		exit 1
+
+	} elseif {$exp_nodes == -1 && $error_chk != 0} {
+		send_user "\nThis error is expected do not worry\n"
+
+	} else {
+		set thread_chk 0
+		if {[wait_for_job $job_id "RUNNING"] != 0} {
+			send_user "\nFAILURE: waiting for job to start\n"
+			set exit_code 1
+		}
+		spawn $scontrol show job $job_id
+		expect {
+			-re "NumNodes=($number)" {
+				set num_nodes $expect_out(1,string)
+				exp_continue
+			}
+			-re "ThreadSpec=$thread_spec" {
+				set thread_chk 1
+				exp_continue
+			}
+			timeout {
+				send_user "\nFAILURE: scontrol is not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+
+		if {$thread_chk == 0} {
+			send_user "\nFAILURE: Job $job_id does not have the correct number of specialized threads\n"
+			set exit_code 1
+		}
+
+		if {[wait_for_job $job_id "DONE"] != 0} {
+			send_user "\nFAILURE: waiting for job to complete\n"
+			set exit_code 1
+		}
+	}
+
+	if {$exp_nodes == 1} {
+		if {$num_nodes <= 1} {
+			send_user "\nFAILURE: Job $job_id should use more than 1 node\n"
+			set exit_code 1
+		}
+	}
+
+	if {$exp_nodes == 0} {
+		if {$num_nodes != 1} {
+			send_user "\nFAILURE: Job $job_id should use only $node\n"
+			set exit_code 1
+		}
+	}
+}
+
+#############################################################################
+#
+# Tests begin here
+#
+#############################################################################
+
+print_header $test_id
+
+log_user 0
+set allow_spec 0
+spawn $scontrol show config
+expect {
+	-re "AllowSpecResourcesUsage = ($number)" {
+		set allow_spec $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+log_user 1
+if {$allow_spec == 0} {
+	send_user "WARNING: AllowSpecResourcesUsage not configured to permit thread specialization\n"
+	exit $exit_code
+}
+
+set select_type [test_select_type]
+if {![string compare $select_type "linear"] || ![string compare $select_type "serial"]} {
+	send_user "\nWARNING: This test is incompatible with select/$select_type\n"
+	exit 0
+}
+set select_type_params [test_select_type_params]
+if {[string match "*CR_SOCKET*" $select_type_params]} {
+	send_user "\nWARNING: This test is incompatible with CR_SOCKET allocations\n"
+	exit 0
+}
+
+# Remove any vestigial files
+exec $bin_rm -f $file_in $file_out $spec_in
+
+make_bash_script $file_in "
+first=\$($scontrol show hostnames \$SLURM_JOB_NODELIST\ | head -n1)\
+
+$scontrol show node \$first\
+
+"
+make_bash_script $spec_in "sleep 5"
+
+set job_id 0
+spawn $sbatch --exclusive -t1 -N2 -o$file_out $file_in
+expect {
+	-re "Node count specification invalid" {
+		send_user "\nWARNING: can't test srun task distribution\n"
+		exit $exit_code
+	}
+	-re "Submitted batch job ($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_id == 0} {
+	send_user "FAILURE: sbatch did not submit job\n"
+	exit 1
+}
+
+if {[wait_for_file $file_out] != 0} {
+	send_user "\nFAILURE: output file was not created\n"
+	exit 1
+}
+
+set first_node ""
+set core_cnt   0
+set cpu_tot    1
+set socket_cnt 1
+set thread_cnt 1
+
+spawn $bin_cat $file_out
+expect {
+	-re "NodeName=($alpha_numeric_under)" {
+		set first_node $expect_out(1,string)
+		exp_continue
+	}
+	-re "CoresPerSocket=($number)" {
+		set core_cnt $expect_out(1,string)
+		exp_continue
+	}
+	-re "CPUTot=($number)" {
+		set cpu_tot $expect_out(1,string)
+		exp_continue
+	}
+	-re "Sockets=($number)" {
+		set socket_cnt $expect_out(1,string)
+		exp_continue
+	}
+	-re "ThreadsPerCore=($number)" {
+		set thread_cnt $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: cat is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+set thread_cnt [expr $thread_cnt * $core_cnt * $socket_cnt]
+if {$thread_cnt == 0} {
+	send_user "\nFAILURE: sbatch did not find the number of threads\n"
+	exit 1
+}
+if {$thread_cnt < 4} {
+	send_user "\nWARNING: thread count too low for testing ($thread_cnt < 4)\n"
+	exit $exit_code
+}
+
+#
+# Using the thread spec within the node limits
+#
+send_user "\n\nRun within the specified node\n"
+thread_spec_job  0 $first_node [expr $thread_cnt - 2] 0
+thread_spec_job -1 $first_node [expr $thread_cnt - 2] 0
+
+#
+# Using thread spec with more tasks than the node can handle. This should
+# cause the tasks to spread across multiple nodes as needed
+#
+send_user "\n\nSpread job across multiple nodes\n"
+thread_spec_job 1 $first_node [expr $thread_cnt - 2] 1
+thread_spec_job 1 $first_node [expr $thread_cnt - 1] 1
+
+#
+# Using thread spec with more threads than the specified node has
+#
+send_user "\n\nFail by trying to use more threads than exist\n"
+thread_spec_job 1 $first_node [expr $thread_cnt + 1] -1
+thread_spec_job 1 $first_node [expr $thread_cnt + 3] -1
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+	exec $bin_rm -f $file_in $file_out $spec_in
+}
+exit $exit_code
diff --git a/testsuite/expect/test2.24 b/testsuite/expect/test2.24
index 4bfe4f8c1..e12809119 100755
--- a/testsuite/expect/test2.24
+++ b/testsuite/expect/test2.24
@@ -78,8 +78,7 @@ expect {
 		    $option != "MULTIPLE_SLURMD" &&
 		    $option != "NEXT_JOB_ID" &&
 		    $option != "SLURM_CONF" &&
-		    $option != "SLURM_VERSION" &&
-		    $option != "SuspendTime"} {
+		    $option != "SLURM_VERSION"} {
 			set conf_val($option) $val
 			incr opt_cnt
 		}
diff --git a/testsuite/expect/test2.25 b/testsuite/expect/test2.25
new file mode 100755
index 000000000..03c527526
--- /dev/null
+++ b/testsuite/expect/test2.25
@@ -0,0 +1,337 @@
+#!/usr/bin/expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Validate that scontrol show assoc_mgr shows the data cached in
+#          the slurmctld
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2015 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id      2.25
+set test_acct    "test$test_id\_acct"
+set test_user    ""
+set exit_code    0
+
+print_header $test_id
+
+if {![test_super_user]} {
+	send_user "\nWARNING: This test can't be run except as SlurmUser or root\n"
+	exit 0
+}
+
+proc mod_assoc_vals { } {
+	global sacctmgr test_acct test_user exit_code
+
+	set modified 0
+	spawn $sacctmgr mod -i account $test_acct where user=$test_user set \
+	    GrpCpus=3 MaxJobs=2 MaxCPUs=6
+	expect {
+		-re "Modified account associations" {
+			set modified 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {!$modified} {
+		send_user "\nFAILURE: account limits were not modified\n"
+		exit 1
+	}
+}
+
+proc clear_assoc_vals { } {
+	global sacctmgr test_acct test_user exit_code
+
+	set modified 0
+	spawn $sacctmgr mod -i account $test_acct where user=$test_user set \
+	    GrpCpus=-1 MaxJobs=-1 MaxCPUs=-1
+	expect {
+		-re "Modified account associations" {
+			set modified 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {!$modified} {
+		send_user "\nFAILURE: account limits were not modified\n"
+		exit 1
+	}
+}
+
+proc delete_test_acct { } {
+	global test_acct sacctmgr exit_code
+
+	set deleted 0
+	spawn $sacctmgr delete -i account $test_acct
+	expect {
+		-re "Deleting accounts..." {
+			set deleted 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+}
+
+# Remove any vestigial accounts
+delete_test_acct
+
+# Get username
+spawn $bin_id -u -n
+expect {
+	-re "($alpha_numeric_under)" {
+		set test_user $expect_out(1,string)
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+
+if {[test_using_slurmdbd]} {
+	# Add test Account
+	set acct_added 0
+	spawn $sacctmgr add -i account $test_acct
+	expect {
+		-re "Adding Account" {
+			set acct_added 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {!$acct_added} {
+		send_user "\nFAILURE: could not add test account $test_acct\n"
+		exit 1
+	}
+
+	# Add user to test account
+	set user_added 0
+	spawn $sacctmgr add -i user $test_user account=$test_acct
+	expect {
+		-re "Associations" {
+			set user_added 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {!$user_added} {
+		send_user "\nFAILURE: could not add user to test account\n"
+		exit 1
+	}
+
+	set match 0
+	spawn $bin_bash -c "exec $scontrol -o show assoc_mgr | $bin_grep Account=$test_acct -A1| $bin_grep UserName=$test_user -A1"
+	expect {
+		-re " Account=$test_acct" {
+			incr match
+			exp_continue
+		}
+		-re " UserName=$test_user" {
+			incr match
+			exp_continue
+		}
+		-re " GrpTRES=" {
+			incr match
+			exp_continue
+		}
+		-re " MaxJobs=" {
+			incr match
+			exp_continue
+		}
+		-re " MaxTRESPJ=" {
+			incr match
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$match != 5} {
+		send_user "\nFAILURE: 1 scontrol show assoc_mgr did not show correct information ($match != 5).\n"
+		set exit_code 1
+	}
+
+	# Set association limits and check that controller is updated
+	mod_assoc_vals
+
+	set match 0
+	spawn $bin_bash -c "exec $scontrol -o show assoc_mgr | $bin_grep Account=$test_acct -A1| $bin_grep UserName=$test_user -A1"
+	expect {
+		-re " Account=$test_acct" {
+			incr match
+			exp_continue
+		}
+		-re " UserName=$test_user" {
+			incr match
+			exp_continue
+		}
+		-re " GrpTRES=cpu=3" {
+			incr match
+			exp_continue
+		}
+		-re " MaxJobs=2" {
+			incr match
+			exp_continue
+		}
+		-re " MaxTRESPJ=cpu=6" {
+			incr match
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$match != 5} {
+		send_user "\nFAILURE: 2 scontrol show assoc_mgr did not show correct information ($match != 5).\n"
+		set exit_code 1
+	}
+
+	# Clear association limits and check controller is updated
+	clear_assoc_vals
+
+	set match 0
+	set limit_match 0
+	spawn $bin_bash -c "exec $scontrol show assoc_mgr | $bin_grep Account=$test_acct -A1| $bin_grep UserName=$test_user -A1"
+	expect {
+		-re " Account=$test_acct" {
+			incr match
+			exp_continue
+		}
+		-re " UserName=$test_user" {
+			incr match
+			exp_continue
+		}
+		-re " GrpTRES=cpu=3" {
+			incr limit_match
+			exp_continue
+		}
+		-re " MaxJobs=2" {
+			incr limit_match
+			exp_continue
+		}
+		-re " MaxTRESPJ=cpu=6" {
+			incr limit_match
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$match != 2 || $limit_match != 0} {
+		send_user "\nFAILURE: 3 scontrol show assoc_mgr did not show correct information ($match != 2).\n"
+		set exit_code 1
+	}
+
+	delete_test_acct
+
+} else {
+
+	set match 0
+	spawn $scontrol show assoc_mgr
+	expect {
+		-re "Current Association Manager state" {
+			incr match
+			exp_continue
+		}
+		-re "No users currently cached in Slurm" {
+			incr match
+			exp_continue
+		}
+		-re "No associations currently cached in Slurm" {
+			incr match
+			exp_continue
+		}
+		-re "No QOS currently cached in Slurm" {
+			incr match
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$match != 4} {
+		send_user "\nFAILURE: 4 scontrol show assoc_mgr did not show correct information ($match != 4)\n"
+		set exit_code 1
+	}
+}
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test20.3 b/testsuite/expect/test20.3
index 3a92003ae..991a8e499 100755
--- a/testsuite/expect/test20.3
+++ b/testsuite/expect/test20.3
@@ -111,7 +111,7 @@ expect {
 	}
 }
 
-# we should get and error code here from qdel, but no message
+# we should get an error code here from qdel, but no message
 if {($matches != 0) || ($status == 0)} {
 	send_user "\nFAILURE: No error on attempt to cancel terminated job\n"
 	set exit_code 1
diff --git a/testsuite/expect/test20.6 b/testsuite/expect/test20.6
index 22fa479ed..7091f6eba 100755
--- a/testsuite/expect/test20.6
+++ b/testsuite/expect/test20.6
@@ -37,6 +37,10 @@ set exit_code    0
 
 print_header $test_id
 
+if {[file executable $qalter] == 0} {
+	send_user "\nWARNING: $qalter does not exist\n"
+	exit 0
+}
 if {[file executable $qsub] == 0} {
 	send_user "\nWARNING: $qsub does not exits\n"
 	exit 0
@@ -119,7 +123,9 @@ expect {
 # 1 for "y" and 0 for "n"
 check_rerun 0 $job_id
 
-cancel_job $job_id
+if {[cancel_job $job_id] != 0} {
+	set exit_code 1
+}
 
 if {$exit_code == 0} {
 	exec $bin_rm -f $file_in
diff --git a/testsuite/expect/test20.7 b/testsuite/expect/test20.7
index 8abf954bd..c9a633d75 100755
--- a/testsuite/expect/test20.7
+++ b/testsuite/expect/test20.7
@@ -81,6 +81,7 @@ proc check_output { path job_id } {
 	}
 }
 
+set job_id 0
 spawn $qsub -l walltime=1:00 -o /dev/null $file_in
 expect {
 	-re "($number)" {
@@ -114,7 +115,9 @@ expect {
 
 check_output $out_path $job_id
 
-cancel_job $job_id
+if {[cancel_job $job_id] != 0} {
+	set exit_code 1
+}
 
 if {$exit_code == 0} {
 	exec $bin_rm -f $file_in
diff --git a/testsuite/expect/test20.8 b/testsuite/expect/test20.8
index 246deda69..81cd11938 100755
--- a/testsuite/expect/test20.8
+++ b/testsuite/expect/test20.8
@@ -31,8 +31,8 @@
 source ./globals
 
 set test_id     "20.8"
-set file_in     "$test_id\.script"
-set job_name    "$test_id\.newname"
+set file_in     "test$test_id\.bash"
+set job_name    "test$test_id\.newname"
 set job_id      0
 set exit_code   0
 
@@ -108,6 +108,9 @@ expect {
 }
 
 check_name $job_id $job_name
+if {[cancel_job $job_id] != 0} {
+	set exit_code 1
+}
 
 if {$exit_code == 0} {
 	exec $bin_rm -f $file_in
diff --git a/testsuite/expect/test21.21 b/testsuite/expect/test21.21
index c28b14dbb..b2c063d02 100755
--- a/testsuite/expect/test21.21
+++ b/testsuite/expect/test21.21
@@ -33,171 +33,197 @@
 ############################################################################
 source ./globals
 source ./globals_accounting
-source ./inc21.21.1
-source ./inc21.21.2
-source ./inc21.21.3
-source ./inc21.21.4
+source ./inc21.21_tests
 
 set test_id     "21.21"
 set exit_code   0
+set test_qos    "test$test_id\_qos"
 set file_in     "test.$test_id.input"
 set ta		"test$test_id-account.1"
+set maxcpu      MaxCpus
+set maxcpu_num  0
+set grcpu       GrpCpus
+set grcpu_num   0
 set timeout     60
+set test_node   " "
+# cr_core = 1 / cr_cpu = 0
+set selectparam  0
+
+# test maxjob maxnode maxsubmit maxwall
+array set acct_mod_desc {}
+array set acct_mod_acct_vals {}
+array set acct_mod_assoc_vals {
+	grpnode           "-N 1"
+	grpwall           "-t 1"
+	grpcpus           ""
+	grpcpumins        ""
+	grpjobsub         "2 4"
+	grpcpurunmins     ""
+	maxnode           "-N 1"
+	maxwall           "-t 10"
+	maxcpus           ""
+	maxcpumins        ""
+	maxjobsub         "2 4"
+}
+
+array set acct_mod_assoc_test_vals {
+	grpnode      -1
+	grpwall      -1
+	grpcpus      -1
+	grpcpumins   -1
+	grpjob       -1
+	grpsubmit    -1
+	maxnode      -1
+	maxwall      -1
+	maxcpus      -1
+	maxcpumins   -1
+	maxjob       -1
+	maxsubmit    -1
+}
 
 print_header    $test_id
 
-proc _test_limits { } {
-	global file_in srun sbatch squeue scancel bin_id number bin_sleep bin_rm ta
-	# test maxjob maxnode maxsubmit maxwall
-	array set acct_mod_desc {}
-	array set acct_mod_acct_vals {}
-	array set acct_mod_assoc_vals {
-		maxjob     2
-		maxnode    1
-		maxsubmit  4
-		maxwall    10
-	}
-
-	incr exit_code [mod_acct $ta [array get acct_mod_desc] [array get acct_mod_assoc_vals] [array get acct_mod_acct_vals]]
-	if { $exit_code } {
-		return $exit_code
+# Determine what the selecttype param is
+if {[string first "CR_CORE" [test_select_type_params]] != -1} {
+	set selectparam 1
+}
+
+set got_node 0
+spawn $srun -N1 printenv SLURM_NODELIST
+expect {
+	-re "($alpha_numeric_under)" {
+		set test_node $expect_out(1,string)
+		set got_node 1
+		exp_continue
 	}
+	timeout {
+		send_user "\nFAILURE: srun is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
 
-	#
-	# Start sub-test: test for maxnode
-	#
-	inc21_21_1 $acct_mod_assoc_vals(maxnode)
-
-	#
-	# Start sub-test: test for maxnode+1
-	#
-	inc21_21_2 $acct_mod_assoc_vals(maxnode)
-
-	#
-	# Start sub-test: testing maxwall
-	#
-	inc21_21_3 $acct_mod_assoc_vals(maxwall)
-
-	#
-	# Start sub-test: testing maxwall+1
-	#
-	inc21_21_4 $acct_mod_assoc_vals(maxwall)
-
-	# This should overwrite the old file_in which has already been used,
-	# so no big deal.
-	make_bash_script $file_in "
-	$bin_sleep 5
-	"
-	
-	# test job max cnt and submit
-	for {set inx 0} {$inx < $acct_mod_assoc_vals(maxsubmit)} {incr inx} {
-		set job_id($inx) 0
-		set mypid [spawn $sbatch -N1 -n1 --account=$ta --output=/dev/null --error=/dev/null -t5 $file_in]
-		expect {
-			-re "Submitted batch job ($number)" {
-				set job_id($inx) $expect_out(1,string)
-				exp_continue
-			}
-			-re "Unable to contact" {
-				send_user "\nFAILURE: slurm appears to be down\n"
-				set exit_code 1
-				exp_continue
-			}
-			timeout {
-				send_user "\nFAILURE: sbatch not responding\n"
-				slow_kill $mypid
-				set exit_code 1
-			}
-			eof {
-				wait
-			}
-		}
+if {$got_node != 1} {
+	send_user "\nFAILURE: did not get node for testing\n"
+	exit 0
+}
 
-		if { !$job_id($inx) } {
-			send_user "\nFAILURE: sbatch didn't return jobid\n"
-			set exit_code 1
-			exit
-		}
+# Get the number of cpus on a node
 
-		if { $exit_code } {
-			break
-		}
+spawn $scontrol show node $test_node
+expect {
+	-re "CPUTot=($number)" {
+		set totcpus $expect_out(1,string)
+		exp_continue
 	}
-
-	if { $exit_code } {
-		return $exit_code
+	-re "ThreadsPerCore=($number)" {
+		set nthreads $expect_out(1,string)
+		exp_continue
 	}
-
-	# then submit one more over the limit and it should fail
-	set mypid [spawn $sbatch -N1 -n1 --account=$ta --output=/dev/null --error=/dev/null -t5 $file_in]
-	expect {
-		-re "Job violates accounting/QOS policy" {
-			send_user "\nThis error is expected, not a problem\n"
-			exp_continue
-		}
-		-re "Submitted batch job ($number)" {
-			send_user "\nFAILURE: job should not have run 3.\n"
-			set exit_code 1
-			exp_continue
-		}
-		-re "Unable to contact" {
-			send_user "\nFAILURE: slurm appears to be down\n"
-			set exit_code 1
-			exp_continue
-		}
-		timeout {
-			send_user "\nFAILURE: sbatch not responding\n"
-			slow_kill $mypid
-			set exit_code 1
-		}
-		eof {
-			wait
-		}
+	timeout {
+		send_user "\nFAILURE: scontrol is not responding\n"
+		set exit_code 1
 	}
+	eof {
+		wait
+	}
+}
+
+if {$totcpus == 0} {
+	send_user "\nFAILURE: no cpus were found\n"
+	exit 1
+} else {
+	# Set assoc CPU values
+	set acct_mod_assoc_vals(grpcpus) "-n [expr $totcpus - $nthreads]"
+	set acct_mod_assoc_vals(maxcpus) "-n [expr $totcpus - $nthreads]"
+	set acct_mod_assoc_vals(grpcpumins) "-n [expr $totcpus - $nthreads]"
+	set acct_mod_assoc_vals(maxcpumins) "-n [expr $totcpus - $nthreads]"
+	set acct_mod_assoc_vals(grpcpurunmins) "-n [expr $totcpus - $nthreads]"
+}
+
 
+
+
+proc assoc_setup { limit_type limit_val } {
+
+	global acct_mod_assoc_test_vals
+	global acct_mod_desc acct_mod_acct_vals acct_mod_assoc_vals ta
+
+	set exit_code 0
+	set new_limit [lindex $limit_val 1]
+
+	set acct_mod_assoc_test_vals($limit_type) $new_limit
+
+	set exit_code  [mod_acct $ta [array get acct_mod_desc] [array get acct_mod_assoc_test_vals] [array get acct_mod_acct_vals]]
 	if { $exit_code } {
 		return $exit_code
 	}
 
-	# sleep the Schedule cycle default is 4
-	sleep 4
+}
 
-	set matches 0
-	set mypid [spawn $squeue -o "\%i \%t \%r"]
-	expect {
-		-re "($job_id(2)|$job_id(3)).PD.AssocMaxJobsLimit" {
-			incr matches
-			exp_continue
-		}
-		-re "($job_id(0)|$job_id(1)).R.None" {
-			incr matches
-			exp_continue
-		}
-		timeout {
-			send_user "\nFAILURE: squeue not responding\n"
-			slow_kill $mypid
-			set exit_code 1
-		}
-		eof {
-			wait
-		}
-	}
+proc _test_limits { } {
+	global file_in srun sbatch squeue scancel bin_id number bin_sleep bin_rm ta maxjob_lim maxsub_lim
+	global acct_mod_desc acct_mod_acct_vals acct_mod_assoc_vals acct_mod_assoc_test_vals
+
+	set exit_code 0
+
+	# Test jobs within the association limits
+	foreach option [array names acct_mod_assoc_vals] {
+		send_user "\nSetting up association limit $option...\n"
+		if { [string compare $option "maxjobsub"] &&
+		     [string compare $option "grpjobsub"] } {
+			assoc_setup $option $acct_mod_assoc_vals($option)
+
+			if { ![string compare $option "grpwall"] } {
+				if { [inc21_21_grpwall $option \
+					  $acct_mod_assoc_vals($option)] } {
+					set exit_code 1
+					return $exit_code
+				}
+			} elseif { ![string compare -length 3 $option "grp"] } {
+				if { [inc21_21_grp_test $option \
+					  $acct_mod_assoc_vals($option)] } {
+					set exit_code 1
+					return $exit_code
+				}
+
+			} else {
+				#
+				# Test value within the association limit
+				#
+				if { [inc21_21_good $option \
+					  $acct_mod_assoc_vals($option)] } {
+
+					set exit_code 1
+					return $exit_code
+				}
+				#
+				# Test value over the association limit
+				#
+				if { [inc21_21_bad $option \
+					  $acct_mod_assoc_vals($option)] } {
+
+					set exit_code 1
+					return $exit_code
+				}
+			}
+			# Reset the limit
+			set acct_mod_assoc_test_vals($option) "-1"
 
-	spawn $scancel --quiet --account=$ta
-	expect {
-		eof {
-			wait
+		} else {
+			if { [inc21_21_submit_test $option] } {
+				set exit_code 1
+				return $exit_code
+			}
 		}
-	}
-
-
-	if { $matches != 4 } {
-		send_user "\nFAILURE: jobs are not in the expected state expected ($matches != 4)\n"
-		set exit_code 1
-		return $exit_code
+		# Reset usage
+		reset_account_usage "" $ta
 	}
 
 	return $exit_code
+
 }
 
 set select_type [test_select_type]
@@ -257,6 +283,22 @@ expect {
 	}
 }
 
+#
+# remove any vestigial account
+#
+set aamatches 0
+set sadd_pid [spawn $sacctmgr -i delete account $ta]
+expect {
+	timeout {
+		send_user "\nFAILURE: sacctmgr delete not responding\n"
+		slow_kill $sadd_pid
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
 #
 # Use sacctmgr to add an account
 #
@@ -301,6 +343,57 @@ expect {
 	}
 }
 
+#
+# Remove test QoS
+#
+set match 0
+spawn $sacctmgr -i delete qos $test_qos
+expect {
+	-re "Deleting QOS" {
+		set match 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr delete not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+set match 0
+spawn $sacctmgr -i create qos $test_qos
+expect {
+	-re "Adding QOS" {
+		set match 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$match != 1} {
+	send_user "\nFAILURE: $test_qos was not created\n"
+	exit 1
+}
+
+spawn $sacctmgr -i mod account $ta set qos=$test_qos
+expect {
+	timeout {
+		send_user "\nFAILURE: sacctmgr is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
 #
 # Spawn a job via salloc using this account
 #
@@ -423,7 +516,6 @@ if {$job_id == 0} {
 	}
 }
 
-
 #
 # Check to see if limits are enforced.
 #
@@ -459,6 +551,25 @@ if {$damatches != 1} {
 	set exit_code 1
 }
 
+#
+# Remove test QoS
+#
+set match 0
+spawn $sacctmgr -i delete qos $test_qos
+expect {
+	-re "Deleting QOS" {
+		set match 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr delete not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
 if {$exit_code == 0} {
 	exec $bin_rm -f $file_in
 	print_success $test_id
diff --git a/testsuite/expect/test21.24 b/testsuite/expect/test21.24
index 6a2a4cf76..e2906f5c0 100755
--- a/testsuite/expect/test21.24
+++ b/testsuite/expect/test21.24
@@ -83,20 +83,17 @@ set pba		pbatch
 set dw		DefaultWCKey
 set wk		WCKeys
 set fs		Fairshare
-set gm		GrpCPUMins
-set gc		GrpCPUs
+set gm		GrpTRESMins
+set gc		GrpTRES
 set gj		GrpJobs
 set gs		GrpSubmitJobs
-set gn		GrpNodes
 set gw		GrpWall
-set mm		MaxCPUMins
-set mp		MaxCPUMinsPerJob
-set mc		MaxCPUs
-set mu		MaxCPUsPerJob
+set mm		MaxTRESMins
+set mp		MaxTRESMinsPerJob
+set mc		MaxTRES
+set mu		MaxTRESPerJob
 set mj		MaxJobs
 set ms		MaxSubmitJobs
-set mn		MaxNodes
-set mnj		MaxNodesPerJob
 set mw		MaxWall
 set md		MaxWallDurationPerJob
 
@@ -108,14 +105,13 @@ array set acct_req {
 	description    appliedacct
 	organization   organization
 	fairshare      5678
-	grpcpumins     5500
-	grpcpus        50
+	grptresmins    cpu=5500
+	grptres        cpu=50,node=530
 	grpjobs        510
 	grpsubmitjobs  520
-	grpnodes       530
 	grpwall        300
-	maxcpus        540
-	maxcpumins     550000
+	maxtres        cpu=540,node=570
+	maxtresmins    cpu=550000
 	maxjobs        550
 	maxsubmitjobs  560
 	maxnodes       570
@@ -128,16 +124,14 @@ array set acct_req2 {
 	description    theoryacct
 	organization   theoryorg
 	fairshare      4321
-	grpcpumins     4000
-	grpcpus        40
+	grptresmins    cpu=4000
+	grptres        cpu=40,node=430
 	grpjobs        410
-	grpnodes       430
 	grpsubmitjobs  420
 	grpwall        240
-	maxcpumins     420000
-	maxcpus        440
+	maxtresmins    cpu=420000
+	maxtres        cpu=440,node=470
 	maxjobs        450
-	maxnodes       470
 	maxsubmitjobs  460
 	maxwall        280
 }
@@ -149,16 +143,14 @@ array set acct_req3 {
 	description    physicsacct
 	organization   physicsorg
 	fairshare      3240
-	grpcpumins     3300
-	grpcpus        30
+	grptresmins    cpu=3300
+	grptres        cpu=30,node=330
 	grpjobs        310
-	grpnodes       330
 	grpsubmitjobs  320
 	grpwall        180
-	maxcpumins     330000
-	maxcpus        340
+	maxtresmins    cpu=330000
+	maxtres        cpu=340,node=370
 	maxjobs        350
-	maxnodes       370
 	maxsubmitjobs  360
 	maxwall        210
 }
@@ -172,16 +164,14 @@ array set user_req {
 	defaultwckey   hole
 	wckey          latch,stone,turn
 	fairshare      3240
-	grpcpumins     3300
-	grpcpus        30
+	grptresmins    cpu=3300
+	grptres        cpu=30,node=330
 	grpjobs        310
-	grpnodes       330
 	grpsubmitjobs  320
 	grpwall        180
-	maxcpumins     330000
-	maxcpus        340
+	maxtresmins    cpu=330000
+	maxtres        cpu=340,node=370
 	maxjobs        350
-	maxnodes       370
 	maxsubmitjobs  360
 	maxwall        210
 }
@@ -195,16 +185,14 @@ array set user_req2 {
 	defaultwckey   stone
 	wckey          latch,hole,turn
 	fairshare      2375
-	grpcpumins     2000
-	grpcpus        20
+	grptresmins    cpu=2000
+	grptres        cpu=20,node=230
 	grpjobs        210
-	grpnodes       230
 	grpsubmitjobs  220
 	grpwall        120
-	maxcpumins     220000
-	maxcpus        240
+	maxtresmins    cpu=220000
+	maxtres        cpu=240,node=270
 	maxjobs        250
-	maxnodes       270
 	maxsubmitjobs  260
 	maxwall        140
 
@@ -219,16 +207,14 @@ array set user_req3 {
 	defaultwckey   latch
 	wckey          stone,hole,turn
 	fairshare      1000
-	grpcpumins     1100
-	grpcpus        10
+	grptresmins    cpu=1100
+	grptres        cpu=10,node=140
 	grpjobs        120
-	grpnodes       140
 	grpsubmitjobs  130
 	grpwall        60
-	maxcpumins     110000
-	maxcpus        150
+	maxtresmins    cpu=110000
+	maxtres        cpu=150,node=180
 	maxjobs        160
-	maxnodes       180
 	maxsubmitjobs  170
 	maxwall        70
 
@@ -241,15 +227,13 @@ set user_req3(defaultaccount) $ta2
 # Clusters
 array set clus_req {
 	fairshare 6789
-	grpcpus	  60
+	grptres	  cpu=60,node=630
 	grpjob	  610
 	grpsubmit 620
-	grpnodes  630
-	maxcpus   640
-	maxcpumin 660000
+	maxtres   cpu=640,node=670
+	maxtresmin cpu=660000
 	maxjobs   650
 	maxsubmit 660
-	maxnodes  670
 	maxwall   2880
 }
 # I couldn't figure out how to make a variable in an array to = the value of
@@ -416,7 +400,7 @@ while {[gets $tfile line] != -1} {
 	if {![string compare $line "No such file or directory"]} {
 		send_user "FAILURE: there was a problem with the sacctmgr command\n"
 		incr exit_code 1
-	} elseif {![string compare $line "$clu - '$tc3':$fs=$clus_req(fairshare):$gc=$clus_req(grpcpus):$gj=$clus_req(grpjob):$gn=$clus_req(grpnodes):$gs=$clus_req(grpsubmit):$mp=$clus_req(maxcpumin):$mu=$clus_req(maxcpus):$mj=$clus_req(maxjobs):$mnj=$clus_req(maxnodes):$ms=$clus_req(maxsubmit):$md=$clus_req(maxwall):$qs='$clus_req(qos)'"]} {
+	} elseif {![string compare $line "$clu - '$tc3':$fs=$clus_req(fairshare):$gc=$clus_req(grptres):$gj=$clus_req(grpjob):$gs=$clus_req(grpsubmit):$mp=$clus_req(maxtresmin):$mu=$clus_req(maxtres):$mj=$clus_req(maxjobs):$ms=$clus_req(maxsubmit):$md=$clus_req(maxwall):$qs='$clus_req(qos)'"]} {
 		send_user "match 1\n"
 		incr matches
 	} elseif {![string compare $line "$par - '$roo'"]} {
@@ -425,32 +409,32 @@ while {[gets $tfile line] != -1} {
 	} elseif {![string compare $line "$usr - '$roo':$dac='$roo':$al='$ala':$fs=1"]} {
 		send_user "match 3\n"
 		incr matches
-	} elseif {![string compare $line "$acc - '$ta4':$dsc='$acct_req(description)':$org='$acct_req(organization)':$fs=$acct_req(fairshare):$gm=$acct_req(grpcpumins):$gc=$acct_req(grpcpus):$gj=$acct_req(grpjobs):$gn=$acct_req(grpnodes):$gs=$acct_req(grpsubmitjobs):$gw=$acct_req(grpwall):$mp=$acct_req(maxcpumins):$mu=$acct_req(maxcpus):$mj=$acct_req(maxjobs):$mnj=$acct_req(maxnodes):$ms=$acct_req(maxsubmitjobs):$md=$acct_req(maxwall):$qs='$acct_req(qos)'"]} {
+	} elseif {![string compare $line "$acc - '$ta4':$dsc='$acct_req(description)':$org='$acct_req(organization)':$fs=$acct_req(fairshare):$gm=$acct_req(grptresmins):$gc=$acct_req(grptres):$gj=$acct_req(grpjobs):$gs=$acct_req(grpsubmitjobs):$gw=$acct_req(grpwall):$mp=$acct_req(maxtresmins):$mu=$acct_req(maxtres):$mj=$acct_req(maxjobs):$ms=$acct_req(maxsubmitjobs):$md=$acct_req(maxwall):$qs='$acct_req(qos)'"]} {
 		send_user "match 4\n"
 		incr matches
 	} elseif {![string compare $line "$par - '$ta4'"]} {
 		send_user "match 5\n"
 		incr matches
-	} elseif {![string compare $line "$usr - '$tu3':$dac='$user_req(defaultaccount)':$dw='$user_req(defaultwckey)':$al='$user_req(adminlevel)':$wk='$wckey_check':$fs=$user_req(fairshare):$gm=$user_req(grpcpumins):$gc=$user_req(grpcpus):$gj=$user_req(grpjobs):$gn=$user_req(grpnodes):$gs=$user_req(grpsubmitjobs):$gw=$user_req(grpwall):$mp=$user_req(maxcpumins):$mu=$user_req(maxcpus):$mj=$user_req(maxjobs):$mnj=$user_req(maxnodes):$ms=$user_req(maxsubmitjobs):$md=$user_req(maxwall):$qs='$user_req(qos)'"]} {
+	} elseif {![string compare $line "$usr - '$tu3':$dac='$user_req(defaultaccount)':$dw='$user_req(defaultwckey)':$al='$user_req(adminlevel)':$wk='$wckey_check':$fs=$user_req(fairshare):$gm=$user_req(grptresmins):$gc=$user_req(grptres):$gj=$user_req(grpjobs):$gs=$user_req(grpsubmitjobs):$gw=$user_req(grpwall):$mp=$user_req(maxtresmins):$mu=$user_req(maxtres):$mj=$user_req(maxjobs):$ms=$user_req(maxsubmitjobs):$md=$user_req(maxwall):$qs='$user_req(qos)'"]} {
 		# should come through 2 times
 		send_user "match 6\n"
 		incr matches
-	} elseif {![string compare $line "$acc - '$ta3':$dsc='$acct_req2(description)':$org='$acct_req2(organization)':$fs=$acct_req2(fairshare):$gm=$acct_req2(grpcpumins):$gc=$acct_req2(grpcpus):$gj=$acct_req2(grpjobs):$gn=$acct_req2(grpnodes):$gs=$acct_req2(grpsubmitjobs):$gw=$acct_req2(grpwall):$mp=$acct_req2(maxcpumins):$mu=$acct_req2(maxcpus):$mj=$acct_req2(maxjobs):$mnj=$acct_req2(maxnodes):$ms=$acct_req2(maxsubmitjobs):$md=$acct_req2(maxwall):$qs='$acct_req2(qos)'"]} {
+	} elseif {![string compare $line "$acc - '$ta3':$dsc='$acct_req2(description)':$org='$acct_req2(organization)':$fs=$acct_req2(fairshare):$gm=$acct_req2(grptresmins):$gc=$acct_req2(grptres):$gj=$acct_req2(grpjobs):$gs=$acct_req2(grpsubmitjobs):$gw=$acct_req2(grpwall):$mp=$acct_req2(maxtresmins):$mu=$acct_req2(maxtres):$mj=$acct_req2(maxjobs):$ms=$acct_req2(maxsubmitjobs):$md=$acct_req2(maxwall):$qs='$acct_req2(qos)'"]} {
 		send_user "match 7\n"
 		incr matches
 	} elseif {![string compare $line "$par - '$ta3'"]} {
 		send_user "match 8\n"
 		incr matches
-	} elseif {![string compare $line "$acc - '$ta2':$dsc='$acct_req3(description)':$org='$acct_req3(organization)':$fs=$acct_req3(fairshare):$gm=$acct_req3(grpcpumins):$gc=$acct_req3(grpcpus):$gj=$acct_req3(grpjobs):$gn=$acct_req3(grpnodes):$gs=$acct_req3(grpsubmitjobs):$gw=$acct_req3(grpwall):$mp=$acct_req3(maxcpumins):$mu=$acct_req3(maxcpus):$mj=$acct_req3(maxjobs):$mnj=$acct_req3(maxnodes):$ms=$acct_req3(maxsubmitjobs):$md=$acct_req3(maxwall):$qs='$acct_req3(qos)'"]} {
+	} elseif {![string compare $line "$acc - '$ta2':$dsc='$acct_req3(description)':$org='$acct_req3(organization)':$fs=$acct_req3(fairshare):$gm=$acct_req3(grptresmins):$gc=$acct_req3(grptres):$gj=$acct_req3(grpjobs):$gs=$acct_req3(grpsubmitjobs):$gw=$acct_req3(grpwall):$mp=$acct_req3(maxtresmins):$mu=$acct_req3(maxtres):$mj=$acct_req3(maxjobs):$ms=$acct_req3(maxsubmitjobs):$md=$acct_req3(maxwall):$qs='$acct_req3(qos)'"]} {
 		send_user "match 9\n"
 		incr matches
 	} elseif {![string compare $line "$par - '$ta2'"]} {
 		send_user "match 10\n"
 		incr matches
-	} elseif {![string compare $line "$usr - '$tu1':$dac='$user_req3(defaultaccount)':$dw='$user_req3(defaultwckey)':$wk='$wckey_check':$fs=$user_req3(fairshare):$gm=$user_req3(grpcpumins):$gc=$user_req3(grpcpus):$gj=$user_req3(grpjobs):$gn=$user_req3(grpnodes):$gs=$user_req3(grpsubmitjobs):$gw=$user_req3(grpwall):$mp=$user_req3(maxcpumins):$mu=$user_req3(maxcpus):$mj=$user_req3(maxjobs):$mnj=$user_req3(maxnodes):$ms=$user_req3(maxsubmitjobs):$md=$user_req3(maxwall):$qs='$user_req3(qos)'"]} {
+	} elseif {![string compare $line "$usr - '$tu1':$dac='$user_req3(defaultaccount)':$dw='$user_req3(defaultwckey)':$wk='$wckey_check':$fs=$user_req3(fairshare):$gm=$user_req3(grptresmins):$gc=$user_req3(grptres):$gj=$user_req3(grpjobs):$gs=$user_req3(grpsubmitjobs):$gw=$user_req3(grpwall):$mp=$user_req3(maxtresmins):$mu=$user_req3(maxtres):$mj=$user_req3(maxjobs):$ms=$user_req3(maxsubmitjobs):$md=$user_req3(maxwall):$qs='$user_req3(qos)'"]} {
 		send_user "match 11\n"
 		incr matches
-	} elseif {![string compare $line "$usr - '$tu2':$dac='$user_req2(defaultaccount)':$dw='$user_req2(defaultwckey)':$al='$user_req2(adminlevel)':$wk='$wckey_check':$fs=$user_req2(fairshare):$gm=$user_req2(grpcpumins):$gc=$user_req2(grpcpus):$gj=$user_req2(grpjobs):$gn=$user_req2(grpnodes):$gs=$user_req2(grpsubmitjobs):$gw=$user_req2(grpwall):$mp=$user_req2(maxcpumins):$mu=$user_req2(maxcpus):$mj=$user_req2(maxjobs):$mnj=$user_req2(maxnodes):$ms=$user_req2(maxsubmitjobs):$md=$user_req2(maxwall):$qs='$user_req2(qos)'"]} {
+	} elseif {![string compare $line "$usr - '$tu2':$dac='$user_req2(defaultaccount)':$dw='$user_req2(defaultwckey)':$al='$user_req2(adminlevel)':$wk='$wckey_check':$fs=$user_req2(fairshare):$gm=$user_req2(grptresmins):$gc=$user_req2(grptres):$gj=$user_req2(grpjobs):$gs=$user_req2(grpsubmitjobs):$gw=$user_req2(grpwall):$mp=$user_req2(maxtresmins):$mu=$user_req2(maxtres):$mj=$user_req2(maxjobs):$ms=$user_req2(maxsubmitjobs):$md=$user_req2(maxwall):$qs='$user_req2(qos)'"]} {
 		# should come through 2 times
 		send_user "match 12\n"
 		incr matches
diff --git a/testsuite/expect/test21.30 b/testsuite/expect/test21.30
index b1ea572b5..4b1e62ab8 100755
--- a/testsuite/expect/test21.30
+++ b/testsuite/expect/test21.30
@@ -94,6 +94,9 @@ set maxjobsub   MaxSubmitJobs
 set maxjobsub_num 2
 set time_spacing 1
 
+# cr_core = 1 / cr_cpu = 0
+set selectparam  0
+
 # mod qos
 array set mod_qos_vals {
 	GrpNodes        -1
@@ -170,6 +173,11 @@ proc check_state { job } {
 
 }
 
+# Determine what the selecttype param is
+if {[string first "CR_CORE" [test_select_type_params]] != -1} {
+	set selectparam 1
+}
+
 set got_node 0
 spawn $srun -N1 printenv SLURM_NODELIST
 expect {
@@ -319,13 +327,18 @@ if {[test_super_user] == 0} {
 }
 
 #
-# Some tests will not work properly when allocating whole nodes to jobs
+# Some tests will not work properly when allocating sockets or whole nodes to jobs
 #
 set select_type [test_select_type]
 if {![string compare $select_type "linear"] || [default_part_exclusive]} {
 	send_user "\nWARNING: This test is incompatible with exclusive node allocations\n"
 	exit 0
 }
+set select_type_params [test_select_type_params]
+if {[string match "*CR_SOCKET*" $select_type_params]} {
+	send_user "\nWARNING: This test is incompatible with CR_SOCKET allocations\n"
+	exit 0
+}
 
 # Remove any vestigial accounts or qos
 spawn $sacctmgr -i delete qos $qostest
@@ -598,6 +611,12 @@ set mod_qos_vals(MaxWall) "-1"
 #
 # Test Max CPUs Per User
 #
+
+# If CR_CORE set maxcpuspu a multiple number of threads
+if {$selectparam} {
+	set maxcpuspu_num [expr $maxcpuspu_num * $nthreads]
+}
+
 set mod_qos_vals(MaxCpusPerUser) $maxcpuspu_num
 mod_qos $qostest [array get mod_qos_vals]
 sleep $time_spacing
diff --git a/testsuite/expect/test21.31 b/testsuite/expect/test21.31
index 2bf204fb5..139c9a9e6 100755
--- a/testsuite/expect/test21.31
+++ b/testsuite/expect/test21.31
@@ -136,21 +136,21 @@ set resource1(cluster) "$cluster,$tc2"
 #
 # add a global resource designating multiple clusters
 #
-add_res $sr1 [array get resource1]
+add_resource $sr1 [array get resource1]
 
 #
 # Use sacctmgr list to verify the test global resource fields
 #
-if {[check_res_limits $sr1 [array get resource1_chck]] == 1} {
+if {[check_resource_limits $sr1 [array get resource1_chck]] == 1} {
 	send_user "\nFAILURE: list resource output is incorrect. \n"
 	incr exit_code 1
 }
 
 #
-# use scontrol show lic to verify the cluster license was created
+# use scontrol show license to verify the cluster license was created
 #
 set matches 0
-set my_pid [eval spawn $scontrol show lic]
+set my_pid [eval spawn $scontrol show license]
 expect {
 	-re "LicenseName=($sr1@$resource1(Server))" {
 		incr matches
@@ -178,18 +178,18 @@ if {$matches != 2} {
 #
 # modify resources
 #
-if {[mod_res $sr1 [array get resource1_chng]] == 1} {
+if {[mod_resource $sr1 [array get resource1_chng]] == 1} {
 	send_user "\nFAILURE: Resource modify not working ($matches != 1).\n"
 	incr exit_code 1
 }
 
 #
-# Use sacctmgr to list the test res modifications
+# Use sacctmgr to list the test resource modifications
 #
-# Use sacctmgr list to verify the test res fields
+# Use sacctmgr list to verify the test resource fields
 #
 set resource1_chck(count) $resource1_chng(count)
-if {[check_res_limits $sr1 [array get resource1_chck]] == 1} {
+if {[check_resource_limits $sr1 [array get resource1_chck]] == 1} {
 	send_user "\nFAILURE: list resource output is incorrect.\n"
 	incr exit_code 1
 }
@@ -198,7 +198,7 @@ if {[check_res_limits $sr1 [array get resource1_chck]] == 1} {
 # use scontrol to verify the modified cluster license
 #
 set matches 0
-set my_pid [eval spawn $scontrol show lic $sr1@$resource1(Server)]
+set my_pid [eval spawn $scontrol show license $sr1@$resource1(Server)]
 expect {
 	-re "LicenseName=$sr1@$resource1(Server)" {
 		incr matches
@@ -232,7 +232,7 @@ incr exit_code [remove_res $sr1]
 # use scontrol to verify cluster license was removed
 #
 set matches 0
-set my_pid [eval spawn $scontrol show lic]
+set my_pid [eval spawn $scontrol show license]
 expect {
 	-re "LicenseName=$sr1@$resource1(Server)" {
 		incr matches
@@ -254,19 +254,19 @@ if {$matches != 0} {
 #
 # add multiple global resources in a single call
 #
-add_res "$sr2,$sr3" [array get resource2]
+add_resource "$sr2,$sr3" [array get resource2]
 
 #
 # Use sacctmgr list to verify both global resources were added
 #
 # Check resource 2
-if {[check_res_limits $sr2 [array get resource2_chck]] == 1} {
+if {[check_resource_limits $sr2 [array get resource2_chck]] == 1} {
 	send_user "\nFAILURE: list resource output is incorrect.\n"
 	incr exit_code 1
 }
 
 # Check resource 3
-if {[check_res_limits $sr3 [array get resource2_chck]] == 1} {
+if {[check_resource_limits $sr3 [array get resource2_chck]] == 1} {
 	send_user "\nFAILURE: list resource output is incorrect.\n"
 	incr exit_code 1
 }
diff --git a/testsuite/expect/test21.32 b/testsuite/expect/test21.32
index 75ba1b787..c23872098 100755
--- a/testsuite/expect/test21.32
+++ b/testsuite/expect/test21.32
@@ -202,7 +202,7 @@ check_pre " "
 remove_qos $qos_names_str
 
 if {$exit_code == 0} {
-	send_user "\nSUCCCESS\n"
+	send_user "\nSUCCESS\n"
 } else {
 	send_user "\nFAILURE\n"
 }
diff --git a/testsuite/expect/test21.34 b/testsuite/expect/test21.34
new file mode 100755
index 000000000..5c42a584b
--- /dev/null
+++ b/testsuite/expect/test21.34
@@ -0,0 +1,441 @@
+#!/usr/bin/env expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test that partition and job qos limits are enforced when using
+#          the PartitionQos flag for the job's qos
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2015 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+source ./globals_accounting
+source  ./inc21.34_tests
+
+set test_id          21.34
+set exit_code        0
+set test_node        ""
+# Total cpus in test node
+set totcpus          0
+set nthreads         0
+set acct             test_acct
+set user_name        ""
+set test_part        "test$test_id\_part"
+set part_qos         "test$test_id\_part_qos"
+set job_qos          "test$test_id\_job_qos"
+set qostest          ""
+set grn              GrpNodes
+set grn_num          0
+set grcpu            GrpCpus
+set grcpu_num        0
+set grpcpumin        GrpCPUMins
+set grpcpumin_num    0
+# Set grpcpurunmin_num to multiple of CPUs per core to work with most configurations
+# Also make sure that it is at least 4 so we can add and subtract from it
+set grpcpurunmin     GrpCPURunMins
+set grpcpurunmin_num 40
+set grjobs           GrpJobs
+set grjobs_num       2
+set grpmem           GrpMem
+set grpmem_num       100
+set grsub            GrpSubmit
+set grsub_num        2
+set grpwall          GrpWall
+set grpwall_num      1
+set maxcpu           MaxCpus
+set maxcpu_num       0
+# Set maxcpumin_num to multiple of CPUs per core to work with most configurations
+set maxcpumin        MaxCPUMins
+set maxcpumin_num    0
+set maxwall          MaxWall
+set maxwall_num      2
+set maxcpuspu        MaxCPUSPerUser
+set maxcpuspu_num    2
+set maxnodes         MaxNodes
+set maxnode_num      0
+set maxnodespu       MaxNodesPerUser
+set maxnodespu_num   0
+set maxjobs          MaxJobs
+set maxjobs_num      2
+set maxjobsub        MaxSubmitJobs
+set maxjobsub_num    2
+set time_spacing     1
+
+# cr_core = 1 / cr_cpu = 0
+set selectparam      0
+set def_part         [default_partition]
+
+# mod qos
+array set mod_job_qos {
+	GrpNodes        -1
+	GrpCpus         -1
+	GrpJob          -1
+	GrpSubmit       -1
+	GrpCpuMin       -1
+	GrpCpuRunMin    -1
+	GrpMem          -1
+	GrpWall         -1
+	MaxCpus         -1
+	MaxNode         -1
+	MaxJobs         -1
+	MaxSubmitJobs   -1
+	MaxCpuMin       -1
+	MaxWall         -1
+	MaxCpusPerUser  -1
+	MaxNode         -1
+	MaxNodesPerUser -1
+}
+
+array set mod_part_qos {
+	GrpNodes        -1
+	GrpCpus         -1
+	GrpJob          -1
+	GrpSubmit       -1
+	GrpCpuMin       -1
+	GrpCpuRunMin    -1
+	GrpMem          -1
+	GrpWall         -1
+	MaxCpus         -1
+	MaxNode         -1
+	MaxJobs         -1
+	MaxSubmitJobs   -1
+	MaxCpuMin       -1
+	MaxWall         -1
+	MaxCpusPerUser  -1
+	MaxNode         -1
+	MaxNodesPerUser -1
+}
+
+print_header $test_id
+
+proc cleanup { } {
+
+	global acct job_qos part_qos scontrol sacctmgr test_part def_part
+	global exit_code
+
+	# Delete the test qos
+	set match 0
+	spawn $sacctmgr -i delete qos $job_qos,$part_qos
+	expect {
+		-re "Deleting" {
+			set match 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	#delete account
+	spawn $sacctmgr -i delete account $acct
+	expect {
+		-re "Deleting accounts" {
+			exp_continue
+		}
+		-re "Error" {
+			send_user "\nFAILURE: account was not deleted\n"
+			set exit_code 1
+		}
+		timeout {
+			send_user "\nFAILURE: sacctmgr is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	spawn $scontrol delete partitionname=$test_part
+	expect {
+		timeout {
+			send_user "\nFAILURE: scontrol is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {[string length $def_part]} {
+		spawn $scontrol update partitionname=$def_part default=yes
+		expect {
+			timeout {
+				send_user "\nFAILURE: scontrol is not responding\n"
+				set exit_code 1
+			}
+			eof {
+				wait
+			}
+		}
+	}
+}
+
+# Checks the state of the job
+proc check_state { job } {
+
+	global scontrol job_id exit_code
+
+	set state_match 0
+	spawn $scontrol show job $job
+	expect {
+		"JobState=PENDING" {
+			incr state_match
+		}
+		timeout {
+			send_user "\nFAILURE scontrol not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$state_match != 1} {
+		send_user "\nFAILURE job should be pending, but is not\n"
+		set exit_code 1
+	}
+
+}
+
+# Remove any vestigial data
+cleanup
+
+if { [test_account_storage] == 0 } {
+	send_user "\nWARNING: This test can't be run without a usable AccountStorageType\n"
+	exit 0
+} elseif { [test_enforce_limits] == 0 } {
+	send_user "\nWARNING: This test can't be run without a usable AccountingStorageEnforce\n"
+	exit 0
+}
+if { [test_limits_enforced] == 0 } {
+	send_user "\nWARNING: This test can't be run without enforcing limits\n"
+	exit 0
+}
+if {[test_super_user] == 0} {
+	send_user "\nWARNING Test can only be ran as SlurmUser\n"
+	exit 0
+}
+
+# Check to see that there are enough resources in the default partition
+set tmpc 0
+set tmpn 0
+spawn $scontrol show part [default_partition]
+expect {
+	-re "TotalCPUs=($number)" {
+		set tmpc [expr $expect_out(1,string) - 1]
+		exp_continue
+	}
+	-re "TotalNodes=($number)" {
+		set tmpn [expr $expect_out(1,string) - 1]
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$tmpc == 0 || $tmpn == 0} {
+	send_user "\nWARNING: not enough Nodes and/or CPUs\n"
+	exit 0
+}
+
+# Determine what the selecttype param is
+if {[string first "CR_CORE" [test_select_type_params]] != -1} {
+	set selectparam 1
+}
+
+# Get the number of nodes in the default partition
+set num_nodes [available_nodes [default_partition] "idle"]
+
+if {$num_nodes == 0} {
+	send_user "\nFAILURE: no cpus where found\n"
+	exit 1
+} else {
+	# Set QoS node values
+	set grn_num     $num_nodes
+	set maxnode_num $num_nodes
+	set maxnodespu_num $num_nodes
+}
+
+# Create 2 test qos
+add_qos $part_qos ""
+add_qos $job_qos ""
+
+# create a tmp partition to use for testing
+spawn $scontrol create partitionname=$test_part qos=$part_qos default=yes \
+    nodes=[available_nodes_hostnames $def_part]
+expect {
+	timeout {
+		send_user "\nFAILURE: scontrol is not responding\n"
+		set exit_code
+	}
+	eof {
+		wait
+	}
+}
+
+set got_node 0
+spawn $srun -N1 printenv SLURM_NODELIST
+expect {
+	-re "($alpha_numeric_under)" {
+		set test_node $expect_out(1,string)
+		set got_node 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: srun is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$got_node != 1} {
+	send_user "\nFAILURE: did not get node for testing\n"
+	exit 0
+}
+
+# Get the number of cpus on a node
+
+spawn $scontrol show node $test_node
+expect {
+	-re "CPUTot=($number)" {
+		set totcpus $expect_out(1,string)
+		exp_continue
+	}
+	-re "ThreadsPerCore=($number)" {
+		set nthreads $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$totcpus == 0} {
+	send_user "\nFAILURE: no cpus where found\n"
+	exit 1
+} else {
+	# Set QoS CPU values
+	set grcpu_num [expr $totcpus - $nthreads]
+	set grpcpumin_num $totcpus
+	set maxcpu_num [expr $totcpus - $nthreads]
+	set maxcpumin_num $totcpus
+}
+
+# Gets user
+spawn $bin_id -u -n
+expect {
+	-re "($alpha_numeric_under)" {
+		set user_name $expect_out(1,string)
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+
+# Add account with qos
+set acctmatch 0
+spawn $sacctmgr -i add account $acct qos=$job_qos
+expect {
+	-re "Adding Account" {
+		incr acctmatch
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+
+	}
+}
+if {$acctmatch != 1} {
+	send_user "\nFAILURE: sacctmgr had a problem adding the account\n"
+	cleanup
+	exit 1
+}
+
+# Add user to account
+spawn $sacctmgr -i create user name=$user_name account=$acct
+expect {
+	timeout {
+		send_user "\nFAILURE: sacctmgr not responding\n"
+	}
+	eof {
+		wait
+	}
+}
+
+
+send_user "\n========== Run limit test on partition's qos limits ==========\n\n"
+part_test
+
+#
+# Set partitionqos flag on job's qos
+#
+set changed 0
+spawn $sacctmgr -i mod qos $job_qos set flag=partitionqos
+expect {
+	-re "Modified qos" {
+		set changed 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr is not resonding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+
+send_user "\n========== Run limit test on job's qos limits ==========\n\n"
+qos_test
+
+cleanup
+
+if {$exit_code == 0} {
+	print_success $test_id
+} else {
+	send_user "\nFAILURE: test $test_id\n"
+}
+
+exit $exit_code
diff --git a/testsuite/expect/test22.1 b/testsuite/expect/test22.1
index f67b53063..1d30b6567 100755
--- a/testsuite/expect/test22.1
+++ b/testsuite/expect/test22.1
@@ -161,6 +161,8 @@ set user2_wckey1_alloc $job2_alloc
 #node0 down
 set node0_down_start [expr $period_start+(60*45)]
 set node0_down_end [expr $period_start+(60*75)]
+set node0_start_str [timestamp -format %Y-%m-%dT%X -seconds $node0_down_start]
+set node0_end_str [timestamp -format %Y-%m-%dT%X -seconds $node0_down_end]
 
 #
 # Check accounting config and bail if not found.
@@ -319,20 +321,20 @@ set file [open $sql_in "w"]
 
 # put in the cluster for back in the day before accounting was made here for us we are using 'Tue Jan  1 00:00:00 2008' = 1199174400 as the start
 
-puts $file "insert into cluster_event_table (node_name, cluster, cpu_count, period_start, period_end, reason, cluster_nodes) values"
-puts $file "('', '$cluster', $cluster_cpus, $period_start, $period_end, 'Cluster processor count', '$node_list' )"
+puts $file "insert into cluster_event_table (node_name, cluster, tres, period_start, period_end, reason, cluster_nodes) values"
+puts $file "('', '$cluster', '1=$cluster_cpus', $period_start, $period_end, 'Cluster processor count', '$node_list' )"
 
 #put a node down for 30 minutes starting at 45 minutes after the start to make sure our rollups work so we should get 15 minutes on one hour and 15 on the other
-puts $file ", ('$node0', '$cluster', $node0_cpus, $node0_down_start, $node0_down_end, 'down','')"
-#puts $file ", ('$node1', '$cluster', $node1_cpus, $period_start, $period_end, 'down')"
+puts $file ", ('$node0', '$cluster', '1=$node0_cpus', $node0_down_start, $node0_down_end, 'down','')"
+#puts $file ", ('$node1', '$cluster', '1=$node1_cpus', $period_start, $period_end, 'down')"
 puts $file "on duplicate key update period_start=VALUES(period_start), period_end=VALUES(period_end);"
 
 #now we will put in a job running for an hour and 5 minutes
-puts $file "insert into job_table (jobid, associd, wckey, wckeyid, uid, gid, `partition`, blockid, cluster, account, eligible, submit, start, end, suspended, name, track_steps, state, comp_code, priority, req_cpus, alloc_cpus, nodelist, kill_requid, qos, deleted) values"
-puts $file "('65537', '$user1acct1', '$wckey1', '$user1wckey1', '$uid', '$gid', 'debug', '', '$cluster', '$job1_acct', $job1_start, $job1_start, $job1_start, $job1_end, '0', 'test_job1', '0', '3', '0', '$job1_cpus', '$job1_cpus', '$job1_cpus', '$job1_nodes', '0', '0', '0')"
-puts $file ", ('65538', '$user2acct3', '$wckey1', '$user2wckey1', '$uid', '$gid', 'debug', '', '$cluster', '$job2_acct', $job2_elig, $job2_elig, $job2_start, $job2_end, '0', 'test_job2', '0', '3', '0', '$job2_cpus', '$job2_cpus', '$job2_cpus', '$job2_nodes', '0', '0', '0')"
-puts $file ", ('65539', '$user1acct2', '$wckey1', '$user1wckey1', '$uid', '$gid', 'debug', '', '$cluster', '$job3_acct', $job3_elig, $job3_elig, $job3_start, $job3_end, '0', 'test_job3', '0', '3', '0', '$job3_cpus', '$job3_cpus', '$job3_cpus', '$job3_nodes', '0', '0', '0')"
-puts $file "on duplicate key update id=LAST_INSERT_ID(id), eligible=VALUES(eligible), submit=VALUES(submit), start=VALUES(start), end=VALUES(end), associd=VALUES(associd), alloc_cpus=VALUES(alloc_cpus), wckey=VALUES(wckey), wckeyid=VALUES(wckeyid);";
+puts $file "insert into job_table (jobid, associd, wckey, wckeyid, uid, gid, `partition`, blockid, cluster, account, eligible, submit, start, end, suspended, name, track_steps, state, comp_code, priority, req_cpus, tres_alloc, nodelist, kill_requid, qos, deleted) values"
+puts $file "('65537', '$user1acct1', '$wckey1', '$user1wckey1', '$uid', '$gid', 'debug', '', '$cluster', '$job1_acct', $job1_start, $job1_start, $job1_start, $job1_end, '0', 'test_job1', '0', '3', '0', '$job1_cpus', $job1_cpus, '1=$job1_cpus', '$job1_nodes', '0', '0', '0')"
+puts $file ", ('65538', '$user2acct3', '$wckey1', '$user2wckey1', '$uid', '$gid', 'debug', '', '$cluster', '$job2_acct', $job2_elig, $job2_elig, $job2_start, $job2_end, '0', 'test_job2', '0', '3', '0', '$job2_cpus', '$job2_cpus', '1=$job2_cpus', '$job2_nodes', '0', '0', '0')"
+puts $file ", ('65539', '$user1acct2', '$wckey1', '$user1wckey1', '$uid', '$gid', 'debug', '', '$cluster', '$job3_acct', $job3_elig, $job3_elig, $job3_start, $job3_end, '0', 'test_job3', '0', '3', '0', '$job3_cpus', '$job3_cpus', '1=$job3_cpus', '$job3_nodes', '0', '0', '0')"
+puts $file "on duplicate key update id=LAST_INSERT_ID(id), eligible=VALUES(eligible), submit=VALUES(submit), start=VALUES(start), end=VALUES(end), associd=VALUES(associd), tres_alloc=VALUES(tres_alloc), wckey=VALUES(wckey), wckeyid=VALUES(wckeyid);";
 close $file
 
 exec $bin_rm -f $sql_rem
@@ -373,7 +375,7 @@ set matches 0
 set my_pid [eval spawn $sacct -p -C $cluster --format=cluster,account,associd,wckey,wckeyid,start,end,elapsed --noheader --start=$start_str --end=$end_str]
 expect {
 	-re "There was a problem" {
-		send_user "FAILURE: there was a problem with the sacctmgr command\n"
+		send_user "FAILURE: there was a problem with the sacct command\n"
 	    	incr exit_code 1
 	}
 	-re "$cluster.$account1.$user1acct1.$wckey1.$user1wckey1.$job1_start_str.$job1_end_str.$job1_diff_str." {
@@ -415,6 +417,52 @@ if { $exit_code } {
 	exit $exit_code
 }
 
+#
+# Use sacctmgr to see if node event loaded
+#
+
+send_user "$cluster..$start_str.$end_str.$cluster_cpus.$node_list.\n"
+
+set matches 0
+set my_pid [eval spawn $sacctmgr -p list events cluster=$cluster format=cluster,noden,start,end,cpu --noheader start=$start_str end=$end_str]
+expect {
+	-re "There was a problem" {
+		send_user "FAILURE: there was a problem with the sacctmgr command\n"
+		incr exit_code 1
+	}
+	-re "($cluster..$start_str.$end_str.$cluster_cpus.)" {
+		send_user "got 1\n"
+		incr matches
+		exp_continue
+	}
+	-re "($cluster.$node0.$node0_start_str.$node0_end_str.$node0_cpus.)" {
+		send_user "got 2\n"
+		incr matches
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sacctmgr archive load not responding\n"
+		slow_kill $my_pid
+		incr exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$matches != 2} {
+	send_user "\nFAILURE:  cluster env wasn't loaded correctly with only $matches.\n"
+	incr exit_code 1
+}
+
+if { $exit_code } {
+	#incr exit_code [archive_load $sql_rem]
+	remove_user "" "" $users
+	remove_acct "" $accounts
+	remove_cluster "$cluster"
+	exit $exit_code
+}
+
 #
 # Use sacctmgr to roll up that time period
 #
diff --git a/testsuite/expect/test22.2 b/testsuite/expect/test22.2
index 6bea9c33f..2128dab78 100755
--- a/testsuite/expect/test22.2
+++ b/testsuite/expect/test22.2
@@ -117,7 +117,7 @@ proc sreport_opt { soption } {
 			set not_support 1
 			exp_continue
 		}
-		-re "Cluster|Account|0-49 cpus|50-249 cpus|250-499 cpus|500-999 cpus|1000 cpus|of cluster" {
+		-re "Cluster|Account|0-49 CPUs|50-249 CPUs|250-499 CPUs|500-999 CPUs|1000 CPUs|of cluster" {
 			if {$debug} {send_user "\nmatch4\n"}
 			incr matches
 			exp_continue
@@ -150,12 +150,12 @@ proc sreport_opt { soption } {
 			set not_support 1
 			exp_continue
 		}
-		-re "Cluster\\|Account\\|0-49 cpus\\|50-249 cpus\\|250-499 cpus\\|" {
+		-re "Cluster\\|Account\\|0-49 CPUs\\|50-249 CPUs\\|250-499 CPUs\\|" {
 			if {$debug} {send_user "\nmatch5\n"}
 			incr matches
 			exp_continue
 		}
-		-re "500-999 cpus\\|>= 1000 cpus\\|% of cluster\\|" {
+		-re "500-999 CPUs\\|>= 1000 CPUs\\|% of cluster\\|" {
 			if {$debug} {send_user "\nmatch6\n"}
 			incr matches
 			exp_continue
@@ -184,12 +184,12 @@ proc sreport_opt { soption } {
 			set not_support 1
 			exp_continue
 		}
-		-re "Cluster\\|Account\\|0-49 cpus\\|50-249 cpus\\|250-499 cpus\\|" {
+		-re "Cluster\\|Account\\|0-49 CPUs\\|50-249 CPUs\\|250-499 CPUs\\|" {
 			if {$debug} {send_user "\nmatch7\n"}
 			incr matches
 			exp_continue
 		}
-		-re "500-999 cpus\\|>= 1000 cpus\\|% of cluster" {
+		-re "500-999 CPUs\\|>= 1000 CPUs\\|% of cluster" {
 			if {$debug} {send_user "\nmatch8\n"}
 			incr matches
 			exp_continue
diff --git a/testsuite/expect/test24.1.prog.c b/testsuite/expect/test24.1.prog.c
index 564407770..ce664852c 100644
--- a/testsuite/expect/test24.1.prog.c
+++ b/testsuite/expect/test24.1.prog.c
@@ -73,11 +73,12 @@ static void _list_delete_job(void *job_entry)
 int _setup_assoc_list(void)
 {
 	slurmdb_update_object_t update;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
+	slurmdb_tres_rec_t *tres = NULL;
 
 	/* make the main list */
-	assoc_mgr_association_list =
-		list_create(slurmdb_destroy_association_rec);
+	assoc_mgr_assoc_list =
+		list_create(slurmdb_destroy_assoc_rec);
 	assoc_mgr_user_list =
 		list_create(slurmdb_destroy_user_rec);
 	assoc_mgr_qos_list =
@@ -88,11 +89,25 @@ int _setup_assoc_list(void)
 	running_cache = 1;
 	assoc_mgr_init(NULL, NULL, SLURM_SUCCESS);
 
-	/* Here we make the associations we want to add to the system.
+	/* Here we make the tres we want to add to the system.
 	 * We do this as an update to avoid having to do setup. */
 	memset(&update, 0, sizeof(slurmdb_update_object_t));
+	update.type = SLURMDB_ADD_TRES;
+	update.objects = list_create(slurmdb_destroy_tres_rec);
+
+	tres = xmalloc(sizeof(slurmdb_tres_rec_t));
+	tres->id = 1;
+	tres->type = xstrdup("cpu");
+	list_append(update.objects, tres);
+
+	if (assoc_mgr_update_tres(&update, false))
+		error("assoc_mgr_update_tres: %m");
+	FREE_NULL_LIST(update.objects);
+
+	/* Here we make the associations we want to add to the system.
+	 * We do this as an update to avoid having to do setup. */
 	update.type = SLURMDB_ADD_ASSOC;
-	update.objects = list_create(slurmdb_destroy_association_rec);
+	update.objects = list_create(slurmdb_destroy_assoc_rec);
 
 	/* Just so we don't have to worry about lft's and rgt's we
 	 * will just append these on in order.
@@ -105,8 +120,8 @@ int _setup_assoc_list(void)
 
 	/* First only add the accounts */
 	/* root association */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 1;
 	/* assoc->lft = 1; */
 	/* assoc->rgt = 28; */
@@ -114,8 +129,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of root id 1 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 2;
 	assoc->parent_id = 1;
 	assoc->shares_raw = 40;
@@ -125,8 +140,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountA id 2 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 21;
 	/* assoc->lft = 3; */
 	/* assoc->rgt = 6; */
@@ -136,8 +151,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountB id 21 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 211;
 	/* assoc->lft = 4; */
 	/* assoc->rgt = 5; */
@@ -149,8 +164,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountA id 2 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 22;
 	/* assoc->lft = 7; */
 	/* assoc->rgt = 12; */
@@ -160,8 +175,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountC id 22 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 221;
 	/* assoc->lft = 8; */
 	/* assoc->rgt = 9; */
@@ -172,8 +187,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("User2");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 222;
 	/* assoc->lft = 10; */
 	/* assoc->rgt = 11; */
@@ -185,8 +200,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of root id 1 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 3;
 	/* assoc->lft = 14; */
 	/* assoc->rgt = 23; */
@@ -196,8 +211,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountD id 3 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 31;
 	/* assoc->lft = 19; */
 	/* assoc->rgt = 22; */
@@ -207,8 +222,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountE id 31 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 311;
 	/* assoc->lft = 20; */
 	/* assoc->rgt = 21; */
@@ -220,8 +235,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountD id 3 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 32;
 	/* assoc->lft = 15; */
 	/* assoc->rgt = 18; */
@@ -231,8 +246,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountF id 32 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 321;
 	/* assoc->lft = 16; */
 	/* assoc->rgt = 17; */
@@ -244,8 +259,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of root id 1 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 4;
 	/* assoc->lft = 24; */
 	/* assoc->rgt = 27; */
@@ -255,8 +270,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountG id 4 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 41;
 	/* assoc->lft = 25; */
 	/* assoc->rgt = 26; */
@@ -267,7 +282,7 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("User6");
 	list_append(update.objects, assoc);
 
-	if (assoc_mgr_update_assocs(&update))
+	if (assoc_mgr_update_assocs(&update, false))
 		error("assoc_mgr_update_assocs: %m");
 	list_destroy(update.objects);
 
@@ -310,6 +325,7 @@ int main (int argc, char **argv)
 	conf->priority_weight_js = 0;
 	conf->priority_weight_part = 0;
 	conf->priority_weight_qos = 0;
+	xfree(conf->priority_weight_tres);
 	slurm_conf_unlock();
 
 	/* we don't want to do any decay here so make the save state
@@ -328,7 +344,7 @@ int main (int argc, char **argv)
 	 * sleep to make sure the thread gets started. */
 	sleep(1);
 	memset(&resp, 0, sizeof(shares_response_msg_t));
-	resp.assoc_shares_list = assoc_mgr_get_shares(NULL, 0, NULL, NULL);
+	assoc_mgr_get_shares(NULL, 0, NULL, &resp);
 	process(&resp, 0);
 
 	/* free memory */
@@ -338,8 +354,8 @@ int main (int argc, char **argv)
 		list_destroy(job_list);
 	if (resp.assoc_shares_list)
 		list_destroy(resp.assoc_shares_list);
-	if (assoc_mgr_association_list)
-		list_destroy(assoc_mgr_association_list);
+	if (assoc_mgr_assoc_list)
+		list_destroy(assoc_mgr_assoc_list);
 	if (assoc_mgr_qos_list)
 		list_destroy(assoc_mgr_qos_list);
 	return 0;
diff --git a/testsuite/expect/test24.2 b/testsuite/expect/test24.2
index 885e38299..e6bf185b3 100755
--- a/testsuite/expect/test24.2
+++ b/testsuite/expect/test24.2
@@ -113,7 +113,7 @@ proc sshare_opt { soption } {
 		return $matches
 	}
 
-	if { $soption == "-noheader" || $soption == "h" } {
+	if { $soption == "-noheader" || $soption == "n" } {
 
 		spawn $sshare -$soption
 		expect {
@@ -125,7 +125,7 @@ proc sshare_opt { soption } {
 				set not_support 2
 				exp_continue
 			}
-			-re "Account|User|Raw Shares|Norm Shares|Raw Usage|Norm Usage|Effectv Usage" {
+			-re "Account|User|RawShares|NormShares|RawUsage|NormUsage|EffectvUsage" {
 				if {$debug} {send_user "\nmatch4\n"}
 				incr matches
 				exp_continue
@@ -164,12 +164,12 @@ proc sshare_opt { soption } {
 				set not_support 2
 				exp_continue
 			}
-			-re "Account\\|User\\|Raw Shares\\|Norm Shares\\|" {
+			-re "Account\\|User\\|RawShares\\|NormShares\\|" {
 				if {$debug} {send_user "\nmatch5\n"}
 				incr matches
 				exp_continue
 			}
-			-re "Raw Usage\\|Effectv Usage\\|FairShare\\|" {
+			-re "RawUsage\\|EffectvUsage\\|FairShare\\|" {
 				if {$debug} {send_user "\nmatch5\n"}
 				incr matches
 				exp_continue
@@ -203,12 +203,12 @@ proc sshare_opt { soption } {
 				set not_support 2
 				exp_continue
 			}
-			-re "Account\\|User\\|Raw Shares\\|Norm Shares\\|" {
+			-re "Account\\|User\\|RawShares\\|NormShares\\|" {
 				if {$debug} {send_user "\nmatch6\n"}
 				incr matches
 				exp_continue
 			}
-			-re "Raw Usage\\|Effectv Usage\\|FairShare" {
+			-re "RawUsage\\|EffectvUsage\\|FairShare" {
 				if {$debug} {send_user "\nmatch7\n"}
 				incr matches
 				exp_continue
@@ -334,7 +334,7 @@ if {$matches != 3} {
 	set exit_code 1
 }
 
-set matches [sshare_opt h ]
+set matches [sshare_opt n ]
 if {$matches != 0} {
 	send_user "\nFAILURE: sshare -n failed ($matches != 0)\n"
 	set exit_code 1
diff --git a/testsuite/expect/test24.3 b/testsuite/expect/test24.3
index c0f9afec0..ccac48d49 100755
--- a/testsuite/expect/test24.3
+++ b/testsuite/expect/test24.3
@@ -79,91 +79,91 @@ expect {
 		exp_continue
 	}
 
-	"root|||1.000000|210||1.000000|0.500000|0|0|" {
+	"root|||1.000000|210||1.000000|0.500000||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountA||40|0.266667|45|0.214286|0.214286|0.572929|0|0|" {
+	"AccountA||40|0.266667|45|0.214286|0.214286|0.572929||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountB||30|0.200000|20|0.095238|0.184524|0.527550|0|0|" {
+	"AccountB||30|0.200000|20|0.095238|0.184524|0.527550||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountB|User1|1|0.200000|20|0.095238|0.184524|0.527550|0|0|" {
+	"AccountB|User1|1|0.200000|20|0.095238|0.184524|0.527550||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountC||10|0.066667|25|0.119048|0.142857|0.226431|0|0|" {
+	"AccountC||10|0.066667|25|0.119048|0.142857|0.226431||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountC|User2|1|0.033333|25|0.119048|0.130952|0.065672|0|0|" {
+	"AccountC|User2|1|0.033333|25|0.119048|0.130952|0.065672||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountC|User3|1|0.033333|0|0.000000|0.071429|0.226431|0|0|" {
+	"AccountC|User3|1|0.033333|0|0.000000|0.071429|0.226431||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountD||60|0.400000|25|0.119048|0.119048|0.813594|0|0|" {
+	"AccountD||60|0.400000|25|0.119048|0.119048|0.813594||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountE||25|0.166667|25|0.119048|0.119048|0.609507|0|0|" {
+	"AccountE||25|0.166667|25|0.119048|0.119048|0.609507||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountE|User4|1|0.166667|25|0.119048|0.119048|0.609507|0|0|" {
+	"AccountE|User4|1|0.166667|25|0.119048|0.119048|0.609507||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountF||35|0.233333|0|0.000000|0.069444|0.813594|0|0|" {
+	"AccountF||35|0.233333|0|0.000000|0.069444|0.813594||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountF|User5|1|0.233333|0|0.000000|0.069444|0.813594|0|0|" {
+	"AccountF|User5|1|0.233333|0|0.000000|0.069444|0.813594||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountG||0|0.000000|30|0.142857|0.142857|0.000000|0|0|" {
+	"AccountG||0|0.000000|30|0.142857|0.142857|0.000000||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountG|User6|0|0.000000|30|0.142857|0.142857|0.000000|0|0|" {
+	"AccountG|User6|0|0.000000|30|0.142857|0.142857|0.000000||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountH||50|0.333333|110|0.523810|0.523810|0.336475|0|0|" {
+	"AccountH||50|0.333333|110|0.523810|0.523810|0.336475||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountH|UHRA1|parent|0.333333|20|0.095238|0.523810|0.336475|0|0|" {
+	"AccountH|UHRA1|parent|0.333333|20|0.095238|0.523810|0.336475||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountH|UHRA2|40|0.078431|20|0.095238|0.196078|0.176777|0|0|" {
+	"AccountH|UHRA2|40|0.078431|20|0.095238|0.196078|0.176777||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountH|UHRA3|50|0.098039|25|0.119048|0.238095|0.185749|0|0|" {
+	"AccountH|UHRA3|50|0.098039|25|0.119048|0.238095|0.185749||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountHTA||parent|0.333333|45|0.214286|0.523810|0.336475|0|0|" {
+	"AccountHTA||parent|0.333333|45|0.214286|0.523810|0.336475||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountHTA|UHTAStd1|parent|0.333333|10|0.047619|0.523810|0.336475|0|0|" {
+	"AccountHTA|UHTAStd1|parent|0.333333|10|0.047619|0.523810|0.336475||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountHTA|UHTAStd2|30|0.058824|10|0.047619|0.131653|0.211966|0|0|" {
+	"AccountHTA|UHTAStd2|30|0.058824|10|0.047619|0.131653|0.211966||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"AccountHTA|UHTAStd3|50|0.098039|25|0.119048|0.238095|0.185749|0|0|" {
+	"AccountHTA|UHTAStd3|50|0.098039|25|0.119048|0.238095|0.185749||cpu=0|" {
 		incr matches
 		exp_continue
 	}
diff --git a/testsuite/expect/test24.3.prog.c b/testsuite/expect/test24.3.prog.c
index 34b4f708d..5cd31f6f8 100644
--- a/testsuite/expect/test24.3.prog.c
+++ b/testsuite/expect/test24.3.prog.c
@@ -78,26 +78,46 @@ static void _list_delete_job(void *job_entry)
 int _setup_assoc_list(void)
 {
 	slurmdb_update_object_t update;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
+	slurmdb_tres_rec_t *tres = NULL;
 
 	/* make the main list */
-	assoc_mgr_association_list =
-		list_create(slurmdb_destroy_association_rec);
+	assoc_mgr_assoc_list =
+		list_create(slurmdb_destroy_assoc_rec);
 	assoc_mgr_user_list =
 		list_create(slurmdb_destroy_user_rec);
 	assoc_mgr_qos_list =
 		list_create(slurmdb_destroy_qos_rec);
+	assoc_mgr_tres_list =
+		list_create(slurmdb_destroy_tres_rec);
+	assoc_mgr_tres_array = xmalloc(sizeof(slurmdb_tres_rec_t));
 
 	/* we just want make it so we setup_children so just pretend
 	 * we are running off cache */
 	running_cache = 1;
 	assoc_mgr_init(NULL, NULL, SLURM_SUCCESS);
 
+
+	/* Here we make the tres we want to add to the system.
+	 * We do this as an update to avoid having to do setup. */
+	memset(&update, 0, sizeof(slurmdb_update_object_t));
+	update.type = SLURMDB_ADD_TRES;
+	update.objects = list_create(slurmdb_destroy_tres_rec);
+
+	tres = xmalloc(sizeof(slurmdb_tres_rec_t));
+	tres->id = 1;
+	tres->type = xstrdup("cpu");
+	list_append(update.objects, tres);
+
+	if (assoc_mgr_update_tres(&update, false))
+		error("assoc_mgr_update_tres: %m");
+	FREE_NULL_LIST(update.objects);
+
 	/* Here we make the associations we want to add to the system.
 	 * We do this as an update to avoid having to do setup. */
 	memset(&update, 0, sizeof(slurmdb_update_object_t));
 	update.type = SLURMDB_ADD_ASSOC;
-	update.objects = list_create(slurmdb_destroy_association_rec);
+	update.objects = list_create(slurmdb_destroy_assoc_rec);
 
 	/* Just so we don't have to worry about lft's and rgt's we
 	 * will just append these on in order.
@@ -110,8 +130,8 @@ int _setup_assoc_list(void)
 
 	/* First only add the accounts */
 	/* root association */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 1;
 	/* assoc->lft = 1; */
 	/* assoc->rgt = 28; */
@@ -119,8 +139,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of root id 1 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 2;
 	assoc->parent_id = 1;
 	assoc->shares_raw = 40;
@@ -130,8 +150,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountA id 2 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 21;
 	/* assoc->lft = 3; */
 	/* assoc->rgt = 6; */
@@ -141,8 +161,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountB id 21 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 211;
 	/* assoc->lft = 4; */
 	/* assoc->rgt = 5; */
@@ -154,8 +174,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountA id 2 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 22;
 	/* assoc->lft = 7; */
 	/* assoc->rgt = 12; */
@@ -165,8 +185,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountC id 22 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 221;
 	/* assoc->lft = 8; */
 	/* assoc->rgt = 9; */
@@ -177,8 +197,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("User2");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 222;
 	/* assoc->lft = 10; */
 	/* assoc->rgt = 11; */
@@ -190,8 +210,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of root id 1 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 3;
 	/* assoc->lft = 14; */
 	/* assoc->rgt = 23; */
@@ -201,8 +221,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountD id 3 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 31;
 	/* assoc->lft = 19; */
 	/* assoc->rgt = 22; */
@@ -212,8 +232,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountE id 31 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 311;
 	/* assoc->lft = 20; */
 	/* assoc->rgt = 21; */
@@ -225,8 +245,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountD id 3 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 32;
 	/* assoc->lft = 15; */
 	/* assoc->rgt = 18; */
@@ -236,8 +256,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountF id 32 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 321;
 	/* assoc->lft = 16; */
 	/* assoc->rgt = 17; */
@@ -249,8 +269,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of root id 1 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 4;
 	/* assoc->lft = 24; */
 	/* assoc->rgt = 27; */
@@ -260,8 +280,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountG id 4 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 41;
 	/* assoc->lft = 25; */
 	/* assoc->rgt = 26; */
@@ -275,8 +295,8 @@ int _setup_assoc_list(void)
 	/* Check for proper handling of Fairshare=parent */
 
 	/* sub of root id 1 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 5;
 	/* assoc->lft = ; */
 	/* assoc->rgt = ; */
@@ -286,8 +306,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountH id 5 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 51;
 	/* assoc->lft = ; */
 	/* assoc->rgt = ; */
@@ -298,8 +318,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountHTA id 51 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 511;
 	/* assoc->lft = ; */
 	/* assoc->rgt = ; */
@@ -311,8 +331,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountHTA id 51 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 512;
 	/* assoc->lft = ; */
 	/* assoc->rgt = ; */
@@ -324,8 +344,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountHTA id 51 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 513;
 	/* assoc->lft = ; */
 	/* assoc->rgt = ; */
@@ -337,8 +357,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountH id 5 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 52;
 	/* assoc->lft = ; */
 	/* assoc->rgt = ; */
@@ -350,8 +370,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountH id 5 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 53;
 	/* assoc->lft = ; */
 	/* assoc->rgt = ; */
@@ -363,8 +383,8 @@ int _setup_assoc_list(void)
 	list_append(update.objects, assoc);
 
 	/* sub of AccountH id 5 */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 54;
 	/* assoc->lft = ; */
 	/* assoc->rgt = ; */
@@ -375,7 +395,7 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("UHRA3");
 	list_append(update.objects, assoc);
 
-	if (assoc_mgr_update_assocs(&update))
+	if (assoc_mgr_update_assocs(&update, false))
 		error("assoc_mgr_update_assocs: %m");
 	list_destroy(update.objects);
 
@@ -436,7 +456,7 @@ int main (int argc, char **argv)
 	 * sleep to make sure the thread gets started. */
 	sleep(1);
 	memset(&resp, 0, sizeof(shares_response_msg_t));
-	resp.assoc_shares_list = assoc_mgr_get_shares(NULL, 0, NULL, NULL);
+	assoc_mgr_get_shares(NULL, 0, NULL, &resp);
 	process(&resp, 0);
 
 	/* free memory */
@@ -446,9 +466,10 @@ int main (int argc, char **argv)
 		list_destroy(job_list);
 	if (resp.assoc_shares_list)
 		list_destroy(resp.assoc_shares_list);
-	if (assoc_mgr_association_list)
-		list_destroy(assoc_mgr_association_list);
+	if (assoc_mgr_assoc_list)
+		list_destroy(assoc_mgr_assoc_list);
 	if (assoc_mgr_qos_list)
 		list_destroy(assoc_mgr_qos_list);
+	xfree(assoc_mgr_tres_array);
 	return 0;
 }
diff --git a/testsuite/expect/test24.4 b/testsuite/expect/test24.4
index 45d759299..2fa63232b 100755
--- a/testsuite/expect/test24.4
+++ b/testsuite/expect/test24.4
@@ -78,143 +78,143 @@ expect {
 		exp_continue
 	}
 
-	"root|||0.000000|240||1.000000|||0|0|" {
+	"root|||0.000000|240||1.000000||||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"root|u1|10|0.048077|0|0.000000|0.000000|1.000000|inf|0|0|" {
+	"root|u1|10|0.048077|0|0.000000|0.000000|1.000000|inf||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"root|u2|10|0.048077|10|0.041667|0.041667|0.666667|1.153846|0|0|" {
+	"root|u2|10|0.048077|10|0.041667|0.041667|0.666667|1.153846||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aA||40|0.192308|45|0.187500|0.187500||1.025641|0|0|" {
+	"aA||40|0.192308|45|0.187500|0.187500||1.025641||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aAA||30|0.750000|20|0.083333|0.444444||1.687500|0|0|" {
+	"aAA||30|0.750000|20|0.083333|0.444444||1.687500||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aAA|uAA1|1|1.000000|20|0.083333|1.000000|0.619048|1.000000|0|0|" {
+	"aAA|uAA1|1|1.000000|20|0.083333|1.000000|0.619048|1.000000||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aAB||10|0.250000|25|0.104167|0.555556||0.450000|0|0|" {
+	"aAB||10|0.250000|25|0.104167|0.555556||0.450000||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aAB|uAB1|1|0.500000|25|0.104167|1.000000|0.523810|0.500000|0|0|" {
+	"aAB|uAB1|1|0.500000|25|0.104167|1.000000|0.523810|0.500000||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aAB|uAB2|1|0.500000|0|0.000000|0.000000|0.571429|inf|0|0|" {
+	"aAB|uAB2|1|0.500000|0|0.000000|0.000000|0.571429|inf||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aB||60|0.288462|25|0.104167|0.104167||2.769231|0|0|" {
+	"aB||60|0.288462|25|0.104167|0.104167||2.769231||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aBA||25|0.416667|25|0.104167|1.000000||0.416667|0|0|" {
+	"aBA||25|0.416667|25|0.104167|1.000000||0.416667||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aBA|uBA1|1|1.000000|25|0.104167|1.000000|0.714286|1.000000|0|0|" {
+	"aBA|uBA1|1|1.000000|25|0.104167|1.000000|0.714286|1.000000||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aBB||35|0.583333|0|0.000000|0.000000||inf|0|0|" {
+	"aBB||35|0.583333|0|0.000000|0.000000||inf||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aBB|uBB1|1|1.000000|0|0.000000|0.000000|0.761905|inf|0|0|" {
+	"aBB|uBB1|1|1.000000|0|0.000000|0.000000|0.761905|inf||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aC||0|0.000000|30|0.125000|0.125000||0.000000|0|0|" {
+	"aC||0|0.000000|30|0.125000|0.125000||0.000000||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aC|uC1|0|0.000000|30|0.125000|1.000000|0.047619|0.000000|0|0|" {
+	"aC|uC1|0|0.000000|30|0.125000|1.000000|0.047619|0.000000||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aD||50|0.240385|110|0.458333|0.458333||0.524476|0|0|" {
+	"aD||50|0.240385|110|0.458333|0.458333||0.524476||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aD|uD1|parent|0.240385|20|0.083333|0.181818|0.476190||0|0|" {
+	"aD|uD1|parent|0.240385|20|0.083333|0.181818|0.476190|||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aD|uD2|40|0.235294|20|0.083333|0.181818|0.238095|1.294118|0|0|" {
+	"aD|uD2|40|0.235294|20|0.083333|0.181818|0.238095|1.294118||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aD|uD3|50|0.294118|25|0.104167|0.227273|0.333333|1.294118|0|0|" {
+	"aD|uD3|50|0.294118|25|0.104167|0.227273|0.333333|1.294118||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aDA||parent|0.240385|45|0.187500|0.409091|||0|0|" {
+	"aDA||parent|0.240385|45|0.187500|0.409091||||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aDA|uDA1|parent|0.240385|10|0.041667|0.090909|0.476190||0|0|" {
+	"aDA|uDA1|parent|0.240385|10|0.041667|0.090909|0.476190|||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aDA|uDA2|30|0.176471|10|0.041667|0.090909|0.380952|1.941176|0|0|" {
+	"aDA|uDA2|30|0.176471|10|0.041667|0.090909|0.380952|1.941176||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aDA|uDA3|50|0.294118|25|0.104167|0.227273|0.333333|1.294118|0|0|" {
+	"aDA|uDA3|50|0.294118|25|0.104167|0.227273|0.333333|1.294118||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aE||10|0.048077|0|0.000000|0.000000||inf|0|0|" {
+	"aE||10|0.048077|0|0.000000|0.000000||inf||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aE|aE1|10|0.500000|0|0.000000|0.000000|1.000000|inf|0|0|" {
+	"aE|aE1|10|0.500000|0|0.000000|0.000000|1.000000|inf||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aE|aE2|10|0.500000|0|0.000000|0.000000|1.000000|inf|0|0|" {
+	"aE|aE2|10|0.500000|0|0.000000|0.000000|1.000000|inf||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aF||20|0.096154|0|0.000000|0.000000||inf|0|0|" {
+	"aF||20|0.096154|0|0.000000|0.000000||inf||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aF|uF1|10|0.333333|0|0.000000|0.000000|1.000000|inf|0|0|" {
+	"aF|uF1|10|0.333333|0|0.000000|0.000000|1.000000|inf||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aF|uF2|20|0.666667|0|0.000000|0.000000|1.000000|inf|0|0|" {
+	"aF|uF2|20|0.666667|0|0.000000|0.000000|1.000000|inf||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aG||8|0.038462|20|0.083333|0.083333||0.461538|0|0|" {
+	"aG||8|0.038462|20|0.083333|0.083333||0.461538||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aG|uG1|10|0.500000|10|0.041667|0.500000|0.190476|1.000000|0|0|" {
+	"aG|uG1|10|0.500000|10|0.041667|0.500000|0.190476|1.000000||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aGA||10|0.500000|10|0.041667|0.500000||1.000000|0|0|" {
+	"aGA||10|0.500000|10|0.041667|0.500000||1.000000||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aGA|uGA1|20|0.500000|4|0.016667|0.400000|0.190476|1.250000|0|0|" {
+	"aGA|uGA1|20|0.500000|4|0.016667|0.400000|0.190476|1.250000||cpu=0|" {
 		incr matches
 		exp_continue
 	}
-	"aGA|uGA2|20|0.500000|6|0.025000|0.600000|0.095238|0.833333|0|0|" {
+	"aGA|uGA2|20|0.500000|6|0.025000|0.600000|0.095238|0.833333||cpu=0|" {
 		incr matches
 		exp_continue
 	}
diff --git a/testsuite/expect/test24.4.prog.c b/testsuite/expect/test24.4.prog.c
index e6863538a..c95cf38f6 100644
--- a/testsuite/expect/test24.4.prog.c
+++ b/testsuite/expect/test24.4.prog.c
@@ -76,11 +76,12 @@ static void _list_delete_job(void *job_entry)
 int _setup_assoc_list(void)
 {
 	slurmdb_update_object_t update;
-	slurmdb_association_rec_t *assoc = NULL;
+	slurmdb_assoc_rec_t *assoc = NULL;
+	slurmdb_tres_rec_t *tres = NULL;
 
 	/* make the main list */
-	assoc_mgr_association_list =
-		list_create(slurmdb_destroy_association_rec);
+	assoc_mgr_assoc_list =
+		list_create(slurmdb_destroy_assoc_rec);
 	assoc_mgr_user_list =
 		list_create(slurmdb_destroy_user_rec);
 	assoc_mgr_qos_list =
@@ -91,37 +92,52 @@ int _setup_assoc_list(void)
 	running_cache = 1;
 	assoc_mgr_init(NULL, NULL, SLURM_SUCCESS);
 
+	/* Here we make the tres we want to add to the system.
+	 * We do this as an update to avoid having to do setup. */
+	memset(&update, 0, sizeof(slurmdb_update_object_t));
+	update.type = SLURMDB_ADD_TRES;
+	update.objects = list_create(slurmdb_destroy_tres_rec);
+
+	tres = xmalloc(sizeof(slurmdb_tres_rec_t));
+	tres->id = 1;
+	tres->type = xstrdup("cpu");
+	list_append(update.objects, tres);
+
+	if (assoc_mgr_update_tres(&update, false))
+		error("assoc_mgr_update_tres: %m");
+	FREE_NULL_LIST(update.objects);
+
 	/* Here we make the associations we want to add to the system.
 	 * We do this as an update to avoid having to do setup. */
 	memset(&update, 0, sizeof(slurmdb_update_object_t));
 	update.type = SLURMDB_ADD_ASSOC;
-	update.objects = list_create(slurmdb_destroy_association_rec);
+	update.objects = list_create(slurmdb_destroy_assoc_rec);
 
-	/* root association */
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	/* root assoc */
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 1;
 	assoc->acct = xstrdup("root");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 2;
 	assoc->parent_id = 1;
 	assoc->shares_raw = 40;
 	assoc->acct = xstrdup("aA");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 21;
 	assoc->parent_id = 2;
 	assoc->shares_raw = 30;
 	assoc->acct = xstrdup("aAA");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 211;
 	assoc->parent_id = 21;
 	assoc->shares_raw = 1;
@@ -130,16 +146,16 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("uAA1");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 22;
 	assoc->parent_id = 2;
 	assoc->shares_raw = 10;
 	assoc->acct = xstrdup("aAB");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 221;
 	assoc->parent_id = 22;
 	assoc->shares_raw = 1;
@@ -148,8 +164,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("uAB1");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 222;
 	assoc->parent_id = 22;
 	assoc->shares_raw = 1;
@@ -158,24 +174,24 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("uAB2");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 3;
 	assoc->parent_id = 1;
 	assoc->shares_raw = 60;
 	assoc->acct = xstrdup("aB");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 31;
 	assoc->parent_id = 3;
 	assoc->shares_raw = 25;
 	assoc->acct = xstrdup("aBA");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 311;
 	assoc->parent_id = 31;
 	assoc->shares_raw = 1;
@@ -184,16 +200,16 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("uBA1");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 32;
 	assoc->parent_id = 3;
 	assoc->shares_raw = 35;
 	assoc->acct = xstrdup("aBB");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 321;
 	assoc->parent_id = 32;
 	assoc->shares_raw = 1;
@@ -202,8 +218,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("uBB1");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 4;
 	assoc->parent_id = 1;
 	assoc->shares_raw = 0;
@@ -211,8 +227,8 @@ int _setup_assoc_list(void)
 	assoc->acct = xstrdup("aC");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 41;
 	assoc->parent_id = 4;
 	assoc->shares_raw = 0;
@@ -223,16 +239,16 @@ int _setup_assoc_list(void)
 
 	/* Check for proper handling of Fairshare=parent */
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 5;
 	assoc->parent_id = 1;
 	assoc->shares_raw = 50;
 	assoc->acct = xstrdup("aD");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 51;
 	assoc->parent_id = 5;
 	assoc->shares_raw = SLURMDB_FS_USE_PARENT;
@@ -240,8 +256,8 @@ int _setup_assoc_list(void)
 	assoc->acct = xstrdup("aDA");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 511;
 	assoc->parent_id = 51;
 	assoc->shares_raw = SLURMDB_FS_USE_PARENT;
@@ -250,8 +266,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("uDA1");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 512;
 	assoc->parent_id = 51;
 	assoc->shares_raw = 30;
@@ -260,8 +276,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("uDA2");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 513;
 	assoc->parent_id = 51;
 	assoc->shares_raw = 50;
@@ -270,8 +286,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("uDA3");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 52;
 	assoc->parent_id = 5;
 	assoc->shares_raw = SLURMDB_FS_USE_PARENT;
@@ -280,8 +296,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("uD1");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 53;
 	assoc->parent_id = 5;
 	assoc->shares_raw = 40;
@@ -290,8 +306,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("uD2");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 54;
 	assoc->parent_id = 5;
 	assoc->shares_raw = 50;
@@ -302,8 +318,8 @@ int _setup_assoc_list(void)
 
 	/* Check for proper tie handling */
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 6;
 	assoc->parent_id = 1;
 	assoc->shares_raw = 10;
@@ -311,8 +327,8 @@ int _setup_assoc_list(void)
 	assoc->acct = xstrdup("aE");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 61;
 	assoc->parent_id = 6;
 	assoc->shares_raw = 10;
@@ -321,8 +337,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("aE1");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 62;
 	assoc->parent_id = 6;
 	assoc->shares_raw = 10;
@@ -331,8 +347,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("aE2");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 7;
 	assoc->parent_id = 1;
 	assoc->shares_raw = 10;
@@ -341,8 +357,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("u1");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 8;
 	assoc->parent_id = 1;
 	assoc->shares_raw = 20;
@@ -350,8 +366,8 @@ int _setup_assoc_list(void)
 	assoc->acct = xstrdup("aF");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 81;
 	assoc->parent_id = 8;
 	assoc->shares_raw = 10;
@@ -360,8 +376,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("uF1");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 82;
 	assoc->parent_id = 8;
 	assoc->shares_raw = 20;
@@ -370,8 +386,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("uF2");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 9;
 	assoc->parent_id = 1;
 	assoc->shares_raw = 8;
@@ -379,8 +395,8 @@ int _setup_assoc_list(void)
 	assoc->acct = xstrdup("aG");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 91;
 	assoc->parent_id = 9;
 	assoc->shares_raw = 10;
@@ -389,8 +405,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("uG1");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 92;
 	assoc->parent_id = 9;
 	assoc->shares_raw = 10;
@@ -398,8 +414,8 @@ int _setup_assoc_list(void)
 	assoc->acct = xstrdup("aGA");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 921;
 	assoc->parent_id = 92;
 	assoc->shares_raw = 20;
@@ -408,8 +424,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("uGA1");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 921;
 	assoc->parent_id = 92;
 	assoc->shares_raw = 20;
@@ -418,8 +434,8 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("uGA2");
 	list_append(update.objects, assoc);
 
-	assoc = xmalloc(sizeof(slurmdb_association_rec_t));
-	assoc->usage = create_assoc_mgr_association_usage();
+	assoc = xmalloc(sizeof(slurmdb_assoc_rec_t));
+	assoc->usage = slurmdb_create_assoc_usage(g_tres_count);
 	assoc->id = 1001;
 	assoc->parent_id = 1;
 	assoc->shares_raw = 10;
@@ -428,7 +444,7 @@ int _setup_assoc_list(void)
 	assoc->user = xstrdup("u2");
 	list_append(update.objects, assoc);
 
-	if (assoc_mgr_update_assocs(&update))
+	if (assoc_mgr_update_assocs(&update, false))
 		error("assoc_mgr_update_assocs: %m");
 	list_destroy(update.objects);
 
@@ -489,7 +505,7 @@ int main (int argc, char **argv)
 	 * sleep to make sure the thread gets started. */
 	sleep(1);
 	memset(&resp, 0, sizeof(shares_response_msg_t));
-	resp.assoc_shares_list = assoc_mgr_get_shares(NULL, 0, NULL, NULL);
+	assoc_mgr_get_shares(NULL, 0, NULL, &resp);
 	process(&resp, 0);
 
 	/* free memory */
@@ -499,9 +515,10 @@ int main (int argc, char **argv)
 		list_destroy(job_list);
 	if (resp.assoc_shares_list)
 		list_destroy(resp.assoc_shares_list);
-	if (assoc_mgr_association_list)
-		list_destroy(assoc_mgr_association_list);
+	if (assoc_mgr_assoc_list)
+		list_destroy(assoc_mgr_assoc_list);
 	if (assoc_mgr_qos_list)
 		list_destroy(assoc_mgr_qos_list);
+	xfree(assoc_mgr_tres_array);
 	return 0;
 }
diff --git a/testsuite/expect/test28.2 b/testsuite/expect/test28.2
index 38d31e1b7..2db8a1e8a 100755
--- a/testsuite/expect/test28.2
+++ b/testsuite/expect/test28.2
@@ -70,6 +70,9 @@ delete_file
 make_bash_script $file_script "
 $bin_echo array_id=\$SLURM_ARRAY_JOB_ID
 $bin_echo task_id=\$SLURM_ARRAY_TASK_ID
+$bin_echo task_min=\$SLURM_ARRAY_TASK_MIN
+$bin_echo task_max=\$SLURM_ARRAY_TASK_MAX
+$bin_echo task_step=\$SLURM_ARRAY_TASK_STEP
 $bin_sleep aaaa
 exit 0
 "
@@ -154,6 +157,7 @@ for {set cnt 0} {$cnt<$array_end} {incr cnt} {
 }
 send_user "\nchecking environment variables\n"
 # Checks that the array job ids are correct
+set max_inx [expr $array_end - 1]
 for {set index 0} {$index < $array_end} {incr index} {
 	set env_cnt 0
 	spawn $bin_cat test$test_id-$job_id\_$index\.output
@@ -166,12 +170,24 @@ for {set index 0} {$index < $array_end} {incr index} {
 			incr env_cnt
 			exp_continue
 		}
+		-re "task_min=0" {
+			incr env_cnt
+			exp_continue
+		}
+		-re "task_max=$max_inx" {
+			incr env_cnt
+			exp_continue
+		}
+		-re "task_step=1" {
+			incr env_cnt
+			exp_continue
+		}
 		eof {
 			wait
 		}
 	}
-	if {$env_cnt != 2} {
-		send_user "\nFAILURE: Missing environment variables in file test$test_id-$job_id\_$index\.output\n"
+	if {$env_cnt != 5} {
+		send_user "\nFAILURE: Missing environment variables in file test$test_id-$job_id\_$index\.output ($env_cnt != 5)\n"
 		set exit_code 1
 	}
 }
@@ -197,8 +213,8 @@ for {set index 0} {$index < $array_end} {incr index} {
 
 if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
+	delete_file
 } else {
 	send_user "\nFAILURE\n"
 }
-delete_file
 exit $exit_code
diff --git a/testsuite/expect/test28.3 b/testsuite/expect/test28.3
index be996aefe..858b843d1 100755
--- a/testsuite/expect/test28.3
+++ b/testsuite/expect/test28.3
@@ -94,7 +94,9 @@ expect {
 }
 if {$job_id != 0} {
 	check_ids $job_id
-	cancel_job $job_id
+	if {[cancel_job $job_id] != 0} {
+		set exit_code 1
+	}
 }
 
 if {$exit_code == 0} {
diff --git a/testsuite/expect/test28.4 b/testsuite/expect/test28.4
index a585c1188..763a08c5f 100755
--- a/testsuite/expect/test28.4
+++ b/testsuite/expect/test28.4
@@ -41,7 +41,7 @@ set array_in      2
 print_header $test_id
 
 if {[get_array_config] < [expr $array_size + 1]} {
-	send_user "\nWARNING: MaxArraySize is to small\n"
+	send_user "\nWARNING: MaxArraySize is too small\n"
 	exit 0
 }
 
@@ -136,7 +136,9 @@ set new_job_id [check_update ${job_id}_2 4]
 # Make sure other tasks remain unchanged
 check_update ${job_id}_1 1
 
-cancel_job $job_id
+if {[cancel_job $job_id] != 0} {
+	set exit_code 1
+}
 
 if {$exit_code == 0} {
 	exec $bin_rm -f $file_script
diff --git a/testsuite/expect/test28.5 b/testsuite/expect/test28.5
index 8e42ac292..fdf51004f 100755
--- a/testsuite/expect/test28.5
+++ b/testsuite/expect/test28.5
@@ -184,7 +184,9 @@ if {$job_id != 0} {
 	release_job ${job_id}_$array_in
 	check_release_job ${job_id}_$array_in
 
-	cancel_job $job_id
+	if {[cancel_job $job_id] != 0} {
+		set exit_code 1
+	}
 }
 
 if {$exit_code == 0} {
diff --git a/testsuite/expect/test28.8 b/testsuite/expect/test28.8
new file mode 100755
index 000000000..be66013e7
--- /dev/null
+++ b/testsuite/expect/test28.8
@@ -0,0 +1,199 @@
+#!/usr/bin/env expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test of job array suspend/resume.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "WARNING: ..." with an explanation of why the test can't be made, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2005-2007 The Regents of the University of California.
+# Copyright (C) 2008 Lawrence Livermore National Security.
+# Copyright (C) 2015 SchedMD LLC.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Morris Jette <jette@schedmd.com>
+# CODE-OCEC-09-009. All rights reserved.
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id        "28.8"
+set array_size     4
+set exit_code      0
+set file_script    "test$test_id.bash"
+set job_id         0
+set not_supported  0
+
+print_header $test_id
+
+if {[get_array_config] < [expr $array_size + 1]} {
+	send_user "\nWARNING: MaxArraySize is too small for this test\n"
+	exit 0
+}
+if {[test_gang] != 0} {
+	send_user "\nWARNING: This test can't be run with gang scheduling\n"
+	exit 0
+}
+if {[is_super_user] == 0} {
+	send_user "\nWARNING: This test can't be run except as SlurmUser\n"
+	exit 0
+}
+
+set conf_test 0
+log_user 0
+spawn $scontrol show config
+expect {
+	-re "launch/poe" {
+		incr conf_test
+		exp_continue
+	}
+	-re "proctrack/pgid" {
+		incr conf_test
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+log_user 1
+if {$conf_test == 2} {
+	send_user "\nWARNING: launch/poe plus proctrack/pgid are incompatible with this test\n"
+	exit 0
+}
+
+proc suspend_job { job_id operation } {
+	global exit_code scontrol not_supported
+
+	spawn $scontrol $operation $job_id
+	expect {
+		-re "Requested operation not supported" {
+			send_user "\nWARNING: job suspend not supported\n"
+			set not_supported 1
+			exp_continue
+		}
+		-re "No error" {
+			exp_continue
+		}
+		-re "error" {
+			send_user "\nFAILURE: scontrol error\n"
+			set exit_code 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+}
+
+proc check_output { file_name } {
+	global exit_code bin_cat
+	set match1 0
+	set match2 0
+
+	spawn $bin_cat $file_name
+	expect {
+		-re "JobSuspended" {
+			set match1 1
+			exp_continue
+		}
+		-re "AllDone" {
+			set match2 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: cat not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if { $match1 == 0 } {
+		send_user "\nFAILURE: job never suspended\n"
+		set exit_code 1
+	}
+	if { $match2 == 0 } {
+		send_user "\nFAILURE: job failed to run to completion\n"
+		set exit_code 1
+	}
+}
+
+################Test Starts Here################
+make_bash_script $file_script "sleep 100"
+
+spawn $sbatch -N1 --array=0-[expr $array_size -1] --output=/dev/null -t1 $file_script
+expect {
+	-re "Submitted batch job ($number)" {
+		set job_id $expect_out(1,string)
+		send_user "\njob $job_id was submitted\n"
+		exp_continue
+	}
+	-re "error" {
+		send_user "\nFAILURE: sbatch did not submit jobs\n"
+		set exit_code 1
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {[wait_for_job ${job_id}_1 RUNNING] != 0} {
+	send_user "\nFAILURE: error starting job ${job_id}_1\n"
+	cancel_job $job_id
+	exit 1
+}
+
+sleep 5
+suspend_job $job_id suspend
+if {$not_supported == 1} {
+	exec $bin_rm -f $file_script
+	cancel_job $job_id
+	exit 0
+}
+
+
+if {[wait_for_job ${job_id}_1 SUSPENDED] != 0} {
+	send_user "\nFAILURE: error suspending job $job_id\n"
+	cancel_job $job_id
+	exit 1
+}
+
+suspend_job $job_id resume
+if {[wait_for_job ${job_id}_1 RUNNING] != 0} {
+	send_user "\nFAILURE: error resuming job $job_id\n"
+	cancel_job $job_id
+	exit 1
+}
+cancel_job $job_id
+
+if {$exit_code == 0} {
+	exec $bin_rm -f $file_script
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test28.9 b/testsuite/expect/test28.9
new file mode 100755
index 000000000..70eb1dedb
--- /dev/null
+++ b/testsuite/expect/test28.9
@@ -0,0 +1,116 @@
+#!/usr/bin/env expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test of maximum running task count in a job array.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2011-2013 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id       "28.9"
+set exit_code     0
+set array_size    4
+set file_script   "test$test_id.sh"
+set job_id        0
+set array_in      2
+
+print_header $test_id
+
+if {[get_array_config] < [expr $array_size + 1]} {
+	send_user "\nWARNING: MaxArraySize is too small\n"
+	exit 0
+}
+
+# Submit a job array with 4 elements
+make_bash_script $file_script "sleep 20"
+
+spawn $sbatch -N1 --array=0-[expr $array_size -1]%2 --output=/dev/null -t1 $file_script
+expect {
+	-re "Submitted batch job ($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
+	-re "error" {
+		send_user "\nFAILURE: sbatch did not submit jobs\n"
+		set exit_code 1
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_id == 0} {
+	send_user "\nFAILURE: sbatch did not submit jobs\n"
+	exit 1
+}
+
+if {[wait_for_job ${job_id}_0 "RUNNING"] != 0} {
+	send_user "\nFAILURE: waiting for job to start\n"
+	cancel_job $job_id
+	exit 1
+}
+
+set inx 0
+while { $inx < 10 } {
+	$bin_sleep 9
+	set run_count 0
+	spawn $scontrol show job $job_id
+	expect {
+		-re "RUNNING" {
+			incr run_count
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	if {$run_count > 2} {
+		send_user "\nFAILURE: too many tasks in the job array are running ($run_count > 2)\n"
+		set exit_code 1
+	}
+	send_user "\n+++ $run_count running tasks +++\n"
+	if {$run_count == 0} {
+		break
+	}
+	incr inx
+}
+
+if {[cancel_job $job_id] != 0} {
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	exec $bin_rm -f $file_script
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test3.10 b/testsuite/expect/test3.10
index 06ebd7529..a9eb609b7 100755
--- a/testsuite/expect/test3.10
+++ b/testsuite/expect/test3.10
@@ -85,6 +85,7 @@ expect {
 if {$authorized == 0} {
 	send_user "\nWARNING: You are not authorized to run this test\n"
 	cancel_job $job_id
+	exec rm -f $file_in $file_out $file_err
 	exit 0
 }
 cancel_job $job_id
@@ -183,7 +184,9 @@ if {$matches == 0} {
 	set exit_code 1
 }
 
-cancel_job $job_id
+if {[cancel_job $job_id] != 0} {
+	set exit_code 1
+}
 if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
 }
diff --git a/testsuite/expect/test3.11 b/testsuite/expect/test3.11
index 4c8376061..f714e479d 100755
--- a/testsuite/expect/test3.11
+++ b/testsuite/expect/test3.11
@@ -48,6 +48,7 @@ set exit_code		0
 set res_name		""
 set res_name_save	""
 set res_nodes		""
+set res_nodecnt		0
 set res_thread_cnt	0
 set user_name		""
 set def_partition 	""
@@ -409,6 +410,12 @@ set cores_per_node [expr $cores_per_socket * $sockets_per_node]
 set node_count [available_nodes $def_partition ""]
 set cluster_cpus [expr $cores_per_node * $node_count]
 
+set cr_socket 0
+set select_type_params [test_select_type_params]
+if {[string match "*CR_SOCKET*" $select_type_params]} {
+	set cr_socket 1
+}
+
 #
 # Get the user name
 #
@@ -438,11 +445,11 @@ inc3_11_3
 inc3_11_4
 inc3_11_5
 inc3_11_6
-if {$cons_res_actived == 1 && $cores_per_node > 1 && $core_spec == 0 && $exclusive == 0} {
+if {$cons_res_actived == 1 && $cores_per_node > 1 && $core_spec == 0 && $exclusive == 0 && $cr_socket == 0} {
 	inc3_11_7
 	inc3_11_8
 }
-if {$cons_res_actived == 1 && $cores_per_node > 1 && $core_spec == 0 && $exclusive == 0 && $irregular_node_name == 0} {
+if {$cons_res_actived == 1 && $cores_per_node > 1 && $core_spec == 0 && $exclusive == 0 && $cr_socket == 0 && $irregular_node_name == 0} {
 	inc3_11_9
 }
 
diff --git a/testsuite/expect/test3.14 b/testsuite/expect/test3.14
new file mode 100755
index 000000000..c4e23fbb9
--- /dev/null
+++ b/testsuite/expect/test3.14
@@ -0,0 +1,172 @@
+#!/usr/bin/env expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test of advanced reservation "replace" option.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2015 SchedMD LLC
+# Written by Morris Jette <jette@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id		"3.14"
+set exit_code		0
+set node_list_new       ""
+set node_list_orig      ""
+set resv_name           "resv$test_id"
+set user_name		""
+
+print_header $test_id
+
+if {[is_super_user] == 0} {
+	send_user "\nWARNING: This test can't be run except as SlurmUser\n"
+	exit 0
+}
+if { [test_bluegene] } {
+	send_user "\nWARNING: This test is not compatible with bluegene systems\n"
+	exit 0
+}
+
+set def_part_name [default_partition]
+set nb_nodes [get_node_cnt_in_part $def_part_name]
+if {$nb_nodes < 3} {
+	send_user "\nWARNING: This test requires at least 3 nodes in the cluster.\n"
+	exit 0
+}
+
+spawn $bin_id -un
+expect {
+	-re "($alpha_numeric_under)" {
+		set user_name $expect_out(1,string)
+	}
+	eof {
+		wait
+	}
+}
+
+#
+# Create the advanced reservation
+#
+spawn $scontrol create reservation ReservationName=$resv_name starttime=now duration=2 nodecnt=2 flags=replace partition=$def_part_name users=$user_name
+expect {
+	-re "Error|error" {
+		send_user "\nFAILURE: error creating reservation\n"
+		exit 1
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+#
+# Check the advanced reservation's allocated nodes and "REPLACE" flag
+#
+set match 0
+spawn $scontrol show ReservationName=$resv_name
+expect {
+	-re "Nodes=($alpha_numeric_nodelist)" {
+		set node_list_orig $expect_out(1,string)
+		exp_continue
+	}
+	-re "Flags=REPLACE" {
+		set match 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$match != 1} {
+	send_user "\nFAILURE: reservation REPLACE flag not found\n"
+	set exit_code 1
+}
+
+#
+# Use a node from the reservation, so it gets replaced
+#
+spawn $srun -n1 --reservation=$resv_name $bin_hostname
+expect {
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+#
+# Check that advanced reservation's allocated nodes has been updated
+#
+spawn $scontrol show ReservationName=$resv_name
+expect {
+	-re "Nodes=($alpha_numeric_nodelist)" {
+		set node_list_new $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {![string compare $node_list_orig $node_list_new]} {
+	send_user "\nFAILURE: reservation failed to replace allocated node\n"
+	set exit_code 1
+}
+
+#
+# Delete the advanced reservation
+#
+spawn $scontrol delete ReservationName=$resv_name
+expect {
+	-re "Error|error" {
+		send_user "\nFAILURE: error deleting reservation\n"
+		set exit_code 1
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+} else {
+	send_user "\nFAILURE\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test3.15 b/testsuite/expect/test3.15
new file mode 100755
index 000000000..a16c1dc54
--- /dev/null
+++ b/testsuite/expect/test3.15
@@ -0,0 +1,355 @@
+#!/usr/bin/env expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Test of advanced reservation of licenses.
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2015 SchedMD LLC
+# Written by Morris Jette <jette@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id		"3.15"
+set exit_code		0
+set script_name         "test$test_id.bash"
+set license_name        "test$test_id"
+set resv_name           "resv$test_id"
+set user_name		""
+
+proc reconfigure { } {
+	global error_code scontrol
+	spawn $scontrol reconfigure
+	expect {
+		-re "Error|error" {
+			send_user "\nFAILURE: scontrol reconfigure error\n"
+			set exit_code 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+}
+
+proc submit_job { license_count } {
+	global script_name bin_sleep license_name sbatch number exit_code
+	set job_id 0
+	make_bash_script $script_name "$bin_sleep 300"
+	spawn $sbatch -n1 -t1 -o /dev/null -L $license_name:$license_count $script_name
+	expect {
+		-re "Submitted batch job ($number)" {
+			set job_id $expect_out(1,string)
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sbatch not responding\n"
+			set exit_code 1
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	if { $job_id == 0 } {
+		send_user "\nFAILURE: failed to submit job\n"
+		set exit_code 1
+	}
+	return $job_id
+}
+
+proc reason_is_licenses { job_id } {
+	global squeue
+	set reason_licenses 0
+	spawn $squeue -j $job_id -o "%r"
+	expect {
+		-re "Licenses" {
+			set reason_licenses 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: squeue not responding\n"
+			set exit_code 1
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	return $reason_licenses
+}
+
+proc update_resv { license_count } {
+	global scontrol resv_name license_name exit_code
+	set updated_ok 0
+	spawn $scontrol update reservation ReservationName=$resv_name licenses=$license_name:$license_count
+	expect {
+		-re "updated" {
+			set updated_ok 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			set exit_code 1
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	if { $updated_ok == 0 } {
+		send_user "\nFAILURE: failed to set update reservation\n"
+		set exit_code 1
+	}
+}
+
+proc test_license_count {want_total want_used want_free } {
+	global scontrol license_name number exit_code
+	set license_free  0
+	set license_total 0
+	set license_used  0
+	spawn $scontrol -o show license $license_name
+	expect {
+		-re "Total=($number)" {
+			set license_total $expect_out(1,string)
+			exp_continue
+		}
+		-re "Used=($number)" {
+			set license_used $expect_out(1,string)
+			exp_continue
+		}
+		-re "Free=($number)" {
+			set license_free $expect_out(1,string)
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol not responding\n"
+			set exit_code 1
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	if { $license_total != $want_total } {
+		send_user "\nFAILURE: Incorrect license_total count ($license_total != $want_total)\n"
+		set exit_code 1
+	}
+	if { $license_used != $want_used } {
+		send_user "\nFAILURE: Incorrect license_used count ($license_used != $want_used)\n"
+		set exit_code 1
+	}
+	if { $license_free != $want_free } {
+		send_user "\nFAILURE: Incorrect license_free count ($license_free != $want_free)\n"
+		set exit_code 1
+	}
+}
+
+################# TEST STARTS HERE ######################
+print_header $test_id
+
+if {[is_super_user] == 0} {
+	send_user "\nWARNING: This test can't be run except as SlurmUser\n"
+	exit 0
+}
+
+spawn $bin_id -un
+expect {
+	-re "($alpha_numeric_under)" {
+		set user_name $expect_out(1,string)
+	}
+	eof {
+		wait
+	}
+}
+
+#
+# Add licenses to system configuration, 8 licenses named "test3.15"
+#
+set got_config 0
+set licenses   ""
+log_user 0
+spawn $scontrol show config
+expect {
+	-re "Licenses += .null" {
+		exp_continue
+	}
+	-re "Licenses += ($alpha_numeric_colon)" {
+		set licenses $expect_out(1,string)
+		exp_continue
+	}
+	-re "SLURM_CONF *= (/.*)/($alpha).*SLURM_VERSION" {
+		set config_dir $expect_out(1,string)
+		set got_config 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+log_user 1
+if {[string first ${license_name} ${licenses}] != -1} {
+	send_user "\nFAILURE: License ${license_name} already configured, likely vestigial from previous test, fix slurm.conf and test again\n"
+	exit 1
+}
+if {$got_config == 0} {
+	send_user "\nFAILURE: Could not identify slurm.conf location\n"
+	exit 1
+}
+
+set cwd "[$bin_pwd]"
+
+exec $bin_rm -fr $cwd/slurm.conf.orig
+spawn $bin_cp -v $config_dir/slurm.conf $cwd/slurm.conf.orig
+expect {
+	timeout {
+		send_user "\nFAILURE: slurm.conf was not copied\n"
+		exit 1
+	}
+	eof {
+		wait
+	}
+}
+set sep ""
+if {[string length ${licenses}] > 0} {
+	set sep ","
+}
+exec $bin_grep -v Licenses $config_dir/slurm.conf > $cwd/slurm.conf.work
+exec $bin_echo "Licenses=${licenses}${sep}${license_name}:8" >> $cwd/slurm.conf.work
+exec $bin_cp $cwd/slurm.conf.work $config_dir/slurm.conf
+exec $bin_rm -f $cwd/slurm.conf.work
+
+reconfigure
+
+#
+# Create the advanced reservation with 6 of 8 licenses named "test3.15"
+#
+spawn $scontrol create reservation ReservationName=$resv_name starttime=now duration=2 nodecnt=2 flags=license_only users=$user_name licenses=${license_name}:6
+expect {
+	-re "Error|error" {
+		send_user "\nFAILURE: error creating reservation\n"
+		set exit_code 1
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+#
+# Submit batch job to claim 3 of 2 unreserved licenses of type "test3.15"
+#
+set job_id1 [submit_job 3]
+
+#
+# Check that job reason is "Licenses"
+#
+sleep 5
+if { [reason_is_licenses $job_id1] == 0 } {
+	send_user "\nFAILURE: failed to set proper job reason for job $job_id1\n"
+	set exit_code 1
+}
+
+#
+# Drop reservation to only 2 of the "test3.15" licenses (leaving 6 licenses)
+#
+update_resv 2
+
+#
+# Check that job reason is no longer "Licenses"
+#
+sleep 5
+if { [reason_is_licenses $job_id1] != 0 } {
+	send_user "\nFAILURE: failed to set proper job reason for job $job_id1\n"
+	set exit_code 1
+}
+
+#
+# Test scontrol show license output
+#
+test_license_count 8 3 5
+
+if {$exit_code == 0} {
+	send_user "\nSo far, so good. Starting test of second job\n\n"
+}
+
+#
+# Submit batch job to claim 4 of 3 unreserved licenses of type "test3.15"
+#
+set job_id2 [submit_job 4]
+
+#
+# Check that job reason is "Licenses"
+#
+sleep 5
+if { [reason_is_licenses $job_id2] == 0 } {
+	send_user "\nFAILURE: failed to set proper job reason for job $job_id2\n"
+	set exit_code 1
+}
+
+#
+# Drop reservation to only 1 of the "test3.15" licenses (leaving 7 licenses)
+#
+update_resv 1
+
+#
+# Check that job reason is no longer "Licenses"
+#
+sleep 5
+if { [reason_is_licenses $job_id2] != 0 } {
+	send_user "\nFAILURE: failed to set proper job reason for job $job_id2\n"
+	set exit_code 1
+}
+
+#
+# Test scontrol show license output
+#
+test_license_count 8 7 1
+
+#
+# Cancel the jobs and
+# Restore the configuration
+#
+cancel_job $job_id1
+cancel_job $job_id2
+exec $bin_cp $cwd/slurm.conf.orig $config_dir/slurm.conf
+reconfigure
+
+if {$exit_code == 0} {
+	exec $bin_rm -f $cwd/slurm.conf.orig $script_name
+	send_user "\nSUCCESS\n"
+} else {
+	send_user "\nFAILURE\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test31.1 b/testsuite/expect/test31.1
index a1f6da317..78616dc8d 100755
--- a/testsuite/expect/test31.1
+++ b/testsuite/expect/test31.1
@@ -232,7 +232,6 @@ if { [test_front_end] == 1 } {
 }
 
 set got_config 0
-log_user 0
 spawn $scontrol show config
 expect {
 	-re "SLURM_CONF.*= (/.*)/($alpha).*SLURM_VERSION" {
@@ -248,7 +247,10 @@ expect {
 		wait
 	}
 }
-log_user 1
+if {$got_config == 0} {
+	send_user "\nFAILURE: Could not identify slurm.conf location\n"
+	exit 1
+}
 
 set get_name 0
 spawn $srun -t1 -l $bin_printenv SLURMD_NODENAME
@@ -277,6 +279,7 @@ if {$get_name != 1} {
 #
 # Copy slurm.conf file
 #
+exec $bin_rm -fr $cwd/slurm.conf.orig
 spawn $bin_cp -v $config_dir/slurm.conf $cwd/slurm.conf.orig
 expect {
 	timeout {
@@ -417,6 +420,6 @@ update_conf
 
 if {$exit_code == 0} {
 	exec $bin_rm -fr $config_dir/$inc_slurm $config_dir/$pro_epi_prog $cwd/slurm.conf.orig
-	send_user "\nSUCCCESS\n"
+	send_user "\nSUCCESS\n"
 }
 exit $exit_code
diff --git a/testsuite/expect/test33.1 b/testsuite/expect/test33.1
index b40e6f9f7..529f000a7 100755
--- a/testsuite/expect/test33.1
+++ b/testsuite/expect/test33.1
@@ -118,7 +118,10 @@ if {[test_cray]} {
 MPIParams=ports=12000-13000\nProctrackType=proctrack/cray\n"
 }
 
+###############################################
 # test 33.1.1
+###############################################
+send_user "test33.1.1\n"
 set file_in "$wd/etc.33.1.1/slurm.conf"
 file delete $file_in
 set fd [open "$file_in" a]
@@ -136,7 +139,10 @@ close $fd
 run_config "etc.33.1.1"
 file delete $file_in
 
+###############################################
 # test 33.1.2
+###############################################
+send_user "\n\ntest33.1.2\n"
 set file_in "$wd/etc.33.1.2/slurm.conf"
 file delete $file_in
 set fd [open "$file_in" a]
@@ -154,7 +160,10 @@ close $fd
 run_config "etc.33.1.2"
 file delete $file_in
 
+###############################################
 # test 33.1.3
+###############################################
+send_user "\n\ntest33.1.3\n"
 set file_in "$wd/etc.33.1.3/slurm.conf"
 file delete $file_in
 set fd [open "$file_in" a]
@@ -171,7 +180,10 @@ close $fd
 run_config "etc.33.1.3"
 file delete $file_in
 
+###############################################
 # test 33.1.4
+###############################################
+send_user "\n\ntest33.1.4\n"
 set file_in "$wd/etc.33.1.4/slurm.conf"
 file delete $file_in
 set fd [open "$file_in" a]
diff --git a/testsuite/expect/test34.1 b/testsuite/expect/test34.1
index 93041a1e1..2968859ba 100755
--- a/testsuite/expect/test34.1
+++ b/testsuite/expect/test34.1
@@ -387,6 +387,6 @@ clean_up 0
 
 if {$exit_code == 0} {
 	exec $bin_rm $job1 $job2 $file_in
-	send_user "\nSUCCCESS\n"
+	send_user "\nSUCCESS\n"
 }
 exit $exit_code
diff --git a/testsuite/expect/test34.2 b/testsuite/expect/test34.2
index a282063ad..98bdb3561 100755
--- a/testsuite/expect/test34.2
+++ b/testsuite/expect/test34.2
@@ -326,6 +326,6 @@ clean_up 0
 
 if {$exit_code == 0} {
 	exec $bin_rm $file_in
-	send_user "\nSUCCCESS\n"
+	send_user "\nSUCCESS\n"
 }
 exit $exit_code
diff --git a/testsuite/expect/test35.1 b/testsuite/expect/test35.1
new file mode 100755
index 000000000..421f438a5
--- /dev/null
+++ b/testsuite/expect/test35.1
@@ -0,0 +1,210 @@
+#!/usr/bin/env expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Cray persistent burst buffer creation
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "WARNING: ..." with an explanation of why the test can't be made, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2015 SchedMD LLC.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Morris Jette <jette(at)schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "35.1"
+set exit_code   0
+set output_mk   "test$test_id.mk.output"
+set output_rm   "test$test_id.rm.output"
+set output_use  "test$test_id.use.output"
+set script_mk   "test$test_id.mk.bash"
+set script_rm   "test$test_id.rm.bash"
+set script_use  "test$test_id.use.bash"
+
+proc find_bb_name { fname bb_name } {
+	global bin_cat
+
+	set found 0
+	spawn $bin_cat $fname
+	expect {
+		-re "Name=$bb_name" {
+			set found 1
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	return $found
+}
+
+print_header $test_id
+
+set bb_types [get_bb_types]
+if {[string first "cray" $bb_types] == -1} {
+	send_user "\nWARNING: This test can only be run with the burst_buffer/cray plugin\n"
+	exit 0
+}
+if {[get_bb_persistent] == 0} {
+	send_user "\nWARNING: This test can only be run if persistent burst_buffers can be created/deleted\n"
+	exit 0
+}
+
+make_bash_script $script_mk "#BB create_persistent name=test$test_id capacity=48 access=striped type=scratch
+$scontrol show burst"
+
+make_bash_script $script_rm "#BB destroy_persistent name=test$test_id
+$scontrol show burst"
+
+make_bash_script $script_use "#DW persistentdw name=test$test_id
+$scontrol show burst"
+
+#
+# Remove any vestigial buffer
+#
+send_user "\n\nClean up vestigial burst buffer\n"
+incr max_job_state_delay 300
+set job_id 0
+spawn $sbatch -N1 --output=/dev/null $script_rm
+expect {
+	-re "Submitted batch job ($number)" {
+		set job_id $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_id == 0} {
+	send_user "\nFAILURE: job did not launch\n"
+	set exit_code 1
+}
+if {[wait_for_job $job_id "DONE"] != 0} {
+	send_user "\nFAILURE: job did not complete\n"
+	set exit_code 1
+}
+
+#
+# Now submit a job to use the persistent burst buffer followed by a job
+# to create the burst buffer, make sure the buffer creation happens first
+#
+send_user "\n\nCreate and use a burst buffer\n"
+set job_id_use 0
+spawn $sbatch -N1 --output=$output_use $script_use
+expect {
+	-re "Submitted batch job ($number)" {
+		set job_id_use $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+set job_id_mk 0
+spawn $sbatch -N1 --output=$output_mk $script_mk
+expect {
+	-re "Submitted batch job ($number)" {
+		set job_id_mk $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$job_id_mk == 0} {
+	send_user "\nFAILURE: job did not launch\n"
+	set exit_code 1
+} elseif {[wait_for_job $job_id_mk "DONE"] != 0} {
+	send_user "\nFAILURE: job did not complete\n"
+	set exit_code 1
+}
+
+if {$job_id_use == 0} {
+	send_user "\nFAILURE: job did not launch\n"
+	set exit_code 1
+} elseif {[wait_for_job $job_id_use "DONE"] != 0} {
+	send_user "\nFAILURE: job did not complete\n"
+	set exit_code 1
+} elseif {[wait_for_file $output_use] != 0} {
+	set exit_code 1
+} elseif {[find_bb_name $output_use test$test_id] == 0} {
+	send_user "\nFAILURE: job using burst buffer ran before creation\n"
+	set exit_code 1
+} elseif {[wait_for_file $output_mk] != 0} {
+	set exit_code 1
+} elseif {[find_bb_name $output_mk test$test_id] == 0} {
+	send_user "\nFAILURE: job creating burst buffer failed to do so\n"
+	set exit_code 1
+}
+
+#
+# Now clean up the burst buffer
+#
+set job_id_rm 0
+spawn $sbatch -N1 --output=$output_rm $script_rm
+expect {
+	-re "Submitted batch job ($number)" {
+		set job_id_rm $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$job_id_rm == 0} {
+	send_user "\nFAILURE: job did not launch\n"
+	set exit_code 1
+} elseif {[wait_for_job $job_id_rm "DONE"] != 0} {
+	send_user "\nFAILURE: job did not complete\n"
+	set exit_code 1
+} elseif {[wait_for_file $output_rm] != 0} {
+	set exit_code 1
+} elseif {[find_bb_name $output_rm test$test_id] != 0} {
+	send_user "\nFAILURE: job deleting burst buffer failed to do so\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	exec $bin_rm -f $output_mk $output_rm $output_use
+	exec $bin_rm -f $script_mk $script_rm $script_use
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test35.2 b/testsuite/expect/test35.2
new file mode 100755
index 000000000..8ed60e564
--- /dev/null
+++ b/testsuite/expect/test35.2
@@ -0,0 +1,175 @@
+#!/usr/bin/env expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Cray job-specific burst buffer use
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "WARNING: ..." with an explanation of why the test can't be made, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2015 SchedMD LLC.
+# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
+# Written by Morris Jette <jette(at)schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+
+set test_id     "35.2"
+set data_in     "DATA_IN_0123456789"
+set data_out    "DATA_OUT_9876543210"
+set exit_code   0
+set input_data  "test$test_id.data.in"
+set output_data "test$test_id.data.out"
+set output_use  "test$test_id.use.output"
+set script_use  "test$test_id.use.bash"
+set tmp_file    "test$test_id"
+
+proc find_bb_jobid { fname bb_jobid } {
+	global bin_cat
+
+	set found 0
+	log_user 0
+	spawn $bin_cat $fname
+	expect {
+		-re "JobID=$bb_jobid" {
+			set found 1
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+	return $found
+}
+
+proc find_bb_data { file_name string_seek } {
+	global bin_cat
+
+	set found 0
+	spawn $bin_cat $file_name
+	expect {
+		-re $string_seek {
+			set found 1
+			exp_continue
+		}
+		eof {
+			wait
+		}
+	}
+	return $found
+}
+
+print_header $test_id
+
+set bb_types [get_bb_types]
+if {[string first "cray" $bb_types] == -1} {
+	send_user "\nWARNING: This test can only be run with the burst_buffer/cray plugin\n"
+	exit 0
+}
+
+set cwd [$bin_pwd]
+exec $bin_rm -f $input_data $output_data $output_use
+exec $bin_echo $data_in >$cwd/$input_data
+
+if {[get_bb_emulate] != 0} {
+	set work_file_in  $cwd/$input_data
+	set work_file_out $cwd/$output_data
+} else {
+	set work_file_in  /ss/$tmp_file
+	set work_file_out /ss/$tmp_file
+}
+
+make_bash_script $script_use "#DW jobdw type=scratch capacity=1GiB access_mode=striped,private pfs=/scratch
+#DW stage_in type=file source=$cwd/$input_data destination=/ss/$tmp_file
+#DW stage_out type=file destination=$cwd/$output_data source=/ss/$tmp_file
+$bin_cat  $work_file_in
+$bin_echo $data_out >$work_file_out
+$scontrol show burst
+exit 0"
+
+incr max_job_state_delay 300
+set job_id_use 0
+spawn $sbatch -N1 --output=$output_use $script_use
+expect {
+	-re "Submitted batch job ($number)" {
+		set job_id_use $expect_out(1,string)
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: sbatch not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+if {$job_id_use == 0} {
+	send_user "\nFAILURE: job did not launch\n"
+	set exit_code 1
+} elseif {[wait_for_job $job_id_use "DONE"] != 0} {
+	send_user "\nFAILURE: job did not complete\n"
+	set exit_code 1
+} elseif {[wait_for_file $output_use] != 0} {
+	set exit_code 1
+} elseif {[find_bb_jobid $output_use $job_id_use] == 0} {
+	send_user "\nFAILURE: job burst buffer not found\n"
+	set exit_code 1
+} elseif {[find_bb_data $output_use $data_in] == 0} {
+	send_user "\nFAILURE: job burst buffer input data not found\n"
+	set exit_code 1
+} elseif {[find_bb_data $output_data $data_out] == 0} {
+	send_user "\nFAILURE: job burst buffer output data not found\n"
+	set exit_code 1
+}
+
+set found 0
+spawn $scontrol show burst
+expect {
+	-re "JobID=$job_id_use" {
+		set found 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$found != 0} {
+	send_user "\nFAILURE: job burst buffer not removed\n"
+	set exit_code 1
+}
+
+if {[find_bb_data $output_data $data_out] == 0} {
+	send_user "\nFAILURE: job output burst buffer data not correct\n"
+	set exit_code 1
+}
+
+if {$exit_code == 0} {
+	exec $bin_rm -f $input_data $output_data
+	exec $bin_rm -f $output_use $script_use
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test4.12 b/testsuite/expect/test4.12
index 9e45cb739..e481e538c 100755
--- a/testsuite/expect/test4.12
+++ b/testsuite/expect/test4.12
@@ -453,19 +453,13 @@ if {![string compare $select_type "bluegene"]} {
 	set select_params [test_select_type_params]
 	if {$part_exclusive == 1} {
 		set smallest $inode_procs
-	} elseif {![string compare $select_params "CR_CPU"]} {
-		set smallest $inode_threads_per_core
-	} elseif {![string compare $select_params "CR_CPU_MEMORY"]} {
+	} elseif {[string first "CR_CPU" $select_params] != -1} {
 		set smallest $inode_threads_per_core
 	} elseif {![string compare $select_params "NONE"]} {
 		set smallest $inode_threads_per_core
-	} elseif {![string compare $select_params "CR_CORE"]} {
-		set smallest $inode_threads_per_core
-	} elseif {![string compare $select_params "CR_CORE_MEMORY"]} {
+	} elseif {[string first "CR_CORE" $select_params] != -1} {
 		set smallest $inode_threads_per_core
-	} elseif {![string compare $select_params "CR_SOCKET"]} {
-		set smallest [expr $inode_cores_per_socket *$inode_threads_per_core]
-	} elseif {![string compare $select_params "CR_SOCKET_MEMORY"]} {
+	} elseif {[string first "CR_SOCKET" $select_params] != -1} {
 		set smallest [expr $inode_cores_per_socket *$inode_threads_per_core]
 	} else {
 		send_user "\nWARNING: failed to parse SelectTypeParameters '$select_params'\n"
diff --git a/testsuite/expect/test4.13 b/testsuite/expect/test4.13
new file mode 100755
index 000000000..9800cba32
--- /dev/null
+++ b/testsuite/expect/test4.13
@@ -0,0 +1,258 @@
+#!/usr/bin/env expect
+############################################################################
+# Purpose: Test of SLURM functionality
+#          Validate that sinfo -O (--Format) option displays the
+#          correct user specified values.
+#
+#
+# Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
+#          "FAILURE: ..." otherwise with an explanation of the failure, OR
+#          anything else indicates a failure mode that must be investigated.
+############################################################################
+# Copyright (C) 2015 SchedMD LLC
+# Written by Nathan Yee <nyee32@schedmd.com>
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
+source ./globals
+source ./globals_accounting
+
+set test_id             4.13
+set test_acct           "test$test_id\_acct"
+set test_part           "test$test_id\_part"
+set test_node           [get_idle_node_in_part [default_partition]]
+set alpha_numeric_dot   "\[a-zA-Z0-9_.:\-\]+"
+set exit_code           0
+
+array set node_sinfo_vals { }
+array set part_sinfo_vals { }
+
+array set node_info {
+	AllocMem           allocmem
+	CPUTot             cpus
+	CPULoad            cpusload
+	CoresPerSocket     cores
+	TmpDisk            disk
+	Features           features
+	FreeMem            freemem
+	Gres               gres
+	NodeAddr           nodeaddr
+	NodeName           nodehost
+	RealMemory         memory
+	State              statelong
+	Sockets            sockets
+	ThreadsPerCore     threads
+	Version            version
+	Weight             weight
+}
+
+array set part_info {
+	State              available
+	MaxCPUsPerNode     maxcpuspernode
+	Nodes              nodehost
+	TotalNodes         nodes
+	PreemptMode        preemptmode
+	Priority           priority
+	Shared             share
+	RootOnly           root
+}
+
+print_header $test_id
+
+proc cleanup { } {
+
+	global scontrol test_part exit_code
+
+	spawn $scontrol delete partitionname=$test_part
+	expect {
+		timeout {
+			send_user "\nFAILURE: scontrol is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+}
+
+send_user "\n=== Testing Node Information ===\n"
+set first_option 1
+foreach option [array names node_info] {
+
+	if {$first_option == 1} {
+		set first_option 0
+	} else {
+		log_user 0
+	}
+	spawn $scontrol show node $test_node
+	expect {
+		-re "\\m$option=($alpha_numeric_dot)\\M" {
+			set node_sinfo_vals($node_info($option)) \
+			    $expect_out(1,string)
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+}
+
+set first_option 1
+foreach option [array names node_sinfo_vals] {
+
+	set match 0
+	if {$first_option == 1} {
+		set first_option 0
+	} else {
+		log_user 0
+	}
+	spawn $sinfo -n$test_node -O$option -h
+	expect {
+		-nocase -re "$node_sinfo_vals($option)" {
+			set match 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sinfo is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$match != 1} {
+		send_user "\nFAILURE: Node information $option output does "
+		send_user "not match sinfo output. "
+		send_user "Looking for value: $node_sinfo_vals($option)\n"
+		exit 1
+	}
+	log_user 1
+}
+
+#
+# Add test partition
+#
+spawn $scontrol create partitionname=$test_part nodes=$test_node
+expect {
+	timeout {
+		send_user "\nFAILURE: scontrol is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+
+set match 0
+spawn $scontrol show partitionname=$test_part
+expect {
+	-re "PartitionName=$test_part" {
+		set match 1
+		exp_continue
+	}
+	timeout {
+		send_user "\nFAILURE: scontrol is not responding\n"
+		set exit_code 1
+	}
+	eof {
+		wait
+	}
+}
+if {$match != 1} {
+	send_user "\nFAILURE: test partition was not created\n"
+	exit 1
+}
+
+send_user "\n=== Testing Partition Information ===\n"
+set first_option 1
+foreach option [array names part_info] {
+
+	set part_sinfo_vals($part_info($option)) "UNKNOWN"
+	set match 0
+	if {$first_option == 1} {
+		set first_option 0
+	} else {
+		log_user 0
+	}
+	spawn $scontrol show partition $test_part
+	expect {
+		-re "\\m$option=($alpha_numeric_dot)\\M" {
+			set part_sinfo_vals($part_info($option)) \
+			    $expect_out(1,string)
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: scontrol is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+	log_user 1
+}
+
+set first_option 1
+foreach option [array names part_sinfo_vals] {
+
+	set match 0
+	if {$first_option == 1} {
+		set first_option 0
+	} else {
+		log_user 0
+	}
+	spawn $sinfo -p$test_part -O$option -h
+	expect {
+		-nocase -re "$part_sinfo_vals($option)" {
+			set match 1
+			exp_continue
+		}
+		timeout {
+			send_user "\nFAILURE: sinfo is not responding\n"
+			set exit_code 1
+		}
+		eof {
+			wait
+		}
+	}
+
+	if {$match != 1} {
+		send_user "\nFAILURE: Partition information $option "
+		send_user "output does not match sinfo output. "
+		send_user "Looking for value: "
+		send_user "$option = $part_sinfo_vals($option)\n"
+		cleanup
+		exit 1
+	}
+	log_user 1
+}
+
+cleanup
+
+if {$exit_code == 0} {
+	send_user "\nSUCCESS\n"
+}
+exit $exit_code
diff --git a/testsuite/expect/test4.5 b/testsuite/expect/test4.5
index dc69be998..e698f0761 100755
--- a/testsuite/expect/test4.5
+++ b/testsuite/expect/test4.5
@@ -301,6 +301,7 @@ if {$mismatches != 0} {
 	set exit_code 1
 }
 if {$exit_code == 0} {
+	file delete $tmp_sc
 	send_user "\nSUCCESS\n"
 }
 exit $exit_code
diff --git a/testsuite/expect/test5.10 b/testsuite/expect/test5.10
index 38bf08602..ab6cb9b28 100755
--- a/testsuite/expect/test5.10
+++ b/testsuite/expect/test5.10
@@ -195,7 +195,7 @@ expect {
 
 if {[string length $nodes] == 0} {
 	send_user "\nFAILURE: did not get a valid node list\n"
-	exit 0
+	exit 1
 }
 
 # Create 2 test partitions
diff --git a/testsuite/expect/test5.4 b/testsuite/expect/test5.4
index 12f11a573..deabb1125 100755
--- a/testsuite/expect/test5.4
+++ b/testsuite/expect/test5.4
@@ -240,8 +240,12 @@ if {[test_alps] == 0 && $step_found == 0} {
 	set exit_code 1
 }
 
-cancel_job $job_id1
-cancel_job $job_id2
+if {[cancel_job $job_id1] != 0} {
+	set exit_code 1
+}
+if {[cancel_job $job_id2] != 0} {
+	set exit_code 1
+}
 if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
 }
diff --git a/testsuite/expect/test5.5 b/testsuite/expect/test5.5
index 7fef4392d..adb37cec2 100755
--- a/testsuite/expect/test5.5
+++ b/testsuite/expect/test5.5
@@ -119,8 +119,12 @@ expect {
 	}
 }
 
-cancel_job $job_id2
-cancel_job $job_id1
+if {[cancel_job $job_id1] != 0} {
+	set exit_code 1
+}
+if {[cancel_job $job_id2] != 0} {
+	set exit_code 1
+}
 if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
 }
diff --git a/testsuite/expect/test5.6 b/testsuite/expect/test5.6
index 7c852e3c1..e24d382c5 100755
--- a/testsuite/expect/test5.6
+++ b/testsuite/expect/test5.6
@@ -420,8 +420,12 @@ if {$job_found == 0} {
 #
 # Cancel jobs and remove files
 #
-cancel_job $job_id2
-cancel_job $job_id1
+if {[cancel_job $job_id1] != 0} {
+	set exit_code 1
+}
+if {[cancel_job $job_id2] != 0} {
+	set exit_code 1
+}
 if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
 	exec $bin_rm -f $file_in
diff --git a/testsuite/expect/test5.9 b/testsuite/expect/test5.9
index 58d84f0a5..6c6088ec5 100755
--- a/testsuite/expect/test5.9
+++ b/testsuite/expect/test5.9
@@ -72,8 +72,8 @@ set sq_format(network)       "ip"
 set sq_format(requeue)       1
 # profile
 set sq_format(profile)       Energy
-# ntasks-per-core
-set sq_format(ntpercore)     2
+# ntasks-per-socket
+set sq_format(ntpersocket)   2
 # ntasks-per-node
 set sq_format(ntpernode)     2
 # state compact
@@ -165,7 +165,7 @@ if {$got_node != 1} {
 spawn $scontrol show node $test_node
 expect {
 	-re "CoresPerSocket=($number)" {
-		set sq_format(ntpercore) $expect_out(1,string)
+		set sq_format(ntpersocket) $expect_out(1,string)
 		exp_continue
 	}
 	-re "ThreadsPerCore=($number)" {
@@ -181,10 +181,10 @@ expect {
 	}
 }
 
-if {$sq_format(ntpercore) == 0 || $sq_format(cpuspertask) == 0} {
+if {$sq_format(ntpersocket) == 0 || $sq_format(cpuspertask) == 0} {
 	send_user "\nFAILURE: failed to get number of threads or cores "
 	send_user "ThreadsPerCore=$sq_format(cpuspertask) & "
-	send_user "CoresPerSocket=$sq_format(ntpercore)\n"
+	send_user "CoresPerSocket=$sq_format(ntpersocket)\n"
 	exit 0
 }
 
@@ -253,7 +253,7 @@ $srun $bin_sleep 100"
 spawn $sbatch -A$sq_format(account) -N$sq_format(numnodes) \
     -n$sq_format(numcpus) -t$sq_format(timelimit) -c$sq_format(cpuspertask) \
     --switch=$sq_format(reqswitch) --network=$sq_format(network) --requeue \
-    --profile=$sq_format(profile) --ntasks-per-core=$sq_format(ntpercore) \
+    --profile=$sq_format(profile) --ntasks-per-socket=$sq_format(ntpersocket) \
     --ntasks-per-node=$sq_format(ntpernode) -o$sq_format(stdout) \
     --comment=$sq_format(comment) -e$sq_format(stderr) --exclusive \
     -w$test_node $sq_format(name)
diff --git a/testsuite/expect/test6.11 b/testsuite/expect/test6.11
index 29abed061..2ee7ac27c 100755
--- a/testsuite/expect/test6.11
+++ b/testsuite/expect/test6.11
@@ -39,12 +39,12 @@ set job_id       0
 
 print_header $test_id
 
-make_bash_script "id_script" { $bin_id }
+make_bash_script $file_in { $bin_id }
 
 #
 # Submit a job so we have something to work with
 #
-set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null --hold -t1 id_script]
+set sbatch_pid [spawn $sbatch --output=/dev/null --error=/dev/null --hold -t1 $file_in]
 expect {
 	-re "Submitted batch job ($number)" {
 		set job_id $expect_out(1,string)
@@ -124,8 +124,7 @@ expect {
 }
 
 if {$exit_code == 0} {
-	exec $bin_rm -f id_script
+	exec $bin_rm -f $file_in
 	send_user "\nSUCCESS\n"
 }
 exit $exit_code
-
diff --git a/testsuite/expect/test6.12 b/testsuite/expect/test6.12
index 261b76356..b2d3127fa 100755
--- a/testsuite/expect/test6.12
+++ b/testsuite/expect/test6.12
@@ -159,7 +159,7 @@ if {$found_step == 0} {
 	send_user "\nFAILURE: job step not found, apparently killed\n"
 	set exit_code 1
 }
-exec $scancel --quiet $job_id
+cancel_job $job_id
 
 #
 # Build and run second test script
@@ -241,7 +241,7 @@ if {$found_job == 0} {
 				wait
 			}
 		}
-		exec $scancel --quiet $job_id
+		cancel_job $job_id
 		set exit_code 1
 	}
 }
@@ -319,7 +319,9 @@ if {$found_step == 1} {
 	set exit_code 1
 }
 
-cancel_job $job_id
+if {[cancel_job $job_id] != 0} {
+	set exit_code 1
+}
 
 if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
diff --git a/testsuite/expect/test6.13 b/testsuite/expect/test6.13
index 7410f3671..b32ace4db 100755
--- a/testsuite/expect/test6.13
+++ b/testsuite/expect/test6.13
@@ -1,8 +1,8 @@
 #!/usr/bin/env expect
 ############################################################################
 # Purpose: Test of SLURM functionality
-#          Test routing all signals through slurmctld rather than directly
-#          to slurmd (undocumented --ctld option).
+#          Test routing all signals through slurmctld rather than slurmd
+#          (undocumented --ctld option).
 #
 # Output:  "TEST: #.#" followed by "SUCCESS" if test was successful, OR
 #          "FAILURE: ..." otherwise with an explanation of the failure, OR
@@ -221,7 +221,7 @@ sleep 5
 set matches 0
 spawn $scancel --batch --signal=STOP --ctld --verbose $job_id
 expect {
-	-re "Signal ($number) to job $job_id" {
+	-re "Signal ($number) to batch job $job_id" {
 		set sig_num $expect_out(1,string)
 		if {$sig_num != 9} {
 			incr matches
diff --git a/testsuite/expect/test6.5 b/testsuite/expect/test6.5
index 63d4edf5a..b0ec6e825 100755
--- a/testsuite/expect/test6.5
+++ b/testsuite/expect/test6.5
@@ -98,7 +98,7 @@ exec $bin_rm -f $file_in
 
 spawn $scancel --verbose $job_id1
 expect {
-	-re "Signalling job" {
+	-re "Terminating job" {
 		incr matches
 		exp_continue
 	}
@@ -139,4 +139,3 @@ if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
 }
 exit $exit_code
-
diff --git a/testsuite/expect/test6.7 b/testsuite/expect/test6.7
index a7a3ee87e..9d095f9ec 100755
--- a/testsuite/expect/test6.7
+++ b/testsuite/expect/test6.7
@@ -198,7 +198,7 @@ sleep 5
 set matches 0
 spawn $scancel --batch --signal=STOP --verbose $job_id
 expect {
-	-re "Signal ($number) to job $job_id" {
+	-re "Signal ($number) to batch job $job_id" {
 		set sig_num $expect_out(1,string)
 		if {$sig_num != 9} {
 			incr matches
diff --git a/testsuite/expect/test7.1 b/testsuite/expect/test7.1
index 630a478d0..73f5a93d4 100755
--- a/testsuite/expect/test7.1
+++ b/testsuite/expect/test7.1
@@ -34,6 +34,7 @@ source ./globals
 
 set test_id              "7.1"
 set exit_code            0
+set file_in              "test$test_id.input"
 set job_id1              0
 set job_id2              0
 set job_id3              0
@@ -56,12 +57,12 @@ if { ![string compare [priority_type] multifactor] } {
 }
 
 
-make_bash_script "pwd_script" { $bin_pwd }
+make_bash_script $file_in { $bin_pwd }
 
 #
 # Spawn three sbatch job, one held
 #
-set sbatch_pid [spawn $sbatch -t1 --output=/dev/null --error=/dev/null  pwd_script]
+set sbatch_pid [spawn $sbatch -t1 --output=/dev/null --error=/dev/null  $file_in]
 expect {
 	-re "Submitted batch job ($number)" {
 		set job_id1 $expect_out(1,string)
@@ -76,7 +77,7 @@ expect {
 		wait
 	}
 }
-set sbatch_pid [spawn $sbatch -t1 --output=/dev/null --error=/dev/null pwd_script]
+set sbatch_pid [spawn $sbatch -t1 --output=/dev/null --error=/dev/null $file_in]
 expect {
 	-re "Submitted batch job ($number)" {
                 set job_id2 $expect_out(1,string)
@@ -91,7 +92,7 @@ expect {
 		wait
         }
 }
-set sbatch_pid [spawn $sbatch -t1 --output=/dev/null --error=/dev/null --hold pwd_script]
+set sbatch_pid [spawn $sbatch -t1 --output=/dev/null --error=/dev/null --hold $file_in]
 expect {
 	-re "Submitted batch job ($number)" {
                 set job_id3 $expect_out(1,string)
@@ -127,7 +128,9 @@ if {$job_id1 != 0} {
 			wait
 		}
 	}
-	cancel_job $job_id1
+        if {[cancel_job $job_id1] != 0} {
+		set exit_code   1
+	}
 } else {
 	set exit_code   1
 }
@@ -147,7 +150,9 @@ if {$job_id2 != 0} {
 			wait
                 }
         }
-        cancel_job $job_id2
+        if {[cancel_job $job_id2] != 0} {
+		set exit_code   1
+	}
 } else {
         set exit_code   1
 }
@@ -167,7 +172,9 @@ if {$job_id3 != 0} {
 			wait
                 }
         }
-        cancel_job $job_id3
+        if {[cancel_job $job_id3] != 0} {
+		set exit_code   1
+	}
 } else {
         set exit_code   1
 }
@@ -192,7 +199,7 @@ if {$prio3 != 0} {
 }
 
 if {$exit_code == 0} {
-	exec $bin_rm -f pwd_script
+	exec $bin_rm -f $file_in
 	send_user "\nSUCCESS\n"
 }
 exit $exit_code
diff --git a/testsuite/expect/test7.13 b/testsuite/expect/test7.13
index 615f5ad2d..ef5f681e7 100755
--- a/testsuite/expect/test7.13
+++ b/testsuite/expect/test7.13
@@ -121,7 +121,7 @@ expect {
 }
 
 if {$matches != 2} {
-	send_user "\nFAILURE: Job ExitCode incorrect\n"
+	send_user "\nFAILURE: Job ExitCode incorrect ($matches != 2)\n"
 	exit 1
 }
 
@@ -131,21 +131,24 @@ if {$matches != 2} {
 if { [test_account_storage] == 1 } {
 	sleep 2
 	set matches 0
-	spawn $sacct -n -P -j $job_id -o ExitCode,DerivedExitCode,Comment
+	spawn $sacct -n -P -j $job_id -o JobID,ExitCode,DerivedExitCode,Comment
 	expect {
-		-re "0:0\\|123:0\\|" {	# Job record
+		-re "$job_id\\|0:0\\|123:0\\|" {	# Job record
 			incr matches
 			exp_continue
 		}
-		-re "0:0\\|\\|" {	# Job.batch record
+		-re "$job_id.batch\\|0:0\\|\\|" {	# Batch script
 			incr matches
 			exp_continue
 		}
-		-re "0:0\\|\\|" {	# Step 0 record
+		-re "$job_id.extern\\|0:0\\|\\|" {	# Container, optional
+			exp_continue
+		}
+		-re "$job_id.($number)\\|0:0\\|\\|" {	# Step 0 record
 			incr matches
 			exp_continue
 		}
-		-re "123:0\\|\\|" {	# Step 1 record
+		-re "$job_id.($number)\\|123:0\\|\\|" {	# Step 1 record
 			incr matches
 			exp_continue
 		}
@@ -158,7 +161,7 @@ if { [test_account_storage] == 1 } {
 		}
 	}
 	if {$matches != 4} {
-		send_user "\nFAILURE: sacct of $job_id failed ($matches)\n"
+		send_user "\nFAILURE: sacct of $job_id failed ($matches != 4)\n"
 		exit 1
 	}
 }
@@ -226,9 +229,8 @@ expect {
 		wait
 	}
 }
-
 if {$matches != 2} {
-	send_user "\nFAILURE: Job ExitCode incorrect\n"
+	send_user "\nFAILURE: Job ExitCode incorrect ($matches != 2)\n"
 	exit 1
 }
 
@@ -238,21 +240,24 @@ if {$matches != 2} {
 if { [test_account_storage] == 1 } {
 	sleep 2
 	set matches 0
-	spawn $sacct -n -P -j $job_id -o ExitCode,DerivedExitCode,Comment
+	spawn $sacct -n -P -j $job_id -o JobID,ExitCode,DerivedExitCode,Comment
 	expect {
-		-re "33:0\\|0:0\\|" {	# Job record
+		-re "$job_id\\|33:0\\|0:0\\|" {		# Job record
 			incr matches
 			exp_continue
 		}
-		-re "33:0\\|\\|" {	# Job.batch record
+		-re "$job_id.batch\\|33:0\\|\\|" {	# Batch script
 			incr matches
 			exp_continue
 		}
-		-re "0:0\\|\\|" {	# Step 0 record
+		-re "$job_id.extern\\|0:0\\|\\|" {	# Container, optional
+			exp_continue
+		}
+		-re "$job_id.($number)\\|0:0\\|\\|" {	# Step 0 record
 			incr matches
 			exp_continue
 		}
-		-re "0:0\\|\\|" {	# Step 1 record
+		-re "$job_id.($number)\\|0:0\\|\\|" {	# Step 1 record
 			incr matches
 			exp_continue
 		}
@@ -265,7 +270,7 @@ if { [test_account_storage] == 1 } {
 		}
 	}
 	if {$matches != 4} {
-		send_user "\nFAILURE: sacct of $job_id failed ($matches)\n"
+		send_user "\nFAILURE: sacct of $job_id failed ($matches != 4)\n"
 		exit 1
 	}
 }
diff --git a/testsuite/expect/test7.17 b/testsuite/expect/test7.17
index 500198fc6..78441a389 100755
--- a/testsuite/expect/test7.17
+++ b/testsuite/expect/test7.17
@@ -32,6 +32,7 @@ source ./globals
 set test_id     "7.17"
 set exit_code   0
 set test_prog   "test$test_id.prog"
+set cfgdir      [pwd]
 
 print_header $test_id
 
@@ -52,16 +53,155 @@ if {[test_aix]} {
 }
 exec $bin_chmod 700 $test_prog
 
-spawn ./$test_prog
+send_user "\n====test7.17.1====\n"
+set cpu_match 0
+spawn ./$test_prog "gpu:2" "$cfgdir" "/test7.17.1" 8
 expect {
-	-re "FAILURE" {
+	-re "failure" {
+		send_user "\nFAILURE: running test7.17.1\n"
 		set exit_code 1
 		exp_continue
 	}
+	-re "cpu_alloc=8" {
+		set cpu_match 1
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+if {$cpu_match != 1} {
+	send_user "\nFAILURE: number of CPUs is incorrect\n"
+	set exit_code 1
+}
+
+send_user "\n====test7.17.2====\n"
+set cpu_match 0
+spawn ./$test_prog "gpu:2" "$cfgdir" "/test7.17.2" 16
+expect {
+	-re "failure" {
+		send_user "\nFAILURE: running test7.17.2\n"
+		set exit_code 1
+		exp_continue
+	}
+	-re "cpu_alloc=16" {
+		set cpu_match 1
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+if {$cpu_match != 1} {
+	send_user "\nFAILURE: number of cpu is incorrect\n"
+	set exit_code 1
+}
+
+send_user "\n====test7.17.3====\n"
+set cpu_match 0
+spawn ./$test_prog "gpu:2" "$cfgdir" "/test7.17.3" 16
+expect {
+	-re "failure" {
+		send_user "\nFAILURE: running test7.17.3\n"
+		set exit_code 1
+		exp_continue
+	}
+	-re "cpu_alloc=16" {
+		set cpu_match 1
+		exp_continue
+	}
 	eof {
 		wait
 	}
 }
+if {$cpu_match != 1} {
+	send_user "\nFAILURE: number of cpu is incorrect should be 16\n"
+	set exit_code 1
+}
+
+send_user "\n====test7.17.4====\n"
+set cpu_match 0
+spawn ./$test_prog "gpu:2" "$cfgdir" "/test7.17.4" 16
+expect {
+	-re "failure" {
+		send_user "\nFAILURE: running test7.17.4\n"
+		set exit_code 1
+		exp_continue
+	}
+	-re "cpu_alloc=ALL" {
+		set cpu_match 1
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+if {$cpu_match != 1} {
+	send_user "\nFAILURE: number of cpu is incorrect, should be ALL\n"
+	set exit_code 1
+}
+
+#######################################
+# Bad tests
+#######################################
+
+# Run a job with invalid job allocation
+send_user "\n====test7.17.5====\n"
+set fail_match 0
+spawn ./$test_prog "craynetworks:100" "$cfgdir" "/test7.17.5" 16
+expect {
+	-re "failure" {
+		send_user "This error is expected\n"
+		set fail_match 1
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+
+if {$fail_match != 1} {
+	send_user "\nFAILURE: This test should have failed but did not\n"
+	set exit_code 1
+}
+
+# Run with no gres.conf file
+send_user "\n====test7.17.6====\n"
+set fail_match 0
+spawn ./$test_prog "gpu:2" "$cfgdir" "/test7.17.6" 0
+expect {
+	-re "failure" {
+		send_user "This error is expected\n"
+		set fail_match 1
+		exp_continue
+	}
+	eof {
+		wait
+	}
+}
+if {$fail_match != 1} {
+	send_user "\nFAILURE: This test should have failed but did not\n"
+	set exit_code 1
+}
+
+# Test with invalid job allocation
+send_user "\n====test7.17.7====\n"
+set fail_match 0
+spawn ./$test_prog "gpu:2" "$cfgdir" "/test7.17.7" 8
+expect {
+	-re "fatal" {
+		send_user "This error is expected\n"
+		set fail_match 1
+		#exp_continue
+	}
+	eof {
+		wait
+	}
+}
+if {$fail_match != 1} {
+	send_user "\nFAILURE: This test should have failed but did not\n"
+	set exit_code 1
+}
 
 if {$exit_code == 0} {
 	send_user "\nSUCCESS\n"
diff --git a/testsuite/expect/test7.17.prog.c b/testsuite/expect/test7.17.prog.c
index bcc7ea5d5..d79f71d0d 100644
--- a/testsuite/expect/test7.17.prog.c
+++ b/testsuite/expect/test7.17.prog.c
@@ -64,6 +64,14 @@ int main(int argc, char *argv[])
 	Buf buffer;
 	List job_gres_list = NULL, node_gres_list = NULL;
 	bitstr_t *cpu_bitmap;
+	char config_dir[10000], test[1000];
+	char slurm_conf[1000];
+
+	/* Setup slurm.conf and gres.conf test paths */
+	strcpy(config_dir, argv[2]);
+	strcat(config_dir, "/test7.17_configs");
+	strcpy(test, strcat(config_dir, argv[3]));
+	strcpy(slurm_conf,strcat(test, "/slurm.conf"));
 
 	/* Enable detailed logging for now */
 	opts.stderr_level = LOG_LEVEL_DEBUG;
@@ -72,44 +80,45 @@ int main(int argc, char *argv[])
 	/*
 	 * Logic normally executed by slurmd daemon
 	 */
+	setenv("SLURM_CONF", slurm_conf, 1);
 	rc = gres_plugin_init();
 	if (rc != SLURM_SUCCESS) {
-		slurm_perror("FAILURE: gres_plugin_init");
+		slurm_perror("failure: gres_plugin_init");
 		exit(1);
 	}
 
-	/* FIXME: Read values from slurm.conf? */
-	cpu_count = 32;
-	node_name = "jette";
-	rc = gres_plugin_node_config_load(cpu_count, node_name);
+	setenv("SLURM_CONFIG_DIR",config_dir, 1);
+
+	cpu_count = strtol(argv[4], NULL, 10);
+	node_name = "test_node";
+	rc = gres_plugin_node_config_load(cpu_count, node_name, NULL);
 	if (rc != SLURM_SUCCESS) {
-		slurm_perror("FAILURE: gres_plugin_node_config_load");
+		slurm_perror("failure: gres_plugin_node_config_load");
 		exit(1);
 	}
 
 	buffer = init_buf(1024);
 	rc = gres_plugin_node_config_pack(buffer);
 	if (rc != SLURM_SUCCESS) {
-		slurm_perror("FAILURE: gres_plugin_node_config_pack");
+		slurm_perror("failure: gres_plugin_node_config_pack");
 		exit(1);
 	}
 
 	/*
 	 * Logic normally executed by slurmctld daemon
 	 */
-	/* FIXME: Read values from slurm.conf */
-	orig_config = "craynetwork:4";
+	orig_config = "gpu:8";
 	rc = gres_plugin_init_node_config(node_name, orig_config,
 					  &node_gres_list);
 	if (rc != SLURM_SUCCESS) {
-		slurm_perror("FAILURE: gres_plugin_init_node_config");
+		slurm_perror("failure: gres_plugin_init_node_config");
 		exit(1);
 	}
 
 	set_buf_offset(buffer, 0);
 	rc = gres_plugin_node_config_unpack(buffer, node_name);
 	if (rc != SLURM_SUCCESS) {
-		slurm_perror("FAILURE: gres_plugin_node_config_unpack");
+		slurm_perror("failure: gres_plugin_node_config_unpack");
 		exit(1);
 	}
 
@@ -117,15 +126,16 @@ int main(int argc, char *argv[])
 					      &new_config, &node_gres_list,
 					      0, &reason_down);
 	if (rc != SLURM_SUCCESS) {
-		slurm_perror("FAILURE: gres_plugin_node_config_validate");
+		slurm_perror("failure: gres_plugin_node_config_validate");
 		exit(1);
 	}
 
-	if (argc == 2)
+	if (argc > 2)
 		job_config = argv[1];
+
 	rc = gres_plugin_job_state_validate(job_config, &job_gres_list);
 	if (rc != SLURM_SUCCESS) {
-		slurm_perror("FAILURE: gres_plugin_job_state_validate");
+		slurm_perror("failure: gres_plugin_job_state_validate");
 		exit(1);
 	}
 
@@ -133,6 +143,7 @@ int main(int argc, char *argv[])
 	gres_plugin_job_state_log(job_gres_list, job_id);
 
 	cpu_bitmap = bit_alloc(cpu_count);
+	bit_nset(cpu_bitmap, 0, cpu_count - 1);
 	cpu_alloc = gres_plugin_job_test(job_gres_list, node_gres_list, true,
 					 cpu_bitmap, 0, cpu_count - 1,
 					 job_id, node_name);
@@ -143,10 +154,10 @@ int main(int argc, char *argv[])
 
 	rc = gres_plugin_fini();
 	if (rc != SLURM_SUCCESS) {
-		slurm_perror("FAILURE: gres_plugin_fini");
+		slurm_perror("failure: gres_plugin_fini");
 		exit(1);
 	}
 
-	printf("Test ran to completion\n");
+	printf("Test %s ran to completion\n\n", argv[3]);
 	exit(0);
 }
diff --git a/testsuite/expect/test7.17_configs/test7.17.1/gres.conf b/testsuite/expect/test7.17_configs/test7.17.1/gres.conf
new file mode 100644
index 000000000..04551525a
--- /dev/null
+++ b/testsuite/expect/test7.17_configs/test7.17.1/gres.conf
@@ -0,0 +1,5 @@
+###############################################
+# Test gres.conf Semi compact format
+###############################################
+Name=gpu File=/dev/tty[0-3] CPUs=[0-3]
+Name=gpu File=/dev/tty[4-7] CPUs=[4-7]
diff --git a/testsuite/expect/test7.17_configs/test7.17.1/slurm.conf b/testsuite/expect/test7.17_configs/test7.17.1/slurm.conf
new file mode 100644
index 000000000..c49aed2d9
--- /dev/null
+++ b/testsuite/expect/test7.17_configs/test7.17.1/slurm.conf
@@ -0,0 +1,12 @@
+ControlMachine=test_machine
+ControlAddr=123.4.5.6
+
+ClusterName=test_cluster
+
+SlurmUser=root
+SlurmdUser=root
+GresTypes=gpu
+
+Nodename=DEFAULT Sockets=1 CoresPerSocket=4 ThreadsPerCore=2 gres=gpu:8
+NodeName=test_node[0-9] nodeaddr=localhost port=27001-27010
+
diff --git a/testsuite/expect/test7.17_configs/test7.17.2/gres.conf b/testsuite/expect/test7.17_configs/test7.17.2/gres.conf
new file mode 100644
index 000000000..07cd8390a
--- /dev/null
+++ b/testsuite/expect/test7.17_configs/test7.17.2/gres.conf
@@ -0,0 +1,7 @@
+###############################################
+# Test gres.conf Semi compact format
+###############################################
+Name=gpu File=/dev/tty[0-3] CPUs=0,1,2,3,4,5,6,7
+Name=gpu File=/dev/tty[4-7] CPUs=8,9,10,11,12,13,14,15
+
+
diff --git a/testsuite/expect/test7.17_configs/test7.17.2/slurm.conf b/testsuite/expect/test7.17_configs/test7.17.2/slurm.conf
new file mode 100644
index 000000000..c49aed2d9
--- /dev/null
+++ b/testsuite/expect/test7.17_configs/test7.17.2/slurm.conf
@@ -0,0 +1,12 @@
+ControlMachine=test_machine
+ControlAddr=123.4.5.6
+
+ClusterName=test_cluster
+
+SlurmUser=root
+SlurmdUser=root
+GresTypes=gpu
+
+Nodename=DEFAULT Sockets=1 CoresPerSocket=4 ThreadsPerCore=2 gres=gpu:8
+NodeName=test_node[0-9] nodeaddr=localhost port=27001-27010
+
diff --git a/testsuite/expect/test7.17_configs/test7.17.3/gres.conf b/testsuite/expect/test7.17_configs/test7.17.3/gres.conf
new file mode 100644
index 000000000..716965561
--- /dev/null
+++ b/testsuite/expect/test7.17_configs/test7.17.3/gres.conf
@@ -0,0 +1,11 @@
+###############################################
+# Test gres.conf expanded format
+###############################################
+Name=gpu File=/dev/tty0 CPUs=0,1,2,3,4,5,6,7
+Name=gpu File=/dev/tty1 CPUs=0,1,2,3,4,5,6,7
+Name=gpu File=/dev/tty2 CPUs=0,1,2,3,4,5,6,7
+Name=gpu File=/dev/tty3 CPUs=0,1,2,3,4,5,6,7
+Name=gpu File=/dev/tty4 CPUs=8,9,10,11,12,13,14,15
+Name=gpu File=/dev/tty5 CPUs=8,9,10,11,12,13,14,15
+Name=gpu File=/dev/tty6 CPUs=8,9,10,11,12,13,14,15
+Name=gpu File=/dev/tty7 CPUs=8,9,10,11,12,13,14,15
diff --git a/testsuite/expect/test7.17_configs/test7.17.3/slurm.conf b/testsuite/expect/test7.17_configs/test7.17.3/slurm.conf
new file mode 100644
index 000000000..c49aed2d9
--- /dev/null
+++ b/testsuite/expect/test7.17_configs/test7.17.3/slurm.conf
@@ -0,0 +1,12 @@
+ControlMachine=test_machine
+ControlAddr=123.4.5.6
+
+ClusterName=test_cluster
+
+SlurmUser=root
+SlurmdUser=root
+GresTypes=gpu
+
+Nodename=DEFAULT Sockets=1 CoresPerSocket=4 ThreadsPerCore=2 gres=gpu:8
+NodeName=test_node[0-9] nodeaddr=localhost port=27001-27010
+
diff --git a/testsuite/expect/test7.17_configs/test7.17.4/gres.conf b/testsuite/expect/test7.17_configs/test7.17.4/gres.conf
new file mode 100644
index 000000000..2a0071e6e
--- /dev/null
+++ b/testsuite/expect/test7.17_configs/test7.17.4/gres.conf
@@ -0,0 +1,5 @@
+###############################################
+# Test gres.conf Semi compact format
+###############################################
+Name=gpu File=/dev/tty[0-3]
+Name=gpu File=/dev/tty[4-7]
diff --git a/testsuite/expect/test7.17_configs/test7.17.4/slurm.conf b/testsuite/expect/test7.17_configs/test7.17.4/slurm.conf
new file mode 100644
index 000000000..c49aed2d9
--- /dev/null
+++ b/testsuite/expect/test7.17_configs/test7.17.4/slurm.conf
@@ -0,0 +1,12 @@
+ControlMachine=test_machine
+ControlAddr=123.4.5.6
+
+ClusterName=test_cluster
+
+SlurmUser=root
+SlurmdUser=root
+GresTypes=gpu
+
+Nodename=DEFAULT Sockets=1 CoresPerSocket=4 ThreadsPerCore=2 gres=gpu:8
+NodeName=test_node[0-9] nodeaddr=localhost port=27001-27010
+
diff --git a/testsuite/expect/test7.17_configs/test7.17.5/slurm.conf b/testsuite/expect/test7.17_configs/test7.17.5/slurm.conf
new file mode 100644
index 000000000..c49aed2d9
--- /dev/null
+++ b/testsuite/expect/test7.17_configs/test7.17.5/slurm.conf
@@ -0,0 +1,12 @@
+ControlMachine=test_machine
+ControlAddr=123.4.5.6
+
+ClusterName=test_cluster
+
+SlurmUser=root
+SlurmdUser=root
+GresTypes=gpu
+
+Nodename=DEFAULT Sockets=1 CoresPerSocket=4 ThreadsPerCore=2 gres=gpu:8
+NodeName=test_node[0-9] nodeaddr=localhost port=27001-27010
+
diff --git a/testsuite/expect/test7.17_configs/test7.17.6/gres.conf b/testsuite/expect/test7.17_configs/test7.17.6/gres.conf
new file mode 100644
index 000000000..04551525a
--- /dev/null
+++ b/testsuite/expect/test7.17_configs/test7.17.6/gres.conf
@@ -0,0 +1,5 @@
+###############################################
+# Test gres.conf Semi compact format
+###############################################
+Name=gpu File=/dev/tty[0-3] CPUs=[0-3]
+Name=gpu File=/dev/tty[4-7] CPUs=[4-7]
diff --git a/testsuite/expect/test7.17_configs/test7.17.6/slurm.conf b/testsuite/expect/test7.17_configs/test7.17.6/slurm.conf
new file mode 100644
index 000000000..97f5783e9
--- /dev/null
+++ b/testsuite/expect/test7.17_configs/test7.17.6/slurm.conf
@@ -0,0 +1,12 @@
+ControlMachine=test_machine
+ControlAddr=123.4.5.6
+
+ClusterName=test_cluster
+
+SlurmUser=root
+SlurmdUser=root
+# GresTypes=gpu
+
+Nodename=DEFAULT Sockets=1 CoresPerSocket=4 ThreadsPerCore=2
+NodeName=test_node[0-9] nodeaddr=localhost port=27001-27010
+
diff --git a/testsuite/expect/test7.17_configs/test7.17.7/gres.conf b/testsuite/expect/test7.17_configs/test7.17.7/gres.conf
new file mode 100644
index 000000000..8ac2fca18
--- /dev/null
+++ b/testsuite/expect/test7.17_configs/test7.17.7/gres.conf
@@ -0,0 +1,5 @@
+###############################################
+# Test gres.conf Semi compact format
+###############################################
+Name=gpu File=/dev/tty[0-3] CPUs=[0-10000]
+Name=gpu File=/dev/tty[4-7] CPUs=[4-7]
diff --git a/testsuite/expect/test7.17_configs/test7.17.7/slurm.conf b/testsuite/expect/test7.17_configs/test7.17.7/slurm.conf
new file mode 100644
index 000000000..c49aed2d9
--- /dev/null
+++ b/testsuite/expect/test7.17_configs/test7.17.7/slurm.conf
@@ -0,0 +1,12 @@
+ControlMachine=test_machine
+ControlAddr=123.4.5.6
+
+ClusterName=test_cluster
+
+SlurmUser=root
+SlurmdUser=root
+GresTypes=gpu
+
+Nodename=DEFAULT Sockets=1 CoresPerSocket=4 ThreadsPerCore=2 gres=gpu:8
+NodeName=test_node[0-9] nodeaddr=localhost port=27001-27010
+
diff --git a/testsuite/expect/test8.1 b/testsuite/expect/test8.1
index 351b9c425..2e1bbdf82 100755
--- a/testsuite/expect/test8.1
+++ b/testsuite/expect/test8.1
@@ -71,9 +71,10 @@ if {![string compare $type "Q"]} {
 # Build input script file
 #
 exec $bin_rm -f $file_in
-exec echo "#!$bin_bash"         >$file_in
-exec echo "$bin_sleep 1 &"     >>$file_in
-exec $bin_chmod 700 $file_in
+
+make_bash_script $file_in "
+sleep 10
+"
 
 #
 # Submit a slurm job using various sbatch options for blue gene
diff --git a/testsuite/expect/test8.2 b/testsuite/expect/test8.2
index d3a0f4fca..b4234ff0d 100755
--- a/testsuite/expect/test8.2
+++ b/testsuite/expect/test8.2
@@ -110,6 +110,9 @@ if {$job_id == 0} {
 #
 # Confirm parameters passed into SLURM
 #
+# Sleep a bit for job to catch up
+sleep 1
+
 set matches 0
 spawn $scontrol show job $job_id
 expect {
diff --git a/testsuite/expect/test8.21 b/testsuite/expect/test8.21
index d7b4f40ce..acaf4f654 100755
--- a/testsuite/expect/test8.21
+++ b/testsuite/expect/test8.21
@@ -340,7 +340,7 @@ for {set dim1 $job_start1} {$dim1 <= $job_fini1} {incr dim1} {
 }
 
 if {$exit_code == 0} {
-	exec $bin_rm -f $file_prog
+	# DO NOT REMOVE the test8.21.bash file ($file_prog)
 	send_user "\nSUCCESS\n"
 } else {
 	cancel_job $job_id
diff --git a/testsuite/expect/test8.21.bash b/testsuite/expect/test8.21.bash
index 8fb666d19..bd480e9bb 100755
--- a/testsuite/expect/test8.21.bash
+++ b/testsuite/expect/test8.21.bash
@@ -1,5 +1,28 @@
 #!/usr/bin/env bash
-
+############################################################################
+# Portion of Slurm test suite
+############################################################################
+# Copyright (C) 2015 SchedMD LLC
+# Written by Nathan Yee, SchedMD
+#
+# This file is part of SLURM, a resource management program.
+# For details, see <http://slurm.schedmd.com/>.
+# Please also read the included file: DISCLAIMER.
+#
+# SLURM is free software; you can redistribute it and/or modify it under
+# the terms of the GNU General Public License as published by the Free
+# Software Foundation; either version 2 of the License, or (at your option)
+# any later version.
+#
+# SLURM is distributed in the hope that it will be useful, but WITHOUT ANY
+# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
+# FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more
+# details.
+#
+# You should have received a copy of the GNU General Public License along
+# with SLURM; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301  USA.
+############################################################################
 if [ $# -ne 5 ]; then
 	echo "test8.21.bash <srun_path> <squeue_path> <job_id> <job_size> <mode:1|2?"
 	exit 1
diff --git a/testsuite/expect/test8.4 b/testsuite/expect/test8.4
index a09bd15ab..84d153f6c 100755
--- a/testsuite/expect/test8.4
+++ b/testsuite/expect/test8.4
@@ -148,6 +148,7 @@ if {$no_start != 0} {
 #
 if {[wait_for_job $job_id "DONE"] != 0} {
 	send_user "\nFAILURE: waiting for job to complete\n"
+	cancel_job $job_id
 	set exit_code 1
 }
 
diff --git a/testsuite/slurm_unit/Makefile.in b/testsuite/slurm_unit/Makefile.in
index f15cb3c4b..acf746293 100644
--- a/testsuite/slurm_unit/Makefile.in
+++ b/testsuite/slurm_unit/Makefile.in
@@ -96,6 +96,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -104,10 +105,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -120,7 +123,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -249,6 +252,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -298,8 +303,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -318,6 +327,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -361,6 +373,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -384,6 +397,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/testsuite/slurm_unit/api/Makefile.in b/testsuite/slurm_unit/api/Makefile.in
index e777cba9b..fc317b820 100644
--- a/testsuite/slurm_unit/api/Makefile.in
+++ b/testsuite/slurm_unit/api/Makefile.in
@@ -99,6 +99,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -107,10 +108,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -123,7 +126,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -487,6 +490,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -536,8 +541,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -556,6 +565,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -599,6 +611,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -622,6 +635,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/testsuite/slurm_unit/api/manual/Makefile.in b/testsuite/slurm_unit/api/manual/Makefile.in
index 2b63e14f3..ca408a06d 100644
--- a/testsuite/slurm_unit/api/manual/Makefile.in
+++ b/testsuite/slurm_unit/api/manual/Makefile.in
@@ -101,6 +101,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -109,10 +110,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -125,7 +128,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -274,6 +277,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -323,8 +328,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -343,6 +352,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -386,6 +398,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -409,6 +422,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
diff --git a/testsuite/slurm_unit/common/Makefile.in b/testsuite/slurm_unit/common/Makefile.in
index b6bb1cdd1..e89ef0a60 100644
--- a/testsuite/slurm_unit/common/Makefile.in
+++ b/testsuite/slurm_unit/common/Makefile.in
@@ -103,6 +103,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_bluegene.m4 \
 	$(top_srcdir)/auxdir/x_ac_cflags.m4 \
 	$(top_srcdir)/auxdir/x_ac_cray.m4 \
+	$(top_srcdir)/auxdir/x_ac_curl.m4 \
 	$(top_srcdir)/auxdir/x_ac_databases.m4 \
 	$(top_srcdir)/auxdir/x_ac_debug.m4 \
 	$(top_srcdir)/auxdir/x_ac_dlfcn.m4 \
@@ -111,10 +112,12 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_gpl_licensed.m4 \
 	$(top_srcdir)/auxdir/x_ac_hwloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_iso.m4 \
+	$(top_srcdir)/auxdir/x_ac_json.m4 \
 	$(top_srcdir)/auxdir/x_ac_lua.m4 \
 	$(top_srcdir)/auxdir/x_ac_man2html.m4 \
 	$(top_srcdir)/auxdir/x_ac_munge.m4 \
 	$(top_srcdir)/auxdir/x_ac_ncurses.m4 \
+	$(top_srcdir)/auxdir/x_ac_netloc.m4 \
 	$(top_srcdir)/auxdir/x_ac_nrt.m4 \
 	$(top_srcdir)/auxdir/x_ac_ofed.m4 \
 	$(top_srcdir)/auxdir/x_ac_pam.m4 \
@@ -127,7 +130,7 @@ am__aclocal_m4_deps = $(top_srcdir)/auxdir/ax_lib_hdf5.m4 \
 	$(top_srcdir)/auxdir/x_ac_sgi_job.m4 \
 	$(top_srcdir)/auxdir/x_ac_slurm_ssl.m4 \
 	$(top_srcdir)/auxdir/x_ac_sun_const.m4 \
-	$(top_srcdir)/auxdir/x_ac_xcpu.m4 $(top_srcdir)/configure.ac
+	$(top_srcdir)/configure.ac
 am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
 	$(ACLOCAL_M4)
 mkinstalldirs = $(install_sh) -d
@@ -480,6 +483,8 @@ CXXCPP = @CXXCPP@
 CXXDEPMODE = @CXXDEPMODE@
 CXXFLAGS = @CXXFLAGS@
 CYGPATH_W = @CYGPATH_W@
+DATAWARP_CPPFLAGS = @DATAWARP_CPPFLAGS@
+DATAWARP_LDFLAGS = @DATAWARP_LDFLAGS@
 DEFS = @DEFS@
 DEPDIR = @DEPDIR@
 DLLTOOL = @DLLTOOL@
@@ -529,8 +534,12 @@ INSTALL_DATA = @INSTALL_DATA@
 INSTALL_PROGRAM = @INSTALL_PROGRAM@
 INSTALL_SCRIPT = @INSTALL_SCRIPT@
 INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
+JSON_CPPFLAGS = @JSON_CPPFLAGS@
+JSON_LDFLAGS = @JSON_LDFLAGS@
 LD = @LD@
 LDFLAGS = @LDFLAGS@
+LIBCURL = @LIBCURL@
+LIBCURL_CPPFLAGS = @LIBCURL_CPPFLAGS@
 LIBOBJS = @LIBOBJS@
 LIBS = @LIBS@
 LIBTOOL = @LIBTOOL@
@@ -549,6 +558,9 @@ MUNGE_LIBS = @MUNGE_LIBS@
 MYSQL_CFLAGS = @MYSQL_CFLAGS@
 MYSQL_LIBS = @MYSQL_LIBS@
 NCURSES = @NCURSES@
+NETLOC_CPPFLAGS = @NETLOC_CPPFLAGS@
+NETLOC_LDFLAGS = @NETLOC_LDFLAGS@
+NETLOC_LIBS = @NETLOC_LIBS@
 NM = @NM@
 NMEDIT = @NMEDIT@
 NRT_CPPFLAGS = @NRT_CPPFLAGS@
@@ -592,6 +604,7 @@ SEMAPHORE_LIBS = @SEMAPHORE_LIBS@
 SEMAPHORE_SOURCES = @SEMAPHORE_SOURCES@
 SET_MAKE = @SET_MAKE@
 SHELL = @SHELL@
+SLEEP_CMD = @SLEEP_CMD@
 SLURMCTLD_PORT = @SLURMCTLD_PORT@
 SLURMCTLD_PORT_COUNT = @SLURMCTLD_PORT_COUNT@
 SLURMDBD_PORT = @SLURMDBD_PORT@
@@ -615,6 +628,7 @@ STRIP = @STRIP@
 SUCMD = @SUCMD@
 UTIL_LIBS = @UTIL_LIBS@
 VERSION = @VERSION@
+_libcurl_config = @_libcurl_config@
 abs_builddir = @abs_builddir@
 abs_srcdir = @abs_srcdir@
 abs_top_builddir = @abs_top_builddir@
-- 
GitLab